1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19 \f
20
21 #define _GNU_SOURCE
22
23 #include "libguile/boehm-gc.h"
24 #include "libguile/_scm.h"
25
26 #if HAVE_UNISTD_H
27 #include <unistd.h>
28 #endif
29 #include <stdio.h>
30 #include <assert.h>
31
32 #ifdef HAVE_STRING_H
33 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
34 #endif
35
36 #if HAVE_SYS_TIME_H
37 #include <sys/time.h>
38 #endif
39
40 #include "libguile/validate.h"
41 #include "libguile/root.h"
42 #include "libguile/eval.h"
43 #include "libguile/async.h"
44 #include "libguile/ports.h"
45 #include "libguile/threads.h"
46 #include "libguile/dynwind.h"
47 #include "libguile/iselect.h"
48 #include "libguile/fluids.h"
49 #include "libguile/continuations.h"
50 #include "libguile/gc.h"
51 #include "libguile/init.h"
52
53 #ifdef __MINGW32__
54 #ifndef ETIMEDOUT
55 # define ETIMEDOUT WSAETIMEDOUT
56 #endif
57 # include <fcntl.h>
58 # include <process.h>
59 # define pipe(fd) _pipe (fd, 256, O_BINARY)
60 #endif /* __MINGW32__ */
61
62 /*** Queues */
63
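/* A queue is represented as a pair: the cdr of the pair is the list
   of enqueued elements (oldest first) and the car points to the last
   pair of that list, so that enqueue can append in constant time.  An
   empty queue has '() in both fields.  (This note merely documents
   the representation used by make_queue, enqueue, remqueue and
   dequeue below.)
*/
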
64 /* Make an empty queue data structure.
65 */
66 static SCM
67 make_queue ()
68 {
69 return scm_cons (SCM_EOL, SCM_EOL);
70 }
71
72 /* Put T at the back of Q and return a handle that can be used with
73 remqueue to remove T from Q again.
74 */
75 static SCM
76 enqueue (SCM q, SCM t)
77 {
78 SCM c = scm_cons (t, SCM_EOL);
79 if (scm_is_null (SCM_CDR (q)))
80 SCM_SETCDR (q, c);
81 else
82 SCM_SETCDR (SCM_CAR (q), c);
83 SCM_SETCAR (q, c);
84 return c;
85 }
86
87 /* Remove the element that the handle C refers to from the queue Q. C
88 must have been returned from a call to enqueue. The return value
89 is zero when the element referred to by C has already been removed.
90 Otherwise, 1 is returned.
91 */
92 static int
93 remqueue (SCM q, SCM c)
94 {
95 SCM p, prev = q;
96 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
97 {
98 if (scm_is_eq (p, c))
99 {
100 if (scm_is_eq (c, SCM_CAR (q)))
101 SCM_SETCAR (q, SCM_CDR (c));
102 SCM_SETCDR (prev, SCM_CDR (c));
103 return 1;
104 }
105 prev = p;
106 }
107 return 0;
108 }
109
110 /* Remove the front-most element from the queue Q and return it.
111 Return SCM_BOOL_F when Q is empty.
112 */
113 static SCM
114 dequeue (SCM q)
115 {
116 SCM c = SCM_CDR (q);
117 if (scm_is_null (c))
118 return SCM_BOOL_F;
119 else
120 {
121 SCM_SETCDR (q, SCM_CDR (c));
122 if (scm_is_null (SCM_CDR (q)))
123 SCM_SETCAR (q, SCM_EOL);
124 return SCM_CAR (c);
125 }
126 }
127
128 /*** Thread smob routines */
129
130
131 static int
132 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
133 {
134 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
135 scm_puts ("#<thread ", port);
136 scm_uintprint ((size_t)t->pthread, 10, port);
137 scm_puts (" (", port);
138 scm_uintprint ((scm_t_bits)t, 16, port);
139 scm_puts (")>", port);
140 return 1;
141 }
142
143 static size_t
144 thread_free (SCM obj)
145 {
146 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
147 assert (t->exited);
148 scm_gc_free (t, sizeof (*t), "thread");
149 return 0;
150 }
151
152 /*** Blocking on queues. */
153
154 /* See also scm_i_queue_async_cell for how such a block is
155    interrupted.
156 */
157
158 /* Put the current thread on QUEUE and go to sleep, waiting for it to
159 be woken up by a call to 'unblock_from_queue', or to be
160 interrupted. Upon return of this function, the current thread is
161 no longer on QUEUE, even when the sleep has been interrupted.
162
163 The QUEUE data structure is assumed to be protected by MUTEX and
164 the caller of block_self must hold MUTEX. It will be atomically
165 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
166
167 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
168 as MUTEX is needed.
169
170 When WAITTIME is not NULL, the sleep will be aborted at that time.
171
172 The return value of block_self is an errno value. It will be zero
173 when the sleep has been successfully completed by a call to
174 unblock_from_queue, EINTR when it has been interrupted by the
175 delivery of a system async, and ETIMEDOUT when the timeout has
176 expired.
177
178 The system asyncs themselves are not executed by block_self.
179 */
180 static int
181 block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
182 const scm_t_timespec *waittime)
183 {
184 scm_i_thread *t = SCM_I_CURRENT_THREAD;
185 SCM q_handle;
186 int err;
187
188 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
189 err = EINTR;
190 else
191 {
192 t->block_asyncs++;
193 q_handle = enqueue (queue, t->handle);
194 if (waittime == NULL)
195 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
196 else
197 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
198
199 /* When we are still on QUEUE, we have been interrupted. We
200 report this only when no other error (such as a timeout) has
201 happened above.
202 */
203 if (remqueue (queue, q_handle) && err == 0)
204 err = EINTR;
205 t->block_asyncs--;
206 scm_i_reset_sleep (t);
207 }
208
209 return err;
210 }
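
/* A sketch of the calling convention that the users of block_self in
   this file (fat_mutex_lock, scm_join_thread, fat_cond_timedwait)
   follow; 'admin_mutex' and 'condition_holds' are placeholders, not
   real identifiers:

       scm_i_scm_pthread_mutex_lock (&admin_mutex);
       while (!condition_holds)
         {
           block_self (queue, object, &admin_mutex, NULL);
           if (condition_holds)
             break;
           // Interrupted: drop the mutex, run pending asyncs, retry.
           scm_i_pthread_mutex_unlock (&admin_mutex);
           SCM_TICK;
           scm_i_scm_pthread_mutex_lock (&admin_mutex);
         }
       scm_i_pthread_mutex_unlock (&admin_mutex);
*/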
211
212 /* Wake up the first thread on QUEUE, if any. The caller must hold
213 the mutex that protects QUEUE. The awoken thread is returned, or
214 #f when the queue was empty.
215 */
216 static SCM
217 unblock_from_queue (SCM queue)
218 {
219 SCM thread = dequeue (queue);
220 if (scm_is_true (thread))
221 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
222 return thread;
223 }
224
225 /* Getting into and out of guile mode.
226 */
227
228 /* Ken Raeburn observes that the implementation of suspend and resume
229 (and the things that build on top of them) are very likely not
230    correct (see below).  We will need to fix this eventually, and that's
231 why scm_leave_guile/scm_enter_guile are not exported in the API.
232
233 Ken writes:
234
235 Consider this sequence:
236
237 Function foo, called in Guile mode, calls suspend (maybe indirectly
238 through scm_leave_guile), which does this:
239
240 // record top of stack for the GC
241 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
242 var 't'
243 // save registers.
244 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
245 setjmp (t->regs); // here's most of the magic
246
247 ... and returns.
248
249 Function foo has a SCM value X, a handle on a non-immediate object, in
250 a caller-saved register R, and it's the only reference to the object
251 currently.
252
253 The compiler wants to use R in suspend, so it pushes the current
254 value, X, into a stack slot which will be reloaded on exit from
255 suspend; then it loads stuff into R and goes about its business. The
256 setjmp call saves (some of) the current registers, including R, which
257 no longer contains X. (This isn't a problem for a normal
258 setjmp/longjmp situation, where longjmp would be called before
259 setjmp's caller returns; the old value for X would be loaded back from
260 the stack after the longjmp, before the function returned.)
261
262 So, suspend returns, loading X back into R (and invalidating the jump
263 buffer) in the process. The caller foo then goes off and calls a
264 bunch of other functions out of Guile mode, occasionally storing X on
265 the stack again, but, say, much deeper on the stack than suspend's
266 stack frame went, and the stack slot where suspend had written X has
267 long since been overwritten with other values.
268
269 Okay, nothing actively broken so far. Now, let garbage collection
270 run, triggered by another thread.
271
272 The thread calling foo is out of Guile mode at the time, so the
273 garbage collector just scans a range of stack addresses. Too bad that
274 X isn't stored there. So the pointed-to storage goes onto the free
275 list, and I think you can see where things go from there.
276
277 Is there anything I'm missing that'll prevent this scenario from
278 happening? I mean, aside from, "well, suspend and scm_leave_guile
279 don't have many local variables, so they probably won't need to save
280 any registers on most systems, so we hope everything will wind up in
281 the jump buffer and we'll just get away with it"?
282
283 (And, going the other direction, if scm_leave_guile and suspend push
284 the stack pointer over onto a new page, and foo doesn't make further
285 function calls and thus the stack pointer no longer includes that
286 page, are we guaranteed that the kernel cannot release the now-unused
287 stack page that contains the top-of-stack pointer we just saved? I
288 don't know if any OS actually does that. If it does, we could get
289 faults in garbage collection.)
290
291 I don't think scm_without_guile has to have this problem, as it gets
292 more control over the stack handling -- but it should call setjmp
293 itself. I'd probably try something like:
294
295 // record top of stack for the GC
296 t->top = SCM_STACK_PTR (&t);
297 // save registers.
298 SCM_FLUSH_REGISTER_WINDOWS;
299 setjmp (t->regs);
300 res = func(data);
301 scm_enter_guile (t);
302
303 ... though even that's making some assumptions about the stack
304 ordering of local variables versus caller-saved registers.
305
306 For something like scm_leave_guile to work, I don't think it can just
307 rely on invalidated jump buffers. A valid jump buffer, and a handle
308 on the stack state at the point when the jump buffer was initialized,
309 together, would work fine, but I think then we're talking about macros
310 invoking setjmp in the caller's stack frame, and requiring that the
311 caller of scm_leave_guile also call scm_enter_guile before returning,
312 kind of like pthread_cleanup_push/pop calls that have to be paired up
313 in a function. (In fact, the pthread ones have to be paired up
314 syntactically, as if they might expand to a compound statement
315 incorporating the user's code, and invoking a compiler's
316 exception-handling primitives. Which might be something to think
317 about for cases where Guile is used with C++ exceptions or
318 pthread_cancel.)
319 */
320
321 scm_i_pthread_key_t scm_i_thread_key;
322
323 static void
324 resume (scm_i_thread *t)
325 {
326 t->top = NULL;
327 if (t->clear_freelists_p)
328 {
329 *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
330 *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
331 t->clear_freelists_p = 0;
332 }
333 }
334
335 typedef void* scm_t_guile_ticket;
336
337 static void
338 scm_enter_guile (scm_t_guile_ticket ticket)
339 {
340 scm_i_thread *t = (scm_i_thread *)ticket;
341 if (t)
342 {
343 scm_i_pthread_mutex_lock (&t->heap_mutex);
344 resume (t);
345 }
346 }
347
348 static scm_i_thread *
349 suspend (void)
350 {
351 scm_i_thread *t = SCM_I_CURRENT_THREAD;
352
353 /* record top of stack for the GC */
354 t->top = SCM_STACK_PTR (&t);
355 /* save registers. */
356 SCM_FLUSH_REGISTER_WINDOWS;
357 setjmp (t->regs);
358 return t;
359 }
360
361 static scm_t_guile_ticket
362 scm_leave_guile ()
363 {
364 scm_i_thread *t = suspend ();
365 scm_i_pthread_mutex_unlock (&t->heap_mutex);
366 return (scm_t_guile_ticket) t;
367 }
368
369 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
370 static scm_i_thread *all_threads = NULL;
371 static int thread_count;
372
373 static SCM scm_i_default_dynamic_state;
374
375 /* Perform first stage of thread initialisation, in non-guile mode.
376 */
377 static void
378 guilify_self_1 (SCM_STACKITEM *base)
379 {
380 scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");
381
382 t->pthread = scm_i_pthread_self ();
383 t->handle = SCM_BOOL_F;
384 t->result = SCM_BOOL_F;
385 t->join_queue = SCM_EOL;
386 t->dynamic_state = SCM_BOOL_F;
387 t->dynwinds = SCM_EOL;
388 t->active_asyncs = SCM_EOL;
389 t->block_asyncs = 1;
390 t->pending_asyncs = 1;
391 t->last_debug_frame = NULL;
392 t->base = base;
393 t->continuation_root = SCM_EOL;
394 t->continuation_base = base;
395 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
396 t->sleep_mutex = NULL;
397 t->sleep_object = SCM_BOOL_F;
398 t->sleep_fd = -1;
399 /* XXX - check for errors. */
400 pipe (t->sleep_pipe);
401 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
402 t->clear_freelists_p = 0;
403 t->gc_running_p = 0;
404 t->current_mark_stack_ptr = NULL;
405 t->current_mark_stack_limit = NULL;
406 t->exited = 0;
407
408 t->freelist = SCM_EOL;
409 t->freelist2 = SCM_EOL;
410 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
411 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
412
413 scm_i_pthread_setspecific (scm_i_thread_key, t);
414
415 scm_i_pthread_mutex_lock (&t->heap_mutex);
416
417 scm_i_pthread_mutex_lock (&thread_admin_mutex);
418 t->next_thread = all_threads;
419 all_threads = t;
420 thread_count++;
421 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
422 }
423
424 /* Perform second stage of thread initialisation, in guile mode.
425 */
426 static void
427 guilify_self_2 (SCM parent)
428 {
429 scm_i_thread *t = SCM_I_CURRENT_THREAD;
430
431 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
432
433 t->continuation_root = scm_cons (t->handle, SCM_EOL);
434 t->continuation_base = t->base;
435
436 if (scm_is_true (parent))
437 t->dynamic_state = scm_make_dynamic_state (parent);
438 else
439 t->dynamic_state = scm_i_make_initial_dynamic_state ();
440
441 t->join_queue = make_queue ();
442 t->block_asyncs = 0;
443 }
444
445 /* Perform thread tear-down, in guile mode.
446 */
447 static void *
448 do_thread_exit (void *v)
449 {
450 scm_i_thread *t = (scm_i_thread *)v;
451
452 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
453
454 t->exited = 1;
455 close (t->sleep_pipe[0]);
456 close (t->sleep_pipe[1]);
457 while (scm_is_true (unblock_from_queue (t->join_queue)))
458 ;
459
460 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
461 return NULL;
462 }
463
464 static void
465 on_thread_exit (void *v)
466 {
467 scm_i_thread *t = (scm_i_thread *)v, **tp;
468
469 scm_i_pthread_setspecific (scm_i_thread_key, v);
470
471 /* Unblocking the joining threads needs to happen in guile mode
472 since the queue is a SCM data structure.
473 */
474 scm_with_guile (do_thread_exit, v);
475
476 /* Removing ourself from the list of all threads needs to happen in
477 non-guile mode since all SCM values on our stack become
478 unprotected once we are no longer in the list.
479 */
480 scm_leave_guile ();
481 scm_i_pthread_mutex_lock (&thread_admin_mutex);
482 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
483 if (*tp == t)
484 {
485 *tp = t->next_thread;
486 break;
487 }
488 thread_count--;
489 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
490
491 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
492 }
493
494 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
495
496 static void
497 init_thread_key (void)
498 {
499 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
500 }
501
502 /* Perform any initializations necessary to bring the current thread
503 into guile mode, initializing Guile itself, if necessary.
504
505 BASE is the stack base to use with GC.
506
507    PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
508 which case the default dynamic state is used.
509
510 Return zero when the thread was in guile mode already; otherwise
511 return 1.
512 */
513
514 static int
515 scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
516 {
517 scm_i_thread *t;
518
519 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
520
521 if ((t = SCM_I_CURRENT_THREAD) == NULL)
522 {
523 /* This thread has not been guilified yet.
524 */
525
526 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
527 if (scm_initialized_p == 0)
528 {
529 /* First thread ever to enter Guile. Run the full
530 initialization.
531 */
532 scm_i_init_guile (base);
533 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
534 }
535 else
536 {
537 /* Guile is already initialized, but this thread enters it for
538 the first time. Only initialize this thread.
539 */
540 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
541 guilify_self_1 (base);
542 guilify_self_2 (parent);
543 }
544 return 1;
545 }
546 else if (t->top)
547 {
548 /* This thread is already guilified but not in guile mode, just
549 resume it.
550
551 XXX - base might be lower than when this thread was first
552 guilified.
553 */
554 scm_enter_guile ((scm_t_guile_ticket) t);
555 return 1;
556 }
557 else
558 {
559 /* Thread is already in guile mode. Nothing to do.
560 */
561 return 0;
562 }
563 }
564
565 #if SCM_USE_PTHREAD_THREADS
566 /* pthread_getattr_np not available on MacOS X and Solaris 10. */
567 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
568
569 #define HAVE_GET_THREAD_STACK_BASE
570
571 static SCM_STACKITEM *
572 get_thread_stack_base ()
573 {
574 pthread_attr_t attr;
575 void *start, *end;
576 size_t size;
577
578 pthread_getattr_np (pthread_self (), &attr);
579 pthread_attr_getstack (&attr, &start, &size);
580 end = (char *)start + size;
581
582 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
583 for the main thread, but we can use scm_get_stack_base in that
584 case.
585 */
586
587 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
588 if ((void *)&attr < start || (void *)&attr >= end)
589 return scm_get_stack_base ();
590 else
591 #endif
592 {
593 #if SCM_STACK_GROWS_UP
594 return start;
595 #else
596 return end;
597 #endif
598 }
599 }
600
601 #endif /* HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP */
602
603 #else /* !SCM_USE_PTHREAD_THREADS */
604
605 #define HAVE_GET_THREAD_STACK_BASE
606
607 static SCM_STACKITEM *
608 get_thread_stack_base ()
609 {
610 return scm_get_stack_base ();
611 }
612
613 #endif /* !SCM_USE_PTHREAD_THREADS */
614
615 #ifdef HAVE_GET_THREAD_STACK_BASE
616
617 void
618 scm_init_guile ()
619 {
620 scm_i_init_thread_for_guile (get_thread_stack_base (),
621 scm_i_default_dynamic_state);
622 }
623
624 #endif
625
626 void *
627 scm_with_guile (void *(*func)(void *), void *data)
628 {
629 return scm_i_with_guile_and_parent (func, data,
630 scm_i_default_dynamic_state);
631 }
632
633 void *
634 scm_i_with_guile_and_parent (void *(*func)(void *), void *data,
635 SCM parent)
636 {
637 void *res;
638 int really_entered;
639 SCM_STACKITEM base_item;
640 really_entered = scm_i_init_thread_for_guile (&base_item, parent);
641 res = scm_c_with_continuation_barrier (func, data);
642 if (really_entered)
643 scm_leave_guile ();
644 return res;
645 }
646
647 void *
648 scm_without_guile (void *(*func)(void *), void *data)
649 {
650 void *res;
651 scm_t_guile_ticket t;
652 t = scm_leave_guile ();
653 res = func (data);
654 scm_enter_guile (t);
655 return res;
656 }
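
/* Illustrative use of scm_without_guile (a sketch, not code from this
   file): wrap a potentially blocking system call so that the thread
   leaves guile mode and does not hold up the garbage collector while
   it sleeps.  'struct my_read_args' and 'do_blocking_read' are
   hypothetical names supplied by the caller.

       static void *
       do_blocking_read (void *data)
       {
         struct my_read_args *a = data;
         a->nread = read (a->fd, a->buf, a->count);
         return NULL;
       }

       ...
       scm_without_guile (do_blocking_read, &args);
*/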
657
658 /*** Thread creation */
659
660 typedef struct {
661 SCM parent;
662 SCM thunk;
663 SCM handler;
664 SCM thread;
665 scm_i_pthread_mutex_t mutex;
666 scm_i_pthread_cond_t cond;
667 } launch_data;
668
669 static void *
670 really_launch (void *d)
671 {
672 launch_data *data = (launch_data *)d;
673 SCM thunk = data->thunk, handler = data->handler;
674 scm_i_thread *t;
675
676 t = SCM_I_CURRENT_THREAD;
677
678 scm_i_scm_pthread_mutex_lock (&data->mutex);
679 data->thread = scm_current_thread ();
680 scm_i_pthread_cond_signal (&data->cond);
681 scm_i_pthread_mutex_unlock (&data->mutex);
682
683 if (SCM_UNBNDP (handler))
684 t->result = scm_call_0 (thunk);
685 else
686 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
687
688 return 0;
689 }
690
691 static void *
692 launch_thread (void *d)
693 {
694 launch_data *data = (launch_data *)d;
695 scm_i_pthread_detach (scm_i_pthread_self ());
696 scm_i_with_guile_and_parent (really_launch, d, data->parent);
697 return NULL;
698 }
699
700 SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
701 (SCM thunk, SCM handler),
702 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
703 "returning a new thread object representing the thread. The procedure\n"
704 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
705 "\n"
706 "When @var{handler} is specified, then @var{thunk} is called from\n"
707 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
708 "handler. This catch is established inside the continuation barrier.\n"
709 "\n"
710 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
711 "the @emph{exit value} of the thread and the thread is terminated.")
712 #define FUNC_NAME s_scm_call_with_new_thread
713 {
714 launch_data data;
715 scm_i_pthread_t id;
716 int err;
717
718 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
719 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
720 handler, SCM_ARG2, FUNC_NAME);
721
722 data.parent = scm_current_dynamic_state ();
723 data.thunk = thunk;
724 data.handler = handler;
725 data.thread = SCM_BOOL_F;
726 scm_i_pthread_mutex_init (&data.mutex, NULL);
727 scm_i_pthread_cond_init (&data.cond, NULL);
728
729 scm_i_scm_pthread_mutex_lock (&data.mutex);
730 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
731 if (err)
732 {
733 scm_i_pthread_mutex_unlock (&data.mutex);
734 errno = err;
735 scm_syserror (NULL);
736 }
737 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
738 scm_i_pthread_mutex_unlock (&data.mutex);
739
740 return data.thread;
741 }
742 #undef FUNC_NAME
743
744 typedef struct {
745 SCM parent;
746 scm_t_catch_body body;
747 void *body_data;
748 scm_t_catch_handler handler;
749 void *handler_data;
750 SCM thread;
751 scm_i_pthread_mutex_t mutex;
752 scm_i_pthread_cond_t cond;
753 } spawn_data;
754
755 static void *
756 really_spawn (void *d)
757 {
758 spawn_data *data = (spawn_data *)d;
759 scm_t_catch_body body = data->body;
760 void *body_data = data->body_data;
761 scm_t_catch_handler handler = data->handler;
762 void *handler_data = data->handler_data;
763 scm_i_thread *t = SCM_I_CURRENT_THREAD;
764
765 scm_i_scm_pthread_mutex_lock (&data->mutex);
766 data->thread = scm_current_thread ();
767 scm_i_pthread_cond_signal (&data->cond);
768 scm_i_pthread_mutex_unlock (&data->mutex);
769
770 if (handler == NULL)
771 t->result = body (body_data);
772 else
773 t->result = scm_internal_catch (SCM_BOOL_T,
774 body, body_data,
775 handler, handler_data);
776
777 return 0;
778 }
779
780 static void *
781 spawn_thread (void *d)
782 {
783 spawn_data *data = (spawn_data *)d;
784 scm_i_pthread_detach (scm_i_pthread_self ());
785 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
786 return NULL;
787 }
788
789 SCM
790 scm_spawn_thread (scm_t_catch_body body, void *body_data,
791 scm_t_catch_handler handler, void *handler_data)
792 {
793 spawn_data data;
794 scm_i_pthread_t id;
795 int err;
796
797 data.parent = scm_current_dynamic_state ();
798 data.body = body;
799 data.body_data = body_data;
800 data.handler = handler;
801 data.handler_data = handler_data;
802 data.thread = SCM_BOOL_F;
803 scm_i_pthread_mutex_init (&data.mutex, NULL);
804 scm_i_pthread_cond_init (&data.cond, NULL);
805
806 scm_i_scm_pthread_mutex_lock (&data.mutex);
807 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
808 if (err)
809 {
810 scm_i_pthread_mutex_unlock (&data.mutex);
811 errno = err;
812 scm_syserror (NULL);
813 }
814 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
815 scm_i_pthread_mutex_unlock (&data.mutex);
816
817 return data.thread;
818 }
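
/* Example (a sketch, not part of the public documentation): spawning
   a thread from C with a body and a catch-all handler.  'worker' and
   'on_error' are hypothetical functions supplied by the caller; they
   have the scm_t_catch_body and scm_t_catch_handler signatures.

       static SCM
       worker (void *data)
       {
         return scm_from_int (42);
       }

       static SCM
       on_error (void *data, SCM key, SCM args)
       {
         return SCM_BOOL_F;
       }

       ...
       SCM t = scm_spawn_thread (worker, NULL, on_error, NULL);
*/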
819
820 SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
821 (),
822 "Move the calling thread to the end of the scheduling queue.")
823 #define FUNC_NAME s_scm_yield
824 {
825 return scm_from_bool (scm_i_sched_yield ());
826 }
827 #undef FUNC_NAME
828
829 SCM_DEFINE (scm_join_thread, "join-thread", 1, 0, 0,
830 (SCM thread),
831 "Suspend execution of the calling thread until the target @var{thread} "
832 "terminates, unless the target @var{thread} has already terminated. ")
833 #define FUNC_NAME s_scm_join_thread
834 {
835 scm_i_thread *t;
836 SCM res;
837
838 SCM_VALIDATE_THREAD (1, thread);
839 if (scm_is_eq (scm_current_thread (), thread))
840 SCM_MISC_ERROR ("can not join the current thread", SCM_EOL);
841
842 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
843
844 t = SCM_I_THREAD_DATA (thread);
845 if (!t->exited)
846 {
847 while (1)
848 {
849 block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
850 if (t->exited)
851 break;
852 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
853 SCM_TICK;
854 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
855 }
856 }
857 res = t->result;
858
859 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
860 return res;
861 }
862 #undef FUNC_NAME
863
864 /*** Fat mutexes */
865
866 /* We implement our own mutex type since we want them to be 'fair', we
867 want to do fancy things while waiting for them (like running
868 asyncs) and we might want to add things that are nice for
869 debugging.
870 */
871
872 typedef struct {
873 scm_i_pthread_mutex_t lock;
874 SCM owner;
875 int level; /* how much the owner owns us.
876 < 0 for non-recursive mutexes */
877 SCM waiting; /* the threads waiting for this mutex. */
878 } fat_mutex;
879
880 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
881 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
882
883
884 static size_t
885 fat_mutex_free (SCM mx)
886 {
887 fat_mutex *m = SCM_MUTEX_DATA (mx);
888 scm_i_pthread_mutex_destroy (&m->lock);
889 scm_gc_free (m, sizeof (fat_mutex), "mutex");
890 return 0;
891 }
892
893 static int
894 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
895 {
896 fat_mutex *m = SCM_MUTEX_DATA (mx);
897 scm_puts ("#<mutex ", port);
898 scm_uintprint ((scm_t_bits)m, 16, port);
899 scm_puts (">", port);
900 return 1;
901 }
902
903 static SCM
904 make_fat_mutex (int recursive)
905 {
906 fat_mutex *m;
907 SCM mx;
908
909 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
910 scm_i_pthread_mutex_init (&m->lock, NULL);
911 m->owner = SCM_BOOL_F;
912 m->level = recursive? 0 : -1;
913 m->waiting = SCM_EOL;
914 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
915 m->waiting = make_queue ();
916 return mx;
917 }
918
919 SCM_DEFINE (scm_make_mutex, "make-mutex", 0, 0, 0,
920 (void),
921 "Create a new mutex. ")
922 #define FUNC_NAME s_scm_make_mutex
923 {
924 return make_fat_mutex (0);
925 }
926 #undef FUNC_NAME
927
928 SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
929 (void),
930 "Create a new recursive mutex. ")
931 #define FUNC_NAME s_scm_make_recursive_mutex
932 {
933 return make_fat_mutex (1);
934 }
935 #undef FUNC_NAME
936
937 static char *
938 fat_mutex_lock (SCM mutex)
939 {
940 fat_mutex *m = SCM_MUTEX_DATA (mutex);
941 SCM thread = scm_current_thread ();
942 char *msg = NULL;
943
944 scm_i_scm_pthread_mutex_lock (&m->lock);
945 if (scm_is_false (m->owner))
946 m->owner = thread;
947 else if (scm_is_eq (m->owner, thread))
948 {
949 if (m->level >= 0)
950 m->level++;
951 else
952 msg = "mutex already locked by current thread";
953 }
954 else
955 {
956 while (1)
957 {
958 block_self (m->waiting, mutex, &m->lock, NULL);
959 if (scm_is_eq (m->owner, thread))
960 break;
961 scm_i_pthread_mutex_unlock (&m->lock);
962 SCM_TICK;
963 scm_i_scm_pthread_mutex_lock (&m->lock);
964 }
965 }
966 scm_i_pthread_mutex_unlock (&m->lock);
967 return msg;
968 }
969
970 SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
971 (SCM mx),
972 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
973 "blocks until the mutex becomes available. The function returns when "
974 	    "the calling thread owns the lock on @var{mutex}.  Locking a recursive "
975 	    "mutex that the calling thread already owns increases its lock level "
976 	    "and succeeds right away; locking a standard mutex twice signals an error. ")
977 #define FUNC_NAME s_scm_lock_mutex
978 {
979 char *msg;
980
981 SCM_VALIDATE_MUTEX (1, mx);
982 msg = fat_mutex_lock (mx);
983 if (msg)
984 scm_misc_error (NULL, msg, SCM_EOL);
985 return SCM_BOOL_T;
986 }
987 #undef FUNC_NAME
988
989 void
990 scm_dynwind_lock_mutex (SCM mutex)
991 {
992 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
993 SCM_F_WIND_EXPLICITLY);
994 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
995 SCM_F_WIND_EXPLICITLY);
996 }
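
/* Typical use (sketch): inside a dynwind context the mutex is
   released again however the context is left, including via a
   non-local exit.

       scm_dynwind_begin (0);
       scm_dynwind_lock_mutex (mutex);
       ... code that may throw ...
       scm_dynwind_end ();
*/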
997
998 static char *
999 fat_mutex_trylock (fat_mutex *m, int *resp)
1000 {
1001 char *msg = NULL;
1002 SCM thread = scm_current_thread ();
1003
1004 *resp = 1;
1005 scm_i_pthread_mutex_lock (&m->lock);
1006 if (scm_is_false (m->owner))
1007 m->owner = thread;
1008 else if (scm_is_eq (m->owner, thread))
1009 {
1010 if (m->level >= 0)
1011 m->level++;
1012 else
1013 msg = "mutex already locked by current thread";
1014 }
1015 else
1016 *resp = 0;
1017 scm_i_pthread_mutex_unlock (&m->lock);
1018 return msg;
1019 }
1020
1021 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1022 (SCM mutex),
1023 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1024 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1025 #define FUNC_NAME s_scm_try_mutex
1026 {
1027 char *msg;
1028 int res;
1029
1030 SCM_VALIDATE_MUTEX (1, mutex);
1031
1032 msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
1033 if (msg)
1034 scm_misc_error (NULL, msg, SCM_EOL);
1035 return scm_from_bool (res);
1036 }
1037 #undef FUNC_NAME
1038
1039 static char *
1040 fat_mutex_unlock (fat_mutex *m)
1041 {
1042 char *msg = NULL;
1043
1044 scm_i_scm_pthread_mutex_lock (&m->lock);
1045 if (!scm_is_eq (m->owner, scm_current_thread ()))
1046 {
1047 if (scm_is_false (m->owner))
1048 msg = "mutex not locked";
1049 else
1050 msg = "mutex not locked by current thread";
1051 }
1052 else if (m->level > 0)
1053 m->level--;
1054 else
1055 m->owner = unblock_from_queue (m->waiting);
1056 scm_i_pthread_mutex_unlock (&m->lock);
1057
1058 return msg;
1059 }
1060
1061 SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
1062 (SCM mx),
1063 	    "Unlock @var{mutex} if the calling thread owns the lock on "
1064 	    "@var{mutex}.  Calling @code{unlock-mutex} on a mutex not owned by the "
1065 	    "current thread signals an error.  Once a mutex has been unlocked, one "
1066 	    "thread blocked on @var{mutex} is awakened and grabs the mutex lock.  "
1067 	    "For a recursive mutex, every call to @code{lock-mutex} by this thread "
1068 	    "must be matched with a call to @code{unlock-mutex}; only the last "
1069 	    "call to @code{unlock-mutex} will actually unlock the mutex. ")
1070 #define FUNC_NAME s_scm_unlock_mutex
1071 {
1072 char *msg;
1073 SCM_VALIDATE_MUTEX (1, mx);
1074
1075 msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
1076 if (msg)
1077 scm_misc_error (NULL, msg, SCM_EOL);
1078 return SCM_BOOL_T;
1079 }
1080 #undef FUNC_NAME
1081
1082 #if 0
1083
1084 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1085 (SCM mx),
1086 "Return the thread owning @var{mx}, or @code{#f}.")
1087 #define FUNC_NAME s_scm_mutex_owner
1088 {
1089 SCM_VALIDATE_MUTEX (1, mx);
1090 return (SCM_MUTEX_DATA(mx))->owner;
1091 }
1092 #undef FUNC_NAME
1093
1094 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1095 (SCM mx),
1096 "Return the lock level of a recursive mutex, or -1\n"
1097 "for a standard mutex.")
1098 #define FUNC_NAME s_scm_mutex_level
1099 {
1100 SCM_VALIDATE_MUTEX (1, mx);
1101 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1102 }
1103 #undef FUNC_NAME
1104
1105 #endif
1106
1107 /*** Fat condition variables */
1108
1109 typedef struct {
1110 scm_i_pthread_mutex_t lock;
1111 SCM waiting; /* the threads waiting for this condition. */
1112 } fat_cond;
1113
1114 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1115 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1116
1117 static size_t
1118 fat_cond_free (SCM mx)
1119 {
1120 fat_cond *c = SCM_CONDVAR_DATA (mx);
1121 scm_i_pthread_mutex_destroy (&c->lock);
1122 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1123 return 0;
1124 }
1125
1126 static int
1127 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1128 {
1129 fat_cond *c = SCM_CONDVAR_DATA (cv);
1130 scm_puts ("#<condition-variable ", port);
1131 scm_uintprint ((scm_t_bits)c, 16, port);
1132 scm_puts (">", port);
1133 return 1;
1134 }
1135
1136 SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1137 (void),
1138 "Make a new condition variable.")
1139 #define FUNC_NAME s_scm_make_condition_variable
1140 {
1141 fat_cond *c;
1142 SCM cv;
1143
1144 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
1145 scm_i_pthread_mutex_init (&c->lock, 0);
1146 c->waiting = SCM_EOL;
1147 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1148 c->waiting = make_queue ();
1149 return cv;
1150 }
1151 #undef FUNC_NAME
1152
1153 static int
1154 fat_cond_timedwait (SCM cond, SCM mutex,
1155 const scm_t_timespec *waittime)
1156 {
1157 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1158 fat_cond *c = SCM_CONDVAR_DATA (cond);
1159 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1160 const char *msg;
1161 int err = 0;
1162
1163 while (1)
1164 {
1165 scm_i_scm_pthread_mutex_lock (&c->lock);
1166 msg = fat_mutex_unlock (m);
1167 t->block_asyncs++;
1168 if (msg == NULL)
1169 {
1170 err = block_self (c->waiting, cond, &c->lock, waittime);
1171 scm_i_pthread_mutex_unlock (&c->lock);
1172 fat_mutex_lock (mutex);
1173 }
1174 else
1175 scm_i_pthread_mutex_unlock (&c->lock);
1176 t->block_asyncs--;
1177 scm_async_click ();
1178
1179 if (msg)
1180 scm_misc_error (NULL, msg, SCM_EOL);
1181
1182 scm_remember_upto_here_2 (cond, mutex);
1183
1184 if (err == 0)
1185 return 1;
1186 if (err == ETIMEDOUT)
1187 return 0;
1188 if (err != EINTR)
1189 {
1190 errno = err;
1191 scm_syserror (NULL);
1192 }
1193 }
1194 }
1195
1196 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1197 (SCM cv, SCM mx, SCM t),
1198 "Wait until @var{cond-var} has been signalled. While waiting, "
1199 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1200 "is locked again when this function returns. When @var{time} is given, "
1201 "it specifies a point in time where the waiting should be aborted. It "
1202 	    "can be either an integer as returned by @code{current-time} or a pair "
1203 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1204 "mutex is locked and @code{#f} is returned. When the condition "
1205 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1206 "is returned. ")
1207 #define FUNC_NAME s_scm_timed_wait_condition_variable
1208 {
1209 scm_t_timespec waittime, *waitptr = NULL;
1210
1211 SCM_VALIDATE_CONDVAR (1, cv);
1212 SCM_VALIDATE_MUTEX (2, mx);
1213
1214 if (!SCM_UNBNDP (t))
1215 {
1216 if (scm_is_pair (t))
1217 {
1218 waittime.tv_sec = scm_to_ulong (SCM_CAR (t));
1219 	  waittime.tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
1220 }
1221 else
1222 {
1223 waittime.tv_sec = scm_to_ulong (t);
1224 waittime.tv_nsec = 0;
1225 }
1226 waitptr = &waittime;
1227 }
1228
1229 return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
1230 }
1231 #undef FUNC_NAME
1232
1233 static void
1234 fat_cond_signal (fat_cond *c)
1235 {
1236 scm_i_scm_pthread_mutex_lock (&c->lock);
1237 unblock_from_queue (c->waiting);
1238 scm_i_pthread_mutex_unlock (&c->lock);
1239 }
1240
1241 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1242 (SCM cv),
1243 	    "Wake up one thread that is waiting for @var{cv}.")
1244 #define FUNC_NAME s_scm_signal_condition_variable
1245 {
1246 SCM_VALIDATE_CONDVAR (1, cv);
1247 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1248 return SCM_BOOL_T;
1249 }
1250 #undef FUNC_NAME
1251
1252 static void
1253 fat_cond_broadcast (fat_cond *c)
1254 {
1255 scm_i_scm_pthread_mutex_lock (&c->lock);
1256 while (scm_is_true (unblock_from_queue (c->waiting)))
1257 ;
1258 scm_i_pthread_mutex_unlock (&c->lock);
1259 }
1260
1261 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1262 (SCM cv),
1263 "Wake up all threads that are waiting for @var{cv}. ")
1264 #define FUNC_NAME s_scm_broadcast_condition_variable
1265 {
1266 SCM_VALIDATE_CONDVAR (1, cv);
1267 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1268 return SCM_BOOL_T;
1269 }
1270 #undef FUNC_NAME
1271
1272 /*** Marking stacks */
1273
1274 /* XXX - what to do with this? Do we need to handle this for blocked
1275 threads as well?
1276 */
1277 #ifdef __ia64__
1278 # define SCM_MARK_BACKING_STORE() do { \
1279 ucontext_t ctx; \
1280 SCM_STACKITEM * top, * bot; \
1281 getcontext (&ctx); \
1282 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1283 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1284 / sizeof (SCM_STACKITEM))); \
1285 bot = (SCM_STACKITEM *) scm_ia64_register_backing_store_base (); \
1286 top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
1287 scm_mark_locations (bot, top - bot); } while (0)
1288 #else
1289 # define SCM_MARK_BACKING_STORE()
1290 #endif
1291
1292
1293
1294 /*** Select */
1295
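/* scm_std_select is a variant of select that can be interrupted by
   the delivery of a system async: before blocking, the thread
   registers its sleep pipe via scm_i_setup_sleep, and the read end of
   that pipe is added to the file descriptor set.  Another thread that
   queues an async for us writes a byte to the write end (see
   scm_i_queue_async_cell), which makes select return; the byte is
   then drained and EINTR is reported.
*/
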
1296 int
1297 scm_std_select (int nfds,
1298 SELECT_TYPE *readfds,
1299 SELECT_TYPE *writefds,
1300 SELECT_TYPE *exceptfds,
1301 struct timeval *timeout)
1302 {
1303 fd_set my_readfds;
1304 int res, eno, wakeup_fd;
1305 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1306 scm_t_guile_ticket ticket;
1307
1308 if (readfds == NULL)
1309 {
1310 FD_ZERO (&my_readfds);
1311 readfds = &my_readfds;
1312 }
1313
1314 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1315 SCM_TICK;
1316
1317 wakeup_fd = t->sleep_pipe[0];
1318 ticket = scm_leave_guile ();
1319 FD_SET (wakeup_fd, readfds);
1320 if (wakeup_fd >= nfds)
1321 nfds = wakeup_fd+1;
1322 res = select (nfds, readfds, writefds, exceptfds, timeout);
1323 t->sleep_fd = -1;
1324 eno = errno;
1325 scm_enter_guile (ticket);
1326
1327 scm_i_reset_sleep (t);
1328
1329 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1330 {
1331 char dummy;
1332 read (wakeup_fd, &dummy, 1);
1333 FD_CLR (wakeup_fd, readfds);
1334 res -= 1;
1335 if (res == 0)
1336 {
1337 eno = EINTR;
1338 res = -1;
1339 }
1340 }
1341 errno = eno;
1342 return res;
1343 }
1344
1345 /* Convenience API for blocking while in guile mode. */
1346
1347 #if SCM_USE_PTHREAD_THREADS
1348
1349 int
1350 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1351 {
1352 scm_t_guile_ticket t = scm_leave_guile ();
1353 int res = scm_i_pthread_mutex_lock (mutex);
1354 scm_enter_guile (t);
1355 return res;
1356 }
1357
1358 static void
1359 do_unlock (void *data)
1360 {
1361 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1362 }
1363
1364 void
1365 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1366 {
1367 scm_i_scm_pthread_mutex_lock (mutex);
1368 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
1369 }
1370
1371 int
1372 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1373 {
1374 scm_t_guile_ticket t = scm_leave_guile ();
1375 int res = scm_i_pthread_cond_wait (cond, mutex);
1376 scm_enter_guile (t);
1377 return res;
1378 }
1379
1380 int
1381 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1382 scm_i_pthread_mutex_t *mutex,
1383 const scm_t_timespec *wt)
1384 {
1385 scm_t_guile_ticket t = scm_leave_guile ();
1386 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1387 scm_enter_guile (t);
1388 return res;
1389 }
1390
1391 #endif
1392
1393 unsigned long
1394 scm_std_usleep (unsigned long usecs)
1395 {
1396 struct timeval tv;
1397 tv.tv_usec = usecs % 1000000;
1398 tv.tv_sec = usecs / 1000000;
1399 scm_std_select (0, NULL, NULL, NULL, &tv);
1400 return tv.tv_sec * 1000000 + tv.tv_usec;
1401 }
1402
1403 unsigned int
1404 scm_std_sleep (unsigned int secs)
1405 {
1406 struct timeval tv;
1407 tv.tv_usec = 0;
1408 tv.tv_sec = secs;
1409 scm_std_select (0, NULL, NULL, NULL, &tv);
1410 return tv.tv_sec;
1411 }
1412
1413 /*** Misc */
1414
1415 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1416 (void),
1417 "Return the thread that called this function.")
1418 #define FUNC_NAME s_scm_current_thread
1419 {
1420 return SCM_I_CURRENT_THREAD->handle;
1421 }
1422 #undef FUNC_NAME
1423
1424 static SCM
1425 scm_c_make_list (size_t n, SCM fill)
1426 {
1427 SCM res = SCM_EOL;
1428 while (n-- > 0)
1429 res = scm_cons (fill, res);
1430 return res;
1431 }
1432
1433 SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
1434 (void),
1435 "Return a list of all threads.")
1436 #define FUNC_NAME s_scm_all_threads
1437 {
1438 /* We can not allocate while holding the thread_admin_mutex because
1439 of the way GC is done.
1440 */
1441 int n = thread_count;
1442 scm_i_thread *t;
1443 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
1444
1445 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1446 l = &list;
1447 for (t = all_threads; t && n > 0; t = t->next_thread)
1448 {
1449 SCM_SETCAR (*l, t->handle);
1450 l = SCM_CDRLOC (*l);
1451 n--;
1452 }
1453 *l = SCM_EOL;
1454 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1455 return list;
1456 }
1457 #undef FUNC_NAME
1458
1459 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1460 (SCM thread),
1461 "Return @code{#t} iff @var{thread} has exited.\n")
1462 #define FUNC_NAME s_scm_thread_exited_p
1463 {
1464 return scm_from_bool (scm_c_thread_exited_p (thread));
1465 }
1466 #undef FUNC_NAME
1467
1468 int
1469 scm_c_thread_exited_p (SCM thread)
1470 #define FUNC_NAME s_scm_thread_exited_p
1471 {
1472 scm_i_thread *t;
1473 SCM_VALIDATE_THREAD (1, thread);
1474 t = SCM_I_THREAD_DATA (thread);
1475 return t->exited;
1476 }
1477 #undef FUNC_NAME
1478
1479 static scm_i_pthread_cond_t wake_up_cond;
1480 int scm_i_thread_go_to_sleep;
1481 static int threads_initialized_p = 0;
1482
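/* Stop-the-world protocol used for garbage collection, as implemented
   below: scm_i_thread_put_to_sleep sets scm_i_thread_go_to_sleep and
   then acquires every thread's heap_mutex, which it can only do once
   each thread has either left guile mode or parked itself in
   scm_i_thread_sleep_for_gc.  scm_i_thread_wake_up broadcasts
   wake_up_cond and releases the heap mutexes again.
*/
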
1483 void
1484 scm_i_thread_put_to_sleep ()
1485 {
1486 if (threads_initialized_p)
1487 {
1488 scm_i_thread *t;
1489
1490 scm_leave_guile ();
1491 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1492
1493 /* Signal all threads to go to sleep
1494 */
1495 scm_i_thread_go_to_sleep = 1;
1496 for (t = all_threads; t; t = t->next_thread)
1497 scm_i_pthread_mutex_lock (&t->heap_mutex);
1498 scm_i_thread_go_to_sleep = 0;
1499 }
1500 }
1501
1502 void
1503 scm_i_thread_invalidate_freelists ()
1504 {
1505 /* thread_admin_mutex is already locked. */
1506
1507 scm_i_thread *t;
1508 for (t = all_threads; t; t = t->next_thread)
1509 if (t != SCM_I_CURRENT_THREAD)
1510 t->clear_freelists_p = 1;
1511 }
1512
1513 void
1514 scm_i_thread_wake_up ()
1515 {
1516 if (threads_initialized_p)
1517 {
1518 scm_i_thread *t;
1519
1520 scm_i_pthread_cond_broadcast (&wake_up_cond);
1521 for (t = all_threads; t; t = t->next_thread)
1522 scm_i_pthread_mutex_unlock (&t->heap_mutex);
1523 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1524 scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
1525 }
1526 }
1527
1528 void
1529 scm_i_thread_sleep_for_gc ()
1530 {
1531 scm_i_thread *t = suspend ();
1532 scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
1533 resume (t);
1534 }
1535
1536 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
1537 */
1538 scm_i_pthread_mutex_t scm_i_critical_section_mutex;
1539 int scm_i_critical_section_level = 0;
1540
1541 static SCM dynwind_critical_section_mutex;
1542
1543 void
1544 scm_dynwind_critical_section (SCM mutex)
1545 {
1546 if (scm_is_false (mutex))
1547 mutex = dynwind_critical_section_mutex;
1548 scm_dynwind_lock_mutex (mutex);
1549 scm_dynwind_block_asyncs ();
1550 }
1551
1552 /*** Initialization */
1553
1554 scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
1555 scm_i_pthread_mutex_t scm_i_misc_mutex;
1556
1557 #if SCM_USE_PTHREAD_THREADS
1558 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
1559 #endif
1560
1561 void
1562 scm_threads_prehistory (SCM_STACKITEM *base)
1563 {
1564 #if SCM_USE_PTHREAD_THREADS
1565 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
1566 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
1567 PTHREAD_MUTEX_RECURSIVE);
1568 #endif
1569
1570 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
1571 scm_i_pthread_mutexattr_recursive);
1572 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
1573 scm_i_pthread_cond_init (&wake_up_cond, NULL);
1574 scm_i_pthread_key_create (&scm_i_freelist, NULL);
1575 scm_i_pthread_key_create (&scm_i_freelist2, NULL);
1576
1577 guilify_self_1 (base);
1578 }
1579
1580 scm_t_bits scm_tc16_thread;
1581 scm_t_bits scm_tc16_mutex;
1582 scm_t_bits scm_tc16_condvar;
1583
1584 void
1585 scm_init_threads ()
1586 {
1587 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
1588 scm_set_smob_print (scm_tc16_thread, thread_print);
1589 scm_set_smob_free (scm_tc16_thread, thread_free); /* XXX: Could be removed */
1590
1591 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
1592 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
1593 scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
1594
1595 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
1596 sizeof (fat_cond));
1597 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
1598 scm_set_smob_free (scm_tc16_condvar, fat_cond_free);
1599
1600 scm_i_default_dynamic_state = SCM_BOOL_F;
1601 guilify_self_2 (SCM_BOOL_F);
1602 threads_initialized_p = 1;
1603
1604 dynwind_critical_section_mutex =
1605 scm_permanent_object (scm_make_recursive_mutex ());
1606 }
1607
1608 void
1609 scm_init_threads_default_dynamic_state ()
1610 {
1611 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
1612 scm_i_default_dynamic_state = scm_permanent_object (state);
1613 }
1614
1615 void
1616 scm_init_thread_procs ()
1617 {
1618 #include "libguile/threads.x"
1619 }
1620
1621 /*
1622 Local Variables:
1623 c-file-style: "gnu"
1624 End:
1625 */