Fix compilation error due to strict aliasing rules on `i386-unknown-freebsd7.0'.
[bpt/guile.git] / libguile / threads.c
1/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19\f
20#ifdef HAVE_CONFIG_H
21# include <config.h>
22#endif
23
24#include "libguile/_scm.h"
25
26#if HAVE_UNISTD_H
27#include <unistd.h>
28#endif
29#include <stdio.h>
30#include <assert.h>
31
32#ifdef HAVE_STRING_H
33#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
34#endif
35
36#if HAVE_SYS_TIME_H
37#include <sys/time.h>
38#endif
39
40#include "libguile/validate.h"
41#include "libguile/root.h"
42#include "libguile/eval.h"
43#include "libguile/async.h"
44#include "libguile/ports.h"
45#include "libguile/threads.h"
46#include "libguile/dynwind.h"
47#include "libguile/iselect.h"
48#include "libguile/fluids.h"
49#include "libguile/continuations.h"
50#include "libguile/gc.h"
51#include "libguile/init.h"
52#include "libguile/scmsigs.h"
53#include "libguile/strings.h"
54
55#ifdef __MINGW32__
56#ifndef ETIMEDOUT
57# define ETIMEDOUT WSAETIMEDOUT
58#endif
59# include <fcntl.h>
60# include <process.h>
61# define pipe(fd) _pipe (fd, 256, O_BINARY)
62#endif /* __MINGW32__ */
63
64static void
65to_timespec (SCM t, scm_t_timespec *waittime)
66{
67 if (scm_is_pair (t))
68 {
69 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
70 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
71 }
72 else
73 {
74 double time = scm_to_double (t);
75 double sec = scm_c_truncate (time);
76
77 waittime->tv_sec = (long) sec;
78 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
79 }
80}
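/* Illustrative sketch (not part of the original file): to_timespec above
   accepts either a (seconds . microseconds) pair, as returned by
   gettimeofday, or a plain real number of seconds.  The function and
   variable names below are made up for the example.  */
#if 0
static void
example_to_timespec (void)
{
  scm_t_timespec ts;

  /* A pair: 5 seconds and 250000 microseconds.  */
  to_timespec (scm_cons (scm_from_ulong (5), scm_from_ulong (250000)), &ts);

  /* A real number of seconds.  */
  to_timespec (scm_from_double (5.25), &ts);
}
#endif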
81
82/*** Queues */
83
84/* Make an empty queue data structure.
85 */
86static SCM
87make_queue ()
88{
89 return scm_cons (SCM_EOL, SCM_EOL);
90}
91
92/* Put T at the back of Q and return a handle that can be used with
93 remqueue to remove T from Q again.
94 */
95static SCM
96enqueue (SCM q, SCM t)
97{
98 SCM c = scm_cons (t, SCM_EOL);
99 if (scm_is_null (SCM_CDR (q)))
100 SCM_SETCDR (q, c);
101 else
102 SCM_SETCDR (SCM_CAR (q), c);
103 SCM_SETCAR (q, c);
104 return c;
105}
106
107/* Remove the element that the handle C refers to from the queue Q. C
108 must have been returned from a call to enqueue. The return value
109 is zero when the element referred to by C has already been removed.
110 Otherwise, 1 is returned.
111*/
112static int
113remqueue (SCM q, SCM c)
114{
115 SCM p, prev = q;
116 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
117 {
118 if (scm_is_eq (p, c))
119 {
120 if (scm_is_eq (c, SCM_CAR (q)))
121 SCM_SETCAR (q, SCM_CDR (c));
122 SCM_SETCDR (prev, SCM_CDR (c));
123 return 1;
124 }
125 prev = p;
126 }
127 return 0;
128}
129
130/* Remove the front-most element from the queue Q and return it.
131 Return SCM_BOOL_F when Q is empty.
132*/
133static SCM
134dequeue (SCM q)
135{
136 SCM c = SCM_CDR (q);
137 if (scm_is_null (c))
138 return SCM_BOOL_F;
139 else
140 {
141 SCM_SETCDR (q, SCM_CDR (c));
142 if (scm_is_null (SCM_CDR (q)))
143 SCM_SETCAR (q, SCM_EOL);
144 return SCM_CAR (c);
145 }
146}
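/* Illustrative sketch (not part of the original file): how the queue
   primitives above fit together.  example_queue_usage and ELEM are
   hypothetical; only make_queue, enqueue, remqueue and dequeue come
   from this file.  */
#if 0
static void
example_queue_usage (SCM elem)
{
  SCM q = make_queue ();
  SCM handle = enqueue (q, elem);   /* handle can later remove ELEM */

  if (remqueue (q, handle))
    ;                               /* ELEM was still on the queue */

  /* dequeue returns #f once the queue is empty.  */
  if (scm_is_false (dequeue (q)))
    ;                               /* queue exhausted */
}
#endif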
147
148/*** Thread smob routines */
149
150static SCM
151thread_mark (SCM obj)
152{
153 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
154 scm_gc_mark (t->result);
155 scm_gc_mark (t->cleanup_handler);
156 scm_gc_mark (t->join_queue);
157 scm_gc_mark (t->mutexes);
158 scm_gc_mark (t->dynwinds);
159 scm_gc_mark (t->active_asyncs);
160 scm_gc_mark (t->continuation_root);
161 return t->dynamic_state;
162}
163
164static int
165thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
166{
 167 /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
168 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
169 the struct case, hence we go via a union, and extract according to the
170 size of pthread_t. */
171 union {
172 scm_i_pthread_t p;
173 unsigned short us;
174 unsigned int ui;
175 unsigned long ul;
176 scm_t_uintmax um;
177 } u;
178 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
179 scm_i_pthread_t p = t->pthread;
180 scm_t_uintmax id;
181 u.p = p;
182 if (sizeof (p) == sizeof (unsigned short))
183 id = u.us;
184 else if (sizeof (p) == sizeof (unsigned int))
185 id = u.ui;
186 else if (sizeof (p) == sizeof (unsigned long))
187 id = u.ul;
188 else
189 id = u.um;
190
191 scm_puts ("#<thread ", port);
192 scm_uintprint (id, 10, port);
193 scm_puts (" (", port);
194 scm_uintprint ((scm_t_bits)t, 16, port);
195 scm_puts (")>", port);
196 return 1;
197}
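/* Illustrative sketch (not part of the original file): the union-based
   conversion used in thread_print, shown in isolation.  Going through a
   union avoids the syntax error of casting a struct pthread_t to an
   integer and avoids pointer casts of the kind that run afoul of strict
   aliasing rules; the member of matching size is then read back.  The
   function name is made up for the example.  */
#if 0
static scm_t_uintmax
example_pthread_id (scm_i_pthread_t p)
{
  union { scm_i_pthread_t p; unsigned long ul; scm_t_uintmax um; } u;
  u.p = p;
  return (sizeof (p) == sizeof (unsigned long)) ? u.ul : u.um;
}
#endif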
198
199static size_t
200thread_free (SCM obj)
201{
202 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
203 assert (t->exited);
204 scm_gc_free (t, sizeof (*t), "thread");
205 return 0;
206}
207
208/*** Blocking on queues. */
209
210/* See also scm_i_queue_async_cell for how such a block is
211 interrupted.
212*/
213
214/* Put the current thread on QUEUE and go to sleep, waiting for it to
215 be woken up by a call to 'unblock_from_queue', or to be
216 interrupted. Upon return of this function, the current thread is
217 no longer on QUEUE, even when the sleep has been interrupted.
218
219 The QUEUE data structure is assumed to be protected by MUTEX and
220 the caller of block_self must hold MUTEX. It will be atomically
221 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
222
223 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
224 as MUTEX is needed.
225
226 When WAITTIME is not NULL, the sleep will be aborted at that time.
227
228 The return value of block_self is an errno value. It will be zero
229 when the sleep has been successfully completed by a call to
230 unblock_from_queue, EINTR when it has been interrupted by the
231 delivery of a system async, and ETIMEDOUT when the timeout has
232 expired.
233
234 The system asyncs themselves are not executed by block_self.
235*/
236static int
237block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
238 const scm_t_timespec *waittime)
239{
240 scm_i_thread *t = SCM_I_CURRENT_THREAD;
241 SCM q_handle;
242 int err;
243
244 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
245 err = EINTR;
246 else
247 {
248 t->block_asyncs++;
249 q_handle = enqueue (queue, t->handle);
250 if (waittime == NULL)
251 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
252 else
253 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
254
255 /* When we are still on QUEUE, we have been interrupted. We
256 report this only when no other error (such as a timeout) has
257 happened above.
258 */
259 if (remqueue (queue, q_handle) && err == 0)
260 err = EINTR;
261 t->block_asyncs--;
262 scm_i_reset_sleep (t);
263 }
264
265 return err;
266}
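/* Illustrative sketch (not part of the original file): the calling
   convention for block_self, modelled on its real callers below
   (fat_mutex_lock and scm_join_thread_timed).  example_wait, WAIT_QUEUE,
   OBJ, LOCK and DEADLINE are hypothetical.  The caller holds LOCK on
   entry; block_self releases it while sleeping and reacquires it before
   returning.  */
#if 0
static void
example_wait (SCM wait_queue, SCM obj, scm_i_pthread_mutex_t *lock,
              const scm_t_timespec *deadline)
{
  int err = block_self (wait_queue, obj, lock, deadline);
  if (err == 0)
    ;                   /* woken by unblock_from_queue */
  else if (err == EINTR)
    ;                   /* interrupted by a system async; the async is
                           not run here, so run SCM_TICK after LOCK has
                           been dropped */
  else if (err == ETIMEDOUT)
    ;                   /* DEADLINE passed before a wakeup arrived */
}
#endif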
267
268/* Wake up the first thread on QUEUE, if any. The caller must hold
269 the mutex that protects QUEUE. The awoken thread is returned, or
270 #f when the queue was empty.
271 */
272static SCM
273unblock_from_queue (SCM queue)
274{
275 SCM thread = dequeue (queue);
276 if (scm_is_true (thread))
277 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
278 return thread;
279}
280
281/* Getting into and out of guile mode.
282 */
283
284/* Ken Raeburn observes that the implementation of suspend and resume
285 (and the things that build on top of them) are very likely not
 286 correct (see below). We will need to fix this eventually, and that's
287 why scm_leave_guile/scm_enter_guile are not exported in the API.
288
289 Ken writes:
290
291 Consider this sequence:
292
293 Function foo, called in Guile mode, calls suspend (maybe indirectly
294 through scm_leave_guile), which does this:
295
296 // record top of stack for the GC
297 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
298 var 't'
299 // save registers.
300 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
301 setjmp (t->regs); // here's most of the magic
302
303 ... and returns.
304
305 Function foo has a SCM value X, a handle on a non-immediate object, in
306 a caller-saved register R, and it's the only reference to the object
307 currently.
308
309 The compiler wants to use R in suspend, so it pushes the current
310 value, X, into a stack slot which will be reloaded on exit from
311 suspend; then it loads stuff into R and goes about its business. The
312 setjmp call saves (some of) the current registers, including R, which
313 no longer contains X. (This isn't a problem for a normal
314 setjmp/longjmp situation, where longjmp would be called before
315 setjmp's caller returns; the old value for X would be loaded back from
316 the stack after the longjmp, before the function returned.)
317
318 So, suspend returns, loading X back into R (and invalidating the jump
319 buffer) in the process. The caller foo then goes off and calls a
320 bunch of other functions out of Guile mode, occasionally storing X on
321 the stack again, but, say, much deeper on the stack than suspend's
322 stack frame went, and the stack slot where suspend had written X has
323 long since been overwritten with other values.
324
325 Okay, nothing actively broken so far. Now, let garbage collection
326 run, triggered by another thread.
327
328 The thread calling foo is out of Guile mode at the time, so the
329 garbage collector just scans a range of stack addresses. Too bad that
330 X isn't stored there. So the pointed-to storage goes onto the free
331 list, and I think you can see where things go from there.
332
333 Is there anything I'm missing that'll prevent this scenario from
334 happening? I mean, aside from, "well, suspend and scm_leave_guile
335 don't have many local variables, so they probably won't need to save
336 any registers on most systems, so we hope everything will wind up in
337 the jump buffer and we'll just get away with it"?
338
339 (And, going the other direction, if scm_leave_guile and suspend push
340 the stack pointer over onto a new page, and foo doesn't make further
341 function calls and thus the stack pointer no longer includes that
342 page, are we guaranteed that the kernel cannot release the now-unused
343 stack page that contains the top-of-stack pointer we just saved? I
344 don't know if any OS actually does that. If it does, we could get
345 faults in garbage collection.)
346
347 I don't think scm_without_guile has to have this problem, as it gets
348 more control over the stack handling -- but it should call setjmp
349 itself. I'd probably try something like:
350
351 // record top of stack for the GC
352 t->top = SCM_STACK_PTR (&t);
353 // save registers.
354 SCM_FLUSH_REGISTER_WINDOWS;
355 setjmp (t->regs);
356 res = func(data);
357 scm_enter_guile (t);
358
359 ... though even that's making some assumptions about the stack
360 ordering of local variables versus caller-saved registers.
361
362 For something like scm_leave_guile to work, I don't think it can just
363 rely on invalidated jump buffers. A valid jump buffer, and a handle
364 on the stack state at the point when the jump buffer was initialized,
365 together, would work fine, but I think then we're talking about macros
366 invoking setjmp in the caller's stack frame, and requiring that the
367 caller of scm_leave_guile also call scm_enter_guile before returning,
368 kind of like pthread_cleanup_push/pop calls that have to be paired up
369 in a function. (In fact, the pthread ones have to be paired up
370 syntactically, as if they might expand to a compound statement
371 incorporating the user's code, and invoking a compiler's
372 exception-handling primitives. Which might be something to think
373 about for cases where Guile is used with C++ exceptions or
374 pthread_cancel.)
375*/
376
377scm_i_pthread_key_t scm_i_thread_key;
378
379static void
380resume (scm_i_thread *t)
381{
382 t->top = NULL;
383 if (t->clear_freelists_p)
384 {
385 *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
386 *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
387 t->clear_freelists_p = 0;
388 }
389}
390
391typedef void* scm_t_guile_ticket;
392
393static void
394scm_enter_guile (scm_t_guile_ticket ticket)
395{
396 scm_i_thread *t = (scm_i_thread *)ticket;
397 if (t)
398 {
399 scm_i_pthread_mutex_lock (&t->heap_mutex);
400 resume (t);
401 }
402}
403
404static scm_i_thread *
405suspend (void)
406{
407 scm_i_thread *t = SCM_I_CURRENT_THREAD;
408
409 /* record top of stack for the GC */
410 t->top = SCM_STACK_PTR (&t);
411 /* save registers. */
412 SCM_FLUSH_REGISTER_WINDOWS;
413 setjmp (t->regs);
414 return t;
415}
416
417static scm_t_guile_ticket
418scm_leave_guile ()
419{
420 scm_i_thread *t = suspend ();
421 scm_i_pthread_mutex_unlock (&t->heap_mutex);
422 return (scm_t_guile_ticket) t;
423}
424
425static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
426static scm_i_thread *all_threads = NULL;
427static int thread_count;
428
429static SCM scm_i_default_dynamic_state;
430
431/* Perform first stage of thread initialisation, in non-guile mode.
432 */
433static void
434guilify_self_1 (SCM_STACKITEM *base)
435{
436 scm_i_thread *t = malloc (sizeof (scm_i_thread));
437
438 t->pthread = scm_i_pthread_self ();
439 t->handle = SCM_BOOL_F;
440 t->result = SCM_BOOL_F;
441 t->cleanup_handler = SCM_BOOL_F;
442 t->mutexes = SCM_EOL;
443 t->join_queue = SCM_EOL;
444 t->dynamic_state = SCM_BOOL_F;
445 t->dynwinds = SCM_EOL;
446 t->active_asyncs = SCM_EOL;
447 t->block_asyncs = 1;
448 t->pending_asyncs = 1;
449 t->last_debug_frame = NULL;
450 t->base = base;
451#ifdef __ia64__
452 /* Calculate and store off the base of this thread's register
453 backing store (RBS). Unfortunately our implementation(s) of
454 scm_ia64_register_backing_store_base are only reliable for the
455 main thread. For other threads, therefore, find out the current
456 top of the RBS, and use that as a maximum. */
457 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
458 {
459 ucontext_t ctx;
460 void *bsp;
461 getcontext (&ctx);
462 bsp = scm_ia64_ar_bsp (&ctx);
463 if (t->register_backing_store_base > bsp)
464 t->register_backing_store_base = bsp;
465 }
466#endif
467 t->continuation_root = SCM_EOL;
468 t->continuation_base = base;
469 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
470 t->sleep_mutex = NULL;
471 t->sleep_object = SCM_BOOL_F;
472 t->sleep_fd = -1;
473 /* XXX - check for errors. */
474 pipe (t->sleep_pipe);
475 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
476 scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
477 t->clear_freelists_p = 0;
478 t->gc_running_p = 0;
479 t->canceled = 0;
480 t->exited = 0;
481
482 t->freelist = SCM_EOL;
483 t->freelist2 = SCM_EOL;
484 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
485 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
486
487 scm_i_pthread_setspecific (scm_i_thread_key, t);
488
489 scm_i_pthread_mutex_lock (&t->heap_mutex);
490
491 scm_i_pthread_mutex_lock (&thread_admin_mutex);
492 t->next_thread = all_threads;
493 all_threads = t;
494 thread_count++;
495 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
496}
497
498/* Perform second stage of thread initialisation, in guile mode.
499 */
500static void
501guilify_self_2 (SCM parent)
502{
503 scm_i_thread *t = SCM_I_CURRENT_THREAD;
504
505 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
506 scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
507 t->continuation_root = scm_cons (t->handle, SCM_EOL);
508 t->continuation_base = t->base;
509
510 if (scm_is_true (parent))
511 t->dynamic_state = scm_make_dynamic_state (parent);
512 else
513 t->dynamic_state = scm_i_make_initial_dynamic_state ();
514
515 t->join_queue = make_queue ();
516 t->block_asyncs = 0;
517}
518
519\f
520/*** Fat mutexes */
521
522/* We implement our own mutex type since we want them to be 'fair', we
523 want to do fancy things while waiting for them (like running
524 asyncs) and we might want to add things that are nice for
525 debugging.
526*/
527
528typedef struct {
529 scm_i_pthread_mutex_t lock;
530 SCM owner;
531 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
532
533 int recursive; /* allow recursive locking? */
534 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
535 int allow_external_unlock; /* is it an error to unlock a mutex that is not
536 owned by the current thread? */
537
538 SCM waiting; /* the threads waiting for this mutex. */
539} fat_mutex;
540
541#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
542#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
543
544/* Perform thread tear-down, in guile mode.
545 */
546static void *
547do_thread_exit (void *v)
548{
549 scm_i_thread *t = (scm_i_thread *) v;
550
551 if (!scm_is_false (t->cleanup_handler))
552 {
553 SCM ptr = t->cleanup_handler;
554
555 t->cleanup_handler = SCM_BOOL_F;
556 t->result = scm_internal_catch (SCM_BOOL_T,
557 (scm_t_catch_body) scm_call_0, ptr,
558 scm_handle_by_message_noexit, NULL);
559 }
560
561 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
562
563 t->exited = 1;
564 close (t->sleep_pipe[0]);
565 close (t->sleep_pipe[1]);
566 while (scm_is_true (unblock_from_queue (t->join_queue)))
567 ;
568
569 while (!scm_is_null (t->mutexes))
570 {
571 SCM mutex = SCM_CAR (t->mutexes);
572 fat_mutex *m = SCM_MUTEX_DATA (mutex);
573 scm_i_pthread_mutex_lock (&m->lock);
574
575 unblock_from_queue (m->waiting);
576
577 scm_i_pthread_mutex_unlock (&m->lock);
578 t->mutexes = SCM_CDR (t->mutexes);
579 }
580
581 scm_i_pthread_mutex_unlock (&t->admin_mutex);
582
583 return NULL;
584}
585
586static void
587on_thread_exit (void *v)
588{
589 /* This handler is executed in non-guile mode. */
590 scm_i_thread *t = (scm_i_thread *) v, **tp;
591
592 scm_i_pthread_setspecific (scm_i_thread_key, v);
593
594 /* Ensure the signal handling thread has been launched, because we might be
595 shutting it down. */
596 scm_i_ensure_signal_delivery_thread ();
597
598 /* Unblocking the joining threads needs to happen in guile mode
599 since the queue is a SCM data structure. */
600 scm_with_guile (do_thread_exit, v);
601
 602 /* Removing ourselves from the list of all threads needs to happen in
603 non-guile mode since all SCM values on our stack become
604 unprotected once we are no longer in the list. */
605 scm_i_pthread_mutex_lock (&thread_admin_mutex);
606 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
607 if (*tp == t)
608 {
609 *tp = t->next_thread;
610 break;
611 }
612 thread_count--;
613
614 /* If there's only one other thread, it could be the signal delivery
615 thread, so we need to notify it to shut down by closing its read pipe.
616 If it's not the signal delivery thread, then closing the read pipe isn't
617 going to hurt. */
618 if (thread_count <= 1)
619 scm_i_close_signal_pipe ();
620
621 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
622
623 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
624}
625
626static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
627
628static void
629init_thread_key (void)
630{
631 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
632}
633
634/* Perform any initializations necessary to bring the current thread
635 into guile mode, initializing Guile itself, if necessary.
636
637 BASE is the stack base to use with GC.
638
 639 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
640 which case the default dynamic state is used.
641
642 Return zero when the thread was in guile mode already; otherwise
643 return 1.
644*/
645
646static int
647scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
648{
649 scm_i_thread *t;
650
651 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
652
653 if ((t = SCM_I_CURRENT_THREAD) == NULL)
654 {
655 /* This thread has not been guilified yet.
656 */
657
658 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
659 if (scm_initialized_p == 0)
660 {
661 /* First thread ever to enter Guile. Run the full
662 initialization.
663 */
664 scm_i_init_guile (base);
665 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
666 }
667 else
668 {
669 /* Guile is already initialized, but this thread enters it for
670 the first time. Only initialize this thread.
671 */
672 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
673 guilify_self_1 (base);
674 guilify_self_2 (parent);
675 }
676 return 1;
677 }
678 else if (t->top)
679 {
680 /* This thread is already guilified but not in guile mode, just
681 resume it.
682
683 XXX - base might be lower than when this thread was first
684 guilified.
685 */
686 scm_enter_guile ((scm_t_guile_ticket) t);
687 return 1;
688 }
689 else
690 {
691 /* Thread is already in guile mode. Nothing to do.
692 */
693 return 0;
694 }
695}
696
697#if SCM_USE_PTHREAD_THREADS
698
699#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
700/* This method for GNU/Linux and perhaps some other systems.
701 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
702 available on them. */
703#define HAVE_GET_THREAD_STACK_BASE
704
705static SCM_STACKITEM *
706get_thread_stack_base ()
707{
708 pthread_attr_t attr;
709 void *start, *end;
710 size_t size;
711
712 pthread_getattr_np (pthread_self (), &attr);
713 pthread_attr_getstack (&attr, &start, &size);
714 end = (char *)start + size;
715
716 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
717 for the main thread, but we can use scm_get_stack_base in that
718 case.
719 */
720
721#ifndef PTHREAD_ATTR_GETSTACK_WORKS
722 if ((void *)&attr < start || (void *)&attr >= end)
723 return scm_get_stack_base ();
724 else
725#endif
726 {
727#if SCM_STACK_GROWS_UP
728 return start;
729#else
730 return end;
731#endif
732 }
733}
734
735#elif HAVE_PTHREAD_GET_STACKADDR_NP
736/* This method for MacOS X.
737 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
738 but as of 2006 there's nothing obvious at apple.com. */
739#define HAVE_GET_THREAD_STACK_BASE
740static SCM_STACKITEM *
741get_thread_stack_base ()
742{
743 return pthread_get_stackaddr_np (pthread_self ());
744}
745
746#elif defined (__MINGW32__)
747/* This method for mingw. In mingw the basic scm_get_stack_base can be used
748 in any thread. We don't like hard-coding the name of a system, but there
749 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
750 work. */
751#define HAVE_GET_THREAD_STACK_BASE
752static SCM_STACKITEM *
753get_thread_stack_base ()
754{
755 return scm_get_stack_base ();
756}
757
758#endif /* pthread methods of get_thread_stack_base */
759
760#else /* !SCM_USE_PTHREAD_THREADS */
761
762#define HAVE_GET_THREAD_STACK_BASE
763
764static SCM_STACKITEM *
765get_thread_stack_base ()
766{
767 return scm_get_stack_base ();
768}
769
770#endif /* !SCM_USE_PTHREAD_THREADS */
771
772#ifdef HAVE_GET_THREAD_STACK_BASE
773
774void
775scm_init_guile ()
776{
777 scm_i_init_thread_for_guile (get_thread_stack_base (),
778 scm_i_default_dynamic_state);
779}
780
781#endif
782
783void *
784scm_with_guile (void *(*func)(void *), void *data)
785{
786 return scm_i_with_guile_and_parent (func, data,
787 scm_i_default_dynamic_state);
788}
789
790SCM_UNUSED static void
791scm_leave_guile_cleanup (void *x)
792{
793 scm_leave_guile ();
794}
795
796void *
797scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
798{
799 void *res;
800 int really_entered;
801 SCM_STACKITEM base_item;
802
803 really_entered = scm_i_init_thread_for_guile (&base_item, parent);
804 if (really_entered)
805 {
806 scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
807 res = scm_c_with_continuation_barrier (func, data);
808 scm_i_pthread_cleanup_pop (0);
809 scm_leave_guile ();
810 }
811 else
812 res = scm_c_with_continuation_barrier (func, data);
813
814 return res;
815}
816
817void *
818scm_without_guile (void *(*func)(void *), void *data)
819{
820 void *res;
821 scm_t_guile_ticket t;
822 t = scm_leave_guile ();
823 res = func (data);
824 scm_enter_guile (t);
825 return res;
826}
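/* Illustrative sketch (not part of the original file): wrapping a
   blocking system call in scm_without_guile so other threads can run GC
   while this thread sleeps in the kernel.  The struct, the callback and
   the wrapper function are all made up for the example.  */
#if 0
struct example_read_args { int fd; char *buf; size_t len; ssize_t result; };

static void *
example_blocking_read (void *data)
{
  struct example_read_args *a = data;
  /* This runs outside guile mode, so it must not touch SCM values or
     allocate from the GC heap.  */
  a->result = read (a->fd, a->buf, a->len);
  return NULL;
}

static ssize_t
example_read_without_guile (int fd, char *buf, size_t len)
{
  struct example_read_args a = { fd, buf, len, 0 };
  scm_without_guile (example_blocking_read, &a);
  return a.result;
}
#endif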
827
828/*** Thread creation */
829
830typedef struct {
831 SCM parent;
832 SCM thunk;
833 SCM handler;
834 SCM thread;
835 scm_i_pthread_mutex_t mutex;
836 scm_i_pthread_cond_t cond;
837} launch_data;
838
839static void *
840really_launch (void *d)
841{
842 launch_data *data = (launch_data *)d;
843 SCM thunk = data->thunk, handler = data->handler;
844 scm_i_thread *t;
845
846 t = SCM_I_CURRENT_THREAD;
847
848 scm_i_scm_pthread_mutex_lock (&data->mutex);
849 data->thread = scm_current_thread ();
850 scm_i_pthread_cond_signal (&data->cond);
851 scm_i_pthread_mutex_unlock (&data->mutex);
852
853 if (SCM_UNBNDP (handler))
854 t->result = scm_call_0 (thunk);
855 else
856 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
857
858 return 0;
859}
860
861static void *
862launch_thread (void *d)
863{
864 launch_data *data = (launch_data *)d;
865 scm_i_pthread_detach (scm_i_pthread_self ());
866 scm_i_with_guile_and_parent (really_launch, d, data->parent);
867 return NULL;
868}
869
870SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
871 (SCM thunk, SCM handler),
872 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
873 "returning a new thread object representing the thread. The procedure\n"
874 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
875 "\n"
876 "When @var{handler} is specified, then @var{thunk} is called from\n"
877 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
878 "handler. This catch is established inside the continuation barrier.\n"
879 "\n"
880 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
881 "the @emph{exit value} of the thread and the thread is terminated.")
882#define FUNC_NAME s_scm_call_with_new_thread
883{
884 launch_data data;
885 scm_i_pthread_t id;
886 int err;
887
888 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
889 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
890 handler, SCM_ARG2, FUNC_NAME);
891
892 data.parent = scm_current_dynamic_state ();
893 data.thunk = thunk;
894 data.handler = handler;
895 data.thread = SCM_BOOL_F;
896 scm_i_pthread_mutex_init (&data.mutex, NULL);
897 scm_i_pthread_cond_init (&data.cond, NULL);
898
899 scm_i_scm_pthread_mutex_lock (&data.mutex);
900 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
901 if (err)
902 {
903 scm_i_pthread_mutex_unlock (&data.mutex);
904 errno = err;
905 scm_syserror (NULL);
906 }
907 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
908 scm_i_pthread_mutex_unlock (&data.mutex);
909
910 return data.thread;
911}
912#undef FUNC_NAME
913
914typedef struct {
915 SCM parent;
916 scm_t_catch_body body;
917 void *body_data;
918 scm_t_catch_handler handler;
919 void *handler_data;
920 SCM thread;
921 scm_i_pthread_mutex_t mutex;
922 scm_i_pthread_cond_t cond;
923} spawn_data;
924
925static void *
926really_spawn (void *d)
927{
928 spawn_data *data = (spawn_data *)d;
929 scm_t_catch_body body = data->body;
930 void *body_data = data->body_data;
931 scm_t_catch_handler handler = data->handler;
932 void *handler_data = data->handler_data;
933 scm_i_thread *t = SCM_I_CURRENT_THREAD;
934
935 scm_i_scm_pthread_mutex_lock (&data->mutex);
936 data->thread = scm_current_thread ();
937 scm_i_pthread_cond_signal (&data->cond);
938 scm_i_pthread_mutex_unlock (&data->mutex);
939
940 if (handler == NULL)
941 t->result = body (body_data);
942 else
943 t->result = scm_internal_catch (SCM_BOOL_T,
944 body, body_data,
945 handler, handler_data);
946
947 return 0;
948}
949
950static void *
951spawn_thread (void *d)
952{
953 spawn_data *data = (spawn_data *)d;
954 scm_i_pthread_detach (scm_i_pthread_self ());
955 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
956 return NULL;
957}
958
959SCM
960scm_spawn_thread (scm_t_catch_body body, void *body_data,
961 scm_t_catch_handler handler, void *handler_data)
962{
963 spawn_data data;
964 scm_i_pthread_t id;
965 int err;
966
967 data.parent = scm_current_dynamic_state ();
968 data.body = body;
969 data.body_data = body_data;
970 data.handler = handler;
971 data.handler_data = handler_data;
972 data.thread = SCM_BOOL_F;
973 scm_i_pthread_mutex_init (&data.mutex, NULL);
974 scm_i_pthread_cond_init (&data.cond, NULL);
975
976 scm_i_scm_pthread_mutex_lock (&data.mutex);
977 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
978 if (err)
979 {
980 scm_i_pthread_mutex_unlock (&data.mutex);
981 errno = err;
982 scm_syserror (NULL);
983 }
984 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
985 scm_i_pthread_mutex_unlock (&data.mutex);
986
987 return data.thread;
988}
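/* Illustrative sketch (not part of the original file): creating a thread
   from C with scm_spawn_thread.  worker_body and worker_handler are
   hypothetical; their signatures follow scm_t_catch_body and
   scm_t_catch_handler.  */
#if 0
static SCM
worker_body (void *data)
{
  return scm_from_int (42);        /* becomes the thread's exit value */
}

static SCM
worker_handler (void *data, SCM tag, SCM args)
{
  return SCM_BOOL_F;               /* exit value when the body throws */
}

static SCM
example_spawn (void)
{
  return scm_spawn_thread (worker_body, NULL, worker_handler, NULL);
}
#endif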
989
990SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
991 (),
992"Move the calling thread to the end of the scheduling queue.")
993#define FUNC_NAME s_scm_yield
994{
995 return scm_from_bool (scm_i_sched_yield ());
996}
997#undef FUNC_NAME
998
999SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1000 (SCM thread),
1001"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1002"cannot be the current thread, and if @var{thread} has already terminated or "
1003"been signaled to terminate, this function is a no-op.")
1004#define FUNC_NAME s_scm_cancel_thread
1005{
1006 scm_i_thread *t = NULL;
1007
1008 SCM_VALIDATE_THREAD (1, thread);
1009 t = SCM_I_THREAD_DATA (thread);
1010 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1011 if (!t->canceled)
1012 {
1013 t->canceled = 1;
1014 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1015 scm_i_pthread_cancel (t->pthread);
1016 }
1017 else
1018 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1019
1020 return SCM_UNSPECIFIED;
1021}
1022#undef FUNC_NAME
1023
1024SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1025 (SCM thread, SCM proc),
1026"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1027"This handler will be called when the thread exits.")
1028#define FUNC_NAME s_scm_set_thread_cleanup_x
1029{
1030 scm_i_thread *t;
1031
1032 SCM_VALIDATE_THREAD (1, thread);
1033 if (!scm_is_false (proc))
1034 SCM_VALIDATE_THUNK (2, proc);
1035
1036 t = SCM_I_THREAD_DATA (thread);
1037 scm_i_pthread_mutex_lock (&t->admin_mutex);
1038
1039 if (!(t->exited || t->canceled))
1040 t->cleanup_handler = proc;
1041
1042 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1043
1044 return SCM_UNSPECIFIED;
1045}
1046#undef FUNC_NAME
1047
1048SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1049 (SCM thread),
1050"Return the cleanup handler installed for the thread @var{thread}.")
1051#define FUNC_NAME s_scm_thread_cleanup
1052{
1053 scm_i_thread *t;
1054 SCM ret;
1055
1056 SCM_VALIDATE_THREAD (1, thread);
1057
1058 t = SCM_I_THREAD_DATA (thread);
1059 scm_i_pthread_mutex_lock (&t->admin_mutex);
1060 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1061 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1062
1063 return ret;
1064}
1065#undef FUNC_NAME
1066
1067SCM scm_join_thread (SCM thread)
1068{
1069 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1070}
1071
1072SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1073 (SCM thread, SCM timeout, SCM timeoutval),
1074"Suspend execution of the calling thread until the target @var{thread} "
1075"terminates, unless the target @var{thread} has already terminated. ")
1076#define FUNC_NAME s_scm_join_thread_timed
1077{
1078 scm_i_thread *t;
1079 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1080 SCM res = SCM_BOOL_F;
1081
1082 if (! (SCM_UNBNDP (timeoutval)))
1083 res = timeoutval;
1084
1085 SCM_VALIDATE_THREAD (1, thread);
1086 if (scm_is_eq (scm_current_thread (), thread))
1087 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
1088
1089 t = SCM_I_THREAD_DATA (thread);
1090 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1091
1092 if (! SCM_UNBNDP (timeout))
1093 {
1094 to_timespec (timeout, &ctimeout);
1095 timeout_ptr = &ctimeout;
1096 }
1097
1098 if (t->exited)
1099 res = t->result;
1100 else
1101 {
1102 while (1)
1103 {
1104 int err = block_self (t->join_queue, thread, &t->admin_mutex,
1105 timeout_ptr);
1106 if (err == 0)
1107 {
1108 if (t->exited)
1109 {
1110 res = t->result;
1111 break;
1112 }
1113 }
1114 else if (err == ETIMEDOUT)
1115 break;
1116
1117 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1118 SCM_TICK;
1119 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1120 }
1121 }
1122
1123 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1124
1125 return res;
1126}
1127#undef FUNC_NAME
1128
1129SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1130 (SCM obj),
1131 "Return @code{#t} if @var{obj} is a thread.")
1132#define FUNC_NAME s_scm_thread_p
1133{
1134 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1135}
1136#undef FUNC_NAME
1137
1138static SCM
1139fat_mutex_mark (SCM mx)
1140{
1141 fat_mutex *m = SCM_MUTEX_DATA (mx);
1142 scm_gc_mark (m->owner);
1143 return m->waiting;
1144}
1145
1146static size_t
1147fat_mutex_free (SCM mx)
1148{
1149 fat_mutex *m = SCM_MUTEX_DATA (mx);
1150 scm_i_pthread_mutex_destroy (&m->lock);
1151 scm_gc_free (m, sizeof (fat_mutex), "mutex");
1152 return 0;
1153}
1154
1155static int
1156fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1157{
1158 fat_mutex *m = SCM_MUTEX_DATA (mx);
1159 scm_puts ("#<mutex ", port);
1160 scm_uintprint ((scm_t_bits)m, 16, port);
1161 scm_puts (">", port);
1162 return 1;
1163}
1164
1165static SCM
1166make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
1167{
1168 fat_mutex *m;
1169 SCM mx;
1170
1171 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1172 scm_i_pthread_mutex_init (&m->lock, NULL);
1173 m->owner = SCM_BOOL_F;
1174 m->level = 0;
1175
1176 m->recursive = recursive;
1177 m->unchecked_unlock = unchecked_unlock;
1178 m->allow_external_unlock = external_unlock;
1179
1180 m->waiting = SCM_EOL;
1181 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1182 m->waiting = make_queue ();
1183 return mx;
1184}
1185
1186SCM scm_make_mutex (void)
1187{
1188 return scm_make_mutex_with_flags (SCM_EOL);
1189}
1190
1191SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1192SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1193SCM_SYMBOL (recursive_sym, "recursive");
1194
1195SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1196 (SCM flags),
1197 "Create a new mutex. ")
1198#define FUNC_NAME s_scm_make_mutex_with_flags
1199{
1200 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1201
1202 SCM ptr = flags;
1203 while (! scm_is_null (ptr))
1204 {
1205 SCM flag = SCM_CAR (ptr);
1206 if (scm_is_eq (flag, unchecked_unlock_sym))
1207 unchecked_unlock = 1;
1208 else if (scm_is_eq (flag, allow_external_unlock_sym))
1209 external_unlock = 1;
1210 else if (scm_is_eq (flag, recursive_sym))
1211 recursive = 1;
1212 else
1213 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1214 ptr = SCM_CDR (ptr);
1215 }
1216 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1217}
1218#undef FUNC_NAME
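/* Illustrative sketch (not part of the original file): requesting mutex
   flags from C.  The symbol names are the ones recognised by the flag
   loop above; the wrapper function is made up for the example.  */
#if 0
static SCM
example_make_lenient_mutex (void)
{
  return scm_make_mutex_with_flags
    (scm_list_2 (scm_from_locale_symbol ("allow-external-unlock"),
                 scm_from_locale_symbol ("unchecked-unlock")));
}
#endif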
1219
1220SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1221 (void),
1222 "Create a new recursive mutex. ")
1223#define FUNC_NAME s_scm_make_recursive_mutex
1224{
1225 return make_fat_mutex (1, 0, 0);
1226}
1227#undef FUNC_NAME
1228
1229SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1230
1231static SCM
1232fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
1233{
1234 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1235
1236 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
1237 SCM err = SCM_BOOL_F;
1238
1239 struct timeval current_time;
1240
1241 scm_i_scm_pthread_mutex_lock (&m->lock);
1242
1243 while (1)
1244 {
1245 if (m->level == 0)
1246 {
1247 m->owner = new_owner;
1248 m->level++;
1249
1250 if (SCM_I_IS_THREAD (new_owner))
1251 {
1252 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
1253 scm_i_pthread_mutex_lock (&t->admin_mutex);
1254 t->mutexes = scm_cons (mutex, t->mutexes);
1255 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1256 }
1257 *ret = 1;
1258 break;
1259 }
1260 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1261 {
1262 m->owner = new_owner;
1263 err = scm_cons (scm_abandoned_mutex_error_key,
1264 scm_from_locale_string ("lock obtained on abandoned "
1265 "mutex"));
1266 *ret = 1;
1267 break;
1268 }
1269 else if (scm_is_eq (m->owner, new_owner))
1270 {
1271 if (m->recursive)
1272 {
1273 m->level++;
1274 *ret = 1;
1275 }
1276 else
1277 {
1278 err = scm_cons (scm_misc_error_key,
1279 scm_from_locale_string ("mutex already locked "
1280 "by thread"));
1281 *ret = 0;
1282 }
1283 break;
1284 }
1285 else
1286 {
1287 if (timeout != NULL)
1288 {
1289 gettimeofday (&current_time, NULL);
1290 if (current_time.tv_sec > timeout->tv_sec ||
1291 (current_time.tv_sec == timeout->tv_sec &&
1292 current_time.tv_usec * 1000 > timeout->tv_nsec))
1293 {
1294 *ret = 0;
1295 break;
1296 }
1297 }
1298 block_self (m->waiting, mutex, &m->lock, timeout);
1299 scm_i_pthread_mutex_unlock (&m->lock);
1300 SCM_TICK;
1301 scm_i_scm_pthread_mutex_lock (&m->lock);
1302 }
1303 }
1304 scm_i_pthread_mutex_unlock (&m->lock);
1305 return err;
1306}
1307
1308SCM scm_lock_mutex (SCM mx)
1309{
1310 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1311}
1312
1313SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1314 (SCM m, SCM timeout, SCM owner),
1315"Lock @var{mutex}. If the mutex is already locked, the calling thread "
1316"blocks until the mutex becomes available. The function returns when "
1317"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1318"a thread already owns will succeed right away and will not block the "
1319"thread. That is, Guile's mutexes are @emph{recursive}. ")
1320#define FUNC_NAME s_scm_lock_mutex_timed
1321{
1322 SCM exception;
1323 int ret = 0;
1324 scm_t_timespec cwaittime, *waittime = NULL;
1325
1326 SCM_VALIDATE_MUTEX (1, m);
1327
1328 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1329 {
1330 to_timespec (timeout, &cwaittime);
1331 waittime = &cwaittime;
1332 }
1333
1334 exception = fat_mutex_lock (m, waittime, owner, &ret);
1335 if (!scm_is_false (exception))
1336 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1337 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1338}
1339#undef FUNC_NAME
1340
1341void
1342scm_dynwind_lock_mutex (SCM mutex)
1343{
1344 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
1345 SCM_F_WIND_EXPLICITLY);
1346 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
1347 SCM_F_WIND_EXPLICITLY);
1348}
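/* Illustrative sketch (not part of the original file): the intended use
   of scm_dynwind_lock_mutex inside a dynwind context, so the mutex is
   released even if the critical section throws or a continuation leaves
   the dynamic extent.  example_with_mutex is hypothetical; MUTEX is a
   fat mutex object as created by scm_make_mutex.  */
#if 0
static void
example_with_mutex (SCM mutex)
{
  scm_dynwind_begin (0);
  scm_dynwind_lock_mutex (mutex);
  /* ... critical section; may throw ... */
  scm_dynwind_end ();       /* unlocks MUTEX via the unwind handler */
}
#endif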
1349
1350SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1351 (SCM mutex),
1352"Try to lock @var{mutex}. If the mutex is already locked by someone "
1353"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1354#define FUNC_NAME s_scm_try_mutex
1355{
1356 SCM exception;
1357 int ret = 0;
1358 scm_t_timespec cwaittime, *waittime = NULL;
1359
1360 SCM_VALIDATE_MUTEX (1, mutex);
1361
1362 to_timespec (scm_from_int(0), &cwaittime);
1363 waittime = &cwaittime;
1364
1365 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1366 if (!scm_is_false (exception))
1367 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1368 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1369}
1370#undef FUNC_NAME
1371
1372/*** Fat condition variables */
1373
1374typedef struct {
1375 scm_i_pthread_mutex_t lock;
1376 SCM waiting; /* the threads waiting for this condition. */
1377} fat_cond;
1378
1379#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1380#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1381
1382static int
1383fat_mutex_unlock (SCM mutex, SCM cond,
1384 const scm_t_timespec *waittime, int relock)
1385{
1386 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1387 fat_cond *c = NULL;
1388 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1389 int err = 0, ret = 0;
1390
1391 scm_i_scm_pthread_mutex_lock (&m->lock);
1392
1393 SCM owner = m->owner;
1394
1395 if (!scm_is_eq (owner, scm_current_thread ()))
1396 {
1397 if (m->level == 0)
1398 {
1399 if (!m->unchecked_unlock)
1400 {
1401 scm_i_pthread_mutex_unlock (&m->lock);
1402 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1403 }
1404 owner = scm_current_thread ();
1405 }
1406 else if (!m->allow_external_unlock)
1407 {
1408 scm_i_pthread_mutex_unlock (&m->lock);
1409 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1410 }
1411 }
1412
1413 if (! (SCM_UNBNDP (cond)))
1414 {
1415 c = SCM_CONDVAR_DATA (cond);
1416 while (1)
1417 {
1418 int brk = 0;
1419
1420 scm_i_scm_pthread_mutex_lock (&c->lock);
1421 if (m->level > 0)
1422 m->level--;
1423 if (m->level == 0)
1424 m->owner = unblock_from_queue (m->waiting);
1425
1426 scm_i_pthread_mutex_unlock (&m->lock);
1427
1428 t->block_asyncs++;
1429
1430 err = block_self (c->waiting, cond, &c->lock, waittime);
1431
1432 if (err == 0)
1433 {
1434 ret = 1;
1435 brk = 1;
1436 }
1437 else if (err == ETIMEDOUT)
1438 {
1439 ret = 0;
1440 brk = 1;
1441 }
1442 else if (err != EINTR)
1443 {
1444 errno = err;
1445 scm_i_pthread_mutex_unlock (&c->lock);
1446 scm_syserror (NULL);
1447 }
1448
1449 if (brk)
1450 {
1451 if (relock)
1452 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
1453 scm_i_pthread_mutex_unlock (&c->lock);
1454 break;
1455 }
1456
1457 scm_i_pthread_mutex_unlock (&c->lock);
1458
1459 t->block_asyncs--;
1460 scm_async_click ();
1461
1462 scm_remember_upto_here_2 (cond, mutex);
1463
1464 scm_i_scm_pthread_mutex_lock (&m->lock);
1465 }
1466 }
1467 else
1468 {
1469 if (m->level > 0)
1470 m->level--;
1471 if (m->level == 0)
1472 m->owner = unblock_from_queue (m->waiting);
1473
1474 scm_i_pthread_mutex_unlock (&m->lock);
1475 ret = 1;
1476 }
1477
1478 return ret;
1479}
1480
1481SCM scm_unlock_mutex (SCM mx)
1482{
1483 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1484}
1485
1486SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1487 (SCM mx, SCM cond, SCM timeout),
1488"Unlocks @var{mutex} if the calling thread owns the lock on "
1489"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1490"thread results in undefined behaviour. Once a mutex has been unlocked, "
1491"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1492"lock. Every call to @code{lock-mutex} by this thread must be matched "
1493"with a call to @code{unlock-mutex}. Only the last call to "
1494"@code{unlock-mutex} will actually unlock the mutex. ")
1495#define FUNC_NAME s_scm_unlock_mutex_timed
1496{
1497 scm_t_timespec cwaittime, *waittime = NULL;
1498
1499 SCM_VALIDATE_MUTEX (1, mx);
1500 if (! (SCM_UNBNDP (cond)))
1501 {
1502 SCM_VALIDATE_CONDVAR (2, cond);
1503
1504 if (! (SCM_UNBNDP (timeout)))
1505 {
1506 to_timespec (timeout, &cwaittime);
1507 waittime = &cwaittime;
1508 }
1509 }
1510
1511 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1512}
1513#undef FUNC_NAME
1514
1515SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1516 (SCM obj),
1517 "Return @code{#t} if @var{obj} is a mutex.")
1518#define FUNC_NAME s_scm_mutex_p
1519{
1520 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1521}
1522#undef FUNC_NAME
1523
1524SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1525 (SCM mx),
1526 "Return the thread owning @var{mx}, or @code{#f}.")
1527#define FUNC_NAME s_scm_mutex_owner
1528{
1529 SCM owner;
1530 fat_mutex *m = NULL;
1531
1532 SCM_VALIDATE_MUTEX (1, mx);
1533 m = SCM_MUTEX_DATA (mx);
1534 scm_i_pthread_mutex_lock (&m->lock);
1535 owner = m->owner;
1536 scm_i_pthread_mutex_unlock (&m->lock);
1537
1538 return owner;
1539}
1540#undef FUNC_NAME
1541
1542SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1543 (SCM mx),
1544 "Return the lock level of mutex @var{mx}.")
1545#define FUNC_NAME s_scm_mutex_level
1546{
1547 SCM_VALIDATE_MUTEX (1, mx);
1548 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1549}
1550#undef FUNC_NAME
1551
1552SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1553 (SCM mx),
1554 "Returns @code{#t} if the mutex @var{mx} is locked.")
1555#define FUNC_NAME s_scm_mutex_locked_p
1556{
1557 SCM_VALIDATE_MUTEX (1, mx);
1558 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1559}
1560#undef FUNC_NAME
1561
1562static SCM
1563fat_cond_mark (SCM cv)
1564{
1565 fat_cond *c = SCM_CONDVAR_DATA (cv);
1566 return c->waiting;
1567}
1568
1569static size_t
1570fat_cond_free (SCM mx)
1571{
1572 fat_cond *c = SCM_CONDVAR_DATA (mx);
1573 scm_i_pthread_mutex_destroy (&c->lock);
1574 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1575 return 0;
1576}
1577
1578static int
1579fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1580{
1581 fat_cond *c = SCM_CONDVAR_DATA (cv);
1582 scm_puts ("#<condition-variable ", port);
1583 scm_uintprint ((scm_t_bits)c, 16, port);
1584 scm_puts (">", port);
1585 return 1;
1586}
1587
1588SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1589 (void),
1590 "Make a new condition variable.")
1591#define FUNC_NAME s_scm_make_condition_variable
1592{
1593 fat_cond *c;
1594 SCM cv;
1595
1596 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
1597 scm_i_pthread_mutex_init (&c->lock, 0);
1598 c->waiting = SCM_EOL;
1599 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1600 c->waiting = make_queue ();
1601 return cv;
1602}
1603#undef FUNC_NAME
1604
1605SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1606 (SCM cv, SCM mx, SCM t),
1607"Wait until @var{cond-var} has been signalled. While waiting, "
1608"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1609"is locked again when this function returns. When @var{time} is given, "
1610"it specifies a point in time where the waiting should be aborted. It "
1611"can be either a integer as returned by @code{current-time} or a pair "
1612"as returned by @code{gettimeofday}. When the waiting is aborted the "
1613"mutex is locked and @code{#f} is returned. When the condition "
1614"variable is in fact signalled, the mutex is also locked and @code{#t} "
1615"is returned. ")
1616#define FUNC_NAME s_scm_timed_wait_condition_variable
1617{
1618 scm_t_timespec waittime, *waitptr = NULL;
1619
1620 SCM_VALIDATE_CONDVAR (1, cv);
1621 SCM_VALIDATE_MUTEX (2, mx);
1622
1623 if (!SCM_UNBNDP (t))
1624 {
1625 to_timespec (t, &waittime);
1626 waitptr = &waittime;
1627 }
1628
1629 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1630}
1631#undef FUNC_NAME
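/* Illustrative sketch (not part of the original file): the usual
   wait-in-a-loop pattern around scm_timed_wait_condition_variable,
   rechecking the condition after every wakeup.  example_wait_until_ready
   and ready_p are hypothetical; ready_p stands for state protected by
   MX.  */
#if 0
static void
example_wait_until_ready (SCM cv, SCM mx, int (*ready_p) (void))
{
  scm_lock_mutex (mx);
  while (!ready_p ())
    /* Unlocks MX while waiting, relocks it before returning.  */
    scm_timed_wait_condition_variable (cv, mx, SCM_UNDEFINED);
  scm_unlock_mutex (mx);
}
#endif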
1632
1633static void
1634fat_cond_signal (fat_cond *c)
1635{
1636 scm_i_scm_pthread_mutex_lock (&c->lock);
1637 unblock_from_queue (c->waiting);
1638 scm_i_pthread_mutex_unlock (&c->lock);
1639}
1640
1641SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1642 (SCM cv),
1643 "Wake up one thread that is waiting for @var{cv}")
1644#define FUNC_NAME s_scm_signal_condition_variable
1645{
1646 SCM_VALIDATE_CONDVAR (1, cv);
1647 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1648 return SCM_BOOL_T;
1649}
1650#undef FUNC_NAME
1651
1652static void
1653fat_cond_broadcast (fat_cond *c)
1654{
1655 scm_i_scm_pthread_mutex_lock (&c->lock);
1656 while (scm_is_true (unblock_from_queue (c->waiting)))
1657 ;
1658 scm_i_pthread_mutex_unlock (&c->lock);
1659}
1660
1661SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1662 (SCM cv),
1663 "Wake up all threads that are waiting for @var{cv}. ")
1664#define FUNC_NAME s_scm_broadcast_condition_variable
1665{
1666 SCM_VALIDATE_CONDVAR (1, cv);
1667 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1668 return SCM_BOOL_T;
1669}
1670#undef FUNC_NAME
1671
1672SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1673 (SCM obj),
1674 "Return @code{#t} if @var{obj} is a condition variable.")
1675#define FUNC_NAME s_scm_condition_variable_p
1676{
1677 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1678}
1679#undef FUNC_NAME
1680
1681/*** Marking stacks */
1682
1683/* XXX - what to do with this? Do we need to handle this for blocked
1684 threads as well?
1685*/
1686#ifdef __ia64__
1687# define SCM_MARK_BACKING_STORE() do { \
1688 ucontext_t ctx; \
1689 SCM_STACKITEM * top, * bot; \
1690 getcontext (&ctx); \
1691 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1692 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1693 / sizeof (SCM_STACKITEM))); \
1694 bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
1695 top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
1696 scm_mark_locations (bot, top - bot); } while (0)
1697#else
1698# define SCM_MARK_BACKING_STORE()
1699#endif
1700
1701void
1702scm_threads_mark_stacks (void)
1703{
1704 scm_i_thread *t;
1705 for (t = all_threads; t; t = t->next_thread)
1706 {
1707 /* Check that thread has indeed been suspended.
1708 */
1709 assert (t->top);
1710
1711 scm_gc_mark (t->handle);
1712
1713#if SCM_STACK_GROWS_UP
1714 scm_mark_locations (t->base, t->top - t->base);
1715#else
1716 scm_mark_locations (t->top, t->base - t->top);
1717#endif
1718 scm_mark_locations ((void *) &t->regs,
1719 ((size_t) sizeof(t->regs)
1720 / sizeof (SCM_STACKITEM)));
1721 }
1722
1723 SCM_MARK_BACKING_STORE ();
1724}
1725
1726/*** Select */
1727
1728int
1729scm_std_select (int nfds,
1730 SELECT_TYPE *readfds,
1731 SELECT_TYPE *writefds,
1732 SELECT_TYPE *exceptfds,
1733 struct timeval *timeout)
1734{
1735 fd_set my_readfds;
1736 int res, eno, wakeup_fd;
1737 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1738 scm_t_guile_ticket ticket;
1739
1740 if (readfds == NULL)
1741 {
1742 FD_ZERO (&my_readfds);
1743 readfds = &my_readfds;
1744 }
1745
1746 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1747 SCM_TICK;
1748
1749 wakeup_fd = t->sleep_pipe[0];
1750 ticket = scm_leave_guile ();
1751 FD_SET (wakeup_fd, readfds);
1752 if (wakeup_fd >= nfds)
1753 nfds = wakeup_fd+1;
1754 res = select (nfds, readfds, writefds, exceptfds, timeout);
1755 t->sleep_fd = -1;
1756 eno = errno;
1757 scm_enter_guile (ticket);
1758
1759 scm_i_reset_sleep (t);
1760
1761 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1762 {
1763 char dummy;
1764 read (wakeup_fd, &dummy, 1);
1765 FD_CLR (wakeup_fd, readfds);
1766 res -= 1;
1767 if (res == 0)
1768 {
1769 eno = EINTR;
1770 res = -1;
1771 }
1772 }
1773 errno = eno;
1774 return res;
1775}
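/* Illustrative sketch (not part of the original file): waiting for a
   file descriptor with scm_std_select rather than plain select, so the
   wait can be interrupted by system asyncs and other threads are free
   to run GC meanwhile.  example_wait_readable, FD and the five-second
   timeout are made up for the example.  */
#if 0
static int
example_wait_readable (int fd)
{
  fd_set fds;
  struct timeval tv = { 5, 0 };     /* give up after five seconds */

  FD_ZERO (&fds);
  FD_SET (fd, &fds);
  return scm_std_select (fd + 1, &fds, NULL, NULL, &tv);
}
#endif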
1776
1777/* Convenience API for blocking while in guile mode. */
1778
1779#if SCM_USE_PTHREAD_THREADS
1780
1781int
1782scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1783{
1784 scm_t_guile_ticket t = scm_leave_guile ();
1785 int res = scm_i_pthread_mutex_lock (mutex);
1786 scm_enter_guile (t);
1787 return res;
1788}
1789
1790static void
1791do_unlock (void *data)
1792{
1793 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1794}
1795
1796void
1797scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1798{
1799 scm_i_scm_pthread_mutex_lock (mutex);
1800 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
1801}
1802
1803int
1804scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1805{
1806 scm_t_guile_ticket t = scm_leave_guile ();
1807 int res = scm_i_pthread_cond_wait (cond, mutex);
1808 scm_enter_guile (t);
1809 return res;
1810}
1811
1812int
1813scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1814 scm_i_pthread_mutex_t *mutex,
1815 const scm_t_timespec *wt)
1816{
1817 scm_t_guile_ticket t = scm_leave_guile ();
1818 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1819 scm_enter_guile (t);
1820 return res;
1821}
1822
1823#endif
1824
1825unsigned long
1826scm_std_usleep (unsigned long usecs)
1827{
1828 struct timeval tv;
1829 tv.tv_usec = usecs % 1000000;
1830 tv.tv_sec = usecs / 1000000;
1831 scm_std_select (0, NULL, NULL, NULL, &tv);
1832 return tv.tv_sec * 1000000 + tv.tv_usec;
1833}
1834
1835unsigned int
1836scm_std_sleep (unsigned int secs)
1837{
1838 struct timeval tv;
1839 tv.tv_usec = 0;
1840 tv.tv_sec = secs;
1841 scm_std_select (0, NULL, NULL, NULL, &tv);
1842 return tv.tv_sec;
1843}
1844
1845/*** Misc */
1846
1847SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1848 (void),
1849 "Return the thread that called this function.")
1850#define FUNC_NAME s_scm_current_thread
1851{
1852 return SCM_I_CURRENT_THREAD->handle;
1853}
1854#undef FUNC_NAME
1855
1856static SCM
1857scm_c_make_list (size_t n, SCM fill)
1858{
1859 SCM res = SCM_EOL;
1860 while (n-- > 0)
1861 res = scm_cons (fill, res);
1862 return res;
1863}
1864
1865SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
1866 (void),
1867 "Return a list of all threads.")
1868#define FUNC_NAME s_scm_all_threads
1869{
1870 /* We cannot allocate while holding the thread_admin_mutex because
1871 of the way GC is done.
1872 */
1873 int n = thread_count;
1874 scm_i_thread *t;
1875 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
1876
1877 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1878 l = &list;
1879 for (t = all_threads; t && n > 0; t = t->next_thread)
1880 {
1881 if (t != scm_i_signal_delivery_thread)
1882 {
1883 SCM_SETCAR (*l, t->handle);
1884 l = SCM_CDRLOC (*l);
1885 }
1886 n--;
1887 }
1888 *l = SCM_EOL;
1889 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1890 return list;
1891}
1892#undef FUNC_NAME
1893
1894SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1895 (SCM thread),
1896 "Return @code{#t} iff @var{thread} has exited.\n")
1897#define FUNC_NAME s_scm_thread_exited_p
1898{
1899 return scm_from_bool (scm_c_thread_exited_p (thread));
1900}
1901#undef FUNC_NAME
1902
1903int
1904scm_c_thread_exited_p (SCM thread)
1905#define FUNC_NAME s_scm_thread_exited_p
1906{
1907 scm_i_thread *t;
1908 SCM_VALIDATE_THREAD (1, thread);
1909 t = SCM_I_THREAD_DATA (thread);
1910 return t->exited;
1911}
1912#undef FUNC_NAME
1913
1914static scm_i_pthread_cond_t wake_up_cond;
1915int scm_i_thread_go_to_sleep;
1916static int threads_initialized_p = 0;
1917
1918void
1919scm_i_thread_put_to_sleep ()
1920{
1921 if (threads_initialized_p)
1922 {
1923 scm_i_thread *t;
1924
1925 scm_leave_guile ();
1926 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1927
1928 /* Signal all threads to go to sleep
1929 */
1930 scm_i_thread_go_to_sleep = 1;
1931 for (t = all_threads; t; t = t->next_thread)
1932 scm_i_pthread_mutex_lock (&t->heap_mutex);
1933 scm_i_thread_go_to_sleep = 0;
1934 }
1935}
1936
1937void
1938scm_i_thread_invalidate_freelists ()
1939{
1940 /* thread_admin_mutex is already locked. */
1941
1942 scm_i_thread *t;
1943 for (t = all_threads; t; t = t->next_thread)
1944 if (t != SCM_I_CURRENT_THREAD)
1945 t->clear_freelists_p = 1;
1946}
1947
1948void
1949scm_i_thread_wake_up ()
1950{
1951 if (threads_initialized_p)
1952 {
1953 scm_i_thread *t;
1954
1955 scm_i_pthread_cond_broadcast (&wake_up_cond);
1956 for (t = all_threads; t; t = t->next_thread)
1957 scm_i_pthread_mutex_unlock (&t->heap_mutex);
1958 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1959 scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
1960 }
1961}
1962
1963void
1964scm_i_thread_sleep_for_gc ()
1965{
1966 scm_i_thread *t = suspend ();
1967 scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
1968 resume (t);
1969}
1970
1971/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
1972 */
1973scm_i_pthread_mutex_t scm_i_critical_section_mutex;
1974int scm_i_critical_section_level = 0;
1975
1976static SCM dynwind_critical_section_mutex;
1977
1978void
1979scm_dynwind_critical_section (SCM mutex)
1980{
1981 if (scm_is_false (mutex))
1982 mutex = dynwind_critical_section_mutex;
1983 scm_dynwind_lock_mutex (mutex);
1984 scm_dynwind_block_asyncs ();
1985}
1986
1987/*** Initialization */
1988
1989scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
1990scm_i_pthread_mutex_t scm_i_misc_mutex;
1991
1992#if SCM_USE_PTHREAD_THREADS
1993pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
1994#endif
1995
1996void
1997scm_threads_prehistory (SCM_STACKITEM *base)
1998{
1999#if SCM_USE_PTHREAD_THREADS
2000 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
2001 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
2002 PTHREAD_MUTEX_RECURSIVE);
2003#endif
2004
2005 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
2006 scm_i_pthread_mutexattr_recursive);
2007 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
2008 scm_i_pthread_cond_init (&wake_up_cond, NULL);
2009 scm_i_pthread_key_create (&scm_i_freelist, NULL);
2010 scm_i_pthread_key_create (&scm_i_freelist2, NULL);
2011
2012 guilify_self_1 (base);
2013}
2014
2015scm_t_bits scm_tc16_thread;
2016scm_t_bits scm_tc16_mutex;
2017scm_t_bits scm_tc16_condvar;
2018
2019void
2020scm_init_threads ()
2021{
2022 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
2023 scm_set_smob_mark (scm_tc16_thread, thread_mark);
2024 scm_set_smob_print (scm_tc16_thread, thread_print);
2025 scm_set_smob_free (scm_tc16_thread, thread_free);
2026
2027 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
2028 scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
2029 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
2030 scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
2031
2032 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
2033 sizeof (fat_cond));
2034 scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
2035 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
2036 scm_set_smob_free (scm_tc16_condvar, fat_cond_free);
2037
2038 scm_i_default_dynamic_state = SCM_BOOL_F;
2039 guilify_self_2 (SCM_BOOL_F);
2040 threads_initialized_p = 1;
2041
2042 dynwind_critical_section_mutex =
2043 scm_permanent_object (scm_make_recursive_mutex ());
2044}
2045
2046void
2047scm_init_threads_default_dynamic_state ()
2048{
2049 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2050 scm_i_default_dynamic_state = scm_permanent_object (state);
2051}
2052
2053void
2054scm_init_thread_procs ()
2055{
2056#include "libguile/threads.x"
2057}
2058
2059/*
2060 Local Variables:
2061 c-file-style: "gnu"
2062 End:
2063*/