Merge branch 'master' into boehm-demers-weiser-gc
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19 \f
20 #ifdef HAVE_CONFIG_H
21 # include <config.h>
22 #endif
23
24 #include "libguile/boehm-gc.h"
25 #include "libguile/_scm.h"
26
27 #if HAVE_UNISTD_H
28 #include <unistd.h>
29 #endif
30 #include <stdio.h>
31 #include <assert.h>
32
33 #ifdef HAVE_STRING_H
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35 #endif
36
37 #if HAVE_SYS_TIME_H
38 #include <sys/time.h>
39 #endif
40
41 #include "libguile/validate.h"
42 #include "libguile/root.h"
43 #include "libguile/eval.h"
44 #include "libguile/async.h"
45 #include "libguile/ports.h"
46 #include "libguile/threads.h"
47 #include "libguile/dynwind.h"
48 #include "libguile/iselect.h"
49 #include "libguile/fluids.h"
50 #include "libguile/continuations.h"
51 #include "libguile/gc.h"
52 #include "libguile/init.h"
53 #include "libguile/scmsigs.h"
54 #include "libguile/strings.h"
55
56 #ifdef __MINGW32__
57 #ifndef ETIMEDOUT
58 # define ETIMEDOUT WSAETIMEDOUT
59 #endif
60 # include <fcntl.h>
61 # include <process.h>
62 # define pipe(fd) _pipe (fd, 256, O_BINARY)
63 #endif /* __MINGW32__ */
64
65 #include <full-read.h>
66
67 \f
68 static void
69 to_timespec (SCM t, scm_t_timespec *waittime)
70 {
71 if (scm_is_pair (t))
72 {
73 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
74 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
75 }
76 else
77 {
78 double time = scm_to_double (t);
79 double sec = scm_c_truncate (time);
80
81 waittime->tv_sec = (long) sec;
82 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
83 }
84 }
85
86 /*** Queues */
87
88 /* Make an empty queue data structure.
89 */
90 static SCM
91 make_queue ()
92 {
93 return scm_cons (SCM_EOL, SCM_EOL);
94 }
95
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.

   Q is a pair whose cdr is the element list (front first) and whose
   car is the last cell of that list, so appending is O(1).  The
   update runs inside a critical section since queues are shared
   between threads.  */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell is also the first cell.  */
    SCM_SETCDR (q, c);
  else
    /* Link the new cell after the current tail.  */
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);  /* new tail */
  SCM_CRITICAL_SECTION_END;
  return c;
}
112
113 /* Remove the element that the handle C refers to from the queue Q. C
114 must have been returned from a call to enqueue. The return value
115 is zero when the element referred to by C has already been removed.
116 Otherwise, 1 is returned.
117 */
118 static int
119 remqueue (SCM q, SCM c)
120 {
121 SCM p, prev = q;
122 SCM_CRITICAL_SECTION_START;
123 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
124 {
125 if (scm_is_eq (p, c))
126 {
127 if (scm_is_eq (c, SCM_CAR (q)))
128 SCM_SETCAR (q, SCM_CDR (c));
129 SCM_SETCDR (prev, SCM_CDR (c));
130 SCM_CRITICAL_SECTION_END;
131 return 1;
132 }
133 prev = p;
134 }
135 SCM_CRITICAL_SECTION_END;
136 return 0;
137 }
138
139 /* Remove the front-most element from the queue Q and return it.
140 Return SCM_BOOL_F when Q is empty.
141 */
142 static SCM
143 dequeue (SCM q)
144 {
145 SCM c;
146 SCM_CRITICAL_SECTION_START;
147 c = SCM_CDR (q);
148 if (scm_is_null (c))
149 {
150 SCM_CRITICAL_SECTION_END;
151 return SCM_BOOL_F;
152 }
153 else
154 {
155 SCM_SETCDR (q, SCM_CDR (c));
156 if (scm_is_null (SCM_CDR (q)))
157 SCM_SETCAR (q, SCM_EOL);
158 SCM_CRITICAL_SECTION_END;
159 return SCM_CAR (c);
160 }
161 }
162
/*** Thread smob routines */


/* Smob print function for threads: write a representation like
   #<thread 1234 (0xabcdef)> to PORT.  The decimal id is derived from
   the underlying pthread_t, the hex number is the scm_i_thread
   pointer.  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the union member whose size matches pthread_t exactly, so
     the printed id is stable and unpadded.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}
200
201 static size_t
202 thread_free (SCM obj)
203 {
204 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
205 assert (t->exited);
206 scm_gc_free (t, sizeof (*t), "thread");
207 return 0;
208 }
209
/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* scm_i_setup_sleep returning non-zero means an async is already
     pending, so we must not go to sleep at all.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Hold off asyncs between enqueueing ourselves and waiting on
	 the condition variable.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
268
269 /* Wake up the first thread on QUEUE, if any. The awoken thread is
270 returned, or #f if the queue was empty.
271 */
272 static SCM
273 unblock_from_queue (SCM queue)
274 {
275 SCM thread = dequeue (queue);
276 if (scm_is_true (thread))
277 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
278 return thread;
279 }
280
281 /* Getting into and out of guile mode.
282 */
283
284 /* Ken Raeburn observes that the implementation of suspend and resume
285 (and the things that build on top of them) are very likely not
286 correct (see below). We will need fix this eventually, and that's
287 why scm_leave_guile/scm_enter_guile are not exported in the API.
288
289 Ken writes:
290
291 Consider this sequence:
292
293 Function foo, called in Guile mode, calls suspend (maybe indirectly
294 through scm_leave_guile), which does this:
295
296 // record top of stack for the GC
297 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
298 var 't'
299 // save registers.
300 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
301 setjmp (t->regs); // here's most of the magic
302
303 ... and returns.
304
305 Function foo has a SCM value X, a handle on a non-immediate object, in
306 a caller-saved register R, and it's the only reference to the object
307 currently.
308
309 The compiler wants to use R in suspend, so it pushes the current
310 value, X, into a stack slot which will be reloaded on exit from
311 suspend; then it loads stuff into R and goes about its business. The
312 setjmp call saves (some of) the current registers, including R, which
313 no longer contains X. (This isn't a problem for a normal
314 setjmp/longjmp situation, where longjmp would be called before
315 setjmp's caller returns; the old value for X would be loaded back from
316 the stack after the longjmp, before the function returned.)
317
318 So, suspend returns, loading X back into R (and invalidating the jump
319 buffer) in the process. The caller foo then goes off and calls a
320 bunch of other functions out of Guile mode, occasionally storing X on
321 the stack again, but, say, much deeper on the stack than suspend's
322 stack frame went, and the stack slot where suspend had written X has
323 long since been overwritten with other values.
324
325 Okay, nothing actively broken so far. Now, let garbage collection
326 run, triggered by another thread.
327
328 The thread calling foo is out of Guile mode at the time, so the
329 garbage collector just scans a range of stack addresses. Too bad that
330 X isn't stored there. So the pointed-to storage goes onto the free
331 list, and I think you can see where things go from there.
332
333 Is there anything I'm missing that'll prevent this scenario from
334 happening? I mean, aside from, "well, suspend and scm_leave_guile
335 don't have many local variables, so they probably won't need to save
336 any registers on most systems, so we hope everything will wind up in
337 the jump buffer and we'll just get away with it"?
338
339 (And, going the other direction, if scm_leave_guile and suspend push
340 the stack pointer over onto a new page, and foo doesn't make further
341 function calls and thus the stack pointer no longer includes that
342 page, are we guaranteed that the kernel cannot release the now-unused
343 stack page that contains the top-of-stack pointer we just saved? I
344 don't know if any OS actually does that. If it does, we could get
345 faults in garbage collection.)
346
347 I don't think scm_without_guile has to have this problem, as it gets
348 more control over the stack handling -- but it should call setjmp
349 itself. I'd probably try something like:
350
351 // record top of stack for the GC
352 t->top = SCM_STACK_PTR (&t);
353 // save registers.
354 SCM_FLUSH_REGISTER_WINDOWS;
355 setjmp (t->regs);
356 res = func(data);
357 scm_enter_guile (t);
358
359 ... though even that's making some assumptions about the stack
360 ordering of local variables versus caller-saved registers.
361
362 For something like scm_leave_guile to work, I don't think it can just
363 rely on invalidated jump buffers. A valid jump buffer, and a handle
364 on the stack state at the point when the jump buffer was initialized,
365 together, would work fine, but I think then we're talking about macros
366 invoking setjmp in the caller's stack frame, and requiring that the
367 caller of scm_leave_guile also call scm_enter_guile before returning,
368 kind of like pthread_cleanup_push/pop calls that have to be paired up
369 in a function. (In fact, the pthread ones have to be paired up
370 syntactically, as if they might expand to a compound statement
371 incorporating the user's code, and invoking a compiler's
372 exception-handling primitives. Which might be something to think
373 about for cases where Guile is used with C++ exceptions or
374 pthread_cancel.)
375 */
376
/* Thread-specific key holding each thread's scm_i_thread pointer.  */
scm_i_pthread_key_t scm_i_thread_key;

/* Mark thread T as back in guile mode: a NULL `top' means the whole
   stack (down to t->base) is live and must be scanned by the GC.  */
static void
resume (scm_i_thread *t)
{
  t->top = NULL;
}
384
385 typedef void* scm_t_guile_ticket;
386
387 static void
388 scm_enter_guile (scm_t_guile_ticket ticket)
389 {
390 scm_i_thread *t = (scm_i_thread *)ticket;
391 if (t)
392 {
393 resume (t);
394 }
395 }
396
/* Leave guile mode: record the current stack top and save the
   registers of the calling thread, so the conservative GC can scan
   them while the thread is away.  Returns the current thread.  See
   the long comment above for why this scheme is not fully sound.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}
409
410 static scm_t_guile_ticket
411 scm_leave_guile ()
412 {
413 scm_i_thread *t = suspend ();
414 return (scm_t_guile_ticket) t;
415 }
416
/* Global list of all scm_i_thread structures and its length, both
   protected by thread_admin_mutex.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static int thread_count;

/* Dynamic state used as parent when none is given explicitly.  */
static SCM scm_i_default_dynamic_state;
422
/* Perform first stage of thread initialisation, in non-guile mode.
   BASE is the bottom of this thread's C stack, used for conservative
   scanning.  Allocates the scm_i_thread structure, initialises every
   field to a safe default, and links the thread into the global
   list.  No SCM objects may be created here; that happens in
   guilify_self_2.  */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->cleanup_handler = SCM_BOOL_F;
  t->mutexes = SCM_EOL;
  t->held_mutex = NULL;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  /* Keep asyncs blocked until guilify_self_2 finishes setting up.  */
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->last_debug_frame = NULL;
  t->base = base;
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum. */
  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (t->register_backing_store_base > bsp)
      t->register_backing_store_base = bsp;
  }
#endif
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;

  /* The sleep pipe is used to wake this thread from blocking waits;
     closed again in do_thread_exit.  */
  if (pipe (t->sleep_pipe) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
  t->current_mark_stack_ptr = NULL;
  t->current_mark_stack_limit = NULL;
  t->canceled = 0;
  t->exited = 0;
  t->guile_mode = 0;

  /* Make SCM_I_CURRENT_THREAD work for this thread.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Link into the global thread list, under the admin lock.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
488
/* Perform second stage of thread initialisation, in guile mode:
   create the SCM objects (thread smob, continuation root, dynamic
   state, join queue) that could not be made in guilify_self_1.
   PARENT is the dynamic state to inherit from, or #f to create the
   initial dynamic state.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Thread is fully set up: allow asyncs from now on.  */
  t->block_asyncs = 0;
}
511
512 \f
/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;  /* protects the fields below */
  SCM owner;                   /* NOTE(review): presumably the owning
                                  thread's handle or #f -- confirm
                                  against the lock/unlock code.  */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
536
/* Perform thread tear-down, in guile mode: run the cleanup handler
   (if any), mark the thread as exited, close its sleep pipe, wake all
   threads joined on it, and wake threads waiting on mutexes recorded
   in t->mutexes.  V is the exiting thread's scm_i_thread pointer;
   always returns NULL.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice; catch all
	 errors so tear-down continues regardless of what it does.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      (scm_t_catch_body) scm_call_0, ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* Wake threads waiting on the mutexes in t->mutexes so they can
     re-examine the mutex state.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_CAR (t->mutexes);
      fat_mutex *m  = SCM_MUTEX_DATA (mutex);
      scm_i_pthread_mutex_lock (&m->lock);

      unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      t->mutexes = SCM_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
578
/* Final thread tear-down; V is the thread's scm_i_thread pointer.
   Runs the guile-mode exit work via do_thread_exit and then unlinks
   the thread from the global list.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode. */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here. */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Re-install the thread-specific value; NOTE(review): presumably
     because pthreads may have cleared it before running this
     handler -- confirm.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down. */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure. */

  /* Note: Since `do_thread_exit ()' allocates memory via `libgc', we
     assume the GC is usable at this point, and notably that thread-local
     storage (TLS) hasn't been deallocated yet. */
  do_thread_exit (v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list. */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt. */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
630
/* Guard ensuring the thread key is created exactly once.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create scm_i_thread_key, the thread-specific slot that holds each
   thread's scm_i_thread pointer.  Run via scm_i_pthread_once.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, NULL);
}
638
/* Perform any initializations necessary to bring the current thread
   into guile mode, initializing Guile itself, if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Return zero when the thread was in guile mode already; otherwise
   return 1.
*/

static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	   */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	   */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 A user call to scm_with_guile() will lead us to here.  This could
	 happen from anywhere on the stack, and in particular lower on the
	 stack than when it was when this thread was first guilified.  Thus,
	 `base' must be updated.  */
#if SCM_STACK_GROWS_UP
      if (base < t->base)
	t->base = base;
#else
      if (base > t->base)
	t->base = base;
#endif

      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
       */
      return 0;
    }
}
710
711 #if SCM_USE_PTHREAD_THREADS
712
713 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
714 /* This method for GNU/Linux and perhaps some other systems.
715 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
716 available on them. */
717 #define HAVE_GET_THREAD_STACK_BASE
718
719 static SCM_STACKITEM *
720 get_thread_stack_base ()
721 {
722 pthread_attr_t attr;
723 void *start, *end;
724 size_t size;
725
726 pthread_getattr_np (pthread_self (), &attr);
727 pthread_attr_getstack (&attr, &start, &size);
728 end = (char *)start + size;
729
730 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
731 for the main thread, but we can use scm_get_stack_base in that
732 case.
733 */
734
735 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
736 if ((void *)&attr < start || (void *)&attr >= end)
737 return (SCM_STACKITEM *) GC_stackbottom;
738 else
739 #endif
740 {
741 #if SCM_STACK_GROWS_UP
742 return start;
743 #else
744 return end;
745 #endif
746 }
747 }
748
749 #elif HAVE_PTHREAD_GET_STACKADDR_NP
750 /* This method for MacOS X.
751 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
752 but as of 2006 there's nothing obvious at apple.com. */
753 #define HAVE_GET_THREAD_STACK_BASE
754 static SCM_STACKITEM *
755 get_thread_stack_base ()
756 {
757 return pthread_get_stackaddr_np (pthread_self ());
758 }
759
760 #elif defined (__MINGW32__)
761 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
762 in any thread. We don't like hard-coding the name of a system, but there
763 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
764 work. */
765 #define HAVE_GET_THREAD_STACK_BASE
766 static SCM_STACKITEM *
767 get_thread_stack_base ()
768 {
769 return (SCM_STACKITEM *) GC_stackbottom;
770 }
771
772 #endif /* pthread methods of get_thread_stack_base */
773
774 #else /* !SCM_USE_PTHREAD_THREADS */
775
776 #define HAVE_GET_THREAD_STACK_BASE
777
778 static SCM_STACKITEM *
779 get_thread_stack_base ()
780 {
781 return (SCM_STACKITEM *) GC_stackbottom;
782 }
783
784 #endif /* !SCM_USE_PTHREAD_THREADS */
785
786 #ifdef HAVE_GET_THREAD_STACK_BASE
787
788 void
789 scm_init_guile ()
790 {
791 scm_i_init_thread_for_guile (get_thread_stack_base (),
792 scm_i_default_dynamic_state);
793 }
794
795 #endif
796
/* Call FUNC with DATA in guile mode, using the default dynamic state
   as parent; see scm_i_with_guile_and_parent.  Returns FUNC's
   result.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
803
/* pthread cleanup handler pushed around the continuation barrier in
   scm_i_with_guile_and_parent: if the thread is cancelled while in
   guile mode, leave guile mode and run the thread-exit work.  X is
   unused.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  scm_leave_guile ();
  on_thread_exit (SCM_I_CURRENT_THREAD);
}
810
/* Call FUNC with DATA in guile mode, entering guile mode first if
   necessary with PARENT as the dynamic-state parent.  If this call
   is what entered guile mode, leave it again before returning and
   arrange (via the cleanup handler) for tear-down should the thread
   be cancelled in between.  Returns FUNC's result.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  /* The address of this local marks the stack base for this
     guile-mode stretch.  */
  SCM_STACKITEM base_item;

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
      scm_leave_guile ();
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}
831
832 \f
833 /*** Non-guile mode. */
834
835 #if (defined HAVE_GC_DO_BLOCKING) && (!defined HAVE_DECL_GC_DO_BLOCKING)
836
837 /* This declaration is missing from the public headers of GC 7.1. */
838 extern void GC_do_blocking (void (*) (void *), void *);
839
840 #endif
841
842 #ifdef HAVE_GC_DO_BLOCKING
/* Closure carried through GC_do_blocking so a user function can run
   outside of guile mode; `result' receives its return value.  */
struct without_guile_arg
{
  void * (*function) (void *);
  void *data;
  void *result;
};
849
850 static void
851 without_guile_trampoline (void *closure)
852 {
853 struct without_guile_arg *arg;
854
855 SCM_I_CURRENT_THREAD->guile_mode = 0;
856
857 arg = (struct without_guile_arg *) closure;
858 arg->result = arg->function (arg->data);
859
860 SCM_I_CURRENT_THREAD->guile_mode = 1;
861 }
862 #endif
863
/* Call FUNC with DATA outside of guile mode, so that it may block
   without stalling the rest of the system.  When libgc provides
   GC_do_blocking and we are currently in guile mode, route the call
   through it; otherwise call FUNC directly.  Returns FUNC's
   result.  */
void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *result;

#ifdef HAVE_GC_DO_BLOCKING
  if (SCM_I_CURRENT_THREAD->guile_mode)
    {
      struct without_guile_arg arg;

      arg.function = func;
      arg.data = data;
      GC_do_blocking (without_guile_trampoline, &arg);
      result = arg.result;
    }
  else
#endif
    /* Already outside guile mode (or no GC_do_blocking): plain call.  */
    result = func (data);

  return result;
}
885
886 \f
/*** Thread creation */

/* Data shared between call-with-new-thread and the thread it
   creates.  MUTEX and COND hand the new thread object back to the
   creator before the thunk starts running; the structure lives on
   the creator's stack and is only valid until COND is signalled.  */
typedef struct {
  SCM parent;    /* dynamic state to inherit */
  SCM thunk;     /* thunk to run in the new thread */
  SCM handler;   /* catch handler, or SCM_UNDEFINED */
  SCM thread;    /* out: the new thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
897
/* Body of a thread made by call-with-new-thread, run in guile mode.
   Publishes the new thread object back to the creator, then runs the
   thunk (under `catch' when a handler was supplied).  D is the
   creator's launch_data, valid only until COND is signalled.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy what we need out of DATA before signalling: the creator's
     stack frame may disappear afterwards.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  /* Trigger a call to `on_thread_exit ()'.  */
  pthread_exit (NULL);

  return 0;
}
922
923 static void *
924 launch_thread (void *d)
925 {
926 launch_data *data = (launch_data *)d;
927 scm_i_pthread_detach (scm_i_pthread_self ());
928 scm_i_with_guile_and_parent (really_launch, d, data->parent);
929 return NULL;
930 }
931
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread. The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler. This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  /* NOTE(review): data.mutex and data.cond are never destroyed on any
     path; harmless on common pthread implementations but technically
     a resource leak -- consider destroying them before returning.  */
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex across thread creation so the cond wait below
     cannot miss the signal from really_launch.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the child has stored its thread object in
     data.thread.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
975
/* Like launch_data, but for scm_spawn_thread, which runs C body and
   handler functions rather than Scheme procedures.  */
typedef struct {
  SCM parent;                   /* dynamic state to inherit */
  scm_t_catch_body body;        /* C function to run */
  void *body_data;
  scm_t_catch_handler handler;  /* may be NULL for no catch */
  void *handler_data;
  SCM thread;                   /* out: the new thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
986
/* Body of a thread made by scm_spawn_thread, run in guile mode.
   Publishes the new thread object back to the creator, then runs
   BODY (under scm_internal_catch when HANDLER is non-NULL).  D is
   the creator's spawn_data, valid only until COND is signalled.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy the fields out before signalling: the creator's stack frame
     may disappear afterwards.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
1011
1012 static void *
1013 spawn_thread (void *d)
1014 {
1015 spawn_data *data = (spawn_data *)d;
1016 scm_i_pthread_detach (scm_i_pthread_self ());
1017 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1018 return NULL;
1019 }
1020
/* Create a new thread that runs BODY with BODY_DATA in guile mode,
   catching errors with HANDLER/HANDLER_DATA when HANDLER is non-NULL.
   Blocks until the child has started, then returns its thread
   object.  Raises a system error if thread creation fails.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex across thread creation so the cond wait below
     cannot miss the signal from really_spawn.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the child has stored its thread object in
     data.thread.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
1051
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Delegates to the platform yield primitive; returns its boolean
     success value.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1060
1061 SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1062 (SCM thread),
1063 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1064 "cannot be the current thread, and if @var{thread} has already terminated or "
1065 "been signaled to terminate, this function is a no-op.")
1066 #define FUNC_NAME s_scm_cancel_thread
1067 {
1068 scm_i_thread *t = NULL;
1069
1070 SCM_VALIDATE_THREAD (1, thread);
1071 t = SCM_I_THREAD_DATA (thread);
1072 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1073 if (!t->canceled)
1074 {
1075 t->canceled = 1;
1076 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1077 scm_i_pthread_cancel (t->pthread);
1078 }
1079 else
1080 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1081
1082 return SCM_UNSPECIFIED;
1083 }
1084 #undef FUNC_NAME
1085
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f clears the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Silently ignored once the thread has exited or been canceled.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1109
1110 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1111 (SCM thread),
1112 "Return the cleanup handler installed for the thread @var{thread}.")
1113 #define FUNC_NAME s_scm_thread_cleanup
1114 {
1115 scm_i_thread *t;
1116 SCM ret;
1117
1118 SCM_VALIDATE_THREAD (1, thread);
1119
1120 t = SCM_I_THREAD_DATA (thread);
1121 scm_i_pthread_mutex_lock (&t->admin_mutex);
1122 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1123 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1124
1125 return ret;
1126 }
1127 #undef FUNC_NAME
1128
1129 SCM scm_join_thread (SCM thread)
1130 {
1131 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1132 }
1133
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* The value returned on timeout; defaults to #f when TIMEOUTVAL is
     not supplied.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  /* Joining oneself would deadlock forever.  */
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      /* Repeatedly sleep on the join queue until the target exits or
	 the deadline passes.  block_self returns 0 on a wakeup (which
	 may be spurious, hence the re-check of t->exited), ETIMEDOUT
	 on deadline, and anything else means we were interrupted: drop
	 the admin mutex, run pending asyncs via SCM_TICK, and retry.  */
      while (1)
	{
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1190
1191 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1192 (SCM obj),
1193 "Return @code{#t} if @var{obj} is a thread.")
1194 #define FUNC_NAME s_scm_thread_p
1195 {
1196 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1197 }
1198 #undef FUNC_NAME
1199
1200
1201 static size_t
1202 fat_mutex_free (SCM mx)
1203 {
1204 fat_mutex *m = SCM_MUTEX_DATA (mx);
1205 scm_i_pthread_mutex_destroy (&m->lock);
1206 scm_gc_free (m, sizeof (fat_mutex), "mutex");
1207 return 0;
1208 }
1209
1210 static int
1211 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1212 {
1213 fat_mutex *m = SCM_MUTEX_DATA (mx);
1214 scm_puts ("#<mutex ", port);
1215 scm_uintprint ((scm_t_bits)m, 16, port);
1216 scm_puts (">", port);
1217 return 1;
1218 }
1219
/* Allocate and initialize a fat mutex with the given behavior flags,
   returning it wrapped in a fresh smob.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* `waiting' is first set to a harmless value before SCM_NEWSMOB, and
     only then to the real queue; presumably this ordering keeps the GC
     from seeing an uninitialized field if SCM_NEWSMOB or make_queue
     triggers a collection — NOTE(review): confirm against the smob/GC
     rules before reordering.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1240
1241 SCM scm_make_mutex (void)
1242 {
1243 return scm_make_mutex_with_flags (SCM_EOL);
1244 }
1245
/* Symbols accepted as flags by make-mutex (see
   scm_make_mutex_with_flags below).  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1249
1250 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1251 (SCM flags),
1252 "Create a new mutex. ")
1253 #define FUNC_NAME s_scm_make_mutex_with_flags
1254 {
1255 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1256
1257 SCM ptr = flags;
1258 while (! scm_is_null (ptr))
1259 {
1260 SCM flag = SCM_CAR (ptr);
1261 if (scm_is_eq (flag, unchecked_unlock_sym))
1262 unchecked_unlock = 1;
1263 else if (scm_is_eq (flag, allow_external_unlock_sym))
1264 external_unlock = 1;
1265 else if (scm_is_eq (flag, recursive_sym))
1266 recursive = 1;
1267 else
1268 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1269 ptr = SCM_CDR (ptr);
1270 }
1271 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1272 }
1273 #undef FUNC_NAME
1274
1275 SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1276 (void),
1277 "Create a new recursive mutex. ")
1278 #define FUNC_NAME s_scm_make_recursive_mutex
1279 {
1280 return make_fat_mutex (1, 0, 0);
1281 }
1282 #undef FUNC_NAME
1283
1284 SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1285
/* Acquire MUTEX on behalf of OWNER (the current thread when OWNER is
   unbound), optionally bounded by the absolute deadline TIMEOUT.
   On return *RET is 1 if the lock (or a recursion level) was obtained,
   0 otherwise.  The return value is #f on success, or a
   (key . message) pair describing an error to be thrown by the caller.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unlocked: take ownership outright.  */
	  m->owner = new_owner;
	  m->level++;

	  /* Record the mutex in the owning thread's `mutexes' list;
	     presumably consulted when the thread dies — TODO confirm
	     against the thread-exit code.  */
	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
	      scm_i_pthread_mutex_lock (&t->admin_mutex);
	      t->mutexes = scm_cons (mutex, t->mutexes);
	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* The previous owner died while holding the mutex: steal it,
	     but report the abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Relocking our own mutex: bump the level if recursive,
	     otherwise it is an error.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by someone else: give up if the absolute deadline has
	     already passed, otherwise sleep on the wait queue and retry
	     after running any pending asyncs.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1362
1363 SCM scm_lock_mutex (SCM mx)
1364 {
1365 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1366 }
1367
1368 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1369 (SCM m, SCM timeout, SCM owner),
1370 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1371 "blocks until the mutex becomes available. The function returns when "
1372 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1373 "a thread already owns will succeed right away and will not block the "
1374 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1375 #define FUNC_NAME s_scm_lock_mutex_timed
1376 {
1377 SCM exception;
1378 int ret = 0;
1379 scm_t_timespec cwaittime, *waittime = NULL;
1380
1381 SCM_VALIDATE_MUTEX (1, m);
1382
1383 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1384 {
1385 to_timespec (timeout, &cwaittime);
1386 waittime = &cwaittime;
1387 }
1388
1389 exception = fat_mutex_lock (m, waittime, owner, &ret);
1390 if (!scm_is_false (exception))
1391 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1392 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1393 }
1394 #undef FUNC_NAME
1395
/* Lock MUTEX for the duration of the current dynwind context: it is
   locked on (re)entry and unlocked on (non-local) exit.  The casts
   adapt the SCM-returning lock/unlock functions to the void-returning
   handler signature.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1404
1405 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1406 (SCM mutex),
1407 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1408 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1409 #define FUNC_NAME s_scm_try_mutex
1410 {
1411 SCM exception;
1412 int ret = 0;
1413 scm_t_timespec cwaittime, *waittime = NULL;
1414
1415 SCM_VALIDATE_MUTEX (1, mutex);
1416
1417 to_timespec (scm_from_int(0), &cwaittime);
1418 waittime = &cwaittime;
1419
1420 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1421 if (!scm_is_false (exception))
1422 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1423 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1424 }
1425 #undef FUNC_NAME
1426
/*** Fat condition variables */

/* Payload of a condition-variable smob.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects `waiting' */
  SCM waiting;                  /* the threads waiting for this condition. */
} fat_cond;

/* Type predicate and payload accessor for condvar smobs.  */
#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1436
/* Unlock MUTEX; when COND is supplied, atomically wait on COND as well
   (this is the engine behind both unlock-mutex and
   wait-condition-variable).  WAITTIME is an optional absolute deadline
   for the wait; RELOCK requests that the mutex be re-acquired before
   returning (wait-condition-variable semantics).  Returns 1 on
   success, 0 on timeout.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  SCM owner = m->owner;

  /* Enforce the unlock policy: unlocking an unlocked mutex is an error
     unless `unchecked-unlock' was requested; unlocking another
     thread's mutex is an error unless `allow-external-unlock' was
     requested.  */
  if (!scm_is_eq (owner, scm_current_thread ()))
    {
      if (m->level == 0)
	{
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = scm_current_thread ();
	}
      else if (!m->allow_external_unlock)
	{
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      /* Unlock-and-wait: drop one level, hand the mutex to the next
	 waiter, then sleep on the condvar's queue.  EINTR wakeups run
	 pending asyncs and loop; success or timeout leaves the loop,
	 re-acquiring the mutex first when RELOCK is set.  */
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    m->owner = unblock_from_queue (m->waiting);

	  /* Defer asyncs across the wait so they don't fire while the
	     mutex/condvar state is in flux.  NOTE(review): on the
	     `brk' path below the matching decrement is not executed —
	     looks unbalanced; confirm whether block_self or the relock
	     compensates.  */
	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      break;
	    }

	  t->block_asyncs--;
	  scm_async_click ();

	  /* Keep the smobs alive across the block; their payloads are
	     still referenced through raw pointers.  */
	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock: drop one level; on reaching zero, pass ownership
	 to the next queued waiter (or #f).  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	m->owner = unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1529
1530 SCM scm_unlock_mutex (SCM mx)
1531 {
1532 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1533 }
1534
1535 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1536 (SCM mx, SCM cond, SCM timeout),
1537 "Unlocks @var{mutex} if the calling thread owns the lock on "
1538 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1539 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1540 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1541 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1542 "with a call to @code{unlock-mutex}. Only the last call to "
1543 "@code{unlock-mutex} will actually unlock the mutex. ")
1544 #define FUNC_NAME s_scm_unlock_mutex_timed
1545 {
1546 scm_t_timespec cwaittime, *waittime = NULL;
1547
1548 SCM_VALIDATE_MUTEX (1, mx);
1549 if (! (SCM_UNBNDP (cond)))
1550 {
1551 SCM_VALIDATE_CONDVAR (2, cond);
1552
1553 if (! (SCM_UNBNDP (timeout)))
1554 {
1555 to_timespec (timeout, &cwaittime);
1556 waittime = &cwaittime;
1557 }
1558 }
1559
1560 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1561 }
1562 #undef FUNC_NAME
1563
1564 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1565 (SCM obj),
1566 "Return @code{#t} if @var{obj} is a mutex.")
1567 #define FUNC_NAME s_scm_mutex_p
1568 {
1569 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1570 }
1571 #undef FUNC_NAME
1572
1573 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1574 (SCM mx),
1575 "Return the thread owning @var{mx}, or @code{#f}.")
1576 #define FUNC_NAME s_scm_mutex_owner
1577 {
1578 SCM owner;
1579 fat_mutex *m = NULL;
1580
1581 SCM_VALIDATE_MUTEX (1, mx);
1582 m = SCM_MUTEX_DATA (mx);
1583 scm_i_pthread_mutex_lock (&m->lock);
1584 owner = m->owner;
1585 scm_i_pthread_mutex_unlock (&m->lock);
1586
1587 return owner;
1588 }
1589 #undef FUNC_NAME
1590
1591 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1592 (SCM mx),
1593 "Return the lock level of mutex @var{mx}.")
1594 #define FUNC_NAME s_scm_mutex_level
1595 {
1596 SCM_VALIDATE_MUTEX (1, mx);
1597 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1598 }
1599 #undef FUNC_NAME
1600
1601 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1602 (SCM mx),
1603 "Returns @code{#t} if the mutex @var{mx} is locked.")
1604 #define FUNC_NAME s_scm_mutex_locked_p
1605 {
1606 SCM_VALIDATE_MUTEX (1, mx);
1607 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1608 }
1609 #undef FUNC_NAME
1610
1611 static size_t
1612 fat_cond_free (SCM mx)
1613 {
1614 fat_cond *c = SCM_CONDVAR_DATA (mx);
1615 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1616 return 0;
1617 }
1618
1619 static int
1620 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1621 {
1622 fat_cond *c = SCM_CONDVAR_DATA (cv);
1623 scm_puts ("#<condition-variable ", port);
1624 scm_uintprint ((scm_t_bits)c, 16, port);
1625 scm_puts (">", port);
1626 return 1;
1627 }
1628
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* As in make_fat_mutex: give `waiting' a harmless value before
     SCM_NEWSMOB, then install the real queue, presumably so a GC
     triggered by either allocation never sees garbage — NOTE(review):
     confirm before reordering.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1644
1645 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1646 (SCM cv, SCM mx, SCM t),
1647 "Wait until @var{cond-var} has been signalled. While waiting, "
1648 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1649 "is locked again when this function returns. When @var{time} is given, "
1650 "it specifies a point in time where the waiting should be aborted. It "
1651 "can be either a integer as returned by @code{current-time} or a pair "
1652 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1653 "mutex is locked and @code{#f} is returned. When the condition "
1654 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1655 "is returned. ")
1656 #define FUNC_NAME s_scm_timed_wait_condition_variable
1657 {
1658 scm_t_timespec waittime, *waitptr = NULL;
1659
1660 SCM_VALIDATE_CONDVAR (1, cv);
1661 SCM_VALIDATE_MUTEX (2, mx);
1662
1663 if (!SCM_UNBNDP (t))
1664 {
1665 to_timespec (t, &waittime);
1666 waitptr = &waittime;
1667 }
1668
1669 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1670 }
1671 #undef FUNC_NAME
1672
/* Wake at most one thread waiting on C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1678
1679 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1680 (SCM cv),
1681 "Wake up one thread that is waiting for @var{cv}")
1682 #define FUNC_NAME s_scm_signal_condition_variable
1683 {
1684 SCM_VALIDATE_CONDVAR (1, cv);
1685 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1686 return SCM_BOOL_T;
1687 }
1688 #undef FUNC_NAME
1689
1690 static void
1691 fat_cond_broadcast (fat_cond *c)
1692 {
1693 while (scm_is_true (unblock_from_queue (c->waiting)))
1694 ;
1695 }
1696
1697 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1698 (SCM cv),
1699 "Wake up all threads that are waiting for @var{cv}. ")
1700 #define FUNC_NAME s_scm_broadcast_condition_variable
1701 {
1702 SCM_VALIDATE_CONDVAR (1, cv);
1703 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1704 return SCM_BOOL_T;
1705 }
1706 #undef FUNC_NAME
1707
1708 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1709 (SCM obj),
1710 "Return @code{#t} if @var{obj} is a condition variable.")
1711 #define FUNC_NAME s_scm_condition_variable_p
1712 {
1713 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1714 }
1715 #undef FUNC_NAME
1716
/*** Marking stacks */

/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?
*/
/* On IA-64 part of the register state lives in a separate "register
   backing store" area, so marking the ordinary C stack is not enough:
   this macro additionally marks the saved machine context and the
   backing-store span between the thread's recorded base and the
   current AR.BSP.  On every other architecture it is a no-op.  */
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                \
    ucontext_t ctx;                                                   \
    SCM_STACKITEM * top, * bot;                                       \
    getcontext (&ctx);                                                \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,           \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM)));                                    \
    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx);                   \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1736
1737
1738 \f
1739 /*** Select */
1740
/* Argument/result bundle passed through scm_without_guile to
   do_std_select, which runs select() outside of guile mode.  */
struct select_args
{
  int nfds;                     /* highest fd + 1 */
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  int result;                   /* return value of select() */
  int errno_value;              /* errno captured right after select() */
};
1752
1753 static void *
1754 do_std_select (void *args)
1755 {
1756 struct select_args *select_args;
1757
1758 select_args = (struct select_args *) args;
1759
1760 select_args->result =
1761 select (select_args->nfds,
1762 select_args->read_fds, select_args->write_fds,
1763 select_args->except_fds, select_args->timeout);
1764 select_args->errno_value = errno;
1765
1766 return NULL;
1767 }
1768
/* select() replacement for code running in guile mode: in addition to
   the caller's fd sets it watches the thread's sleep pipe, so that
   another thread can interrupt the wait (the wakeup is reported to the
   caller as EINTR), and it runs the actual select() via
   scm_without_guile so the GC is not blocked by the sleeping thread.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We need a read set to put the wakeup pipe into, even if the caller
     didn't supply one.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  /* Watch the read end of our sleep pipe alongside the caller's fds.  */
  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  /* If the wakeup pipe fired, drain it and hide it from the caller;
     when it was the only ready fd, report the wakeup as EINTR.  */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1826
1827 /* Convenience API for blocking while in guile mode. */
1828
1829 #if SCM_USE_PTHREAD_THREADS
1830
1831 /* It seems reasonable to not run procedures related to mutex and condition
1832 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1833 without it, and (ii) the only potential gain would be GC latency. See
1834 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1835 for a discussion of the pros and cons. */
1836
1837 int
1838 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1839 {
1840 int res = scm_i_pthread_mutex_lock (mutex);
1841 return res;
1842 }
1843
1844 static void
1845 do_unlock (void *data)
1846 {
1847 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1848 }
1849
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is left, including non-local exits.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1856
1857 int
1858 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1859 {
1860 scm_t_guile_ticket t = scm_leave_guile ();
1861 ((scm_i_thread *)t)->held_mutex = mutex;
1862 int res = scm_i_pthread_cond_wait (cond, mutex);
1863 ((scm_i_thread *)t)->held_mutex = NULL;
1864 scm_enter_guile (t);
1865 return res;
1866 }
1867
1868 int
1869 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1870 scm_i_pthread_mutex_t *mutex,
1871 const scm_t_timespec *wt)
1872 {
1873 scm_t_guile_ticket t = scm_leave_guile ();
1874 ((scm_i_thread *)t)->held_mutex = mutex;
1875 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1876 ((scm_i_thread *)t)->held_mutex = NULL;
1877 scm_enter_guile (t);
1878 return res;
1879 }
1880
1881 #endif
1882
1883 unsigned long
1884 scm_std_usleep (unsigned long usecs)
1885 {
1886 struct timeval tv;
1887 tv.tv_usec = usecs % 1000000;
1888 tv.tv_sec = usecs / 1000000;
1889 scm_std_select (0, NULL, NULL, NULL, &tv);
1890 return tv.tv_sec * 1000000 + tv.tv_usec;
1891 }
1892
1893 unsigned int
1894 scm_std_sleep (unsigned int secs)
1895 {
1896 struct timeval tv;
1897 tv.tv_usec = 0;
1898 tv.tv_sec = secs;
1899 scm_std_select (0, NULL, NULL, NULL, &tv);
1900 return tv.tv_sec;
1901 }
1902
1903 /*** Misc */
1904
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The thread smob is cached in the per-thread structure.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1913
1914 static SCM
1915 scm_c_make_list (size_t n, SCM fill)
1916 {
1917 SCM res = SCM_EOL;
1918 while (n-- > 0)
1919 res = scm_cons (fill, res);
1920 return res;
1921 }
1922
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate a list of `thread_count' cells, then fill it in place
     under the lock; `l' always points at the next cell to fill.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is hidden from Scheme.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Truncate the pre-allocated list after the last cell filled (some
     cells may be unused if threads exited since thread_count was
     read, or because of the hidden thread).  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1951
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  /* Scheme-level wrapper around the C predicate below.  */
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1960
1961 int
1962 scm_c_thread_exited_p (SCM thread)
1963 #define FUNC_NAME s_scm_thread_exited_p
1964 {
1965 scm_i_thread *t;
1966 SCM_VALIDATE_THREAD (1, thread);
1967 t = SCM_I_THREAD_DATA (thread);
1968 return t->exited;
1969 }
1970 #undef FUNC_NAME
1971
/* Condition used to wake sleeping threads (see the sleep machinery
   above).  */
static scm_i_pthread_cond_t wake_up_cond;
/* Set once scm_init_threads has run.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
/* Nesting depth of critical sections (the mutex is recursive).  */
int scm_i_critical_section_level = 0;

/* Default mutex for scm_dynwind_critical_section when none is given.  */
static SCM dynwind_critical_section_mutex;
1982
1983 void
1984 scm_dynwind_critical_section (SCM mutex)
1985 {
1986 if (scm_is_false (mutex))
1987 mutex = dynwind_critical_section_mutex;
1988 scm_dynwind_lock_mutex (mutex);
1989 scm_dynwind_block_asyncs ();
1990 }
1991
/*** Initialization */

/* Mutex protecting miscellaneous small pieces of global state.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Mutex attribute selecting PTHREAD_MUTEX_RECURSIVE; initialized in
   scm_threads_prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1999
/* Earliest thread-system setup, run before the rest of libguile is
   initialized: create the global mutexes/condition and register the
   main thread (first phase) with BASE as its stack base.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex must be recursive: critical sections
     nest (see scm_i_critical_section_level).  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 (base);
}
2016
/* Smob type tags, assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
2020
/* Register the thread, mutex and condition-variable smob types and
   finish initializing the main thread (second phase).  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free); /* XXX: Could be removed */

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
2044
2045 void
2046 scm_init_threads_default_dynamic_state ()
2047 {
2048 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2049 scm_i_default_dynamic_state = scm_permanent_object (state);
2050 }
2051
/* Register the Scheme procedures defined in this file (generated
   snarfed definitions).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2057
2058 /*
2059 Local Variables:
2060 c-file-style: "gnu"
2061 End:
2062 */