Use Gnulib's `full-write' and `full-read' modules.
[bpt/guile.git] / libguile / threads.c
... / ...
CommitLineData
1/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19\f
20#ifdef HAVE_CONFIG_H
21# include <config.h>
22#endif
23
24#include "libguile/_scm.h"
25
26#if HAVE_UNISTD_H
27#include <unistd.h>
28#endif
#include <stdio.h>
#include <stdlib.h>  /* for malloc, abort */
#include <assert.h>
31
32#ifdef HAVE_STRING_H
33#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
34#endif
35
36#if HAVE_SYS_TIME_H
37#include <sys/time.h>
38#endif
39
40#include "libguile/validate.h"
41#include "libguile/root.h"
42#include "libguile/eval.h"
43#include "libguile/async.h"
44#include "libguile/ports.h"
45#include "libguile/threads.h"
46#include "libguile/dynwind.h"
47#include "libguile/iselect.h"
48#include "libguile/fluids.h"
49#include "libguile/continuations.h"
50#include "libguile/gc.h"
51#include "libguile/init.h"
52#include "libguile/scmsigs.h"
53#include "libguile/strings.h"
54
55#ifdef __MINGW32__
56#ifndef ETIMEDOUT
57# define ETIMEDOUT WSAETIMEDOUT
58#endif
59# include <fcntl.h>
60# include <process.h>
61# define pipe(fd) _pipe (fd, 256, O_BINARY)
62#endif /* __MINGW32__ */
63
/* Convert the Scheme timeout specification T into *WAITTIME.  T is
   either a pair of integers (seconds . microseconds) or a real number
   of seconds; fractional seconds become nanoseconds.  */
static void
to_timespec (SCM t, scm_t_timespec *waittime)
{
  if (scm_is_pair (t))
    {
      waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
      /* The CDR holds microseconds; scale to nanoseconds.  */
      waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
    }
  else
    {
      double time = scm_to_double (t);
      double sec = scm_c_truncate (time);

      waittime->tv_sec = (long) sec;
      waittime->tv_nsec = (long) ((time - sec) * 1000000000);
    }
}
81
82/*** Queues */
83
/* Make an empty queue data structure.  A queue is a cons cell whose
   CDR holds the list of queued elements and whose CAR caches the last
   pair of that list (or '() when empty), so appending is O(1).
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}
91
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.

   The handle is the new cons cell itself.  The update runs inside a
   critical section since queues are shared between threads.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell becomes the entire list.  */
    SCM_SETCDR (q, c);
  else
    /* Link the new cell after the current tail (cached in the CAR).  */
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}
108
/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  /* Walk the list in the CDR of Q looking for the exact cell C.  */
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
	{
	  /* If C is the tail cell, repoint the cached tail pointer.  */
	  if (scm_is_eq (c, SCM_CAR (q)))
	    SCM_SETCAR (q, SCM_CDR (c));
	  SCM_SETCDR (prev, SCM_CDR (c));
	  SCM_CRITICAL_SECTION_END;
	  return 1;
	}
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}
134
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      /* If the queue just became empty, reset the cached tail too.  */
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;
      return SCM_CAR (c);
    }
}
158
/*** Thread smob routines */

/* GC mark function for thread smobs: mark every SCM value held in the
   thread descriptor.  The dynamic state is returned so the caller can
   mark it tail-recursively.  */
static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->cleanup_handler);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->mutexes);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  return t->dynamic_state;
}
174
/* Print function for thread smobs: emits "#<thread ID (ADDR)>" where
   ID is the numeric pthread identifier and ADDR the address of the
   thread descriptor.  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t. */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the union member whose size matches pthread_t.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}
209
/* Free function for thread smobs.  The descriptor may only be freed
   once the thread has exited (asserted below); it was accounted to
   the GC by guilify_self_2.  */
static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}
218
219/*** Blocking on queues. */
220
221/* See also scm_i_queue_async_cell for how such a block is
222 interrputed.
223*/
224
225/* Put the current thread on QUEUE and go to sleep, waiting for it to
226 be woken up by a call to 'unblock_from_queue', or to be
227 interrupted. Upon return of this function, the current thread is
228 no longer on QUEUE, even when the sleep has been interrupted.
229
230 The caller of block_self must hold MUTEX. It will be atomically
231 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
232
233 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
234 as MUTEX is needed.
235
236 When WAITTIME is not NULL, the sleep will be aborted at that time.
237
238 The return value of block_self is an errno value. It will be zero
239 when the sleep has been successfully completed by a call to
240 unblock_from_queue, EINTR when it has been interrupted by the
241 delivery of a system async, and ETIMEDOUT when the timeout has
242 expired.
243
244 The system asyncs themselves are not executed by block_self.
245*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* NOTE(review): a non-zero result from scm_i_setup_sleep seems to
     indicate an interruption is already pending, so we skip sleeping
     entirely — confirm against async.c/scmsigs.c.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block asyncs while asleep; interruption is reported via the
         EINTR return value rather than by running asyncs here.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
277
/* Wake up the first thread on QUEUE, if any.  The awoken thread is
   returned, or #f if the queue was empty.  Signals the thread's
   sleep_cond, on which block_self is waiting.
 */
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}
289
290/* Getting into and out of guile mode.
291 */
292
293/* Ken Raeburn observes that the implementation of suspend and resume
294 (and the things that build on top of them) are very likely not
295 correct (see below). We will need fix this eventually, and that's
296 why scm_leave_guile/scm_enter_guile are not exported in the API.
297
298 Ken writes:
299
300 Consider this sequence:
301
302 Function foo, called in Guile mode, calls suspend (maybe indirectly
303 through scm_leave_guile), which does this:
304
305 // record top of stack for the GC
306 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
307 var 't'
308 // save registers.
309 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
310 setjmp (t->regs); // here's most of the magic
311
312 ... and returns.
313
314 Function foo has a SCM value X, a handle on a non-immediate object, in
315 a caller-saved register R, and it's the only reference to the object
316 currently.
317
318 The compiler wants to use R in suspend, so it pushes the current
319 value, X, into a stack slot which will be reloaded on exit from
320 suspend; then it loads stuff into R and goes about its business. The
321 setjmp call saves (some of) the current registers, including R, which
322 no longer contains X. (This isn't a problem for a normal
323 setjmp/longjmp situation, where longjmp would be called before
324 setjmp's caller returns; the old value for X would be loaded back from
325 the stack after the longjmp, before the function returned.)
326
327 So, suspend returns, loading X back into R (and invalidating the jump
328 buffer) in the process. The caller foo then goes off and calls a
329 bunch of other functions out of Guile mode, occasionally storing X on
330 the stack again, but, say, much deeper on the stack than suspend's
331 stack frame went, and the stack slot where suspend had written X has
332 long since been overwritten with other values.
333
334 Okay, nothing actively broken so far. Now, let garbage collection
335 run, triggered by another thread.
336
337 The thread calling foo is out of Guile mode at the time, so the
338 garbage collector just scans a range of stack addresses. Too bad that
339 X isn't stored there. So the pointed-to storage goes onto the free
340 list, and I think you can see where things go from there.
341
342 Is there anything I'm missing that'll prevent this scenario from
343 happening? I mean, aside from, "well, suspend and scm_leave_guile
344 don't have many local variables, so they probably won't need to save
345 any registers on most systems, so we hope everything will wind up in
346 the jump buffer and we'll just get away with it"?
347
348 (And, going the other direction, if scm_leave_guile and suspend push
349 the stack pointer over onto a new page, and foo doesn't make further
350 function calls and thus the stack pointer no longer includes that
351 page, are we guaranteed that the kernel cannot release the now-unused
352 stack page that contains the top-of-stack pointer we just saved? I
353 don't know if any OS actually does that. If it does, we could get
354 faults in garbage collection.)
355
356 I don't think scm_without_guile has to have this problem, as it gets
357 more control over the stack handling -- but it should call setjmp
358 itself. I'd probably try something like:
359
360 // record top of stack for the GC
361 t->top = SCM_STACK_PTR (&t);
362 // save registers.
363 SCM_FLUSH_REGISTER_WINDOWS;
364 setjmp (t->regs);
365 res = func(data);
366 scm_enter_guile (t);
367
368 ... though even that's making some assumptions about the stack
369 ordering of local variables versus caller-saved registers.
370
371 For something like scm_leave_guile to work, I don't think it can just
372 rely on invalidated jump buffers. A valid jump buffer, and a handle
373 on the stack state at the point when the jump buffer was initialized,
374 together, would work fine, but I think then we're talking about macros
375 invoking setjmp in the caller's stack frame, and requiring that the
376 caller of scm_leave_guile also call scm_enter_guile before returning,
377 kind of like pthread_cleanup_push/pop calls that have to be paired up
378 in a function. (In fact, the pthread ones have to be paired up
379 syntactically, as if they might expand to a compound statement
380 incorporating the user's code, and invoking a compiler's
381 exception-handling primitives. Which might be something to think
382 about for cases where Guile is used with C++ exceptions or
383 pthread_cancel.)
384*/
385
/* Per-thread key holding the scm_i_thread pointer; its destructor
   (on_thread_exit, see init_thread_key) runs thread tear-down.  */
scm_i_pthread_key_t scm_i_thread_key;

/* Finish re-entering guile mode: forget the saved stack top, and
   clear our freelists when requested via clear_freelists_p
   (presumably set by the GC while we were out of guile mode —
   confirm in gc.c).  */
static void
resume (scm_i_thread *t)
{
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}
399
/* A ticket is the scm_i_thread pointer returned by scm_leave_guile,
   or NULL when the thread was not in guile mode.  */
typedef void* scm_t_guile_ticket;

/* Re-enter guile mode with TICKET: re-acquire the heap mutex (held
   while in guile mode, released by scm_leave_guile) and resume.  A
   NULL ticket is a no-op.  */
static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      scm_i_pthread_mutex_lock (&t->heap_mutex);
      resume (t);
    }
}
412
/* Record the stack top and register contents so the GC can scan this
   thread while it is out of guile mode.  See the long comment above
   for why this scheme is likely not fully correct.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}
425
/* Leave guile mode: snapshot the stack/registers for the GC and
   release the heap mutex.  Returns a ticket for scm_enter_guile.  */
static scm_t_guile_ticket
scm_leave_guile ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_mutex_unlock (&t->heap_mutex);
  return (scm_t_guile_ticket) t;
}
433
/* Protects ALL_THREADS and THREAD_COUNT below.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Singly-linked list (via the next_thread field) of all live threads.  */
static scm_i_thread *all_threads = NULL;
static int thread_count;

/* Dynamic state used as the parent when the caller supplies none.  */
static SCM scm_i_default_dynamic_state;
439
440/* Perform first stage of thread initialisation, in non-guile mode.
441 */
442static void
443guilify_self_1 (SCM_STACKITEM *base)
444{
445 scm_i_thread *t = malloc (sizeof (scm_i_thread));
446
447 t->pthread = scm_i_pthread_self ();
448 t->handle = SCM_BOOL_F;
449 t->result = SCM_BOOL_F;
450 t->cleanup_handler = SCM_BOOL_F;
451 t->mutexes = SCM_EOL;
452 t->held_mutex = NULL;
453 t->join_queue = SCM_EOL;
454 t->dynamic_state = SCM_BOOL_F;
455 t->dynwinds = SCM_EOL;
456 t->active_asyncs = SCM_EOL;
457 t->block_asyncs = 1;
458 t->pending_asyncs = 1;
459 t->last_debug_frame = NULL;
460 t->base = base;
461#ifdef __ia64__
462 /* Calculate and store off the base of this thread's register
463 backing store (RBS). Unfortunately our implementation(s) of
464 scm_ia64_register_backing_store_base are only reliable for the
465 main thread. For other threads, therefore, find out the current
466 top of the RBS, and use that as a maximum. */
467 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
468 {
469 ucontext_t ctx;
470 void *bsp;
471 getcontext (&ctx);
472 bsp = scm_ia64_ar_bsp (&ctx);
473 if (t->register_backing_store_base > bsp)
474 t->register_backing_store_base = bsp;
475 }
476#endif
477 t->continuation_root = SCM_EOL;
478 t->continuation_base = base;
479 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
480 t->sleep_mutex = NULL;
481 t->sleep_object = SCM_BOOL_F;
482 t->sleep_fd = -1;
483 /* XXX - check for errors. */
484 pipe (t->sleep_pipe);
485 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
486 scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
487 t->clear_freelists_p = 0;
488 t->gc_running_p = 0;
489 t->canceled = 0;
490 t->exited = 0;
491
492 t->freelist = SCM_EOL;
493 t->freelist2 = SCM_EOL;
494 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
495 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
496
497 scm_i_pthread_setspecific (scm_i_thread_key, t);
498
499 scm_i_pthread_mutex_lock (&t->heap_mutex);
500
501 scm_i_pthread_mutex_lock (&thread_admin_mutex);
502 t->next_thread = all_threads;
503 all_threads = t;
504 thread_count++;
505 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
506}
507
/* Perform second stage of thread initialisation, in guile mode.
   PARENT is the dynamic state to inherit from, or #f for the initial
   dynamic state.  Creates the thread's handle smob and join queue,
   and finally unblocks asyncs (blocked since guilify_self_1).
 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  /* Account the malloc'd descriptor to the GC.  */
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;
}
528
529\f
/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects the fields below */
  SCM owner;                    /* the thread holding us, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
553
/* Perform thread tear-down, in guile mode.  Runs the cleanup handler
   (if any), marks the thread exited, wakes all joiners, and wakes
   threads waiting on any mutex this thread still appears in.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice; catch all
         errors so tear-down always completes.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      (scm_t_catch_body) scm_call_0, ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_CAR (t->mutexes);
      fat_mutex *m  = SCM_MUTEX_DATA (mutex);
      scm_i_pthread_mutex_lock (&m->lock);

      unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      t->mutexes = SCM_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
595
/* Thread-exit hook, installed as the destructor of scm_i_thread_key
   (see init_thread_key); runs when a guile thread terminates.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here. */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Re-install the key: pthreads has already cleared it before
     calling this destructor.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down. */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure. */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list. */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt. */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  /* Clear the key so the destructor does not run again.  */
  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
643
/* One-time initializer guard for scm_i_thread_key.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create the per-thread key, registering on_thread_exit as its
   destructor so tear-down runs automatically at thread exit.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
651
652/* Perform any initializations necessary to bring the current thread
653 into guile mode, initializing Guile itself, if necessary.
654
655 BASE is the stack base to use with GC.
656
657 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
658 which case the default dynamic state is used.
659
660 Return zero when the thread was in guile mode already; otherwise
661 return 1.
662*/
663
/* Bring the current thread into guile mode (see the comment above for
   the contract).  Three cases: brand-new thread (possibly the very
   first, requiring global init), a guilified thread currently outside
   guile mode, or a thread already in guile mode.  */
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 A user call to scm_with_guile() will lead us to here.  This could
	 happen from anywhere on the stack, and in particular lower on the
	 stack than when it was when this thread was first guilified.  Thus,
	 `base' must be updated.  */
#if SCM_STACK_GROWS_UP
      if (base < t->base)
	t->base = base;
#else
      if (base > t->base)
	t->base = base;
#endif

      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
723
724#if SCM_USE_PTHREAD_THREADS
725
726#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
727/* This method for GNU/Linux and perhaps some other systems.
728 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
729 available on them. */
730#define HAVE_GET_THREAD_STACK_BASE
731
/* Return the base of the calling thread's stack (the GC scan limit),
   using pthread_getattr_np/pthread_attr_getstack (GNU/Linux).  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  /* Heuristic: if our own stack variable lies outside the reported
     range, the report is bogus and we fall back.  */
  if ((void *)&attr < start || (void *)&attr >= end)
    return scm_get_stack_base ();
  else
#endif
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}
761
762#elif HAVE_PTHREAD_GET_STACKADDR_NP
763/* This method for MacOS X.
764 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
765 but as of 2006 there's nothing obvious at apple.com. */
766#define HAVE_GET_THREAD_STACK_BASE
/* Return the calling thread's stack base via the MacOS X-specific
   pthread_get_stackaddr_np.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}
772
773#elif defined (__MINGW32__)
774/* This method for mingw. In mingw the basic scm_get_stack_base can be used
775 in any thread. We don't like hard-coding the name of a system, but there
776 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
777 work. */
778#define HAVE_GET_THREAD_STACK_BASE
/* On mingw, scm_get_stack_base works from any thread (see comment
   above), so just delegate.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}
784
785#endif /* pthread methods of get_thread_stack_base */
786
787#else /* !SCM_USE_PTHREAD_THREADS */
788
789#define HAVE_GET_THREAD_STACK_BASE
790
/* Without pthreads there is only one thread, so the process stack
   base is the thread stack base.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}
796
797#endif /* !SCM_USE_PTHREAD_THREADS */
798
799#ifdef HAVE_GET_THREAD_STACK_BASE
800
/* Public entry point: initialize Guile for the calling thread, with
   the stack base detected automatically and the default dynamic
   state as parent.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
			       scm_i_default_dynamic_state);
}
807
808#endif
809
/* Call FUNC (DATA) in guile mode, using the default dynamic state as
   parent; returns FUNC's result.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
816
/* pthread cleanup handler: leave guile mode if the thread is
   cancelled while inside scm_i_with_guile_and_parent.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  scm_leave_guile ();
}
822
/* Call FUNC (DATA) in guile mode with PARENT as the parent dynamic
   state.  Enters guile mode if necessary (leaving it again
   afterwards, even on cancellation) and runs FUNC behind a
   continuation barrier.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  /* A local serves as the stack-base marker for the GC.  */
  SCM_STACKITEM base_item;

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
      scm_leave_guile ();
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}
843
/* Call FUNC (DATA) outside guile mode, so that e.g. blocking system
   calls do not stall the garbage collector; re-enter guile mode
   before returning FUNC's result.  */
void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *res;
  scm_t_guile_ticket t;
  t = scm_leave_guile ();
  res = func (data);
  scm_enter_guile (t);
  return res;
}
854
/*** Thread creation */

/* Arguments passed from call-with-new-thread to the new thread; the
   mutex/cond pair implements the startup handshake in which the new
   thread stores its handle into `thread'.  */
typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
865
/* Body of a thread created by call-with-new-thread, run in guile
   mode.  Completes the startup handshake, then runs the thunk
   (under catch when a handler was given) and stores the result.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy out what we need before signalling: the parent's launch_data
     is stack-allocated and goes away after the handshake.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object back to the parent and wake it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
887
/* pthread start routine for call-with-new-thread: detach (the thread
   is joined via join-thread, not pthread_join) and enter guile mode
   with the parent's dynamic state.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
896
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait for the child to fill in data.thread; `data' is on our
     stack, so we must not return before the handshake completes.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
940
/* Arguments for scm_spawn_thread, the C-level analogue of
   launch_data: body/handler are C callbacks instead of Scheme
   procedures; the mutex/cond pair implements the same startup
   handshake.  */
typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
951
/* Body of a thread created by scm_spawn_thread, run in guile mode.
   Completes the startup handshake, then runs BODY (under
   scm_internal_catch when a handler was given).  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy out before signalling: the parent's spawn_data is
     stack-allocated and disappears after the handshake.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object back to the parent and wake it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
976
/* pthread start routine for scm_spawn_thread: detach and enter guile
   mode with the parent's dynamic state.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
985
/* C API: spawn a new guile thread running BODY (BODY_DATA), with
   HANDLER (HANDLER_DATA) catching errors when non-NULL.  Returns the
   new thread object; signals a system error if the thread cannot be
   created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait for the child to fill in data.thread before our stack-held
     `data' goes out of scope.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
1016
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Delegates to the platform scheduler yield.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1025
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      /* Mark canceled first (under the admin mutex) so a second
         cancel-thread is a no-op; drop the lock before the actual
         pthread cancel.  */
      t->canceled = 1;
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1050
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f clears the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Silently ignore threads that are already gone or going.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1074
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
	    (SCM thread),
	    "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* Exited/canceled threads report #f, matching set-thread-cleanup!
     which refuses to install handlers on them.  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1093
1094SCM scm_join_thread (SCM thread)
1095{
1096 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1097}
1098
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
            (SCM thread, SCM timeout, SCM timeoutval),
"Suspend execution of the calling thread until the target @var{thread} "
"terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* On timeout, TIMEOUTVAL (default #f) is returned.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  /* Joining oneself would deadlock forever.  */
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
        {
          /* Sleep on the target's join queue; block_self returns 0 on a
             normal wakeup, ETIMEDOUT when the absolute timeout passed.  */
          int err = block_self (t->join_queue, thread, &t->admin_mutex,
                                timeout_ptr);
          if (err == 0)
            {
              /* A wakeup does not guarantee the target exited (spurious
                 wakeups); re-check before returning its result.  */
              if (t->exited)
                {
                  res = t->result;
                  break;
                }
            }
          else if (err == ETIMEDOUT)
            break;

          /* Drop the admin mutex so pending asyncs can run, then retry.  */
          scm_i_pthread_mutex_unlock (&t->admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
        }
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1155
1156SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1157 (SCM obj),
1158 "Return @code{#t} if @var{obj} is a thread.")
1159#define FUNC_NAME s_scm_thread_p
1160{
1161 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1162}
1163#undef FUNC_NAME
1164
1165static SCM
1166fat_mutex_mark (SCM mx)
1167{
1168 fat_mutex *m = SCM_MUTEX_DATA (mx);
1169 scm_gc_mark (m->owner);
1170 return m->waiting;
1171}
1172
1173static size_t
1174fat_mutex_free (SCM mx)
1175{
1176 fat_mutex *m = SCM_MUTEX_DATA (mx);
1177 scm_i_pthread_mutex_destroy (&m->lock);
1178 scm_gc_free (m, sizeof (fat_mutex), "mutex");
1179 return 0;
1180}
1181
1182static int
1183fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1184{
1185 fat_mutex *m = SCM_MUTEX_DATA (mx);
1186 scm_puts ("#<mutex ", port);
1187 scm_uintprint ((scm_t_bits)m, 16, port);
1188 scm_puts (">", port);
1189 return 1;
1190}
1191
/* Allocate and initialize a fat mutex with the given behavior flags and
   wrap it in a mutex smob.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Set `waiting' to a valid SCM before SCM_NEWSMOB so the mark
     procedure never sees garbage; the real queue is allocated only
     after the smob exists, since make_queue can trigger GC.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1212
1213SCM scm_make_mutex (void)
1214{
1215 return scm_make_mutex_with_flags (SCM_EOL);
1216}
1217
/* Option symbols recognized by make-mutex.  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1221
1222SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1223 (SCM flags),
1224 "Create a new mutex. ")
1225#define FUNC_NAME s_scm_make_mutex_with_flags
1226{
1227 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1228
1229 SCM ptr = flags;
1230 while (! scm_is_null (ptr))
1231 {
1232 SCM flag = SCM_CAR (ptr);
1233 if (scm_is_eq (flag, unchecked_unlock_sym))
1234 unchecked_unlock = 1;
1235 else if (scm_is_eq (flag, allow_external_unlock_sym))
1236 external_unlock = 1;
1237 else if (scm_is_eq (flag, recursive_sym))
1238 recursive = 1;
1239 else
1240 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1241 ptr = SCM_CDR (ptr);
1242 }
1243 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1244}
1245#undef FUNC_NAME
1246
1247SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1248 (void),
1249 "Create a new recursive mutex. ")
1250#define FUNC_NAME s_scm_make_recursive_mutex
1251{
1252 return make_fat_mutex (1, 0, 0);
1253}
1254#undef FUNC_NAME
1255
/* Key thrown when a lock is obtained on a mutex whose owner exited.  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1257
/* Acquire MUTEX on behalf of OWNER (the current thread if unbound),
   blocking until available or until the absolute time *TIMEOUT passes
   (TIMEOUT may be NULL for no limit).  Sets *RET to 1 if the lock was
   obtained, 0 otherwise.  Returns #f on success or a (key . message)
   pair that the caller should throw as an exception.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
        {
          /* Unlocked: take ownership, and record the mutex in the new
             owner's `mutexes' list.  */
          m->owner = new_owner;
          m->level++;

          if (SCM_I_IS_THREAD (new_owner))
            {
              scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
              scm_i_pthread_mutex_lock (&t->admin_mutex);
              t->mutexes = scm_cons (mutex, t->mutexes);
              scm_i_pthread_mutex_unlock (&t->admin_mutex);
            }
          *ret = 1;
          break;
        }
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
        {
          /* Previous owner died while holding the mutex: steal the lock
             but report the abandoned-mutex condition to the caller.  */
          m->owner = new_owner;
          err = scm_cons (scm_abandoned_mutex_error_key,
                          scm_from_locale_string ("lock obtained on abandoned "
                                                  "mutex"));
          *ret = 1;
          break;
        }
      else if (scm_is_eq (m->owner, new_owner))
        {
          /* Relocking a mutex we already own: bump the level for a
             recursive mutex, otherwise report an error.  */
          if (m->recursive)
            {
              m->level++;
              *ret = 1;
            }
          else
            {
              err = scm_cons (scm_misc_error_key,
                              scm_from_locale_string ("mutex already locked "
                                                      "by thread"));
              *ret = 0;
            }
          break;
        }
      else
        {
          /* Held by another live thread.  Give up if the absolute
             timeout has already passed (tv_usec * 1000 converts
             microseconds to nanoseconds for the comparison).  */
          if (timeout != NULL)
            {
              gettimeofday (&current_time, NULL);
              if (current_time.tv_sec > timeout->tv_sec ||
                  (current_time.tv_sec == timeout->tv_sec &&
                   current_time.tv_usec * 1000 > timeout->tv_nsec))
                {
                  *ret = 0;
                  break;
                }
            }
          /* Wait on the mutex's queue, then release m->lock around
             SCM_TICK so pending asyncs can run before retrying.  */
          block_self (m->waiting, mutex, &m->lock, timeout);
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1334
1335SCM scm_lock_mutex (SCM mx)
1336{
1337 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1338}
1339
1340SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1341 (SCM m, SCM timeout, SCM owner),
1342"Lock @var{mutex}. If the mutex is already locked, the calling thread "
1343"blocks until the mutex becomes available. The function returns when "
1344"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1345"a thread already owns will succeed right away and will not block the "
1346"thread. That is, Guile's mutexes are @emph{recursive}. ")
1347#define FUNC_NAME s_scm_lock_mutex_timed
1348{
1349 SCM exception;
1350 int ret = 0;
1351 scm_t_timespec cwaittime, *waittime = NULL;
1352
1353 SCM_VALIDATE_MUTEX (1, m);
1354
1355 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1356 {
1357 to_timespec (timeout, &cwaittime);
1358 waittime = &cwaittime;
1359 }
1360
1361 exception = fat_mutex_lock (m, waittime, owner, &ret);
1362 if (!scm_is_false (exception))
1363 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1364 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1365}
1366#undef FUNC_NAME
1367
/* Within the current dynwind context, lock MUTEX on (re)entry and
   unlock it on exit.  The unwind handler is registered before the
   rewind handler so the mutex is released even if rewinding fails.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
1376
1377SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1378 (SCM mutex),
1379"Try to lock @var{mutex}. If the mutex is already locked by someone "
1380"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1381#define FUNC_NAME s_scm_try_mutex
1382{
1383 SCM exception;
1384 int ret = 0;
1385 scm_t_timespec cwaittime, *waittime = NULL;
1386
1387 SCM_VALIDATE_MUTEX (1, mutex);
1388
1389 to_timespec (scm_from_int(0), &cwaittime);
1390 waittime = &cwaittime;
1391
1392 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1393 if (!scm_is_false (exception))
1394 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1395 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1396}
1397#undef FUNC_NAME
1398
/*** Fat condition variables */

/* A condition variable implemented on top of the block_self /
   unblock_from_queue machinery rather than a raw pthread condvar.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1408
/* Unlock MUTEX once.  If COND is bound, additionally wait on it (with
   MUTEX released) until signalled or until the absolute time *WAITTIME
   passes; when RELOCK is nonzero the mutex is re-acquired on behalf of
   its previous owner before returning.  Returns 1 on success (or a
   signalled wait), 0 on timeout.  May raise Scheme errors for unlock
   violations.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
                  const scm_t_timespec *waittime, int relock)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  SCM owner = m->owner;

  /* Enforce the unlock policy chosen at mutex creation time.  */
  if (!scm_is_eq (owner, scm_current_thread ()))
    {
      if (m->level == 0)
        {
          /* Unlocking an unlocked mutex: an error unless the mutex was
             created with `unchecked-unlock'.  */
          if (!m->unchecked_unlock)
            {
              scm_i_pthread_mutex_unlock (&m->lock);
              scm_misc_error (NULL, "mutex not locked", SCM_EOL);
            }
          owner = scm_current_thread ();
        }
      else if (!m->allow_external_unlock)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
        }
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
        {
          int brk = 0;

          /* Release one level of the mutex; at level 0, hand ownership
             to the next waiter (or #f if none).  */
          if (m->level > 0)
            m->level--;
          if (m->level == 0)
            m->owner = unblock_from_queue (m->waiting);

          /* Keep asyncs blocked across the wait; they run via
             scm_async_click below once the wait state is consistent.  */
          t->block_asyncs++;

          err = block_self (c->waiting, cond, &m->lock, waittime);
          scm_i_pthread_mutex_unlock (&m->lock);

          if (err == 0)
            {
              /* Signalled.  */
              ret = 1;
              brk = 1;
            }
          else if (err == ETIMEDOUT)
            {
              ret = 0;
              brk = 1;
            }
          else if (err != EINTR)
            {
              errno = err;
              scm_syserror (NULL);
            }

          if (brk)
            {
              if (relock)
                scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
              break;
            }

          /* EINTR: let pending asyncs run, then retry the wait.  */
          t->block_asyncs--;
          scm_async_click ();

          scm_remember_upto_here_2 (cond, mutex);

          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  else
    {
      /* Plain unlock, no condition variable involved.  */
      if (m->level > 0)
        m->level--;
      if (m->level == 0)
        m->owner = unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1501
1502SCM scm_unlock_mutex (SCM mx)
1503{
1504 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1505}
1506
1507SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1508 (SCM mx, SCM cond, SCM timeout),
1509"Unlocks @var{mutex} if the calling thread owns the lock on "
1510"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1511"thread results in undefined behaviour. Once a mutex has been unlocked, "
1512"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1513"lock. Every call to @code{lock-mutex} by this thread must be matched "
1514"with a call to @code{unlock-mutex}. Only the last call to "
1515"@code{unlock-mutex} will actually unlock the mutex. ")
1516#define FUNC_NAME s_scm_unlock_mutex_timed
1517{
1518 scm_t_timespec cwaittime, *waittime = NULL;
1519
1520 SCM_VALIDATE_MUTEX (1, mx);
1521 if (! (SCM_UNBNDP (cond)))
1522 {
1523 SCM_VALIDATE_CONDVAR (2, cond);
1524
1525 if (! (SCM_UNBNDP (timeout)))
1526 {
1527 to_timespec (timeout, &cwaittime);
1528 waittime = &cwaittime;
1529 }
1530 }
1531
1532 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1533}
1534#undef FUNC_NAME
1535
1536SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1537 (SCM obj),
1538 "Return @code{#t} if @var{obj} is a mutex.")
1539#define FUNC_NAME s_scm_mutex_p
1540{
1541 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1542}
1543#undef FUNC_NAME
1544
1545SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1546 (SCM mx),
1547 "Return the thread owning @var{mx}, or @code{#f}.")
1548#define FUNC_NAME s_scm_mutex_owner
1549{
1550 SCM owner;
1551 fat_mutex *m = NULL;
1552
1553 SCM_VALIDATE_MUTEX (1, mx);
1554 m = SCM_MUTEX_DATA (mx);
1555 scm_i_pthread_mutex_lock (&m->lock);
1556 owner = m->owner;
1557 scm_i_pthread_mutex_unlock (&m->lock);
1558
1559 return owner;
1560}
1561#undef FUNC_NAME
1562
1563SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1564 (SCM mx),
1565 "Return the lock level of mutex @var{mx}.")
1566#define FUNC_NAME s_scm_mutex_level
1567{
1568 SCM_VALIDATE_MUTEX (1, mx);
1569 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1570}
1571#undef FUNC_NAME
1572
1573SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1574 (SCM mx),
1575 "Returns @code{#t} if the mutex @var{mx} is locked.")
1576#define FUNC_NAME s_scm_mutex_locked_p
1577{
1578 SCM_VALIDATE_MUTEX (1, mx);
1579 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1580}
1581#undef FUNC_NAME
1582
1583static SCM
1584fat_cond_mark (SCM cv)
1585{
1586 fat_cond *c = SCM_CONDVAR_DATA (cv);
1587 return c->waiting;
1588}
1589
1590static size_t
1591fat_cond_free (SCM mx)
1592{
1593 fat_cond *c = SCM_CONDVAR_DATA (mx);
1594 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1595 return 0;
1596}
1597
1598static int
1599fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1600{
1601 fat_cond *c = SCM_CONDVAR_DATA (cv);
1602 scm_puts ("#<condition-variable ", port);
1603 scm_uintprint ((scm_t_bits)c, 16, port);
1604 scm_puts (">", port);
1605 return 1;
1606}
1607
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Set `waiting' to a valid SCM before SCM_NEWSMOB so the mark
     procedure never sees garbage; the real queue is allocated only
     after the smob exists, since make_queue can trigger GC.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1623
1624SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1625 (SCM cv, SCM mx, SCM t),
1626"Wait until @var{cond-var} has been signalled. While waiting, "
1627"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1628"is locked again when this function returns. When @var{time} is given, "
1629"it specifies a point in time where the waiting should be aborted. It "
1630"can be either a integer as returned by @code{current-time} or a pair "
1631"as returned by @code{gettimeofday}. When the waiting is aborted the "
1632"mutex is locked and @code{#f} is returned. When the condition "
1633"variable is in fact signalled, the mutex is also locked and @code{#t} "
1634"is returned. ")
1635#define FUNC_NAME s_scm_timed_wait_condition_variable
1636{
1637 scm_t_timespec waittime, *waitptr = NULL;
1638
1639 SCM_VALIDATE_CONDVAR (1, cv);
1640 SCM_VALIDATE_MUTEX (2, mx);
1641
1642 if (!SCM_UNBNDP (t))
1643 {
1644 to_timespec (t, &waittime);
1645 waitptr = &waittime;
1646 }
1647
1648 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1649}
1650#undef FUNC_NAME
1651
1652static void
1653fat_cond_signal (fat_cond *c)
1654{
1655 unblock_from_queue (c->waiting);
1656}
1657
1658SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1659 (SCM cv),
1660 "Wake up one thread that is waiting for @var{cv}")
1661#define FUNC_NAME s_scm_signal_condition_variable
1662{
1663 SCM_VALIDATE_CONDVAR (1, cv);
1664 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1665 return SCM_BOOL_T;
1666}
1667#undef FUNC_NAME
1668
1669static void
1670fat_cond_broadcast (fat_cond *c)
1671{
1672 while (scm_is_true (unblock_from_queue (c->waiting)))
1673 ;
1674}
1675
1676SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1677 (SCM cv),
1678 "Wake up all threads that are waiting for @var{cv}. ")
1679#define FUNC_NAME s_scm_broadcast_condition_variable
1680{
1681 SCM_VALIDATE_CONDVAR (1, cv);
1682 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1683 return SCM_BOOL_T;
1684}
1685#undef FUNC_NAME
1686
1687SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1688 (SCM obj),
1689 "Return @code{#t} if @var{obj} is a condition variable.")
1690#define FUNC_NAME s_scm_condition_variable_p
1691{
1692 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1693}
1694#undef FUNC_NAME
1695
/*** Marking stacks */

/* XXX - what to do with this? Do we need to handle this for blocked
   threads as well?
*/
#ifdef __ia64__
/* On ia64, mark the current thread's register backing store (a second,
   upward-growing stack) in addition to the saved ucontext, since live
   SCM values may reside there.  */
# define SCM_MARK_BACKING_STORE() do { \
    ucontext_t ctx; \
    SCM_STACKITEM * top, * bot; \
    getcontext (&ctx); \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM))); \
    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1715
/* Conservatively mark the C stack and saved registers of every thread.
   Called during GC, after all threads have been put to sleep.  */
void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      scm_gc_mark (t->handle);

      /* Mark the span between the thread's stack base and its saved
         stack pointer, in whichever direction this platform grows.  */
#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      /* Registers saved at suspend time may also hold live SCMs.  */
      scm_mark_locations ((void *) &t->regs,
                          ((size_t) sizeof(t->regs)
                           / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}
1740
1741/*** Select */
1742
1743int
1744scm_std_select (int nfds,
1745 SELECT_TYPE *readfds,
1746 SELECT_TYPE *writefds,
1747 SELECT_TYPE *exceptfds,
1748 struct timeval *timeout)
1749{
1750 fd_set my_readfds;
1751 int res, eno, wakeup_fd;
1752 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1753 scm_t_guile_ticket ticket;
1754
1755 if (readfds == NULL)
1756 {
1757 FD_ZERO (&my_readfds);
1758 readfds = &my_readfds;
1759 }
1760
1761 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1762 SCM_TICK;
1763
1764 wakeup_fd = t->sleep_pipe[0];
1765 ticket = scm_leave_guile ();
1766 FD_SET (wakeup_fd, readfds);
1767 if (wakeup_fd >= nfds)
1768 nfds = wakeup_fd+1;
1769 res = select (nfds, readfds, writefds, exceptfds, timeout);
1770 t->sleep_fd = -1;
1771 eno = errno;
1772 scm_enter_guile (ticket);
1773
1774 scm_i_reset_sleep (t);
1775
1776 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1777 {
1778 char dummy;
1779 read (wakeup_fd, &dummy, 1);
1780 FD_CLR (wakeup_fd, readfds);
1781 res -= 1;
1782 if (res == 0)
1783 {
1784 eno = EINTR;
1785 res = -1;
1786 }
1787 }
1788 errno = eno;
1789 return res;
1790}
1791
1792/* Convenience API for blocking while in guile mode. */
1793
1794#if SCM_USE_PTHREAD_THREADS
1795
1796int
1797scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1798{
1799 scm_t_guile_ticket t = scm_leave_guile ();
1800 int res = scm_i_pthread_mutex_lock (mutex);
1801 scm_enter_guile (t);
1802 return res;
1803}
1804
1805static void
1806do_unlock (void *data)
1807{
1808 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1809}
1810
/* Lock MUTEX now and arrange for it to be unlocked when the current
   dynwind context is left (normally or non-locally).  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1817
/* Like scm_i_pthread_cond_wait, but leave guile mode for the duration
   of the wait.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  /* Record the mutex the wait re-acquires so that cancellation/cleanup
     code knows which mutex this thread holds.  The ticket is the
     thread's scm_i_thread pointer in disguise.  */
  ((scm_i_thread *)t)->held_mutex = mutex;
  int res = scm_i_pthread_cond_wait (cond, mutex);
  ((scm_i_thread *)t)->held_mutex = NULL;
  scm_enter_guile (t);
  return res;
}
1828
/* Like scm_i_pthread_cond_timedwait (absolute deadline WT), but leave
   guile mode for the duration of the wait.  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  /* See scm_pthread_cond_wait for why held_mutex is recorded.  */
  ((scm_i_thread *)t)->held_mutex = mutex;
  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  ((scm_i_thread *)t)->held_mutex = NULL;
  scm_enter_guile (t);
  return res;
}
1841
1842#endif
1843
1844unsigned long
1845scm_std_usleep (unsigned long usecs)
1846{
1847 struct timeval tv;
1848 tv.tv_usec = usecs % 1000000;
1849 tv.tv_sec = usecs / 1000000;
1850 scm_std_select (0, NULL, NULL, NULL, &tv);
1851 return tv.tv_sec * 1000000 + tv.tv_usec;
1852}
1853
1854unsigned int
1855scm_std_sleep (unsigned int secs)
1856{
1857 struct timeval tv;
1858 tv.tv_usec = 0;
1859 tv.tv_sec = secs;
1860 scm_std_select (0, NULL, NULL, NULL, &tv);
1861 return tv.tv_sec;
1862}
1863
1864/*** Misc */
1865
1866SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1867 (void),
1868 "Return the thread that called this function.")
1869#define FUNC_NAME s_scm_current_thread
1870{
1871 return SCM_I_CURRENT_THREAD->handle;
1872}
1873#undef FUNC_NAME
1874
1875static SCM
1876scm_c_make_list (size_t n, SCM fill)
1877{
1878 SCM res = SCM_EOL;
1879 while (n-- > 0)
1880 res = scm_cons (fill, res);
1881 return res;
1882}
1883
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate N cells, then fill them in under the lock; L walks the
     list as a pointer-to-cdr so unused cells can be dropped at the end.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is hidden from Scheme.  */
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  /* Truncate the leftover pre-allocated cells, if any.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1912
1913SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1914 (SCM thread),
1915 "Return @code{#t} iff @var{thread} has exited.\n")
1916#define FUNC_NAME s_scm_thread_exited_p
1917{
1918 return scm_from_bool (scm_c_thread_exited_p (thread));
1919}
1920#undef FUNC_NAME
1921
1922int
1923scm_c_thread_exited_p (SCM thread)
1924#define FUNC_NAME s_scm_thread_exited_p
1925{
1926 scm_i_thread *t;
1927 SCM_VALIDATE_THREAD (1, thread);
1928 t = SCM_I_THREAD_DATA (thread);
1929 return t->exited;
1930}
1931#undef FUNC_NAME
1932
/* Condition broadcast by scm_i_thread_wake_up to end a GC pause.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Flag telling running threads to park themselves for GC.  */
int scm_i_thread_go_to_sleep;
/* Nonzero once scm_init_threads has run; the put-to-sleep/wake-up
   machinery is a no-op before that.  */
static int threads_initialized_p = 0;
1936
/* Stop the world for GC: take thread_admin_mutex, then acquire every
   thread's heap mutex, which forces each thread to park.  The matching
   release is scm_i_thread_wake_up.  */
void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_lock (&t->heap_mutex);
      /* All heap mutexes are held now, so the flag can be cleared.  */
      scm_i_thread_go_to_sleep = 0;
    }
}
1955
1956void
1957scm_i_thread_invalidate_freelists ()
1958{
1959 /* thread_admin_mutex is already locked. */
1960
1961 scm_i_thread *t;
1962 for (t = all_threads; t; t = t->next_thread)
1963 if (t != SCM_I_CURRENT_THREAD)
1964 t->clear_freelists_p = 1;
1965}
1966
/* End a GC pause: wake the sleeping threads and release the mutexes
   taken by scm_i_thread_put_to_sleep, in broadcast-then-unlock order.  */
void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      /* Re-enter guile mode; the ticket is this thread's own handle.  */
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}
1981
/* Park the calling thread until GC finishes: suspend, wait on
   wake_up_cond (which releases and re-takes the heap mutex), resume.  */
void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  /* Record the mutex the wait holds so cleanup code can release it if
     this thread is cancelled mid-wait.  */
  t->held_mutex = &t->heap_mutex;
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  t->held_mutex = NULL;
  resume (t);
}
1991
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
/* Nesting depth of critical sections (the mutex is recursive).  */
int scm_i_critical_section_level = 0;

/* Default mutex for scm_dynwind_critical_section when passed #f.  */
static SCM dynwind_critical_section_mutex;
1998
/* Within the current dynwind context, hold MUTEX (or the shared
   recursive default when MUTEX is #f) and block asyncs.  The lock is
   taken before asyncs are blocked; both are undone on unwind.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
2007
/*** Initialization */

/* Per-thread freelist keys, created in scm_threads_prehistory.  */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Shared recursive-mutex attribute, initialized in prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
2016
/* Earliest thread-system setup, run before the SCM heap exists: create
   the global mutexes, condition variable, and freelist keys, then
   register the main thread (phase 1) with BASE as its stack base.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex must be recursive; see
     SCM_CRITICAL_SECTION_START/END.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
2035
/* Smob type tags, assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
2039
/* Second-phase thread-system setup, run once the SCM heap works:
   register the thread/mutex/condvar smob types, finish guilifying the
   main thread, and enable the GC sleep/wake machinery.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
2066
2067void
2068scm_init_threads_default_dynamic_state ()
2069{
2070 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2071 scm_i_default_dynamic_state = scm_permanent_object (state);
2072}
2073
/* Register the Scheme procedures defined by the SCM_DEFINE forms in
   this file (snarfed into threads.x).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2079
2080/*
2081 Local Variables:
2082 c-file-style: "gnu"
2083 End:
2084*/