Resolve a deadlock caused by not checking mutex state after calling `SCM_TICK'.
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19 \f
20
21 #include "libguile/_scm.h"
22
23 #if HAVE_UNISTD_H
24 #include <unistd.h>
25 #endif
#include <stdio.h>
#include <stdlib.h>  /* for abort */
#include <assert.h>
28
29 #ifdef HAVE_STRING_H
30 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
31 #endif
32
33 #if HAVE_SYS_TIME_H
34 #include <sys/time.h>
35 #endif
36
37 #include "libguile/validate.h"
38 #include "libguile/root.h"
39 #include "libguile/eval.h"
40 #include "libguile/async.h"
41 #include "libguile/ports.h"
42 #include "libguile/threads.h"
43 #include "libguile/dynwind.h"
44 #include "libguile/iselect.h"
45 #include "libguile/fluids.h"
46 #include "libguile/continuations.h"
47 #include "libguile/gc.h"
48 #include "libguile/init.h"
49 #include "libguile/scmsigs.h"
50 #include "libguile/strings.h"
51
52 #ifdef __MINGW32__
53 #ifndef ETIMEDOUT
54 # define ETIMEDOUT WSAETIMEDOUT
55 #endif
56 # include <fcntl.h>
57 # include <process.h>
58 # define pipe(fd) _pipe (fd, 256, O_BINARY)
59 #endif /* __MINGW32__ */
60
61 static void
62 to_timespec (SCM t, scm_t_timespec *waittime)
63 {
64 if (scm_is_pair (t))
65 {
66 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
67 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
68 }
69 else
70 {
71 double time = scm_to_double (t);
72 double sec = scm_c_truncate (time);
73
74 waittime->tv_sec = (long) sec;
75 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
76 }
77 }
78
79 /*** Queues */
80
/* Make an empty queue data structure.

   A queue is a pair: the cdr holds the list of elements from front to
   back, and the car holds the last pair of that list (the tail cell),
   or SCM_EOL when the queue is empty -- see enqueue/dequeue below.  */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}
88
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
*/
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell becomes the head of the list.  */
    SCM_SETCDR (q, c);
  else
    /* Append after the current tail cell (stored in the car of Q).  */
    SCM_SETCDR (SCM_CAR (q), c);
  /* The new cell is now the tail.  */
  SCM_SETCAR (q, c);
  return c;
}
103
104 /* Remove the element that the handle C refers to from the queue Q. C
105 must have been returned from a call to enqueue. The return value
106 is zero when the element referred to by C has already been removed.
107 Otherwise, 1 is returned.
108 */
109 static int
110 remqueue (SCM q, SCM c)
111 {
112 SCM p, prev = q;
113 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
114 {
115 if (scm_is_eq (p, c))
116 {
117 if (scm_is_eq (c, SCM_CAR (q)))
118 SCM_SETCAR (q, SCM_CDR (c));
119 SCM_SETCDR (prev, SCM_CDR (c));
120 return 1;
121 }
122 prev = p;
123 }
124 return 0;
125 }
126
127 /* Remove the front-most element from the queue Q and return it.
128 Return SCM_BOOL_F when Q is empty.
129 */
130 static SCM
131 dequeue (SCM q)
132 {
133 SCM c = SCM_CDR (q);
134 if (scm_is_null (c))
135 return SCM_BOOL_F;
136 else
137 {
138 SCM_SETCDR (q, SCM_CDR (c));
139 if (scm_is_null (SCM_CDR (q)))
140 SCM_SETCAR (q, SCM_EOL);
141 return SCM_CAR (c);
142 }
143 }
144
145 /*** Thread smob routines */
146
/* GC mark function for thread smobs: mark every SCM object reachable
   from the thread structure.  */
static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->cleanup_handler);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->mutexes);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  /* The returned value is marked by the caller (tail-mark idiom).  */
  return t->dynamic_state;
}
160
/* Smob print function for threads: writes "#<thread ID (ADDR)>".  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the unsigned type whose size matches pthread_t.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  /* The address of the thread structure, printed in hex.  */
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}
195
196 static size_t
197 thread_free (SCM obj)
198 {
199 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
200 assert (t->exited);
201 scm_gc_free (t, sizeof (*t), "thread");
202 return 0;
203 }
204
205 /*** Blocking on queues. */
206
207 /* See also scm_i_queue_async_cell for how such a block is
208 interrputed.
209 */
210
211 /* Put the current thread on QUEUE and go to sleep, waiting for it to
212 be woken up by a call to 'unblock_from_queue', or to be
213 interrupted. Upon return of this function, the current thread is
214 no longer on QUEUE, even when the sleep has been interrupted.
215
216 The QUEUE data structure is assumed to be protected by MUTEX and
217 the caller of block_self must hold MUTEX. It will be atomically
218 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
219
220 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
221 as MUTEX is needed.
222
223 When WAITTIME is not NULL, the sleep will be aborted at that time.
224
225 The return value of block_self is an errno value. It will be zero
226 when the sleep has been successfully completed by a call to
227 unblock_from_queue, EINTR when it has been interrupted by the
228 delivery of a system async, and ETIMEDOUT when the timeout has
229 expired.
230
231 The system asyncs themselves are not executed by block_self.
232 */
/* See the contract in the comment above: returns 0 on a normal wakeup,
   EINTR when interrupted by an async, ETIMEDOUT on timeout.  */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    /* scm_i_setup_sleep refused the sleep (presumably an async is
       already pending -- see scmsigs); report interruption without
       ever enqueueing ourselves.  */
    err = EINTR;
  else
    {
      /* Block asyncs while we are on QUEUE so nothing runs between
	 being woken and removing ourselves from the queue again.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
264
265 /* Wake up the first thread on QUEUE, if any. The caller must hold
266 the mutex that protects QUEUE. The awoken thread is returned, or
267 #f when the queue was empty.
268 */
269 static SCM
270 unblock_from_queue (SCM queue)
271 {
272 SCM thread = dequeue (queue);
273 if (scm_is_true (thread))
274 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
275 return thread;
276 }
277
278 /* Getting into and out of guile mode.
279 */
280
281 /* Ken Raeburn observes that the implementation of suspend and resume
282 (and the things that build on top of them) are very likely not
283 correct (see below). We will need fix this eventually, and that's
284 why scm_leave_guile/scm_enter_guile are not exported in the API.
285
286 Ken writes:
287
288 Consider this sequence:
289
290 Function foo, called in Guile mode, calls suspend (maybe indirectly
291 through scm_leave_guile), which does this:
292
293 // record top of stack for the GC
294 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
295 var 't'
296 // save registers.
297 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
298 setjmp (t->regs); // here's most of the magic
299
300 ... and returns.
301
302 Function foo has a SCM value X, a handle on a non-immediate object, in
303 a caller-saved register R, and it's the only reference to the object
304 currently.
305
306 The compiler wants to use R in suspend, so it pushes the current
307 value, X, into a stack slot which will be reloaded on exit from
308 suspend; then it loads stuff into R and goes about its business. The
309 setjmp call saves (some of) the current registers, including R, which
310 no longer contains X. (This isn't a problem for a normal
311 setjmp/longjmp situation, where longjmp would be called before
312 setjmp's caller returns; the old value for X would be loaded back from
313 the stack after the longjmp, before the function returned.)
314
315 So, suspend returns, loading X back into R (and invalidating the jump
316 buffer) in the process. The caller foo then goes off and calls a
317 bunch of other functions out of Guile mode, occasionally storing X on
318 the stack again, but, say, much deeper on the stack than suspend's
319 stack frame went, and the stack slot where suspend had written X has
320 long since been overwritten with other values.
321
322 Okay, nothing actively broken so far. Now, let garbage collection
323 run, triggered by another thread.
324
325 The thread calling foo is out of Guile mode at the time, so the
326 garbage collector just scans a range of stack addresses. Too bad that
327 X isn't stored there. So the pointed-to storage goes onto the free
328 list, and I think you can see where things go from there.
329
330 Is there anything I'm missing that'll prevent this scenario from
331 happening? I mean, aside from, "well, suspend and scm_leave_guile
332 don't have many local variables, so they probably won't need to save
333 any registers on most systems, so we hope everything will wind up in
334 the jump buffer and we'll just get away with it"?
335
336 (And, going the other direction, if scm_leave_guile and suspend push
337 the stack pointer over onto a new page, and foo doesn't make further
338 function calls and thus the stack pointer no longer includes that
339 page, are we guaranteed that the kernel cannot release the now-unused
340 stack page that contains the top-of-stack pointer we just saved? I
341 don't know if any OS actually does that. If it does, we could get
342 faults in garbage collection.)
343
344 I don't think scm_without_guile has to have this problem, as it gets
345 more control over the stack handling -- but it should call setjmp
346 itself. I'd probably try something like:
347
348 // record top of stack for the GC
349 t->top = SCM_STACK_PTR (&t);
350 // save registers.
351 SCM_FLUSH_REGISTER_WINDOWS;
352 setjmp (t->regs);
353 res = func(data);
354 scm_enter_guile (t);
355
356 ... though even that's making some assumptions about the stack
357 ordering of local variables versus caller-saved registers.
358
359 For something like scm_leave_guile to work, I don't think it can just
360 rely on invalidated jump buffers. A valid jump buffer, and a handle
361 on the stack state at the point when the jump buffer was initialized,
362 together, would work fine, but I think then we're talking about macros
363 invoking setjmp in the caller's stack frame, and requiring that the
364 caller of scm_leave_guile also call scm_enter_guile before returning,
365 kind of like pthread_cleanup_push/pop calls that have to be paired up
366 in a function. (In fact, the pthread ones have to be paired up
367 syntactically, as if they might expand to a compound statement
368 incorporating the user's code, and invoking a compiler's
369 exception-handling primitives. Which might be something to think
370 about for cases where Guile is used with C++ exceptions or
371 pthread_cancel.)
372 */
373
374 scm_i_pthread_key_t scm_i_thread_key;
375
/* Mark thread T as being back in guile mode; called with T's
   heap_mutex held (see scm_enter_guile).  */
static void
resume (scm_i_thread *t)
{
  /* A NULL top means "in guile mode" -- the GC then scans from the
     live stack pointer instead of the recorded one.  */
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      /* A GC ran while we were suspended; drop our cached freelist
	 cells, which may have been reclaimed.  */
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}
387
388 typedef void* scm_t_guile_ticket;
389
/* Re-enter guile mode with TICKET, as returned by scm_leave_guile.
   A NULL ticket means the thread never actually left guile mode, so
   there is nothing to do.  */
static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      /* heap_mutex is held for the whole time a thread is in guile
	 mode (see guilify_self_1 / scm_leave_guile).  */
      scm_i_pthread_mutex_lock (&t->heap_mutex);
      resume (t);
    }
}
400
/* Record the current stack top and registers so the GC can scan this
   thread conservatively while it is out of guile mode.  See the long
   comment above about the known fragility of this scheme.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}
413
/* Leave guile mode: snapshot the stack/registers for the GC and drop
   the heap mutex.  Returns a ticket for scm_enter_guile.  */
static scm_t_guile_ticket
scm_leave_guile ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_mutex_unlock (&t->heap_mutex);
  return (scm_t_guile_ticket) t;
}
421
422 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
423 static scm_i_thread *all_threads = NULL;
424 static int thread_count;
425
426 static SCM scm_i_default_dynamic_state;
427
428 /* Perform first stage of thread initialisation, in non-guile mode.
429 */
430 static void
431 guilify_self_1 (SCM_STACKITEM *base)
432 {
433 scm_i_thread *t = malloc (sizeof (scm_i_thread));
434
435 t->pthread = scm_i_pthread_self ();
436 t->handle = SCM_BOOL_F;
437 t->result = SCM_BOOL_F;
438 t->cleanup_handler = SCM_BOOL_F;
439 t->mutexes = SCM_EOL;
440 t->join_queue = SCM_EOL;
441 t->dynamic_state = SCM_BOOL_F;
442 t->dynwinds = SCM_EOL;
443 t->active_asyncs = SCM_EOL;
444 t->block_asyncs = 1;
445 t->pending_asyncs = 1;
446 t->last_debug_frame = NULL;
447 t->base = base;
448 #ifdef __ia64__
449 /* Calculate and store off the base of this thread's register
450 backing store (RBS). Unfortunately our implementation(s) of
451 scm_ia64_register_backing_store_base are only reliable for the
452 main thread. For other threads, therefore, find out the current
453 top of the RBS, and use that as a maximum. */
454 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
455 {
456 ucontext_t ctx;
457 void *bsp;
458 getcontext (&ctx);
459 bsp = scm_ia64_ar_bsp (&ctx);
460 if (t->register_backing_store_base > bsp)
461 t->register_backing_store_base = bsp;
462 }
463 #endif
464 t->continuation_root = SCM_EOL;
465 t->continuation_base = base;
466 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
467 t->sleep_mutex = NULL;
468 t->sleep_object = SCM_BOOL_F;
469 t->sleep_fd = -1;
470 /* XXX - check for errors. */
471 pipe (t->sleep_pipe);
472 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
473 scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
474 t->clear_freelists_p = 0;
475 t->gc_running_p = 0;
476 t->canceled = 0;
477 t->exited = 0;
478
479 t->freelist = SCM_EOL;
480 t->freelist2 = SCM_EOL;
481 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
482 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
483
484 scm_i_pthread_setspecific (scm_i_thread_key, t);
485
486 scm_i_pthread_mutex_lock (&t->heap_mutex);
487
488 scm_i_pthread_mutex_lock (&thread_admin_mutex);
489 t->next_thread = all_threads;
490 all_threads = t;
491 thread_count++;
492 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
493 }
494
495 /* Perform second stage of thread initialisation, in guile mode.
496 */
/* Perform second stage of thread initialisation, in guile mode:
   create the SCM-level objects (handle, dynamic state, join queue)
   that could not be made before guile mode was entered.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  /* PARENT is the parent dynamic state, or #f for the initial one.  */
  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Setup is complete; allow asyncs from now on (was 1 since
     guilify_self_1).  */
  t->block_asyncs = 0;
}
515
516 \f
517 /*** Fat mutexes */
518
519 /* We implement our own mutex type since we want them to be 'fair', we
520 want to do fancy things while waiting for them (like running
521 asyncs) and we might want to add things that are nice for
522 debugging.
523 */
524
/* A "fat" mutex: a pthread mutex plus the bookkeeping needed for
   fairness, recursion and error checking.  */
typedef struct {
  scm_i_pthread_mutex_t lock;	/* protects the fields below, e.g. the
				   waiting queue (see do_thread_exit) */
  SCM owner;
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
540
541 /* Perform thread tear-down, in guile mode.
542 */
/* Perform thread tear-down, in guile mode: run the cleanup handler,
   mark the thread exited, and wake every thread that is joining us or
   waiting on a mutex we were involved with.  V is the scm_i_thread.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear first so a second entry cannot run the handler again;
	 errors in the handler are reported, not propagated.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      (scm_t_catch_body) scm_call_0, ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* Wake the waiters of each mutex on our list, so they can notice
     that we are gone.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_CAR (t->mutexes);
      fat_mutex *m  = SCM_MUTEX_DATA (mutex);
      scm_i_pthread_mutex_lock (&m->lock);

      unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      t->mutexes = SCM_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
582
/* Destructor registered for scm_i_thread_key (see init_thread_key);
   runs when a guilified thread terminates.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* Re-install the key for the duration of the teardown; pthreads has
     already cleared it before calling this destructor.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
622
623 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
624
/* Create the pthread key that holds each thread's scm_i_thread; its
   destructor (on_thread_exit) performs per-thread tear-down.  Run
   exactly once via init_thread_key_once.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
630
631 /* Perform any initializations necessary to bring the current thread
632 into guile mode, initializing Guile itself, if necessary.
633
634 BASE is the stack base to use with GC.
635
636 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
637 which case the default dynamic state is used.
638
639 Return zero when the thread was in guile mode already; otherwise
640 return 1.
641 */
642
/* See the contract in the comment above: returns 1 when the thread
   actually entered guile mode, 0 when it was in guile mode already.  */
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 XXX - base might be lower than when this thread was first
	 guilified.
       */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
693
694 #if SCM_USE_PTHREAD_THREADS
695
696 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
697 /* This method for GNU/Linux and perhaps some other systems.
698 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
699 available on them. */
700 #define HAVE_GET_THREAD_STACK_BASE
701
/* Return the base address of the calling thread's stack, for use as
   the conservative-GC scanning base (GNU/Linux variant).  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  /* If a local variable is not within the reported stack range, the
     report is bogus (main thread under LinuxThreads); fall back.  */
  if ((void *)&attr < start || (void *)&attr >= end)
    return scm_get_stack_base ();
  else
#endif
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}
731
732 #elif HAVE_PTHREAD_GET_STACKADDR_NP
733 /* This method for MacOS X.
734 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
735 but as of 2006 there's nothing obvious at apple.com. */
736 #define HAVE_GET_THREAD_STACK_BASE
/* Return the calling thread's stack base (MacOS X variant).  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}
742
743 #elif defined (__MINGW32__)
744 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
745 in any thread. We don't like hard-coding the name of a system, but there
746 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
747 work. */
748 #define HAVE_GET_THREAD_STACK_BASE
/* Return the calling thread's stack base (mingw variant; see the
   comment above -- scm_get_stack_base works in any thread there).  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}
754
755 #endif /* pthread methods of get_thread_stack_base */
756
757 #else /* !SCM_USE_PTHREAD_THREADS */
758
759 #define HAVE_GET_THREAD_STACK_BASE
760
/* Return the stack base when not using pthreads: there is only one
   thread, so the generic scm_get_stack_base suffices.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}
766
767 #endif /* !SCM_USE_PTHREAD_THREADS */
768
769 #ifdef HAVE_GET_THREAD_STACK_BASE
770
/* Public entry point: bring the calling thread (and Guile itself, if
   necessary) into guile mode, using the detected stack base and the
   default dynamic state.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
			       scm_i_default_dynamic_state);
}
777
778 #endif
779
/* Call FUNC (DATA) in guile mode and return its result, using the
   default dynamic state as the parent state.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
786
/* pthread cleanup handler: leave guile mode when the thread is
   cancelled inside scm_i_with_guile_and_parent.  X is unused.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  scm_leave_guile ();
}
792
/* Call FUNC (DATA) in guile mode with PARENT as the parent dynamic
   state.  Enters guile mode only when the thread is not in it
   already, and leaves it again afterwards in that case.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  /* A local serves as the stack base for conservative GC scanning.  */
  SCM_STACKITEM base_item;

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      /* Make sure we leave guile mode even on pthread cancellation.  */
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
      scm_leave_guile ();
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}
813
814 void *
815 scm_without_guile (void *(*func)(void *), void *data)
816 {
817 void *res;
818 scm_t_guile_ticket t;
819 t = scm_leave_guile ();
820 res = func (data);
821 scm_enter_guile (t);
822 return res;
823 }
824
825 /*** Thread creation */
826
827 typedef struct {
828 SCM parent;
829 SCM thunk;
830 SCM handler;
831 SCM thread;
832 scm_i_pthread_mutex_t mutex;
833 scm_i_pthread_cond_t cond;
834 } launch_data;
835
/* Body of a thread started by call-with-new-thread, run in guile
   mode.  D is the launch_data handed over by the parent; it lives on
   the parent's stack, so everything needed is copied out before the
   parent is released.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy before signalling: DATA becomes invalid once the parent in
     scm_call_with_new_thread returns.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object back to the waiting parent.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
857
/* pthread start routine for call-with-new-thread: detach, then run
   really_launch in guile mode with the requested parent state.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Detached: the thread is joined via join-thread, not pthread_join.  */
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
866
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait for the child to copy what it needs out of DATA and record
     its thread object; DATA is on our stack and about to die.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
910
911 typedef struct {
912 SCM parent;
913 scm_t_catch_body body;
914 void *body_data;
915 scm_t_catch_handler handler;
916 void *handler_data;
917 SCM thread;
918 scm_i_pthread_mutex_t mutex;
919 scm_i_pthread_cond_t cond;
920 } spawn_data;
921
/* Body of a thread started by scm_spawn_thread, run in guile mode.
   D is the spawn_data on the spawning thread's stack; copy everything
   out before signalling the spawner.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy before signalling: DATA dies when scm_spawn_thread returns.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object back to the waiting spawner.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
946
/* pthread start routine for scm_spawn_thread: detach, then run
   really_spawn in guile mode with the requested parent state.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Detached: the thread is joined via join-thread, not pthread_join.  */
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
955
/* C-level thread creation: run BODY (BODY_DATA) in a new guile-mode
   thread, with HANDLER (HANDLER_DATA) catching errors when HANDLER is
   non-NULL.  Returns the new thread object.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait for the child to copy what it needs out of DATA and record
     its thread object; DATA is on our stack and about to die.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
986
987 SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
988 (),
989 "Move the calling thread to the end of the scheduling queue.")
990 #define FUNC_NAME s_scm_yield
991 {
992 return scm_from_bool (scm_i_sched_yield ());
993 }
994 #undef FUNC_NAME
995
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Drop the admin mutex before the (potentially slow) cancel
	 call; the flag already makes repeat cancels a no-op.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1020
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f removes the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Setting a handler on a finished or canceled thread is silently
     ignored -- it could never run.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1044
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
	    (SCM thread),
	    "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* A finished or canceled thread reports no handler.  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1063
/* C-level join with neither timeout nor timeout value; see
   scm_join_thread_timed below.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1068
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* When a timeout value was supplied, it becomes the result of a
     timed-out join; otherwise the result defaults to #f.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  /* block_self releases and re-acquires the admin mutex around
	     the actual wait on the join queue.  */
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Interrupted (e.g. by an async): drop the admin mutex so the
	     async can run without deadlocking, then loop and re-examine
	     the target's state, which may have changed while the mutex
	     was released -- never assume nothing happened during
	     SCM_TICK.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1125
1126 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1127 (SCM obj),
1128 "Return @code{#t} if @var{obj} is a thread.")
1129 #define FUNC_NAME s_scm_thread_p
1130 {
1131 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1132 }
1133 #undef FUNC_NAME
1134
1135 static SCM
1136 fat_mutex_mark (SCM mx)
1137 {
1138 fat_mutex *m = SCM_MUTEX_DATA (mx);
1139 scm_gc_mark (m->owner);
1140 return m->waiting;
1141 }
1142
1143 static size_t
1144 fat_mutex_free (SCM mx)
1145 {
1146 fat_mutex *m = SCM_MUTEX_DATA (mx);
1147 scm_i_pthread_mutex_destroy (&m->lock);
1148 scm_gc_free (m, sizeof (fat_mutex), "mutex");
1149 return 0;
1150 }
1151
1152 static int
1153 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1154 {
1155 fat_mutex *m = SCM_MUTEX_DATA (mx);
1156 scm_puts ("#<mutex ", port);
1157 scm_uintprint ((scm_t_bits)m, 16, port);
1158 scm_puts (">", port);
1159 return 1;
1160 }
1161
/* Allocate and initialize a new fat mutex with the given behavior
   flags (see fat_mutex for their meaning).  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Give `waiting' a valid SCM value before the SMOB is created, so the
     GC never sees an uninitialized slot; the real queue is allocated
     afterwards because make_queue can itself trigger GC.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1182
/* Create a standard (non-recursive, checked) mutex; C-level convenience
   wrapper around scm_make_mutex_with_flags with an empty flag list.  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1187
1188 SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1189 SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1190 SCM_SYMBOL (recursive_sym, "recursive");
1191
1192 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1193 (SCM flags),
1194 "Create a new mutex. ")
1195 #define FUNC_NAME s_scm_make_mutex_with_flags
1196 {
1197 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1198
1199 SCM ptr = flags;
1200 while (! scm_is_null (ptr))
1201 {
1202 SCM flag = SCM_CAR (ptr);
1203 if (scm_is_eq (flag, unchecked_unlock_sym))
1204 unchecked_unlock = 1;
1205 else if (scm_is_eq (flag, allow_external_unlock_sym))
1206 external_unlock = 1;
1207 else if (scm_is_eq (flag, recursive_sym))
1208 recursive = 1;
1209 else
1210 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1211 ptr = SCM_CDR (ptr);
1212 }
1213 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1214 }
1215 #undef FUNC_NAME
1216
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1225
1226 SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1227
/* Try to acquire MUTEX on behalf of OWNER (defaulting to the current
   thread), waiting at most until the absolute time TIMEOUT (NULL means
   wait forever).  On success *RET is set to 1, on timeout or a
   non-recursive re-lock to 0.  Returns #f, or a (key . message) pair
   describing a condition the caller should throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unlocked: take it, and record it in the owning thread's
	     list of held mutexes (consulted for abandonment detection
	     when a thread exits).  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
	      scm_i_pthread_mutex_lock (&t->admin_mutex);
	      t->mutexes = scm_cons (mutex, t->mutexes);
	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* The previous owner died without unlocking: take the mutex
	     over, but report the abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Re-locking by the current owner: only recursive mutexes
	     allow this; otherwise it is an error.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Contended.  Give up once the absolute timeout has passed.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  /* Sleep on the wait queue, run pending asyncs with m->lock
	     released, then loop and RE-CHECK the mutex state from the
	     top: the state may have changed while SCM_TICK ran, so a
	     wakeup must never be taken to mean the lock is ours.  */
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1304
/* Lock MX for the current thread, blocking indefinitely; C-level
   convenience wrapper around scm_lock_mutex_timed.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1309
1310 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1311 (SCM m, SCM timeout, SCM owner),
1312 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1313 "blocks until the mutex becomes available. The function returns when "
1314 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1315 "a thread already owns will succeed right away and will not block the "
1316 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1317 #define FUNC_NAME s_scm_lock_mutex_timed
1318 {
1319 SCM exception;
1320 int ret = 0;
1321 scm_t_timespec cwaittime, *waittime = NULL;
1322
1323 SCM_VALIDATE_MUTEX (1, m);
1324
1325 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1326 {
1327 to_timespec (timeout, &cwaittime);
1328 waittime = &cwaittime;
1329 }
1330
1331 exception = fat_mutex_lock (m, waittime, owner, &ret);
1332 if (!scm_is_false (exception))
1333 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1334 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1335 }
1336 #undef FUNC_NAME
1337
/* Lock MUTEX for the extent of the current dynwind context.  The unwind
   handler is registered first so the mutex is released on any nonlocal
   exit; the rewind handler with SCM_F_WIND_EXPLICITLY then acquires the
   lock immediately (and again whenever the context is re-entered).  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1346
1347 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1348 (SCM mutex),
1349 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1350 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1351 #define FUNC_NAME s_scm_try_mutex
1352 {
1353 SCM exception;
1354 int ret = 0;
1355 scm_t_timespec cwaittime, *waittime = NULL;
1356
1357 SCM_VALIDATE_MUTEX (1, mutex);
1358
1359 to_timespec (scm_from_int(0), &cwaittime);
1360 waittime = &cwaittime;
1361
1362 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1363 if (!scm_is_false (exception))
1364 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1365 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1366 }
1367 #undef FUNC_NAME
1368
/*** Fat condition variables */

/* A "fat" condition variable: a plain pthread mutex protecting the
   queue of threads currently waiting on this condition.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1378
1379 static int
1380 fat_mutex_unlock (SCM mutex, SCM cond,
1381 const scm_t_timespec *waittime, int relock)
1382 {
1383 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1384 fat_cond *c = NULL;
1385 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1386 int err = 0, ret = 0;
1387
1388 scm_i_scm_pthread_mutex_lock (&m->lock);
1389
1390 SCM owner = m->owner;
1391
1392 if (!scm_is_eq (owner, scm_current_thread ()))
1393 {
1394 if (m->level == 0)
1395 {
1396 if (!m->unchecked_unlock)
1397 {
1398 scm_i_pthread_mutex_unlock (&m->lock);
1399 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1400 }
1401 owner = scm_current_thread ();
1402 }
1403 else if (!m->allow_external_unlock)
1404 {
1405 scm_i_pthread_mutex_unlock (&m->lock);
1406 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1407 }
1408 }
1409
1410 if (! (SCM_UNBNDP (cond)))
1411 {
1412 c = SCM_CONDVAR_DATA (cond);
1413 while (1)
1414 {
1415 int brk = 0;
1416
1417 scm_i_scm_pthread_mutex_lock (&c->lock);
1418 if (m->level > 0)
1419 m->level--;
1420 if (m->level == 0)
1421 m->owner = unblock_from_queue (m->waiting);
1422
1423 scm_i_pthread_mutex_unlock (&m->lock);
1424
1425 t->block_asyncs++;
1426
1427 err = block_self (c->waiting, cond, &c->lock, waittime);
1428
1429 if (err == 0)
1430 {
1431 ret = 1;
1432 brk = 1;
1433 }
1434 else if (err == ETIMEDOUT)
1435 {
1436 ret = 0;
1437 brk = 1;
1438 }
1439 else if (err != EINTR)
1440 {
1441 errno = err;
1442 scm_i_pthread_mutex_unlock (&c->lock);
1443 scm_syserror (NULL);
1444 }
1445
1446 if (brk)
1447 {
1448 if (relock)
1449 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
1450 scm_i_pthread_mutex_unlock (&c->lock);
1451 break;
1452 }
1453
1454 scm_i_pthread_mutex_unlock (&c->lock);
1455
1456 t->block_asyncs--;
1457 scm_async_click ();
1458
1459 scm_remember_upto_here_2 (cond, mutex);
1460
1461 scm_i_scm_pthread_mutex_lock (&m->lock);
1462 }
1463 }
1464 else
1465 {
1466 if (m->level > 0)
1467 m->level--;
1468 if (m->level == 0)
1469 m->owner = unblock_from_queue (m->waiting);
1470
1471 scm_i_pthread_mutex_unlock (&m->lock);
1472 ret = 1;
1473 }
1474
1475 return ret;
1476 }
1477
/* Unlock MX without waiting on any condition variable; C-level
   convenience wrapper around scm_unlock_mutex_timed.  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1482
1483 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1484 (SCM mx, SCM cond, SCM timeout),
1485 "Unlocks @var{mutex} if the calling thread owns the lock on "
1486 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1487 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1488 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1489 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1490 "with a call to @code{unlock-mutex}. Only the last call to "
1491 "@code{unlock-mutex} will actually unlock the mutex. ")
1492 #define FUNC_NAME s_scm_unlock_mutex_timed
1493 {
1494 scm_t_timespec cwaittime, *waittime = NULL;
1495
1496 SCM_VALIDATE_MUTEX (1, mx);
1497 if (! (SCM_UNBNDP (cond)))
1498 {
1499 SCM_VALIDATE_CONDVAR (2, cond);
1500
1501 if (! (SCM_UNBNDP (timeout)))
1502 {
1503 to_timespec (timeout, &cwaittime);
1504 waittime = &cwaittime;
1505 }
1506 }
1507
1508 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1509 }
1510 #undef FUNC_NAME
1511
1512 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1513 (SCM obj),
1514 "Return @code{#t} if @var{obj} is a mutex.")
1515 #define FUNC_NAME s_scm_mutex_p
1516 {
1517 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1518 }
1519 #undef FUNC_NAME
1520
1521 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1522 (SCM mx),
1523 "Return the thread owning @var{mx}, or @code{#f}.")
1524 #define FUNC_NAME s_scm_mutex_owner
1525 {
1526 SCM owner;
1527 fat_mutex *m = NULL;
1528
1529 SCM_VALIDATE_MUTEX (1, mx);
1530 m = SCM_MUTEX_DATA (mx);
1531 scm_i_pthread_mutex_lock (&m->lock);
1532 owner = m->owner;
1533 scm_i_pthread_mutex_unlock (&m->lock);
1534
1535 return owner;
1536 }
1537 #undef FUNC_NAME
1538
1539 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1540 (SCM mx),
1541 "Return the lock level of mutex @var{mx}.")
1542 #define FUNC_NAME s_scm_mutex_level
1543 {
1544 SCM_VALIDATE_MUTEX (1, mx);
1545 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1546 }
1547 #undef FUNC_NAME
1548
1549 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1550 (SCM mx),
1551 "Returns @code{#t} if the mutex @var{mx} is locked.")
1552 #define FUNC_NAME s_scm_mutex_locked_p
1553 {
1554 SCM_VALIDATE_MUTEX (1, mx);
1555 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1556 }
1557 #undef FUNC_NAME
1558
1559 static SCM
1560 fat_cond_mark (SCM cv)
1561 {
1562 fat_cond *c = SCM_CONDVAR_DATA (cv);
1563 return c->waiting;
1564 }
1565
1566 static size_t
1567 fat_cond_free (SCM mx)
1568 {
1569 fat_cond *c = SCM_CONDVAR_DATA (mx);
1570 scm_i_pthread_mutex_destroy (&c->lock);
1571 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1572 return 0;
1573 }
1574
1575 static int
1576 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1577 {
1578 fat_cond *c = SCM_CONDVAR_DATA (cv);
1579 scm_puts ("#<condition-variable ", port);
1580 scm_uintprint ((scm_t_bits)c, 16, port);
1581 scm_puts (">", port);
1582 return 1;
1583 }
1584
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  /* Give `waiting' a valid SCM value before the SMOB is created so the
     GC never sees an uninitialized slot; the real queue is allocated
     afterwards because make_queue can itself trigger GC.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1601
1602 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1603 (SCM cv, SCM mx, SCM t),
1604 "Wait until @var{cond-var} has been signalled. While waiting, "
1605 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1606 "is locked again when this function returns. When @var{time} is given, "
1607 "it specifies a point in time where the waiting should be aborted. It "
1608 "can be either a integer as returned by @code{current-time} or a pair "
1609 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1610 "mutex is locked and @code{#f} is returned. When the condition "
1611 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1612 "is returned. ")
1613 #define FUNC_NAME s_scm_timed_wait_condition_variable
1614 {
1615 scm_t_timespec waittime, *waitptr = NULL;
1616
1617 SCM_VALIDATE_CONDVAR (1, cv);
1618 SCM_VALIDATE_MUTEX (2, mx);
1619
1620 if (!SCM_UNBNDP (t))
1621 {
1622 to_timespec (t, &waittime);
1623 waitptr = &waittime;
1624 }
1625
1626 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1627 }
1628 #undef FUNC_NAME
1629
/* Wake up one thread blocked on C's wait queue (if any).  C's lock is
   held across the dequeue so signals cannot race with a concurrent
   block_self on the same condvar.  */
static void
fat_cond_signal (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  unblock_from_queue (c->waiting);
  scm_i_pthread_mutex_unlock (&c->lock);
}
1637
1638 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1639 (SCM cv),
1640 "Wake up one thread that is waiting for @var{cv}")
1641 #define FUNC_NAME s_scm_signal_condition_variable
1642 {
1643 SCM_VALIDATE_CONDVAR (1, cv);
1644 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1645 return SCM_BOOL_T;
1646 }
1647 #undef FUNC_NAME
1648
/* Wake up every thread blocked on C's wait queue, draining it under
   C's lock; unblock_from_queue returns #f once the queue is empty.  */
static void
fat_cond_broadcast (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
  scm_i_pthread_mutex_unlock (&c->lock);
}
1657
1658 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1659 (SCM cv),
1660 "Wake up all threads that are waiting for @var{cv}. ")
1661 #define FUNC_NAME s_scm_broadcast_condition_variable
1662 {
1663 SCM_VALIDATE_CONDVAR (1, cv);
1664 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1665 return SCM_BOOL_T;
1666 }
1667 #undef FUNC_NAME
1668
1669 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1670 (SCM obj),
1671 "Return @code{#t} if @var{obj} is a condition variable.")
1672 #define FUNC_NAME s_scm_condition_variable_p
1673 {
1674 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1675 }
1676 #undef FUNC_NAME
1677
/*** Marking stacks */

/* XXX - what to do with this? Do we need to handle this for blocked
   threads as well?
*/
/* On IA64 the register backing store is a second, upward-growing stack
   that also holds SCM references; mark both the saved machine context
   and the backing-store region of the current thread.  On all other
   architectures this expands to nothing.  */
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do { \
    ucontext_t ctx; \
    SCM_STACKITEM * top, * bot; \
    getcontext (&ctx); \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM))); \
    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1697
/* GC hook: conservatively mark every thread's C stack and saved
   registers.  Must only run while all threads are suspended (see
   scm_i_thread_put_to_sleep), which is why t->top is asserted.  */
void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      scm_gc_mark (t->handle);

#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      /* The register set saved at suspension may hold the only live
	 reference to an object; mark it word by word.  */
      scm_mark_locations ((SCM_STACKITEM *) &t->regs,
			  ((size_t) sizeof(t->regs)
			   / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}
1722
/*** Select */

/* A select() that cooperates with Guile: the calling thread leaves
   guile mode while blocked (so GC can proceed), and the thread's sleep
   pipe is added to the read set so an async can wake the select by
   writing a byte.  Such a wakeup is reported to the caller as EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  /* We always need a read set, since the wakeup fd goes into it.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register this thread as sleeping; if an async is already pending,
     run it first and retry.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  /* Save errno before re-entering guile mode, which may clobber it.  */
  eno = errno;
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  /* If we were woken through the pipe, consume the byte and hide the
     wakeup fd from the caller; if nothing else was ready, present the
     wakeup as an EINTR-failed select.  */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1773
1774 /* Convenience API for blocking while in guile mode. */
1775
1776 #if SCM_USE_PTHREAD_THREADS
1777
1778 int
1779 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1780 {
1781 scm_t_guile_ticket t = scm_leave_guile ();
1782 int res = scm_i_pthread_mutex_lock (mutex);
1783 scm_enter_guile (t);
1784 return res;
1785 }
1786
/* Dynwind unwind handler: unlock the pthread mutex passed as opaque
   DATA.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1792
/* Lock MUTEX now (leaving guile mode while blocking) and arrange for it
   to be unlocked on any exit from the current dynwind context.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1799
1800 int
1801 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1802 {
1803 scm_t_guile_ticket t = scm_leave_guile ();
1804 int res = scm_i_pthread_cond_wait (cond, mutex);
1805 scm_enter_guile (t);
1806 return res;
1807 }
1808
1809 int
1810 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1811 scm_i_pthread_mutex_t *mutex,
1812 const scm_t_timespec *wt)
1813 {
1814 scm_t_guile_ticket t = scm_leave_guile ();
1815 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1816 scm_enter_guile (t);
1817 return res;
1818 }
1819
1820 #endif
1821
1822 unsigned long
1823 scm_std_usleep (unsigned long usecs)
1824 {
1825 struct timeval tv;
1826 tv.tv_usec = usecs % 1000000;
1827 tv.tv_sec = usecs / 1000000;
1828 scm_std_select (0, NULL, NULL, NULL, &tv);
1829 return tv.tv_sec * 1000000 + tv.tv_usec;
1830 }
1831
1832 unsigned int
1833 scm_std_sleep (unsigned int secs)
1834 {
1835 struct timeval tv;
1836 tv.tv_usec = 0;
1837 tv.tv_sec = secs;
1838 scm_std_select (0, NULL, NULL, NULL, &tv);
1839 return tv.tv_sec;
1840 }
1841
1842 /*** Misc */
1843
/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The SCM handle of this thread's scm_i_thread record.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1852
1853 static SCM
1854 scm_c_make_list (size_t n, SCM fill)
1855 {
1856 SCM res = SCM_EOL;
1857 while (n-- > 0)
1858 res = scm_cons (fill, res);
1859 return res;
1860 }
1861
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate the list before taking the mutex; thread_count read
     here is only a hint, so the walk below is bounded by both the list
     length and the actual thread chain.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is hidden from users.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Truncate any unused pre-allocated cells.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1890
1891 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1892 (SCM thread),
1893 "Return @code{#t} iff @var{thread} has exited.\n")
1894 #define FUNC_NAME s_scm_thread_exited_p
1895 {
1896 return scm_from_bool (scm_c_thread_exited_p (thread));
1897 }
1898 #undef FUNC_NAME
1899
/* C-level predicate: nonzero iff THREAD has exited.  Reuses the Scheme
   procedure's FUNC_NAME so validation errors are reported against
   `thread-exited?'.  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1910
/* Condition broadcast by scm_i_thread_wake_up to rouse threads that
   went to sleep for GC.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Nonzero while threads are being signalled to go to sleep for GC.  */
int scm_i_thread_go_to_sleep;
/* Set to 1 once scm_init_threads has completed.  */
static int threads_initialized_p = 0;
1914
/* Stop the world for GC: acquire every thread's heap mutex, which each
   thread releases only while suspended, so returning from here means
   all other threads are asleep.  The thread_admin_mutex stays locked
   until the matching scm_i_thread_wake_up call.  */
void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}
1933
1934 void
1935 scm_i_thread_invalidate_freelists ()
1936 {
1937 /* thread_admin_mutex is already locked. */
1938
1939 scm_i_thread *t;
1940 for (t = all_threads; t; t = t->next_thread)
1941 if (t != SCM_I_CURRENT_THREAD)
1942 t->clear_freelists_p = 1;
1943 }
1944
/* Restart the world after GC: broadcast the wake-up condition, release
   every thread's heap mutex taken by scm_i_thread_put_to_sleep, drop
   the thread_admin_mutex held since then, and re-enter guile mode for
   the calling thread.  */
void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}
1959
/* Called by a thread that noticed scm_i_thread_go_to_sleep: suspend,
   wait on wake_up_cond (releasing the heap mutex for the GC thread),
   and resume once woken.  */
void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}
1967
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
/* Nesting depth of critical sections in the current thread.  */
int scm_i_critical_section_level = 0;

/* Default mutex for scm_dynwind_critical_section when called with #f.  */
static SCM dynwind_critical_section_mutex;
1974
/* Enter a critical section for the extent of the current dynwind
   context: lock MUTEX (or the shared recursive default when MUTEX is
   #f) and block asyncs, both undone automatically on unwind.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
1983
/*** Initialization */

/* Per-thread freelist keys, created in scm_threads_prehistory.  */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
/* Mutex for miscellaneous one-off synchronization needs.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Attribute object for creating recursive pthread mutexes.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1992
/* Earliest thread-system setup, run before the heap exists: initialize
   global mutexes, the GC wake-up condition, and the freelist keys, then
   bootstrap the first thread record with stack base BASE.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex must be recursive because critical
     sections nest (see scm_i_critical_section_level).  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
2011
/* SMOB type tags, assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
2015
/* Register the thread, mutex and condition-variable SMOB types, finish
   bootstrapping the first thread, and mark the thread system as fully
   initialized (enabling the GC sleep/wake machinery).  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
2042
2043 void
2044 scm_init_threads_default_dynamic_state ()
2045 {
2046 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2047 scm_i_default_dynamic_state = scm_permanent_object (state);
2048 }
2049
/* Register all Scheme procedures defined in this file via the
   snarfed threads.x file.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2055
2056 /*
2057 Local Variables:
2058 c-file-style: "gnu"
2059 End:
2060 */