Fix hang in srfi-18.test
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19 \f
20 #ifdef HAVE_CONFIG_H
21 # include <config.h>
22 #endif
23
24 #include "libguile/_scm.h"
25
26 #if HAVE_UNISTD_H
27 #include <unistd.h>
28 #endif
29 #include <stdio.h>
30 #include <assert.h>
31
32 #ifdef HAVE_STRING_H
33 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
34 #endif
35
36 #if HAVE_SYS_TIME_H
37 #include <sys/time.h>
38 #endif
39
40 #include "libguile/validate.h"
41 #include "libguile/root.h"
42 #include "libguile/eval.h"
43 #include "libguile/async.h"
44 #include "libguile/ports.h"
45 #include "libguile/threads.h"
46 #include "libguile/dynwind.h"
47 #include "libguile/iselect.h"
48 #include "libguile/fluids.h"
49 #include "libguile/continuations.h"
50 #include "libguile/gc.h"
51 #include "libguile/init.h"
52 #include "libguile/scmsigs.h"
53 #include "libguile/strings.h"
54
55 #ifdef __MINGW32__
56 #ifndef ETIMEDOUT
57 # define ETIMEDOUT WSAETIMEDOUT
58 #endif
59 # include <fcntl.h>
60 # include <process.h>
61 # define pipe(fd) _pipe (fd, 256, O_BINARY)
62 #endif /* __MINGW32__ */
63
/* Convert the Scheme timeout value T into *WAITTIME.  T is either a
   (SECS . USECS) pair of unsigned integers or a real number of
   seconds.  NOTE(review): the value is passed unchanged to
   pthread-style timed waits, which take an absolute time -- confirm
   callers supply absolute timestamps.  */
static void
to_timespec (SCM t, scm_t_timespec *waittime)
{
  if (scm_is_pair (t))
    {
      waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
      /* The cdr is in microseconds; timespec wants nanoseconds.  */
      waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
    }
  else
    {
      double time = scm_to_double (t);
      double sec = scm_c_truncate (time);

      waittime->tv_sec = (long) sec;
      waittime->tv_nsec = (long) ((time - sec) * 1000000000);
    }
}
81
82 /*** Queues */
83
84 /* Make an empty queue data structure.
85 */
86 static SCM
87 make_queue ()
88 {
89 return scm_cons (SCM_EOL, SCM_EOL);
90 }
91
92 /* Put T at the back of Q and return a handle that can be used with
93 remqueue to remove T from Q again.
94 */
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.  The cell is allocated before
   entering the critical section because allocation may trigger GC.  */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell becomes the whole list.  */
    SCM_SETCDR (q, c);
  else
    /* Link the new cell behind the current tail (cached in the car).  */
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);  /* The new cell is now the tail.  */
  SCM_CRITICAL_SECTION_END;
  return c;
}
108
109 /* Remove the element that the handle C refers to from the queue Q. C
110 must have been returned from a call to enqueue. The return value
111 is zero when the element referred to by C has already been removed.
112 Otherwise, 1 is returned.
113 */
114 static int
115 remqueue (SCM q, SCM c)
116 {
117 SCM p, prev = q;
118 SCM_CRITICAL_SECTION_START;
119 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
120 {
121 if (scm_is_eq (p, c))
122 {
123 if (scm_is_eq (c, SCM_CAR (q)))
124 SCM_SETCAR (q, SCM_CDR (c));
125 SCM_SETCDR (prev, SCM_CDR (c));
126 SCM_CRITICAL_SECTION_END;
127 return 1;
128 }
129 prev = p;
130 }
131 SCM_CRITICAL_SECTION_END;
132 return 0;
133 }
134
135 /* Remove the front-most element from the queue Q and return it.
136 Return SCM_BOOL_F when Q is empty.
137 */
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.  */
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      /* Queue drained: reset the cached tail pointer too.  */
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;
      return SCM_CAR (c);
    }
}
158
159 /*** Thread smob routines */
160
/* GC mark function for thread smobs: mark every SCM field reachable
   from the thread struct.  Returns dynamic_state so the caller marks
   it tail-recursively, per the smob mark protocol.  */
static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->cleanup_handler);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->mutexes);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  scm_gc_mark (t->vm);
  return t->dynamic_state;
}
175
/* Smob print function: write "#<thread ID (ADDR)>" to PORT, where ID
   is derived from the pthread handle and ADDR is the struct address
   in hex.  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the union member whose size matches pthread_t; reading it
     reinterprets the handle's bytes as an integer of that width.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}
210
/* Smob free function: release the thread struct.  The assert
   documents the invariant that a thread smob only becomes garbage
   after the underlying thread has exited.  */
static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}
219
220 /*** Blocking on queues. */
221
222 /* See also scm_i_queue_async_cell for how such a block is
223 interrputed.
224 */
225
226 /* Put the current thread on QUEUE and go to sleep, waiting for it to
227 be woken up by a call to 'unblock_from_queue', or to be
228 interrupted. Upon return of this function, the current thread is
229 no longer on QUEUE, even when the sleep has been interrupted.
230
231 The caller of block_self must hold MUTEX. It will be atomically
232 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
233
234 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
235 as MUTEX is needed.
236
237 When WAITTIME is not NULL, the sleep will be aborted at that time.
238
239 The return value of block_self is an errno value. It will be zero
240 when the sleep has been successfully completed by a call to
241 unblock_from_queue, EINTR when it has been interrupted by the
242 delivery of a system async, and ETIMEDOUT when the timeout has
243 expired.
244
245 The system asyncs themselves are not executed by block_self.
246 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* scm_i_setup_sleep reports a pending async with a non-zero
     result; in that case we must not go to sleep at all.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Keep asyncs from running while we hold a place on QUEUE.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
278
279 /* Wake up the first thread on QUEUE, if any. The awoken thread is
280 returned, or #f if the queue was empty.
281 */
282 static SCM
283 unblock_from_queue (SCM queue)
284 {
285 SCM thread = dequeue (queue);
286 if (scm_is_true (thread))
287 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
288 return thread;
289 }
290
291 /* Getting into and out of guile mode.
292 */
293
294 /* Ken Raeburn observes that the implementation of suspend and resume
295 (and the things that build on top of them) are very likely not
296 correct (see below). We will need fix this eventually, and that's
297 why scm_leave_guile/scm_enter_guile are not exported in the API.
298
299 Ken writes:
300
301 Consider this sequence:
302
303 Function foo, called in Guile mode, calls suspend (maybe indirectly
304 through scm_leave_guile), which does this:
305
306 // record top of stack for the GC
307 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
308 var 't'
309 // save registers.
310 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
311 setjmp (t->regs); // here's most of the magic
312
313 ... and returns.
314
315 Function foo has a SCM value X, a handle on a non-immediate object, in
316 a caller-saved register R, and it's the only reference to the object
317 currently.
318
319 The compiler wants to use R in suspend, so it pushes the current
320 value, X, into a stack slot which will be reloaded on exit from
321 suspend; then it loads stuff into R and goes about its business. The
322 setjmp call saves (some of) the current registers, including R, which
323 no longer contains X. (This isn't a problem for a normal
324 setjmp/longjmp situation, where longjmp would be called before
325 setjmp's caller returns; the old value for X would be loaded back from
326 the stack after the longjmp, before the function returned.)
327
328 So, suspend returns, loading X back into R (and invalidating the jump
329 buffer) in the process. The caller foo then goes off and calls a
330 bunch of other functions out of Guile mode, occasionally storing X on
331 the stack again, but, say, much deeper on the stack than suspend's
332 stack frame went, and the stack slot where suspend had written X has
333 long since been overwritten with other values.
334
335 Okay, nothing actively broken so far. Now, let garbage collection
336 run, triggered by another thread.
337
338 The thread calling foo is out of Guile mode at the time, so the
339 garbage collector just scans a range of stack addresses. Too bad that
340 X isn't stored there. So the pointed-to storage goes onto the free
341 list, and I think you can see where things go from there.
342
343 Is there anything I'm missing that'll prevent this scenario from
344 happening? I mean, aside from, "well, suspend and scm_leave_guile
345 don't have many local variables, so they probably won't need to save
346 any registers on most systems, so we hope everything will wind up in
347 the jump buffer and we'll just get away with it"?
348
349 (And, going the other direction, if scm_leave_guile and suspend push
350 the stack pointer over onto a new page, and foo doesn't make further
351 function calls and thus the stack pointer no longer includes that
352 page, are we guaranteed that the kernel cannot release the now-unused
353 stack page that contains the top-of-stack pointer we just saved? I
354 don't know if any OS actually does that. If it does, we could get
355 faults in garbage collection.)
356
357 I don't think scm_without_guile has to have this problem, as it gets
358 more control over the stack handling -- but it should call setjmp
359 itself. I'd probably try something like:
360
361 // record top of stack for the GC
362 t->top = SCM_STACK_PTR (&t);
363 // save registers.
364 SCM_FLUSH_REGISTER_WINDOWS;
365 setjmp (t->regs);
366 res = func(data);
367 scm_enter_guile (t);
368
369 ... though even that's making some assumptions about the stack
370 ordering of local variables versus caller-saved registers.
371
372 For something like scm_leave_guile to work, I don't think it can just
373 rely on invalidated jump buffers. A valid jump buffer, and a handle
374 on the stack state at the point when the jump buffer was initialized,
375 together, would work fine, but I think then we're talking about macros
376 invoking setjmp in the caller's stack frame, and requiring that the
377 caller of scm_leave_guile also call scm_enter_guile before returning,
378 kind of like pthread_cleanup_push/pop calls that have to be paired up
379 in a function. (In fact, the pthread ones have to be paired up
380 syntactically, as if they might expand to a compound statement
381 incorporating the user's code, and invoking a compiler's
382 exception-handling primitives. Which might be something to think
383 about for cases where Guile is used with C++ exceptions or
384 pthread_cancel.)
385 */
386
387 scm_i_pthread_key_t scm_i_thread_key;
388
/* Mark thread T as being back in guile mode: clear the saved stack
   top and, when the GC requested it while we were away, drop the
   thread-local freelists.  Called by scm_enter_guile with the heap
   mutex held.  */
static void
resume (scm_i_thread *t)
{
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}
400
401 typedef void* scm_t_guile_ticket;
402
/* Redeem TICKET (an scm_i_thread pointer produced by
   scm_leave_guile): re-acquire the thread's heap mutex and resume
   guile mode.  A NULL ticket is a no-op.  */
static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      scm_i_pthread_mutex_lock (&t->heap_mutex);
      resume (t);
    }
}
413
/* Leave guile mode: record the current stack top and CPU registers
   so the conservative GC can scan this thread's stack while it runs
   outside Guile.  See the long comment above for why this scheme is
   known to be fragile.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}
426
/* Suspend the current thread and release its heap mutex so the GC
   can run while this thread is in non-guile mode.  The returned
   ticket must later be handed back to scm_enter_guile.  */
static scm_t_guile_ticket
scm_leave_guile ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_mutex_unlock (&t->heap_mutex);
  return (scm_t_guile_ticket) t;
}
434
435 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
436 static scm_i_thread *all_threads = NULL;
437 static int thread_count;
438
439 static SCM scm_i_default_dynamic_state;
440
441 /* Perform first stage of thread initialisation, in non-guile mode.
442 */
/* Perform first stage of thread initialisation, in non-guile mode.
   BASE is the bottom of this thread's C stack, used by the
   conservative GC.  The struct is allocated with plain malloc since
   this thread cannot use the GC yet.
   NOTE(review): the malloc result is not checked for NULL -- confirm
   whether an OOM policy (abort/xmalloc) applies here.  */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = malloc (sizeof (scm_i_thread));

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->cleanup_handler = SCM_BOOL_F;
  t->mutexes = SCM_EOL;
  t->held_mutex = NULL;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 re-enables them.  */
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->last_debug_frame = NULL;
  t->base = base;
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (t->register_backing_store_base > bsp)
      t->register_backing_store_base = bsp;
  }
#endif
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;
  /* XXX - check for errors. */
  pipe (t->sleep_pipe);
  scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
  scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
  t->clear_freelists_p = 0;
  t->gc_running_p = 0;
  t->canceled = 0;
  t->exited = 0;

  t->freelist = SCM_EOL;
  t->freelist2 = SCM_EOL;
  SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
  SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);

  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Entering guile mode: the heap mutex is held whenever this thread
     is in guile mode (see scm_enter_guile/scm_leave_guile).  */
  scm_i_pthread_mutex_lock (&t->heap_mutex);

  /* Publish the new thread on the global list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
508
509 /* Perform second stage of thread initialisation, in guile mode.
510 */
/* Perform second stage of thread initialisation, in guile mode:
   create the thread smob, register the struct with the GC, and set
   up the dynamic state (derived from PARENT when given, otherwise
   the initial one).  Finally the join queue is created and asyncs
   are re-enabled.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Initialisation done; allow asyncs from now on.  */
  t->block_asyncs = 0;
}
530
531 \f
532 /*** Fat mutexes */
533
534 /* We implement our own mutex type since we want them to be 'fair', we
535 want to do fancy things while waiting for them (like running
536 asyncs) and we might want to add things that are nice for
537 debugging.
538 */
539
/* A "fat" mutex: a pthread mutex plus the bookkeeping needed for
   fair, Scheme-visible locking semantics.  LOCK protects the other
   fields; OWNER is the owning thread's SCM handle or #f.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;
552
553 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
554 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
555
556 /* Perform thread tear-down, in guile mode.
557 */
/* Perform thread tear-down, in guile mode.  Runs the cleanup handler
   (if any), marks the thread exited, and wakes every thread waiting
   on us: joiners on our join queue, plus threads blocked on mutexes
   we still hold.  Called via scm_with_guile from on_thread_exit.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so a second tear-down cannot run it
	 again; errors are reported, not propagated.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      (scm_t_catch_body) scm_call_0, ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake all joiners.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* Wake one waiter of each mutex this thread still owns, so the
     mutex is not abandoned with sleepers stuck on it.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_CAR (t->mutexes);
      fat_mutex *m  = SCM_MUTEX_DATA (mutex);
      scm_i_pthread_mutex_lock (&m->lock);

      unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      t->mutexes = SCM_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
597
/* pthread key destructor: final tear-down for a dying thread.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here. */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Re-install the key value: pthreads has already cleared it before
     invoking this destructor, but the guile-mode code below needs
     SCM_I_CURRENT_THREAD to work.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
645
646 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
647
/* One-time setup: create the pthread key whose destructor
   (on_thread_exit) tears down a guile thread when its pthread
   terminates.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
653
654 /* Perform any initializations necessary to bring the current thread
655 into guile mode, initializing Guile itself, if necessary.
656
657 BASE is the stack base to use with GC.
658
659 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
660 which case the default dynamic state is used.
661
662 Return zero when the thread was in guile mode already; otherwise
663 return 1.
664 */
665
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 XXX - base might be lower than when this thread was first
	 guilified.
       */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
716
717 #if SCM_USE_PTHREAD_THREADS
718
719 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
720 /* This method for GNU/Linux and perhaps some other systems.
721 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
722 available on them. */
723 #define HAVE_GET_THREAD_STACK_BASE
724
725 static SCM_STACKITEM *
726 get_thread_stack_base ()
727 {
728 pthread_attr_t attr;
729 void *start, *end;
730 size_t size;
731
732 pthread_getattr_np (pthread_self (), &attr);
733 pthread_attr_getstack (&attr, &start, &size);
734 end = (char *)start + size;
735
736 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
737 for the main thread, but we can use scm_get_stack_base in that
738 case.
739 */
740
741 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
742 if ((void *)&attr < start || (void *)&attr >= end)
743 return scm_get_stack_base ();
744 else
745 #endif
746 {
747 #if SCM_STACK_GROWS_UP
748 return start;
749 #else
750 return end;
751 #endif
752 }
753 }
754
755 #elif HAVE_PTHREAD_GET_STACKADDR_NP
756 /* This method for MacOS X.
757 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
758 but as of 2006 there's nothing obvious at apple.com. */
759 #define HAVE_GET_THREAD_STACK_BASE
/* MacOS X variant: the system reports the stack address directly.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}
765
766 #elif defined (__MINGW32__)
767 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
768 in any thread. We don't like hard-coding the name of a system, but there
769 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
770 work. */
771 #define HAVE_GET_THREAD_STACK_BASE
/* Mingw variant: scm_get_stack_base works from any thread there.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}
777
778 #endif /* pthread methods of get_thread_stack_base */
779
780 #else /* !SCM_USE_PTHREAD_THREADS */
781
782 #define HAVE_GET_THREAD_STACK_BASE
783
/* Single-threaded build: only one stack, so the generic stack base
   suffices.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}
789
790 #endif /* !SCM_USE_PTHREAD_THREADS */
791
792 #ifdef HAVE_GET_THREAD_STACK_BASE
793
/* Public entry point: bring the calling thread (and Guile itself, if
   necessary) into guile mode for good, using the thread's stack base
   for GC and the default dynamic state.  Only compiled when a
   get_thread_stack_base implementation exists.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
			       scm_i_default_dynamic_state);
}
800
801 #endif
802
/* Call FUNC (DATA) in guile mode and return its result, using the
   default dynamic state as the parent.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
809
/* pthread cleanup handler: leave guile mode if the thread is
   cancelled while inside scm_i_with_guile_and_parent.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  scm_leave_guile ();
}
815
/* Call FUNC (DATA) inside a continuation barrier, entering guile
   mode first when necessary (with PARENT as the dynamic-state
   parent) and leaving it again afterwards.  When the thread was
   already in guile mode, FUNC simply runs inside the barrier.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  /* A local serves as the stack base marker for the GC.  */
  SCM_STACKITEM base_item;

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      /* Make sure we leave guile mode even on cancellation.  */
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
      scm_leave_guile ();
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}
836
/* Call FUNC (DATA) in non-guile mode, so the GC may run while FUNC
   executes; FUNC must not touch SCM values.  NOTE(review): this
   inherits the suspend/setjmp caveats described in the long comment
   above.  */
void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *res;
  scm_t_guile_ticket t;
  t = scm_leave_guile ();
  res = func (data);
  scm_enter_guile (t);
  return res;
}
847
848 /*** Thread creation */
849
/* Parent/child handshake data for call-with-new-thread.  Lives on
   the parent's stack; MUTEX/COND synchronize until the child has
   stored its thread object in THREAD.  */
typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
858
/* Body of a new Scheme thread, run in guile mode.  Publishes the
   thread object to the waiting parent, then runs THUNK -- inside a
   catch when HANDLER was supplied -- and records the result as the
   thread's exit value.  THUNK and HANDLER are copied out of DATA up
   front, since DATA lives on the parent's stack.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the parent and wake it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
880
881 static void *
882 launch_thread (void *d)
883 {
884 launch_data *data = (launch_data *)d;
885 scm_i_pthread_detach (scm_i_pthread_self ());
886 scm_i_with_guile_and_parent (really_launch, d, data->parent);
887 return NULL;
888 }
889
890 SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
891 (SCM thunk, SCM handler),
892 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
893 "returning a new thread object representing the thread. The procedure\n"
894 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
895 "\n"
896 "When @var{handler} is specified, then @var{thunk} is called from\n"
897 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
898 "handler. This catch is established inside the continuation barrier.\n"
899 "\n"
900 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
901 "the @emph{exit value} of the thread and the thread is terminated.")
902 #define FUNC_NAME s_scm_call_with_new_thread
903 {
904 launch_data data;
905 scm_i_pthread_t id;
906 int err;
907
908 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
909 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
910 handler, SCM_ARG2, FUNC_NAME);
911
912 data.parent = scm_current_dynamic_state ();
913 data.thunk = thunk;
914 data.handler = handler;
915 data.thread = SCM_BOOL_F;
916 scm_i_pthread_mutex_init (&data.mutex, NULL);
917 scm_i_pthread_cond_init (&data.cond, NULL);
918
919 scm_i_scm_pthread_mutex_lock (&data.mutex);
920 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
921 if (err)
922 {
923 scm_i_pthread_mutex_unlock (&data.mutex);
924 errno = err;
925 scm_syserror (NULL);
926 }
927 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
928 scm_i_pthread_mutex_unlock (&data.mutex);
929
930 return data.thread;
931 }
932 #undef FUNC_NAME
933
/* Parent/child handshake data for scm_spawn_thread (C-level thread
   creation).  Mirrors launch_data, but carries C body/handler
   callbacks instead of Scheme procedures.  */
typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
944
/* Body of a thread created with scm_spawn_thread, run in guile mode.
   Publishes the thread object to the waiting parent, then runs BODY
   -- through scm_internal_catch when HANDLER is non-NULL -- storing
   the result as the thread's exit value.  All fields are copied out
   of DATA first, since DATA lives on the parent's stack.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the parent and wake it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
969
/* pthread trampoline for scm_spawn_thread: detach this pthread, then
   run really_spawn in guile mode with the parent's dynamic state.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
978
979 SCM
980 scm_spawn_thread (scm_t_catch_body body, void *body_data,
981 scm_t_catch_handler handler, void *handler_data)
982 {
983 spawn_data data;
984 scm_i_pthread_t id;
985 int err;
986
987 data.parent = scm_current_dynamic_state ();
988 data.body = body;
989 data.body_data = body_data;
990 data.handler = handler;
991 data.handler_data = handler_data;
992 data.thread = SCM_BOOL_F;
993 scm_i_pthread_mutex_init (&data.mutex, NULL);
994 scm_i_pthread_cond_init (&data.cond, NULL);
995
996 scm_i_scm_pthread_mutex_lock (&data.mutex);
997 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
998 if (err)
999 {
1000 scm_i_pthread_mutex_unlock (&data.mutex);
1001 errno = err;
1002 scm_syserror (NULL);
1003 }
1004 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1005 scm_i_pthread_mutex_unlock (&data.mutex);
1006
1007 return data.thread;
1008 }
1009
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Returns whether the yield request was honoured.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1018
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  /* The admin mutex guards the canceled flag, so a thread receives
     at most one pthread cancellation request.  The cancel itself is
     issued outside the lock.  */
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1043
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f clears the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Setting a handler on a dead or cancelled thread is silently
     ignored -- it would never run.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1067
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
	    (SCM thread),
	    "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* A dead or cancelled thread reports no handler.  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1086
SCM scm_join_thread (SCM thread)
{
  /* Join with no timeout and no timeout value: block until THREAD
     terminates.  */
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
"Suspend execution of the calling thread until the target @var{thread} "
"terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* RES defaults to #f; TIMEOUTVAL, when given, is returned on timeout.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  /* Wait on the thread's join queue; block_self releases and
	     re-acquires T->admin_mutex around the wait.  */
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      /* Woken normally: only done if the thread really exited
		 (spurious wakeups loop again).  */
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Run pending asyncs with the admin mutex dropped, then retry.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1148
1149 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1150 (SCM obj),
1151 "Return @code{#t} if @var{obj} is a thread.")
1152 #define FUNC_NAME s_scm_thread_p
1153 {
1154 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1155 }
1156 #undef FUNC_NAME
1157
/* GC mark procedure for mutex smobs: mark the owner, return the waiting
   queue for the marker to continue with.  */
static SCM
fat_mutex_mark (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}

/* GC free procedure for mutex smobs.  */
static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}

/* Print procedure for mutex smobs: #<mutex ADDR>.  */
static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}

/* Allocate and initialize a new fat mutex with the given behavior
   flags.  Level 0 and owner #f mean "unlocked".  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Set waiting to a valid SCM before SCM_NEWSMOB, so the smob is
     markable if make_queue triggers a GC.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1205
/* Plain mutex: no flags.  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}

SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");

SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
	    (SCM flags),
	    "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex_with_flags
{
  int unchecked_unlock = 0, external_unlock = 0, recursive = 0;

  /* FLAGS is a rest-list of option symbols; anything unrecognized is an
     error.  */
  SCM ptr = flags;
  while (! scm_is_null (ptr))
    {
      SCM flag = SCM_CAR (ptr);
      if (scm_is_eq (flag, unchecked_unlock_sym))
	unchecked_unlock = 1;
      else if (scm_is_eq (flag, allow_external_unlock_sym))
	external_unlock = 1;
      else if (scm_is_eq (flag, recursive_sym))
	recursive = 1;
      else
	SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
      ptr = SCM_CDR (ptr);
    }
  return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
}
#undef FUNC_NAME

SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1248
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");

/* Try to acquire MUTEX on behalf of OWNER (defaulting to the current
   thread), optionally giving up at absolute time TIMEOUT.  On return
   *RET is 1 if the lock was obtained, 0 otherwise.  The return value is
   #f on success, or a (key . message) pair describing an exception the
   caller should throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unlocked: take it.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      /* Record the mutex on the owner's list of held mutexes
		 (used when the thread exits).  */
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
	      scm_i_pthread_mutex_lock (&t->admin_mutex);
	      t->mutexes = scm_cons (mutex, t->mutexes);
	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* Owner died while holding the mutex: steal it, but report the
	     abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Relocking our own mutex: ok only if recursive.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by someone else: check the deadline, then wait.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  /* Run pending asyncs with the lock dropped, then retry.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}

SCM scm_lock_mutex (SCM mx)
{
  /* Lock with no timeout and the current thread as owner.  */
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1332
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
	    (SCM m, SCM timeout, SCM owner),
"Lock @var{mutex}. If the mutex is already locked, the calling thread "
"blocks until the mutex becomes available. The function returns when "
"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
"a thread already owns will succeed right away and will not block the "
"thread. That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  /* TIMEOUT of #f, like an omitted timeout, means wait forever.  */
  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  /* fat_mutex_lock returns a (key . message) pair on error, which is
     rethrown here in Scheme terms.  */
  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1360
/* Within the current dynwind context, hold MUTEX: it is locked on
   rewind (including now) and unlocked on unwind.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1369
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
	    (SCM mutex),
"Try to lock @var{mutex}. If the mutex is already locked by someone "
"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  /* A deadline of "now" turns fat_mutex_lock into a non-blocking
     attempt.  */
  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1391
/*** Fat condition variables */

/* A condition variable implemented on top of the block_self /
   unblock_from_queue machinery, so waits cooperate with Guile's async
   system.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1401
1402 static int
1403 fat_mutex_unlock (SCM mutex, SCM cond,
1404 const scm_t_timespec *waittime, int relock)
1405 {
1406 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1407 fat_cond *c = NULL;
1408 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1409 int err = 0, ret = 0;
1410
1411 scm_i_scm_pthread_mutex_lock (&m->lock);
1412
1413 SCM owner = m->owner;
1414
1415 if (!scm_is_eq (owner, scm_current_thread ()))
1416 {
1417 if (m->level == 0)
1418 {
1419 if (!m->unchecked_unlock)
1420 {
1421 scm_i_pthread_mutex_unlock (&m->lock);
1422 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1423 }
1424 owner = scm_current_thread ();
1425 }
1426 else if (!m->allow_external_unlock)
1427 {
1428 scm_i_pthread_mutex_unlock (&m->lock);
1429 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1430 }
1431 }
1432
1433 if (! (SCM_UNBNDP (cond)))
1434 {
1435 c = SCM_CONDVAR_DATA (cond);
1436 while (1)
1437 {
1438 int brk = 0;
1439
1440 if (m->level > 0)
1441 m->level--;
1442 if (m->level == 0)
1443 m->owner = unblock_from_queue (m->waiting);
1444
1445 t->block_asyncs++;
1446
1447 err = block_self (c->waiting, cond, &m->lock, waittime);
1448 scm_i_pthread_mutex_unlock (&m->lock);
1449
1450 if (err == 0)
1451 {
1452 ret = 1;
1453 brk = 1;
1454 }
1455 else if (err == ETIMEDOUT)
1456 {
1457 ret = 0;
1458 brk = 1;
1459 }
1460 else if (err != EINTR)
1461 {
1462 errno = err;
1463 scm_syserror (NULL);
1464 }
1465
1466 if (brk)
1467 {
1468 if (relock)
1469 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
1470 break;
1471 }
1472
1473 t->block_asyncs--;
1474 scm_async_click ();
1475
1476 scm_remember_upto_here_2 (cond, mutex);
1477
1478 scm_i_scm_pthread_mutex_lock (&m->lock);
1479 }
1480 }
1481 else
1482 {
1483 if (m->level > 0)
1484 m->level--;
1485 if (m->level == 0)
1486 m->owner = unblock_from_queue (m->waiting);
1487
1488 scm_i_pthread_mutex_unlock (&m->lock);
1489 ret = 1;
1490 }
1491
1492 return ret;
1493 }
1494
SCM scm_unlock_mutex (SCM mx)
{
  /* Plain unlock: no condition variable, no timeout.  */
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
	    (SCM mx, SCM cond, SCM timeout),
"Unlocks @var{mutex} if the calling thread owns the lock on "
"@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
"thread results in undefined behaviour.  Once a mutex has been unlocked, "
"one thread blocked on @var{mutex} is awakened and grabs the mutex "
"lock.  Every call to @code{lock-mutex} by this thread must be matched "
"with a call to @code{unlock-mutex}.  Only the last call to "
"@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  /* With COND given, this unlocks and waits on COND (SRFI-18 style);
     TIMEOUT only applies in that case.  */
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! (SCM_UNBNDP (timeout)))
	{
	  to_timespec (timeout, &cwaittime);
	  waittime = &cwaittime;
	}
    }

  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1528
1529 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1530 (SCM obj),
1531 "Return @code{#t} if @var{obj} is a mutex.")
1532 #define FUNC_NAME s_scm_mutex_p
1533 {
1534 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1535 }
1536 #undef FUNC_NAME
1537
1538 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1539 (SCM mx),
1540 "Return the thread owning @var{mx}, or @code{#f}.")
1541 #define FUNC_NAME s_scm_mutex_owner
1542 {
1543 SCM owner;
1544 fat_mutex *m = NULL;
1545
1546 SCM_VALIDATE_MUTEX (1, mx);
1547 m = SCM_MUTEX_DATA (mx);
1548 scm_i_pthread_mutex_lock (&m->lock);
1549 owner = m->owner;
1550 scm_i_pthread_mutex_unlock (&m->lock);
1551
1552 return owner;
1553 }
1554 #undef FUNC_NAME
1555
SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  /* Level 0 means unlocked; recursive mutexes can exceed 1.  Read
     without taking m->lock — a racy snapshot.  */
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
	    (SCM mx),
	    "Returns @code{#t} if the mutex @var{mx} is locked.")
#define FUNC_NAME s_scm_mutex_locked_p
{
  SCM_VALIDATE_MUTEX (1, mx);
  /* Same racy snapshot of the level as mutex-level above.  */
  return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1575
/* GC mark procedure for condvar smobs: continue marking with the
   waiting queue.  */
static SCM
fat_cond_mark (SCM cv)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  return c->waiting;
}

/* GC free procedure for condvar smobs.  */
static size_t
fat_cond_free (SCM mx)
{
  fat_cond *c = SCM_CONDVAR_DATA (mx);
  scm_gc_free (c, sizeof (fat_cond), "condition-variable");
  return 0;
}

/* Print procedure for condvar smobs: #<condition-variable ADDR>.  */
static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}

SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Set waiting to a valid SCM before SCM_NEWSMOB, so the smob is
     markable if make_queue triggers a GC.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1616
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
	    (SCM cv, SCM mx, SCM t),
"Wait until @var{cond-var} has been signalled.  While waiting, "
"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
"is locked again when this function returns.  When @var{time} is given, "
"it specifies a point in time where the waiting should be aborted.  It "
"can be either a integer as returned by @code{current-time} or a pair "
"as returned by @code{gettimeofday}.  When the waiting is aborted the "
"mutex is locked and @code{#f} is returned.  When the condition "
"variable is in fact signalled, the mutex is also locked and @code{#t} "
"is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  /* Delegate to fat_mutex_unlock with relock=1: unlock MX, wait on CV,
     re-lock MX before returning.  */
  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1644
/* Wake at most one thread waiting on C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}

SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

/* Wake every thread waiting on C: keep unblocking until the queue
   yields no more waiters.  */
static void
fat_cond_broadcast (fat_cond *c)
{
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
}

SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
	    (SCM obj),
	    "Return @code{#t} if @var{obj} is a condition variable.")
#define FUNC_NAME s_scm_condition_variable_p
{
  return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1688
/*** Marking stacks */

/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?
*/
#ifdef __ia64__
/* ia64 keeps a second, register backing-store stack that must be
   scanned in addition to the ordinary C stack.  */
# define SCM_MARK_BACKING_STORE() do {                                \
    ucontext_t ctx;                                                   \
    SCM_STACKITEM * top, * bot;                                       \
    getcontext (&ctx);                                                \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,           \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM)));                                    \
    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx);                   \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif

/* Conservatively mark every thread's C stack and saved registers for
   the GC.  All threads must already be suspended (t->top set).  */
void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      scm_gc_mark (t->handle);

#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      /* Also scan the register contents saved at suspension time.  */
      scm_mark_locations ((SCM_STACKITEM *) &t->regs,
			  ((size_t) sizeof(t->regs)
			   / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}
1733
/*** Select */

/* Like select(2), but interruptible by other Guile threads via this
   thread's sleep pipe, and performed outside guile mode so GC can
   proceed while we block.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  /* We always need a read set, because the sleep pipe's read end is
     watched alongside the caller's descriptors.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register the sleep pipe; retry (running asyncs in between) until
     setup succeeds.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  eno = errno;   /* save errno before any call can clobber it */
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* Another thread woke us via the pipe: drain the byte, hide the
	 wakeup fd from the caller, and report EINTR if nothing else was
	 ready.  */
      char dummy;
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1784
/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

/* Lock MUTEX, leaving guile mode for the duration so GC is not held
   up while we block.  */
int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_mutex_lock (mutex);
  scm_enter_guile (t);
  return res;
}

/* Unwind handler for scm_dynwind_pthread_mutex_lock.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}

/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is exited.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

/* pthread_cond_wait, performed outside guile mode.  held_mutex records
   the mutex the condition wait holds while this thread is out of guile
   mode.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  ((scm_i_thread *)t)->held_mutex = mutex;
  int res = scm_i_pthread_cond_wait (cond, mutex);
  ((scm_i_thread *)t)->held_mutex = NULL;
  scm_enter_guile (t);
  return res;
}

/* As scm_pthread_cond_wait, but with an absolute timeout WT.  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  ((scm_i_thread *)t)->held_mutex = mutex;
  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  ((scm_i_thread *)t)->held_mutex = NULL;
  scm_enter_guile (t);
  return res;
}

#endif
1836
/* Sleep for USECS microseconds via scm_std_select, so the sleep is
   interruptible by other threads.  Returns the time left reported in
   TV, converted back to microseconds.  */
unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

/* Sleep for SECS seconds via scm_std_select; returns the seconds
   remaining in TV.  */
unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}
1856
/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The thread's Scheme-level handle is kept in its thread-local
     descriptor.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1867
1868 static SCM
1869 scm_c_make_list (size_t n, SCM fill)
1870 {
1871 SCM res = SCM_EOL;
1872 while (n-- > 0)
1873 res = scm_cons (fill, res);
1874 return res;
1875 }
1876
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate the list before taking the lock, then overwrite its
     cars in place while walking all_threads.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is hidden from Scheme.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Truncate the pre-allocated list to the number of cells used.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1905
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

/* C-level predicate backing thread-exited?; validates THREAD and reads
   its exited flag without locking.  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1925
/* Condition used to wake threads parked for GC.  */
static scm_i_pthread_cond_t wake_up_cond;
/* When non-zero, threads should park themselves for GC.  */
int scm_i_thread_go_to_sleep;
static int threads_initialized_p = 0;

/* Stop the world: signal every thread to go to sleep and acquire each
   thread's heap mutex.  Returns with thread_admin_mutex held; the
   matching release is scm_i_thread_wake_up.  */
void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}

/* Tell every other thread to discard its freelists (e.g. after a GC
   reorganizes the heap).  */
void
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}

/* Restart the world: broadcast the wake-up condition, release every
   thread's heap mutex and the admin mutex taken by
   scm_i_thread_put_to_sleep, then re-enter guile mode.  */
void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}

/* Park the calling thread until GC is done; called when it notices
   scm_i_thread_go_to_sleep.  held_mutex records the heap mutex held
   across the wait.  */
void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  t->held_mutex = &t->heap_mutex;
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  t->held_mutex = NULL;
  resume (t);
}
1984
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
int scm_i_critical_section_level = 0;

static SCM dynwind_critical_section_mutex;

/* Enter a critical section for the current dynwind context: hold MUTEX
   (or the shared recursive default when MUTEX is #f) and keep asyncs
   blocked until the context is exited.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
2000
/*** Initialization */

scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

/* Earliest thread-system setup, run before the rest of libguile is
   initialized: create the global mutexes, condition variable and
   freelist keys, then register the first thread (stack BASE).  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex must be recursive: critical sections
     nest (see scm_i_critical_section_level).  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
2028
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

/* Register the thread, mutex and condition-variable smob types and
   finish initializing the first thread.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}

/* Create the default dynamic state once dynamic states exist.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}

/* Register the Scheme-visible procedures defined in this file.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2072
2073 /*
2074 Local Variables:
2075 c-file-style: "gnu"
2076 End:
2077 */