truly thread-local vms; don't compile popen.scm
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19 \f
20 #ifdef HAVE_CONFIG_H
21 # include <config.h>
22 #endif
23
24 #include "libguile/_scm.h"
25
26 #if HAVE_UNISTD_H
27 #include <unistd.h>
28 #endif
#include <stdio.h>
#include <stdlib.h>  /* abort, malloc */
#include <assert.h>
31
32 #ifdef HAVE_STRING_H
33 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
34 #endif
35
36 #if HAVE_SYS_TIME_H
37 #include <sys/time.h>
38 #endif
39
40 #include "libguile/validate.h"
41 #include "libguile/root.h"
42 #include "libguile/eval.h"
43 #include "libguile/async.h"
44 #include "libguile/ports.h"
45 #include "libguile/threads.h"
46 #include "libguile/dynwind.h"
47 #include "libguile/iselect.h"
48 #include "libguile/fluids.h"
49 #include "libguile/continuations.h"
50 #include "libguile/gc.h"
51 #include "libguile/init.h"
52 #include "libguile/scmsigs.h"
53 #include "libguile/strings.h"
54
55 #ifdef __MINGW32__
56 #ifndef ETIMEDOUT
57 # define ETIMEDOUT WSAETIMEDOUT
58 #endif
59 # include <fcntl.h>
60 # include <process.h>
61 # define pipe(fd) _pipe (fd, 256, O_BINARY)
62 #endif /* __MINGW32__ */
63
64 static void
65 to_timespec (SCM t, scm_t_timespec *waittime)
66 {
67 if (scm_is_pair (t))
68 {
69 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
70 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
71 }
72 else
73 {
74 double time = scm_to_double (t);
75 double sec = scm_c_truncate (time);
76
77 waittime->tv_sec = (long) sec;
78 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
79 }
80 }
81
82 /*** Queues */
83
84 /* Make an empty queue data structure.
85 */
86 static SCM
87 make_queue ()
88 {
89 return scm_cons (SCM_EOL, SCM_EOL);
90 }
91
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.

   Q is a cons cell: its CDR points at the first pair of the element
   list and its CAR at the last pair, so both ends are reachable in
   O(1).  An empty queue has SCM_EOL in both fields.  */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);           /* queue was empty: C becomes the head */
  else
    SCM_SETCDR (SCM_CAR (q), c); /* link C after the current tail */
  SCM_SETCAR (q, c);             /* C is the new tail in either case */
  return c;
}
106
107 /* Remove the element that the handle C refers to from the queue Q. C
108 must have been returned from a call to enqueue. The return value
109 is zero when the element referred to by C has already been removed.
110 Otherwise, 1 is returned.
111 */
112 static int
113 remqueue (SCM q, SCM c)
114 {
115 SCM p, prev = q;
116 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
117 {
118 if (scm_is_eq (p, c))
119 {
120 if (scm_is_eq (c, SCM_CAR (q)))
121 SCM_SETCAR (q, SCM_CDR (c));
122 SCM_SETCDR (prev, SCM_CDR (c));
123 return 1;
124 }
125 prev = p;
126 }
127 return 0;
128 }
129
130 /* Remove the front-most element from the queue Q and return it.
131 Return SCM_BOOL_F when Q is empty.
132 */
133 static SCM
134 dequeue (SCM q)
135 {
136 SCM c = SCM_CDR (q);
137 if (scm_is_null (c))
138 return SCM_BOOL_F;
139 else
140 {
141 SCM_SETCDR (q, SCM_CDR (c));
142 if (scm_is_null (SCM_CDR (q)))
143 SCM_SETCAR (q, SCM_EOL);
144 return SCM_CAR (c);
145 }
146 }
147
/*** Thread smob routines */

/* GC mark function for thread smobs: mark every SCM field of the
   scm_i_thread structure.  The dynamic state is returned so the
   caller can mark it tail-recursively.  */
static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->cleanup_handler);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->mutexes);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  scm_gc_mark (t->vm);
  return t->dynamic_state;
}
164
/* Print function for thread smobs: "#<thread ID (ADDR)>" where ID is
   derived from the pthread identifier and ADDR is the address of the
   scm_i_thread structure.  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the union member whose size matches pthread_t; the sizeof
     comparisons are compile-time constants, so only one branch
     survives optimization.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}
199
200 static size_t
201 thread_free (SCM obj)
202 {
203 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
204 assert (t->exited);
205 scm_gc_free (t, sizeof (*t), "thread");
206 return 0;
207 }
208
209 /*** Blocking on queues. */
210
211 /* See also scm_i_queue_async_cell for how such a block is
212    interrupted.
213 */
214
215 /* Put the current thread on QUEUE and go to sleep, waiting for it to
216 be woken up by a call to 'unblock_from_queue', or to be
217 interrupted. Upon return of this function, the current thread is
218 no longer on QUEUE, even when the sleep has been interrupted.
219
220 The QUEUE data structure is assumed to be protected by MUTEX and
221 the caller of block_self must hold MUTEX. It will be atomically
222 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
223
224 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
225 as MUTEX is needed.
226
227 When WAITTIME is not NULL, the sleep will be aborted at that time.
228
229 The return value of block_self is an errno value. It will be zero
230 when the sleep has been successfully completed by a call to
231 unblock_from_queue, EINTR when it has been interrupted by the
232 delivery of a system async, and ETIMEDOUT when the timeout has
233 expired.
234
235 The system asyncs themselves are not executed by block_self.
236 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* NOTE(review): scm_i_setup_sleep presumably returns non-zero when
     an interrupt is already pending, in which case we report EINTR
     without sleeping at all -- confirm against scmsigs.c.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block asyncs while we are on QUEUE; they are not executed by
	 block_self (see the comment above).  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
268
269 /* Wake up the first thread on QUEUE, if any. The caller must hold
270 the mutex that protects QUEUE. The awoken thread is returned, or
271 #f when the queue was empty.
272 */
273 static SCM
274 unblock_from_queue (SCM queue)
275 {
276 SCM thread = dequeue (queue);
277 if (scm_is_true (thread))
278 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
279 return thread;
280 }
281
282 /* Getting into and out of guile mode.
283 */
284
285 /* Ken Raeburn observes that the implementation of suspend and resume
286 (and the things that build on top of them) are very likely not
287    correct (see below).  We will need to fix this eventually, and that's
288 why scm_leave_guile/scm_enter_guile are not exported in the API.
289
290 Ken writes:
291
292 Consider this sequence:
293
294 Function foo, called in Guile mode, calls suspend (maybe indirectly
295 through scm_leave_guile), which does this:
296
297 // record top of stack for the GC
298 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
299 var 't'
300 // save registers.
301 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
302 setjmp (t->regs); // here's most of the magic
303
304 ... and returns.
305
306 Function foo has a SCM value X, a handle on a non-immediate object, in
307 a caller-saved register R, and it's the only reference to the object
308 currently.
309
310 The compiler wants to use R in suspend, so it pushes the current
311 value, X, into a stack slot which will be reloaded on exit from
312 suspend; then it loads stuff into R and goes about its business. The
313 setjmp call saves (some of) the current registers, including R, which
314 no longer contains X. (This isn't a problem for a normal
315 setjmp/longjmp situation, where longjmp would be called before
316 setjmp's caller returns; the old value for X would be loaded back from
317 the stack after the longjmp, before the function returned.)
318
319 So, suspend returns, loading X back into R (and invalidating the jump
320 buffer) in the process. The caller foo then goes off and calls a
321 bunch of other functions out of Guile mode, occasionally storing X on
322 the stack again, but, say, much deeper on the stack than suspend's
323 stack frame went, and the stack slot where suspend had written X has
324 long since been overwritten with other values.
325
326 Okay, nothing actively broken so far. Now, let garbage collection
327 run, triggered by another thread.
328
329 The thread calling foo is out of Guile mode at the time, so the
330 garbage collector just scans a range of stack addresses. Too bad that
331 X isn't stored there. So the pointed-to storage goes onto the free
332 list, and I think you can see where things go from there.
333
334 Is there anything I'm missing that'll prevent this scenario from
335 happening? I mean, aside from, "well, suspend and scm_leave_guile
336 don't have many local variables, so they probably won't need to save
337 any registers on most systems, so we hope everything will wind up in
338 the jump buffer and we'll just get away with it"?
339
340 (And, going the other direction, if scm_leave_guile and suspend push
341 the stack pointer over onto a new page, and foo doesn't make further
342 function calls and thus the stack pointer no longer includes that
343 page, are we guaranteed that the kernel cannot release the now-unused
344 stack page that contains the top-of-stack pointer we just saved? I
345 don't know if any OS actually does that. If it does, we could get
346 faults in garbage collection.)
347
348 I don't think scm_without_guile has to have this problem, as it gets
349 more control over the stack handling -- but it should call setjmp
350 itself. I'd probably try something like:
351
352 // record top of stack for the GC
353 t->top = SCM_STACK_PTR (&t);
354 // save registers.
355 SCM_FLUSH_REGISTER_WINDOWS;
356 setjmp (t->regs);
357 res = func(data);
358 scm_enter_guile (t);
359
360 ... though even that's making some assumptions about the stack
361 ordering of local variables versus caller-saved registers.
362
363 For something like scm_leave_guile to work, I don't think it can just
364 rely on invalidated jump buffers. A valid jump buffer, and a handle
365 on the stack state at the point when the jump buffer was initialized,
366 together, would work fine, but I think then we're talking about macros
367 invoking setjmp in the caller's stack frame, and requiring that the
368 caller of scm_leave_guile also call scm_enter_guile before returning,
369 kind of like pthread_cleanup_push/pop calls that have to be paired up
370 in a function. (In fact, the pthread ones have to be paired up
371 syntactically, as if they might expand to a compound statement
372 incorporating the user's code, and invoking a compiler's
373 exception-handling primitives. Which might be something to think
374 about for cases where Guile is used with C++ exceptions or
375 pthread_cancel.)
376 */
377
378 scm_i_pthread_key_t scm_i_thread_key;
379
380 static void
381 resume (scm_i_thread *t)
382 {
383 t->top = NULL;
384 if (t->clear_freelists_p)
385 {
386 *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
387 *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
388 t->clear_freelists_p = 0;
389 }
390 }
391
/* A ticket represents a suspended thread (or NULL); it is produced by
   scm_leave_guile and consumed by scm_enter_guile.  */
typedef void* scm_t_guile_ticket;

/* Re-enter guile mode: acquire the thread's heap mutex and clear the
   suspended state recorded by suspend.  A NULL ticket is a no-op.  */
static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      scm_i_pthread_mutex_lock (&t->heap_mutex);
      resume (t);
    }
}
404
/* Record the current stack top and register contents in the current
   thread structure so the GC can scan them while this thread is out
   of guile mode.  See the long comment above for why this scheme is
   known to be fragile.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}
417
418 static scm_t_guile_ticket
419 scm_leave_guile ()
420 {
421 scm_i_thread *t = suspend ();
422 scm_i_pthread_mutex_unlock (&t->heap_mutex);
423 return (scm_t_guile_ticket) t;
424 }
425
/* Protects the ALL_THREADS list and THREAD_COUNT below.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;  /* linked via the next_thread field */
static int thread_count;                  /* number of entries in ALL_THREADS */

/* Dynamic state used as the parent when none is supplied, e.g. by
   scm_with_guile.  */
static SCM scm_i_default_dynamic_state;
431
432 /* Perform first stage of thread initialisation, in non-guile mode.
433 */
434 static void
435 guilify_self_1 (SCM_STACKITEM *base)
436 {
437 scm_i_thread *t = malloc (sizeof (scm_i_thread));
438
439 t->pthread = scm_i_pthread_self ();
440 t->handle = SCM_BOOL_F;
441 t->result = SCM_BOOL_F;
442 t->cleanup_handler = SCM_BOOL_F;
443 t->mutexes = SCM_EOL;
444 t->join_queue = SCM_EOL;
445 t->dynamic_state = SCM_BOOL_F;
446 t->dynwinds = SCM_EOL;
447 t->active_asyncs = SCM_EOL;
448 t->block_asyncs = 1;
449 t->pending_asyncs = 1;
450 t->last_debug_frame = NULL;
451 t->base = base;
452 #ifdef __ia64__
453 /* Calculate and store off the base of this thread's register
454 backing store (RBS). Unfortunately our implementation(s) of
455 scm_ia64_register_backing_store_base are only reliable for the
456 main thread. For other threads, therefore, find out the current
457 top of the RBS, and use that as a maximum. */
458 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
459 {
460 ucontext_t ctx;
461 void *bsp;
462 getcontext (&ctx);
463 bsp = scm_ia64_ar_bsp (&ctx);
464 if (t->register_backing_store_base > bsp)
465 t->register_backing_store_base = bsp;
466 }
467 #endif
468 t->continuation_root = SCM_EOL;
469 t->continuation_base = base;
470 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
471 t->sleep_mutex = NULL;
472 t->sleep_object = SCM_BOOL_F;
473 t->sleep_fd = -1;
474 /* XXX - check for errors. */
475 pipe (t->sleep_pipe);
476 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
477 scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
478 t->clear_freelists_p = 0;
479 t->gc_running_p = 0;
480 t->canceled = 0;
481 t->exited = 0;
482
483 t->freelist = SCM_EOL;
484 t->freelist2 = SCM_EOL;
485 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
486 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
487
488 scm_i_pthread_setspecific (scm_i_thread_key, t);
489
490 scm_i_pthread_mutex_lock (&t->heap_mutex);
491
492 scm_i_pthread_mutex_lock (&thread_admin_mutex);
493 t->next_thread = all_threads;
494 all_threads = t;
495 thread_count++;
496 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
497 }
498
/* Perform second stage of thread initialisation, in guile mode.
   Creates the SCM-level objects for the thread set up by
   guilify_self_1: the thread smob, the continuation root, the dynamic
   state (inherited from PARENT, or a fresh initial state when PARENT
   is #f) and the join queue.  Finally unblocks asyncs.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Asyncs were blocked in guilify_self_1; safe to deliver now.  */
  t->block_asyncs = 0;
}
520
521 \f
522 /*** Fat mutexes */
523
524 /* We implement our own mutex type since we want them to be 'fair', we
525 want to do fancy things while waiting for them (like running
526 asyncs) and we might want to add things that are nice for
527 debugging.
528 */
529
/* The representation behind Scheme mutex objects.  LOCK protects the
   other fields; WAITING is a queue (see make_queue) of threads
   blocked in lock-mutex.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;                    /* thread holding us, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
545
/* Perform thread tear-down, in guile mode.  V is the exiting thread's
   scm_i_thread pointer.  Runs the cleanup handler (its result becomes
   the thread's exit value), marks the thread as exited, wakes joiners
   and threads waiting on mutexes this thread still holds.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice; errors from
	 it are printed, not propagated.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      (scm_t_catch_body) scm_call_0, ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* Wake threads still blocked on mutexes we own, so they can notice
     the abandonment.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_CAR (t->mutexes);
      fat_mutex *m  = SCM_MUTEX_DATA (mutex);
      scm_i_pthread_mutex_lock (&m->lock);

      unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      t->mutexes = SCM_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
587
/* Destructor for the scm_i_thread_key thread-specific value; pthreads
   runs it when a thread with a non-NULL value terminates.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* Reinstall the key: pthreads clears it before calling the
     destructor, but the guile-mode code below needs
     SCM_I_CURRENT_THREAD to work.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down. */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure. */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list. */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt. */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
627
/* Guard so the thread key is created exactly once, no matter how many
   threads race into scm_i_init_thread_for_guile.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create scm_i_thread_key; ON_THREAD_EXIT becomes the destructor run
   for each thread's value at thread termination.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
635
636 /* Perform any initializations necessary to bring the current thread
637 into guile mode, initializing Guile itself, if necessary.
638
639 BASE is the stack base to use with GC.
640
641    PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
642 which case the default dynamic state is used.
643
644 Return zero when the thread was in guile mode already; otherwise
645 return 1.
646 */
647
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  /* Safe to call from any thread, any number of times.  */
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 XXX - base might be lower than when this thread was first
	 guilified.
       */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
698
699 #if SCM_USE_PTHREAD_THREADS
700
701 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
702 /* This method for GNU/Linux and perhaps some other systems.
703 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
704 available on them. */
705 #define HAVE_GET_THREAD_STACK_BASE
706
707 static SCM_STACKITEM *
708 get_thread_stack_base ()
709 {
710 pthread_attr_t attr;
711 void *start, *end;
712 size_t size;
713
714 pthread_getattr_np (pthread_self (), &attr);
715 pthread_attr_getstack (&attr, &start, &size);
716 end = (char *)start + size;
717
718 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
719 for the main thread, but we can use scm_get_stack_base in that
720 case.
721 */
722
723 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
724 if ((void *)&attr < start || (void *)&attr >= end)
725 return scm_get_stack_base ();
726 else
727 #endif
728 {
729 #if SCM_STACK_GROWS_UP
730 return start;
731 #else
732 return end;
733 #endif
734 }
735 }
736
737 #elif HAVE_PTHREAD_GET_STACKADDR_NP
738 /* This method for MacOS X.
739 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
740 but as of 2006 there's nothing obvious at apple.com. */
741 #define HAVE_GET_THREAD_STACK_BASE
742 static SCM_STACKITEM *
743 get_thread_stack_base ()
744 {
745 return pthread_get_stackaddr_np (pthread_self ());
746 }
747
748 #elif defined (__MINGW32__)
749 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
750 in any thread. We don't like hard-coding the name of a system, but there
751 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
752 work. */
753 #define HAVE_GET_THREAD_STACK_BASE
754 static SCM_STACKITEM *
755 get_thread_stack_base ()
756 {
757 return scm_get_stack_base ();
758 }
759
760 #endif /* pthread methods of get_thread_stack_base */
761
762 #else /* !SCM_USE_PTHREAD_THREADS */
763
764 #define HAVE_GET_THREAD_STACK_BASE
765
766 static SCM_STACKITEM *
767 get_thread_stack_base ()
768 {
769 return scm_get_stack_base ();
770 }
771
772 #endif /* !SCM_USE_PTHREAD_THREADS */
773
774 #ifdef HAVE_GET_THREAD_STACK_BASE
775
776 void
777 scm_init_guile ()
778 {
779 scm_i_init_thread_for_guile (get_thread_stack_base (),
780 scm_i_default_dynamic_state);
781 }
782
783 #endif
784
/* Call FUNC (DATA) in guile mode and return its result, using the
   default dynamic state as parent; see scm_i_with_guile_and_parent
   for the details.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
791
/* Cleanup handler pushed in scm_i_with_guile_and_parent: leaves guile
   mode if the thread is terminated (e.g. via pthread_cancel) while
   inside the continuation barrier.  X is unused.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  scm_leave_guile ();
}
797
/* Call FUNC (DATA) in guile mode, with PARENT as the parent dynamic
   state, and return its result.  The call goes through a continuation
   barrier.  If this thread was not already in guile mode, leave guile
   mode again before returning (also on cancellation, via the cleanup
   handler).  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  /* A local serves as the stack base for the GC; see the comments
     around suspend for the caveats of this approach.  */
  SCM_STACKITEM base_item;

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
      scm_leave_guile ();
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}
818
819 void *
820 scm_without_guile (void *(*func)(void *), void *data)
821 {
822 void *res;
823 scm_t_guile_ticket t;
824 t = scm_leave_guile ();
825 res = func (data);
826 scm_enter_guile (t);
827 return res;
828 }
829
/*** Thread creation */

/* Handshake data between call-with-new-thread and the thread it
   creates; it lives on the creator's stack, so the creator blocks on
   COND until the child has stored its thread object in THREAD.  */
typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
840
/* Body of a thread made by call-with-new-thread, run in guile mode.
   Reports our thread object back to the creator, then runs THUNK
   (under a catch-all HANDLER when one was given); the result becomes
   the thread's exit value.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* DATA lives on the creator's stack; copy out what we need (above)
     and do not touch it after the unlock below.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
862
863 static void *
864 launch_thread (void *d)
865 {
866 launch_data *data = (launch_data *)d;
867 scm_i_pthread_detach (scm_i_pthread_self ());
868 scm_i_with_guile_and_parent (really_launch, d, data->parent);
869 return NULL;
870 }
871
872 SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
873 (SCM thunk, SCM handler),
874 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
875 "returning a new thread object representing the thread. The procedure\n"
876 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
877 "\n"
878 "When @var{handler} is specified, then @var{thunk} is called from\n"
879 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
880 "handler. This catch is established inside the continuation barrier.\n"
881 "\n"
882 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
883 "the @emph{exit value} of the thread and the thread is terminated.")
884 #define FUNC_NAME s_scm_call_with_new_thread
885 {
886 launch_data data;
887 scm_i_pthread_t id;
888 int err;
889
890 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
891 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
892 handler, SCM_ARG2, FUNC_NAME);
893
894 data.parent = scm_current_dynamic_state ();
895 data.thunk = thunk;
896 data.handler = handler;
897 data.thread = SCM_BOOL_F;
898 scm_i_pthread_mutex_init (&data.mutex, NULL);
899 scm_i_pthread_cond_init (&data.cond, NULL);
900
901 scm_i_scm_pthread_mutex_lock (&data.mutex);
902 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
903 if (err)
904 {
905 scm_i_pthread_mutex_unlock (&data.mutex);
906 errno = err;
907 scm_syserror (NULL);
908 }
909 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
910 scm_i_pthread_mutex_unlock (&data.mutex);
911
912 return data.thread;
913 }
914 #undef FUNC_NAME
915
/* Handshake data for scm_spawn_thread, the C-level analogue of
   launch_data: the creator blocks on COND until the child has stored
   its thread object in THREAD.  Lives on the creator's stack.  */
typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
926
/* Body of a thread made by scm_spawn_thread, run in guile mode.
   Reports our thread object back to the creator, then runs BODY
   (under HANDLER when one was given); the result becomes the
   thread's exit value.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* DATA lives on the creator's stack; copy out what we need (above)
     and do not touch it after the unlock below.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
951
952 static void *
953 spawn_thread (void *d)
954 {
955 spawn_data *data = (spawn_data *)d;
956 scm_i_pthread_detach (scm_i_pthread_self ());
957 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
958 return NULL;
959 }
960
961 SCM
962 scm_spawn_thread (scm_t_catch_body body, void *body_data,
963 scm_t_catch_handler handler, void *handler_data)
964 {
965 spawn_data data;
966 scm_i_pthread_t id;
967 int err;
968
969 data.parent = scm_current_dynamic_state ();
970 data.body = body;
971 data.body_data = body_data;
972 data.handler = handler;
973 data.handler_data = handler_data;
974 data.thread = SCM_BOOL_F;
975 scm_i_pthread_mutex_init (&data.mutex, NULL);
976 scm_i_pthread_cond_init (&data.cond, NULL);
977
978 scm_i_scm_pthread_mutex_lock (&data.mutex);
979 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
980 if (err)
981 {
982 scm_i_pthread_mutex_unlock (&data.mutex);
983 errno = err;
984 scm_syserror (NULL);
985 }
986 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
987 scm_i_pthread_mutex_unlock (&data.mutex);
988
989 return data.thread;
990 }
991
/* Scheme binding for sched_yield; returns #t when the hint was
   accepted by the scheduler.  */
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1000
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Drop the admin mutex before cancelling: the thread's exit
	 path (do_thread_exit) takes it too.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1025
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f means "remove the handler"; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Installing a handler on a dead or dying thread is silently
     ignored; it could never run.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1049
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
	    (SCM thread),
	    "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* A dead or dying thread reports #f (no handler).  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1068
/* Wait for THREAD to terminate, with no timeout; see
   scm_join_thread_timed below.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1073
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  /* Default result when the join times out: TIMEOUTVAL, or #f when
     none was supplied.  */
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      /* Sleep on the join queue until the thread exits, the timeout
	 expires, or we are interrupted; in the latter case run asyncs
	 (SCM_TICK) and go back to sleep.  */
      while (1)
	{
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Release the admin mutex around SCM_TICK: asyncs may run
	     arbitrary code.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1130
1131 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1132 (SCM obj),
1133 "Return @code{#t} if @var{obj} is a thread.")
1134 #define FUNC_NAME s_scm_thread_p
1135 {
1136 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1137 }
1138 #undef FUNC_NAME
1139
/* GC mark procedure for mutex smobs: mark the owning thread and return
   the waiter queue so the GC marks it next (tail-mark protocol).  */
static SCM
fat_mutex_mark (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}
1147
/* GC free procedure for mutex smobs: destroy the OS-level lock, then
   release the fat_mutex struct.  Returns 0 (no extra bytes freed).  */
static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}
1156
1157 static int
1158 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1159 {
1160 fat_mutex *m = SCM_MUTEX_DATA (mx);
1161 scm_puts ("#<mutex ", port);
1162 scm_uintprint ((scm_t_bits)m, 16, port);
1163 scm_puts (">", port);
1164 return 1;
1165 }
1166
/* Allocate and initialize a fat mutex smob.  The three flags select
   recursive locking, tolerance of unlocking an unlocked mutex, and
   unlocking by a non-owner thread, respectively.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Set `waiting' to a valid SCM *before* SCM_NEWSMOB: a GC triggered
     by make_queue below must not see garbage in the live smob.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1187
/* C-level constructor: a plain mutex with no option flags.  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1192
/* Interned symbols for the option flags accepted by make-mutex.  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1196
1197 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1198 (SCM flags),
1199 "Create a new mutex. ")
1200 #define FUNC_NAME s_scm_make_mutex_with_flags
1201 {
1202 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1203
1204 SCM ptr = flags;
1205 while (! scm_is_null (ptr))
1206 {
1207 SCM flag = SCM_CAR (ptr);
1208 if (scm_is_eq (flag, unchecked_unlock_sym))
1209 unchecked_unlock = 1;
1210 else if (scm_is_eq (flag, allow_external_unlock_sym))
1211 external_unlock = 1;
1212 else if (scm_is_eq (flag, recursive_sym))
1213 recursive = 1;
1214 else
1215 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1216 ptr = SCM_CDR (ptr);
1217 }
1218 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1219 }
1220 #undef FUNC_NAME
1221
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, strict unlock semantics otherwise.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1230
/* Throw key used when a lock is obtained on a mutex whose owner died.  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1232
/* Core locking routine.  Try to lock MUTEX on behalf of OWNER (the
   current thread when OWNER is unbound), waiting up to TIMEOUT (NULL
   means wait forever).  Sets *RET to 1 when the lock was acquired.
   Returns #f on success, or a (key . message) pair describing an
   exception for the caller to throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unlocked: take it, and record it in the owning thread's
	     mutex list so abandoned locks can be detected.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
	      scm_i_pthread_mutex_lock (&t->admin_mutex);
	      t->mutexes = scm_cons (mutex, t->mutexes);
	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* Owner died while holding the lock: steal it and report an
	     abandoned-mutex error to the caller.
	     NOTE(review): unlike the level==0 branch above, this does
	     not add MUTEX to the new owner's t->mutexes list, nor does
	     it reset m->level — confirm this asymmetry is intended.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Relock by the current owner: allowed only for recursive
	     mutexes; otherwise an error pair is returned.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by someone else: check the absolute deadline, then
	     block on the mutex's wait queue and retry after waking.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1309
/* C-level convenience wrapper: lock MX with no timeout, current thread
   as owner.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1314
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
	    (SCM m, SCM timeout, SCM owner),
	    "Lock @var{mutex}. If the mutex is already locked, the calling thread "
	    "blocks until the mutex becomes available. The function returns when "
	    "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
	    "a thread already owns will succeed right away and will not block the "
	    "thread. That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  /* TIMEOUT of #f or unbound means wait forever (NULL waittime).  */
  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  /* fat_mutex_lock reports errors as a (key . message) pair rather
     than throwing itself; re-throw here in Scheme context.  */
  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1342
/* Lock MUTEX for the extent of the current dynwind context: unlocked
   on unwind, re-locked on rewind.  The unwind handler is registered
   first so an error during the rewind (lock) leaves things balanced.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1351
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
	    (SCM mutex),
	    "Try to lock @var{mutex}. If the mutex is already locked by someone "
	    "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  /* A deadline of "now" (offset 0) makes fat_mutex_lock non-blocking.  */
  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1373
/*** Fat condition variables */

/* Representation of a condition variable: an OS-level lock protecting
   the queue of threads currently waiting on it.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

/* Smob predicate and data accessor for condition variables.  */
#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1383
1384 static int
1385 fat_mutex_unlock (SCM mutex, SCM cond,
1386 const scm_t_timespec *waittime, int relock)
1387 {
1388 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1389 fat_cond *c = NULL;
1390 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1391 int err = 0, ret = 0;
1392
1393 scm_i_scm_pthread_mutex_lock (&m->lock);
1394
1395 SCM owner = m->owner;
1396
1397 if (!scm_is_eq (owner, scm_current_thread ()))
1398 {
1399 if (m->level == 0)
1400 {
1401 if (!m->unchecked_unlock)
1402 {
1403 scm_i_pthread_mutex_unlock (&m->lock);
1404 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1405 }
1406 owner = scm_current_thread ();
1407 }
1408 else if (!m->allow_external_unlock)
1409 {
1410 scm_i_pthread_mutex_unlock (&m->lock);
1411 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1412 }
1413 }
1414
1415 if (! (SCM_UNBNDP (cond)))
1416 {
1417 c = SCM_CONDVAR_DATA (cond);
1418 while (1)
1419 {
1420 int brk = 0;
1421
1422 scm_i_scm_pthread_mutex_lock (&c->lock);
1423 if (m->level > 0)
1424 m->level--;
1425 if (m->level == 0)
1426 m->owner = unblock_from_queue (m->waiting);
1427
1428 scm_i_pthread_mutex_unlock (&m->lock);
1429
1430 t->block_asyncs++;
1431
1432 err = block_self (c->waiting, cond, &c->lock, waittime);
1433
1434 if (err == 0)
1435 {
1436 ret = 1;
1437 brk = 1;
1438 }
1439 else if (err == ETIMEDOUT)
1440 {
1441 ret = 0;
1442 brk = 1;
1443 }
1444 else if (err != EINTR)
1445 {
1446 errno = err;
1447 scm_i_pthread_mutex_unlock (&c->lock);
1448 scm_syserror (NULL);
1449 }
1450
1451 if (brk)
1452 {
1453 if (relock)
1454 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
1455 scm_i_pthread_mutex_unlock (&c->lock);
1456 break;
1457 }
1458
1459 scm_i_pthread_mutex_unlock (&c->lock);
1460
1461 t->block_asyncs--;
1462 scm_async_click ();
1463
1464 scm_remember_upto_here_2 (cond, mutex);
1465
1466 scm_i_scm_pthread_mutex_lock (&m->lock);
1467 }
1468 }
1469 else
1470 {
1471 if (m->level > 0)
1472 m->level--;
1473 if (m->level == 0)
1474 m->owner = unblock_from_queue (m->waiting);
1475
1476 scm_i_pthread_mutex_unlock (&m->lock);
1477 ret = 1;
1478 }
1479
1480 return ret;
1481 }
1482
/* C-level convenience wrapper: unlock MX without waiting on a condvar.  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1487
SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
	    (SCM mx, SCM cond, SCM timeout),
	    "Unlocks @var{mutex} if the calling thread owns the lock on "
	    "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
	    "thread results in undefined behaviour.  Once a mutex has been unlocked, "
	    "one thread blocked on @var{mutex} is awakened and grabs the mutex "
	    "lock.  Every call to @code{lock-mutex} by this thread must be matched "
	    "with a call to @code{unlock-mutex}.  Only the last call to "
	    "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  /* TIMEOUT is only meaningful when a condvar is supplied to wait on.  */
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! (SCM_UNBNDP (timeout)))
	{
	  to_timespec (timeout, &cwaittime);
	  waittime = &cwaittime;
	}
    }

  /* relock = 0: unlike wait-condition-variable, the mutex stays
     unlocked on return.  */
  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1516
1517 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1518 (SCM obj),
1519 "Return @code{#t} if @var{obj} is a mutex.")
1520 #define FUNC_NAME s_scm_mutex_p
1521 {
1522 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1523 }
1524 #undef FUNC_NAME
1525
SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
	    (SCM mx),
	    "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM owner;
  fat_mutex *m = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  m = SCM_MUTEX_DATA (mx);
  /* Read the owner field under the mutex's own lock for a consistent
     snapshot.  */
  scm_i_pthread_mutex_lock (&m->lock);
  owner = m->owner;
  scm_i_pthread_mutex_unlock (&m->lock);

  return owner;
}
#undef FUNC_NAME
1543
1544 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1545 (SCM mx),
1546 "Return the lock level of mutex @var{mx}.")
1547 #define FUNC_NAME s_scm_mutex_level
1548 {
1549 SCM_VALIDATE_MUTEX (1, mx);
1550 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1551 }
1552 #undef FUNC_NAME
1553
1554 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1555 (SCM mx),
1556 "Returns @code{#t} if the mutex @var{mx} is locked.")
1557 #define FUNC_NAME s_scm_mutex_locked_p
1558 {
1559 SCM_VALIDATE_MUTEX (1, mx);
1560 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1561 }
1562 #undef FUNC_NAME
1563
/* GC mark procedure for condvar smobs: return the waiter queue so the
   GC marks it next (tail-mark protocol).  */
static SCM
fat_cond_mark (SCM cv)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  return c->waiting;
}
1570
/* GC free procedure for condvar smobs: destroy the OS-level lock, then
   release the fat_cond struct.  Returns 0 (no extra bytes freed).  */
static size_t
fat_cond_free (SCM mx)
{
  fat_cond *c = SCM_CONDVAR_DATA (mx);
  scm_i_pthread_mutex_destroy (&c->lock);
  scm_gc_free (c, sizeof (fat_cond), "condition-variable");
  return 0;
}
1579
1580 static int
1581 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1582 {
1583 fat_cond *c = SCM_CONDVAR_DATA (cv);
1584 scm_puts ("#<condition-variable ", port);
1585 scm_uintprint ((scm_t_bits)c, 16, port);
1586 scm_puts (">", port);
1587 return 1;
1588 }
1589
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  /* Set `waiting' to a valid SCM *before* SCM_NEWSMOB: a GC triggered
     by make_queue below must not see garbage in the live smob.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1606
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
	    (SCM cv, SCM mx, SCM t),
	    "Wait until @var{cond-var} has been signalled.  While waiting, "
	    "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
	    "is locked again when this function returns.  When @var{time} is given, "
	    "it specifies a point in time where the waiting should be aborted.  It "
	    "can be either a integer as returned by @code{current-time} or a pair "
	    "as returned by @code{gettimeofday}.  When the waiting is aborted the "
	    "mutex is locked and @code{#f} is returned.  When the condition "
	    "variable is in fact signalled, the mutex is also locked and @code{#t} "
	    "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  /* relock = 1: the mutex is re-acquired before returning, whether the
     wait succeeded or timed out.  */
  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1634
/* Wake at most one thread waiting on C, under C's lock.  */
static void
fat_cond_signal (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  unblock_from_queue (c->waiting);
  scm_i_pthread_mutex_unlock (&c->lock);
}
1642
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  /* Always returns #t; no indication whether anyone was actually woken.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1653
/* Wake every thread waiting on C: keep dequeuing until the queue
   yields no more waiters, all under C's lock.  */
static void
fat_cond_broadcast (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
  scm_i_pthread_mutex_unlock (&c->lock);
}
1662
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1673
1674 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1675 (SCM obj),
1676 "Return @code{#t} if @var{obj} is a condition variable.")
1677 #define FUNC_NAME s_scm_condition_variable_p
1678 {
1679 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1680 }
1681 #undef FUNC_NAME
1682
/*** Marking stacks */

/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?
*/
/* On ia64 the register backing store is a second, upward-growing stack
   that can also hold live SCM values; mark the saved context and the
   backing-store span for the current thread.  Elsewhere this is a
   no-op.  */
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                \
    ucontext_t ctx;                                                   \
    SCM_STACKITEM * top, * bot;                                       \
    getcontext (&ctx);                                                \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,           \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM)));                                    \
    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base;  \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx);                   \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1702
/* GC entry point: mark the C stack, saved registers, and thread handle
   of every thread.  All threads must already be suspended (their
   stack tops recorded) when this runs.  */
void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      scm_gc_mark (t->handle);

#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      /* The register snapshot taken at suspension may hold the only
	 reference to some objects; mark it conservatively too.  */
      scm_mark_locations ((SCM_STACKITEM *) &t->regs,
			  ((size_t) sizeof(t->regs)
			   / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}
1727
1728 /*** Select */
1729
1730 int
1731 scm_std_select (int nfds,
1732 SELECT_TYPE *readfds,
1733 SELECT_TYPE *writefds,
1734 SELECT_TYPE *exceptfds,
1735 struct timeval *timeout)
1736 {
1737 fd_set my_readfds;
1738 int res, eno, wakeup_fd;
1739 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1740 scm_t_guile_ticket ticket;
1741
1742 if (readfds == NULL)
1743 {
1744 FD_ZERO (&my_readfds);
1745 readfds = &my_readfds;
1746 }
1747
1748 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1749 SCM_TICK;
1750
1751 wakeup_fd = t->sleep_pipe[0];
1752 ticket = scm_leave_guile ();
1753 FD_SET (wakeup_fd, readfds);
1754 if (wakeup_fd >= nfds)
1755 nfds = wakeup_fd+1;
1756 res = select (nfds, readfds, writefds, exceptfds, timeout);
1757 t->sleep_fd = -1;
1758 eno = errno;
1759 scm_enter_guile (ticket);
1760
1761 scm_i_reset_sleep (t);
1762
1763 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1764 {
1765 char dummy;
1766 read (wakeup_fd, &dummy, 1);
1767 FD_CLR (wakeup_fd, readfds);
1768 res -= 1;
1769 if (res == 0)
1770 {
1771 eno = EINTR;
1772 res = -1;
1773 }
1774 }
1775 errno = eno;
1776 return res;
1777 }
1778
1779 /* Convenience API for blocking while in guile mode. */
1780
1781 #if SCM_USE_PTHREAD_THREADS
1782
1783 int
1784 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1785 {
1786 scm_t_guile_ticket t = scm_leave_guile ();
1787 int res = scm_i_pthread_mutex_lock (mutex);
1788 scm_enter_guile (t);
1789 return res;
1790 }
1791
/* Dynwind unwind callback: DATA is the pthread mutex to release.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1797
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is left (including via non-local exit).  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1804
1805 int
1806 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1807 {
1808 scm_t_guile_ticket t = scm_leave_guile ();
1809 int res = scm_i_pthread_cond_wait (cond, mutex);
1810 scm_enter_guile (t);
1811 return res;
1812 }
1813
1814 int
1815 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1816 scm_i_pthread_mutex_t *mutex,
1817 const scm_t_timespec *wt)
1818 {
1819 scm_t_guile_ticket t = scm_leave_guile ();
1820 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1821 scm_enter_guile (t);
1822 return res;
1823 }
1824
1825 #endif
1826
1827 unsigned long
1828 scm_std_usleep (unsigned long usecs)
1829 {
1830 struct timeval tv;
1831 tv.tv_usec = usecs % 1000000;
1832 tv.tv_sec = usecs / 1000000;
1833 scm_std_select (0, NULL, NULL, NULL, &tv);
1834 return tv.tv_sec * 1000000 + tv.tv_usec;
1835 }
1836
1837 unsigned int
1838 scm_std_sleep (unsigned int secs)
1839 {
1840 struct timeval tv;
1841 tv.tv_usec = 0;
1842 tv.tv_sec = secs;
1843 scm_std_select (0, NULL, NULL, NULL, &tv);
1844 return tv.tv_sec;
1845 }
1846
1847 /*** Misc */
1848
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* Each thread's SCM handle is cached in its thread-local struct.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1857
1858 static SCM
1859 scm_c_make_list (size_t n, SCM fill)
1860 {
1861 SCM res = SCM_EOL;
1862 while (n-- > 0)
1863 res = scm_cons (fill, res);
1864 return res;
1865 }
1866
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate the list cells outside the lock, then fill them in
     under it.  The snapshot of thread_count may be stale, so the walk
     below is bounded by both N and the actual list.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is hidden from Scheme.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Truncate any unused pre-allocated cells.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1895
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1904
/* C-level predicate behind thread-exited?.  Reuses that Scheme
   procedure's name for validation error messages.  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1915
/* Condvar broadcast to wake threads parked for GC.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Flag checked by running threads telling them to park for GC.  */
int scm_i_thread_go_to_sleep;
/* Nonzero once scm_init_threads has run; the GC rendezvous below is a
   no-op before that.  */
static int threads_initialized_p = 0;
1919
/* Stop the world for GC: ask every thread to park, then acquire each
   thread's heap mutex, which only succeeds once that thread has
   stopped mutating the heap.  Paired with scm_i_thread_wake_up.  */
void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}
1938
/* Mark every other thread's GC freelists as stale so they are rebuilt
   after this GC cycle.  Caller must hold thread_admin_mutex.  */
void
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}
1949
/* Restart the world after GC: wake parked threads, release each heap
   mutex taken by scm_i_thread_put_to_sleep, and re-enter guile mode
   in the calling thread.  */
void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}
1964
/* Called by a running thread that noticed scm_i_thread_go_to_sleep:
   park on the wake-up condvar (releasing the heap mutex to the GC
   thread) until GC is done, then resume.  */
void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}
1972
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
/* Nesting depth of critical sections held by the current holder.  */
int scm_i_critical_section_level = 0;

/* Default mutex for scm_dynwind_critical_section when passed #f.  */
static SCM dynwind_critical_section_mutex;
1979
/* Enter a critical section for the extent of the current dynwind
   context: lock MUTEX (or the shared recursive default when #f) and
   block asyncs until the context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
1988
1989 /*** Initialization */
1990
/* Thread-local keys for the per-thread GC freelists.  */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
/* General-purpose lock for miscellaneous short critical sections.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Shared attribute for creating recursive pthread mutexes.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1997
/* Earliest thread-system setup, run before the heap exists.  BASE is
   the bottom of the main thread's C stack, used later for
   conservative stack scanning.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex must be recursive: sections nest.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
2016
/* Smob type tags, assigned in scm_init_threads below.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
2020
/* Register the thread, mutex and condvar smob types and finish
   bootstrapping the main thread.  Runs once at startup, after the
   smob machinery is available.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  /* From here on the GC stop-the-world rendezvous is active.  */
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
2047
/* Capture the current dynamic state as the default for newly created
   threads, protecting it from GC.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}
2054
/* Register all SCM_DEFINE'd procedures from this file (the generated
   threads.x contains the registration calls).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2060
2061 /*
2062 Local Variables:
2063 c-file-style: "gnu"
2064 End:
2065 */