Don't leave and reenter guile mode if mutex is available
libguile/threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/_scm.h"
26
27 #if HAVE_UNISTD_H
28 #include <unistd.h>
29 #endif
30 #include <stdio.h>
31 #include <assert.h>
32
33 #ifdef HAVE_STRING_H
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35 #endif
36
37 #if HAVE_SYS_TIME_H
38 #include <sys/time.h>
39 #endif
40
41 #include "libguile/validate.h"
42 #include "libguile/root.h"
43 #include "libguile/eval.h"
44 #include "libguile/async.h"
45 #include "libguile/ports.h"
46 #include "libguile/threads.h"
47 #include "libguile/dynwind.h"
48 #include "libguile/iselect.h"
49 #include "libguile/fluids.h"
50 #include "libguile/continuations.h"
51 #include "libguile/gc.h"
52 #include "libguile/init.h"
53 #include "libguile/scmsigs.h"
54 #include "libguile/strings.h"
55
56 #ifdef __MINGW32__
57 #ifndef ETIMEDOUT
58 # define ETIMEDOUT WSAETIMEDOUT
59 #endif
60 # include <fcntl.h>
61 # include <process.h>
62 # define pipe(fd) _pipe (fd, 256, O_BINARY)
63 #endif /* __MINGW32__ */
64
65 #include <full-read.h>
66
67 \f
68 static void
69 to_timespec (SCM t, scm_t_timespec *waittime)
70 {
71 if (scm_is_pair (t))
72 {
73 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
74 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
75 }
76 else
77 {
78 double time = scm_to_double (t);
79 double sec = scm_c_truncate (time);
80
81 waittime->tv_sec = (long) sec;
82 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
83 }
84 }
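/* A quick illustration (not compiled) of the two timeout encodings that
   to_timespec accepts, as read off the code above: a (seconds .
   microseconds) pair in the style of gettimeofday, or a plain real
   number of seconds. */
#if 0
{
  scm_t_timespec ts;

  /* A (seconds . microseconds) pair: */
  to_timespec (scm_cons (scm_from_ulong (5), scm_from_ulong (250000)), &ts);
  /* ts.tv_sec == 5, ts.tv_nsec == 250000000 */

  /* A real number of seconds: */
  to_timespec (scm_from_double (5.25), &ts);
  /* same result: 5 seconds, 250000000 nanoseconds */
}
#endif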
85
86 /*** Queues */
87
88 /* Make an empty queue data structure.
89 */
90 static SCM
91 make_queue ()
92 {
93 return scm_cons (SCM_EOL, SCM_EOL);
94 }
95
96 /* Put T at the back of Q and return a handle that can be used with
97 remqueue to remove T from Q again.
98 */
99 static SCM
100 enqueue (SCM q, SCM t)
101 {
102 SCM c = scm_cons (t, SCM_EOL);
103 SCM_CRITICAL_SECTION_START;
104 if (scm_is_null (SCM_CDR (q)))
105 SCM_SETCDR (q, c);
106 else
107 SCM_SETCDR (SCM_CAR (q), c);
108 SCM_SETCAR (q, c);
109 SCM_CRITICAL_SECTION_END;
110 return c;
111 }
112
113 /* Remove the element that the handle C refers to from the queue Q. C
114 must have been returned from a call to enqueue. The return value
115 is zero when the element referred to by C has already been removed.
116 Otherwise, 1 is returned.
117 */
118 static int
119 remqueue (SCM q, SCM c)
120 {
121 SCM p, prev = q;
122 SCM_CRITICAL_SECTION_START;
123 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
124 {
125 if (scm_is_eq (p, c))
126 {
127 if (scm_is_eq (c, SCM_CAR (q)))
128 SCM_SETCAR (q, SCM_CDR (c));
129 SCM_SETCDR (prev, SCM_CDR (c));
130 SCM_CRITICAL_SECTION_END;
131 return 1;
132 }
133 prev = p;
134 }
135 SCM_CRITICAL_SECTION_END;
136 return 0;
137 }
138
139 /* Remove the front-most element from the queue Q and return it.
140 Return SCM_BOOL_F when Q is empty.
141 */
142 static SCM
143 dequeue (SCM q)
144 {
145 SCM c;
146 SCM_CRITICAL_SECTION_START;
147 c = SCM_CDR (q);
148 if (scm_is_null (c))
149 {
150 SCM_CRITICAL_SECTION_END;
151 return SCM_BOOL_F;
152 }
153 else
154 {
155 SCM_SETCDR (q, SCM_CDR (c));
156 if (scm_is_null (SCM_CDR (q)))
157 SCM_SETCAR (q, SCM_EOL);
158 SCM_CRITICAL_SECTION_END;
159 return SCM_CAR (c);
160 }
161 }
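/* A minimal sketch (not compiled) of how the three queue primitives fit
   together; the cell returned by enqueue is the handle that remqueue
   needs later. */
#if 0
{
  SCM q = make_queue ();
  SCM h1 = enqueue (q, scm_from_int (1));
  SCM h2 = enqueue (q, scm_from_int (2));

  remqueue (q, h1);        /* drop the first element via its handle */
  SCM x = dequeue (q);     /* => 2, the remaining element */
  SCM y = dequeue (q);     /* => SCM_BOOL_F, queue now empty */
}
#endif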
162
163 /*** Thread smob routines */
164
165 static SCM
166 thread_mark (SCM obj)
167 {
168 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
169 scm_gc_mark (t->result);
170 scm_gc_mark (t->cleanup_handler);
171 scm_gc_mark (t->join_queue);
172 scm_gc_mark (t->mutexes);
173 scm_gc_mark (t->dynwinds);
174 scm_gc_mark (t->active_asyncs);
175 scm_gc_mark (t->continuation_root);
176 scm_gc_mark (t->vm);
177 return t->dynamic_state;
178 }
179
180 static int
181 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
182 {
183 /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
184 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
185 the struct case, hence we go via a union, and extract according to the
186 size of pthread_t. */
187 union {
188 scm_i_pthread_t p;
189 unsigned short us;
190 unsigned int ui;
191 unsigned long ul;
192 scm_t_uintmax um;
193 } u;
194 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
195 scm_i_pthread_t p = t->pthread;
196 scm_t_uintmax id;
197 u.p = p;
198 if (sizeof (p) == sizeof (unsigned short))
199 id = u.us;
200 else if (sizeof (p) == sizeof (unsigned int))
201 id = u.ui;
202 else if (sizeof (p) == sizeof (unsigned long))
203 id = u.ul;
204 else
205 id = u.um;
206
207 scm_puts ("#<thread ", port);
208 scm_uintprint (id, 10, port);
209 scm_puts (" (", port);
210 scm_uintprint ((scm_t_bits)t, 16, port);
211 scm_puts (")>", port);
212 return 1;
213 }
214
215 static size_t
216 thread_free (SCM obj)
217 {
218 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
219 assert (t->exited);
220 scm_gc_free (t, sizeof (*t), "thread");
221 return 0;
222 }
223
224 /*** Blocking on queues. */
225
226 /* See also scm_i_queue_async_cell for how such a block is
227 interrupted.
228 */
229
230 /* Put the current thread on QUEUE and go to sleep, waiting for it to
231 be woken up by a call to 'unblock_from_queue', or to be
232 interrupted. Upon return of this function, the current thread is
233 no longer on QUEUE, even when the sleep has been interrupted.
234
235 The caller of block_self must hold MUTEX. It will be atomically
236 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
237
238 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
239 as MUTEX is needed.
240
241 When WAITTIME is not NULL, the sleep will be aborted at that time.
242
243 The return value of block_self is an errno value. It will be zero
244 when the sleep has been successfully completed by a call to
245 unblock_from_queue, EINTR when it has been interrupted by the
246 delivery of a system async, and ETIMEDOUT when the timeout has
247 expired.
248
249 The system asyncs themselves are not executed by block_self.
250 */
251 static int
252 block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
253 const scm_t_timespec *waittime)
254 {
255 scm_i_thread *t = SCM_I_CURRENT_THREAD;
256 SCM q_handle;
257 int err;
258
259 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
260 err = EINTR;
261 else
262 {
263 t->block_asyncs++;
264 q_handle = enqueue (queue, t->handle);
265 if (waittime == NULL)
266 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
267 else
268 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
269
270 /* When we are still on QUEUE, we have been interrupted. We
271 report this only when no other error (such as a timeout) has
272 happened above.
273 */
274 if (remqueue (queue, q_handle) && err == 0)
275 err = EINTR;
276 t->block_asyncs--;
277 scm_i_reset_sleep (t);
278 }
279
280 return err;
281 }
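/* A condensed sketch (not compiled) of the canonical wait loop built on
   block_self, mirroring how scm_join_thread_timed below uses it: hold
   the mutex, sleep, run asyncs outside the lock on EINTR, and re-check
   the condition after reacquiring. some_mutex, some_queue, some_object
   and condition_is_true are hypothetical. */
#if 0
{
  scm_i_scm_pthread_mutex_lock (&some_mutex);
  while (!condition_is_true ())
    {
      int err = block_self (some_queue, some_object, &some_mutex, NULL);
      if (err == EINTR)
        {
          /* Interrupted by a system async; run it outside the lock. */
          scm_i_pthread_mutex_unlock (&some_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&some_mutex);
        }
    }
  scm_i_pthread_mutex_unlock (&some_mutex);
}
#endif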
282
283 /* Wake up the first thread on QUEUE, if any. The awoken thread is
284 returned, or #f if the queue was empty.
285 */
286 static SCM
287 unblock_from_queue (SCM queue)
288 {
289 SCM thread = dequeue (queue);
290 if (scm_is_true (thread))
291 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
292 return thread;
293 }
294
295 /* Getting into and out of guile mode.
296 */
297
298 /* Ken Raeburn observes that the implementation of suspend and resume
299 (and the things that build on top of them) are very likely not
300 correct (see below). We will need to fix this eventually, and that's
301 why scm_leave_guile/scm_enter_guile are not exported in the API.
302
303 Ken writes:
304
305 Consider this sequence:
306
307 Function foo, called in Guile mode, calls suspend (maybe indirectly
308 through scm_leave_guile), which does this:
309
310 // record top of stack for the GC
311 t->top = SCM_STACK_PTR (&t); // just takes address of
312 // automatic var 't'
313 // save registers.
314 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
315 SCM_I_SETJMP (t->regs); // here's most of the magic
316
317 ... and returns.
318
319 Function foo has a SCM value X, a handle on a non-immediate object, in
320 a caller-saved register R, and it's the only reference to the object
321 currently.
322
323 The compiler wants to use R in suspend, so it pushes the current
324 value, X, into a stack slot which will be reloaded on exit from
325 suspend; then it loads stuff into R and goes about its business. The
326 setjmp call saves (some of) the current registers, including R, which
327 no longer contains X. (This isn't a problem for a normal
328 setjmp/longjmp situation, where longjmp would be called before
329 setjmp's caller returns; the old value for X would be loaded back from
330 the stack after the longjmp, before the function returned.)
331
332 So, suspend returns, loading X back into R (and invalidating the jump
333 buffer) in the process. The caller foo then goes off and calls a
334 bunch of other functions out of Guile mode, occasionally storing X on
335 the stack again, but, say, much deeper on the stack than suspend's
336 stack frame went, and the stack slot where suspend had written X has
337 long since been overwritten with other values.
338
339 Okay, nothing actively broken so far. Now, let garbage collection
340 run, triggered by another thread.
341
342 The thread calling foo is out of Guile mode at the time, so the
343 garbage collector just scans a range of stack addresses. Too bad that
344 X isn't stored there. So the pointed-to storage goes onto the free
345 list, and I think you can see where things go from there.
346
347 Is there anything I'm missing that'll prevent this scenario from
348 happening? I mean, aside from, "well, suspend and scm_leave_guile
349 don't have many local variables, so they probably won't need to save
350 any registers on most systems, so we hope everything will wind up in
351 the jump buffer and we'll just get away with it"?
352
353 (And, going the other direction, if scm_leave_guile and suspend push
354 the stack pointer over onto a new page, and foo doesn't make further
355 function calls and thus the stack pointer no longer includes that
356 page, are we guaranteed that the kernel cannot release the now-unused
357 stack page that contains the top-of-stack pointer we just saved? I
358 don't know if any OS actually does that. If it does, we could get
359 faults in garbage collection.)
360
361 I don't think scm_without_guile has to have this problem, as it gets
362 more control over the stack handling -- but it should call setjmp
363 itself. I'd probably try something like:
364
365 // record top of stack for the GC
366 t->top = SCM_STACK_PTR (&t);
367 // save registers.
368 SCM_FLUSH_REGISTER_WINDOWS;
369 SCM_I_SETJMP (t->regs);
370 res = func(data);
371 scm_enter_guile (t);
372
373 ... though even that's making some assumptions about the stack
374 ordering of local variables versus caller-saved registers.
375
376 For something like scm_leave_guile to work, I don't think it can just
377 rely on invalidated jump buffers. A valid jump buffer, and a handle
378 on the stack state at the point when the jump buffer was initialized,
379 together, would work fine, but I think then we're talking about macros
380 invoking setjmp in the caller's stack frame, and requiring that the
381 caller of scm_leave_guile also call scm_enter_guile before returning,
382 kind of like pthread_cleanup_push/pop calls that have to be paired up
383 in a function. (In fact, the pthread ones have to be paired up
384 syntactically, as if they might expand to a compound statement
385 incorporating the user's code, and invoking a compiler's
386 exception-handling primitives. Which might be something to think
387 about for cases where Guile is used with C++ exceptions or
388 pthread_cancel.)
389 */
390
391 scm_i_pthread_key_t scm_i_thread_key;
392
393 static void
394 resume (scm_i_thread *t)
395 {
396 t->top = NULL;
397 if (t->clear_freelists_p)
398 {
399 *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
400 *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
401 t->clear_freelists_p = 0;
402 }
403 }
404
405 typedef void* scm_t_guile_ticket;
406
407 static void
408 scm_enter_guile (scm_t_guile_ticket ticket)
409 {
410 scm_i_thread *t = (scm_i_thread *)ticket;
411 if (t)
412 {
413 scm_i_pthread_mutex_lock (&t->heap_mutex);
414 t->heap_mutex_locked_by_self = 1;
415 resume (t);
416 }
417 }
418
419 static scm_i_thread *
420 suspend (void)
421 {
422 scm_i_thread *t = SCM_I_CURRENT_THREAD;
423
424 /* record top of stack for the GC */
425 t->top = SCM_STACK_PTR (&t);
426 /* save registers. */
427 SCM_FLUSH_REGISTER_WINDOWS;
428 SCM_I_SETJMP (t->regs);
429 return t;
430 }
431
432 static scm_t_guile_ticket
433 scm_leave_guile ()
434 {
435 scm_i_thread *t = suspend ();
436 if (t->heap_mutex_locked_by_self)
437 {
438 t->heap_mutex_locked_by_self = 0;
439 scm_i_pthread_mutex_unlock (&t->heap_mutex);
440 }
441 return (scm_t_guile_ticket) t;
442 }
443
444 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
445 static scm_i_thread *all_threads = NULL;
446 static int thread_count;
447
448 static SCM scm_i_default_dynamic_state;
449
450 /* Perform first stage of thread initialisation, in non-guile mode.
451 */
452 static void
453 guilify_self_1 (SCM_STACKITEM *base)
454 {
455 scm_i_thread *t = malloc (sizeof (scm_i_thread));
456
457 t->pthread = scm_i_pthread_self ();
458 t->handle = SCM_BOOL_F;
459 t->result = SCM_BOOL_F;
460 t->cleanup_handler = SCM_BOOL_F;
461 t->mutexes = SCM_EOL;
462 t->held_mutex = NULL;
463 t->join_queue = SCM_EOL;
464 t->dynamic_state = SCM_BOOL_F;
465 t->dynwinds = SCM_EOL;
466 t->active_asyncs = SCM_EOL;
467 t->block_asyncs = 1;
468 t->pending_asyncs = 1;
469 t->last_debug_frame = NULL;
470 t->base = base;
471 #ifdef __ia64__
472 /* Calculate and store off the base of this thread's register
473 backing store (RBS). Unfortunately our implementation(s) of
474 scm_ia64_register_backing_store_base are only reliable for the
475 main thread. For other threads, therefore, find out the current
476 top of the RBS, and use that as a maximum. */
477 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
478 {
479 ucontext_t ctx;
480 void *bsp;
481 getcontext (&ctx);
482 bsp = scm_ia64_ar_bsp (&ctx);
483 if (t->register_backing_store_base > bsp)
484 t->register_backing_store_base = bsp;
485 }
486 #endif
487 t->continuation_root = SCM_EOL;
488 t->continuation_base = base;
489 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
490 t->sleep_mutex = NULL;
491 t->sleep_object = SCM_BOOL_F;
492 t->sleep_fd = -1;
493
494 if (pipe (t->sleep_pipe) != 0)
495 /* FIXME: Error conditions during the initialization phase are handled
496 gracelessly since public functions such as `scm_init_guile ()'
497 currently have type `void'. */
498 abort ();
499
500 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
501 t->heap_mutex_locked_by_self = 0;
502 scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
503 t->clear_freelists_p = 0;
504 t->gc_running_p = 0;
505 t->canceled = 0;
506 t->exited = 0;
507
508 t->freelist = SCM_EOL;
509 t->freelist2 = SCM_EOL;
510 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
511 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
512
513 scm_i_pthread_setspecific (scm_i_thread_key, t);
514
515 scm_i_pthread_mutex_lock (&t->heap_mutex);
516 t->heap_mutex_locked_by_self = 1;
517
518 scm_i_pthread_mutex_lock (&thread_admin_mutex);
519 t->next_thread = all_threads;
520 all_threads = t;
521 thread_count++;
522 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
523 }
524
525 /* Perform second stage of thread initialisation, in guile mode.
526 */
527 static void
528 guilify_self_2 (SCM parent)
529 {
530 scm_i_thread *t = SCM_I_CURRENT_THREAD;
531
532 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
533 scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
534 t->continuation_root = scm_cons (t->handle, SCM_EOL);
535 t->continuation_base = t->base;
536 t->vm = SCM_BOOL_F;
537
538 if (scm_is_true (parent))
539 t->dynamic_state = scm_make_dynamic_state (parent);
540 else
541 t->dynamic_state = scm_i_make_initial_dynamic_state ();
542
543 t->join_queue = make_queue ();
544 t->block_asyncs = 0;
545 }
546
547 \f
548 /*** Fat mutexes */
549
550 /* We implement our own mutex type since we want them to be 'fair', we
551 want to do fancy things while waiting for them (like running
552 asyncs) and we might want to add things that are nice for
553 debugging.
554 */
555
556 typedef struct {
557 scm_i_pthread_mutex_t lock;
558 SCM owner;
559 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
560
561 int recursive; /* allow recursive locking? */
562 int unchecked_unlock; /* may an unlocked mutex be unlocked without error? */
563 int allow_external_unlock; /* may a thread that does not own the mutex
564 unlock it without error? */
565
566 SCM waiting; /* the threads waiting for this mutex. */
567 } fat_mutex;
568
569 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
570 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
571
572 /* Perform thread tear-down, in guile mode.
573 */
574 static void *
575 do_thread_exit (void *v)
576 {
577 scm_i_thread *t = (scm_i_thread *) v;
578
579 if (!scm_is_false (t->cleanup_handler))
580 {
581 SCM ptr = t->cleanup_handler;
582
583 t->cleanup_handler = SCM_BOOL_F;
584 t->result = scm_internal_catch (SCM_BOOL_T,
585 (scm_t_catch_body) scm_call_0, ptr,
586 scm_handle_by_message_noexit, NULL);
587 }
588
589 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
590
591 t->exited = 1;
592 close (t->sleep_pipe[0]);
593 close (t->sleep_pipe[1]);
594 while (scm_is_true (unblock_from_queue (t->join_queue)))
595 ;
596
597 while (!scm_is_null (t->mutexes))
598 {
599 SCM mutex = SCM_CAR (t->mutexes);
600 fat_mutex *m = SCM_MUTEX_DATA (mutex);
601 scm_i_pthread_mutex_lock (&m->lock);
602
603 unblock_from_queue (m->waiting);
604
605 scm_i_pthread_mutex_unlock (&m->lock);
606 t->mutexes = SCM_CDR (t->mutexes);
607 }
608
609 scm_i_pthread_mutex_unlock (&t->admin_mutex);
610
611 return NULL;
612 }
613
614 static void
615 on_thread_exit (void *v)
616 {
617 /* This handler is executed in non-guile mode. */
618 scm_i_thread *t = (scm_i_thread *) v, **tp;
619
620 /* If this thread was cancelled while doing a cond wait, it will
621 still have a mutex locked, so we unlock it here. */
622 if (t->held_mutex)
623 {
624 scm_i_pthread_mutex_unlock (t->held_mutex);
625 t->held_mutex = NULL;
626 }
627
628 scm_i_pthread_setspecific (scm_i_thread_key, v);
629
630 /* Ensure the signal handling thread has been launched, because we might be
631 shutting it down. */
632 scm_i_ensure_signal_delivery_thread ();
633
634 /* Unblocking the joining threads needs to happen in guile mode
635 since the queue is a SCM data structure. */
636 scm_with_guile (do_thread_exit, v);
637
638 /* Removing ourself from the list of all threads needs to happen in
639 non-guile mode since all SCM values on our stack become
640 unprotected once we are no longer in the list. */
641 scm_i_pthread_mutex_lock (&thread_admin_mutex);
642 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
643 if (*tp == t)
644 {
645 *tp = t->next_thread;
646 break;
647 }
648 thread_count--;
649
650 /* If there's only one other thread, it could be the signal delivery
651 thread, so we need to notify it to shut down by closing its read pipe.
652 If it's not the signal delivery thread, then closing the read pipe isn't
653 going to hurt. */
654 if (thread_count <= 1)
655 scm_i_close_signal_pipe ();
656
657 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
658
659 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
660 }
661
662 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
663
664 static void
665 init_thread_key (void)
666 {
667 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
668 }
669
670 /* Perform any initializations necessary to bring the current thread
671 into guile mode, initializing Guile itself, if necessary.
672
673 BASE is the stack base to use with GC.
674
675 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F, in
676 which case the default dynamic state is used.
677
678 Return zero when the thread was in guile mode already; otherwise
679 return 1.
680 */
681
682 static int
683 scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
684 {
685 scm_i_thread *t;
686
687 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
688
689 if ((t = SCM_I_CURRENT_THREAD) == NULL)
690 {
691 /* This thread has not been guilified yet.
692 */
693
694 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
695 if (scm_initialized_p == 0)
696 {
697 /* First thread ever to enter Guile. Run the full
698 initialization.
699 */
700 scm_i_init_guile (base);
701 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
702 }
703 else
704 {
705 /* Guile is already initialized, but this thread enters it for
706 the first time. Only initialize this thread.
707 */
708 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
709 guilify_self_1 (base);
710 guilify_self_2 (parent);
711 }
712 return 1;
713 }
714 else if (t->top)
715 {
716 /* This thread is already guilified but not in guile mode, just
717 resume it.
718
719 A user call to scm_with_guile() will lead us to here. This could
720 happen from anywhere on the stack, and in particular lower on the
721 stack than when it was when this thread was first guilified. Thus,
722 `base' must be updated. */
723 #if SCM_STACK_GROWS_UP
724 if (base < t->base)
725 t->base = base;
726 #else
727 if (base > t->base)
728 t->base = base;
729 #endif
730
731 scm_enter_guile ((scm_t_guile_ticket) t);
732 return 1;
733 }
734 else
735 {
736 /* Thread is already in guile mode. Nothing to do.
737 */
738 return 0;
739 }
740 }
741
742 #if SCM_USE_PTHREAD_THREADS
743
744 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
745 /* This method is for GNU/Linux and perhaps some other systems.
746 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
747 available on them. */
748 #define HAVE_GET_THREAD_STACK_BASE
749
750 static SCM_STACKITEM *
751 get_thread_stack_base ()
752 {
753 pthread_attr_t attr;
754 void *start, *end;
755 size_t size;
756
757 pthread_getattr_np (pthread_self (), &attr);
758 pthread_attr_getstack (&attr, &start, &size);
759 end = (char *)start + size;
760
761 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
762 for the main thread, but we can use scm_get_stack_base in that
763 case.
764 */
765
766 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
767 if ((void *)&attr < start || (void *)&attr >= end)
768 return scm_get_stack_base ();
769 else
770 #endif
771 {
772 #if SCM_STACK_GROWS_UP
773 return start;
774 #else
775 return end;
776 #endif
777 }
778 }
779
780 #elif HAVE_PTHREAD_GET_STACKADDR_NP
781 /* This method is for MacOS X.
782 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
783 but as of 2006 there's nothing obvious at apple.com. */
784 #define HAVE_GET_THREAD_STACK_BASE
785 static SCM_STACKITEM *
786 get_thread_stack_base ()
787 {
788 return pthread_get_stackaddr_np (pthread_self ());
789 }
790
791 #elif defined (__MINGW32__)
792 /* This method is for mingw. In mingw the basic scm_get_stack_base can be used
793 in any thread. We don't like hard-coding the name of a system, but there
794 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
795 work. */
796 #define HAVE_GET_THREAD_STACK_BASE
797 static SCM_STACKITEM *
798 get_thread_stack_base ()
799 {
800 return scm_get_stack_base ();
801 }
802
803 #endif /* pthread methods of get_thread_stack_base */
804
805 #else /* !SCM_USE_PTHREAD_THREADS */
806
807 #define HAVE_GET_THREAD_STACK_BASE
808
809 static SCM_STACKITEM *
810 get_thread_stack_base ()
811 {
812 return scm_get_stack_base ();
813 }
814
815 #endif /* !SCM_USE_PTHREAD_THREADS */
816
817 #ifdef HAVE_GET_THREAD_STACK_BASE
818
819 void
820 scm_init_guile ()
821 {
822 scm_i_init_thread_for_guile (get_thread_stack_base (),
823 scm_i_default_dynamic_state);
824 }
825
826 #endif
827
828 void *
829 scm_with_guile (void *(*func)(void *), void *data)
830 {
831 return scm_i_with_guile_and_parent (func, data,
832 scm_i_default_dynamic_state);
833 }
834
835 SCM_UNUSED static void
836 scm_leave_guile_cleanup (void *x)
837 {
838 scm_leave_guile ();
839 }
840
841 void *
842 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
843 {
844 void *res;
845 int really_entered;
846 SCM_STACKITEM base_item;
847
848 really_entered = scm_i_init_thread_for_guile (&base_item, parent);
849 if (really_entered)
850 {
851 scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
852 res = scm_c_with_continuation_barrier (func, data);
853 scm_i_pthread_cleanup_pop (0);
854 scm_leave_guile ();
855 }
856 else
857 res = scm_c_with_continuation_barrier (func, data);
858
859 return res;
860 }
861
862 void *
863 scm_without_guile (void *(*func)(void *), void *data)
864 {
865 void *res;
866 scm_t_guile_ticket t;
867 t = scm_leave_guile ();
868 res = func (data);
869 scm_enter_guile (t);
870 return res;
871 }
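/* A usage sketch (not compiled): scm_with_guile brings a plain pthread
   into guile mode around FUNC, and scm_without_guile brackets a blocking
   system call inside guile mode so GC in other threads is not held up.
   do_scheme_work, slow_read and example are hypothetical. */
#if 0
static void *
slow_read (void *data)
{
  char buf[64];
  /* Blocking syscall; SCM values must not be touched in here. */
  read (*(int *) data, buf, sizeof buf);
  return NULL;
}

static void *
do_scheme_work (void *data)
{
  /* In guile mode: libguile calls are legal. */
  SCM v = scm_from_int (42);
  /* Around a blocking syscall, drop out so other threads can GC: */
  scm_without_guile (slow_read, data);
  scm_remember_upto_here_1 (v);
  return NULL;
}

void
example (int fd)
{
  scm_with_guile (do_scheme_work, &fd);
}
#endif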
872
873 /*** Thread creation */
874
875 typedef struct {
876 SCM parent;
877 SCM thunk;
878 SCM handler;
879 SCM thread;
880 scm_i_pthread_mutex_t mutex;
881 scm_i_pthread_cond_t cond;
882 } launch_data;
883
884 static void *
885 really_launch (void *d)
886 {
887 launch_data *data = (launch_data *)d;
888 SCM thunk = data->thunk, handler = data->handler;
889 scm_i_thread *t;
890
891 t = SCM_I_CURRENT_THREAD;
892
893 scm_i_scm_pthread_mutex_lock (&data->mutex);
894 data->thread = scm_current_thread ();
895 scm_i_pthread_cond_signal (&data->cond);
896 scm_i_pthread_mutex_unlock (&data->mutex);
897
898 if (SCM_UNBNDP (handler))
899 t->result = scm_call_0 (thunk);
900 else
901 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
902
903 return 0;
904 }
905
906 static void *
907 launch_thread (void *d)
908 {
909 launch_data *data = (launch_data *)d;
910 scm_i_pthread_detach (scm_i_pthread_self ());
911 scm_i_with_guile_and_parent (really_launch, d, data->parent);
912 return NULL;
913 }
914
915 SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
916 (SCM thunk, SCM handler),
917 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
918 "returning a new thread object representing the thread. The procedure\n"
919 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
920 "\n"
921 "When @var{handler} is specified, then @var{thunk} is called from\n"
922 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
923 "handler. This catch is established inside the continuation barrier.\n"
924 "\n"
925 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
926 "the @emph{exit value} of the thread and the thread is terminated.")
927 #define FUNC_NAME s_scm_call_with_new_thread
928 {
929 launch_data data;
930 scm_i_pthread_t id;
931 int err;
932
933 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
934 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
935 handler, SCM_ARG2, FUNC_NAME);
936
937 data.parent = scm_current_dynamic_state ();
938 data.thunk = thunk;
939 data.handler = handler;
940 data.thread = SCM_BOOL_F;
941 scm_i_pthread_mutex_init (&data.mutex, NULL);
942 scm_i_pthread_cond_init (&data.cond, NULL);
943
944 scm_i_scm_pthread_mutex_lock (&data.mutex);
945 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
946 if (err)
947 {
948 scm_i_pthread_mutex_unlock (&data.mutex);
949 errno = err;
950 scm_syserror (NULL);
951 }
952 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
953 scm_i_pthread_mutex_unlock (&data.mutex);
954
955 return data.thread;
956 }
957 #undef FUNC_NAME
958
959 typedef struct {
960 SCM parent;
961 scm_t_catch_body body;
962 void *body_data;
963 scm_t_catch_handler handler;
964 void *handler_data;
965 SCM thread;
966 scm_i_pthread_mutex_t mutex;
967 scm_i_pthread_cond_t cond;
968 } spawn_data;
969
970 static void *
971 really_spawn (void *d)
972 {
973 spawn_data *data = (spawn_data *)d;
974 scm_t_catch_body body = data->body;
975 void *body_data = data->body_data;
976 scm_t_catch_handler handler = data->handler;
977 void *handler_data = data->handler_data;
978 scm_i_thread *t = SCM_I_CURRENT_THREAD;
979
980 scm_i_scm_pthread_mutex_lock (&data->mutex);
981 data->thread = scm_current_thread ();
982 scm_i_pthread_cond_signal (&data->cond);
983 scm_i_pthread_mutex_unlock (&data->mutex);
984
985 if (handler == NULL)
986 t->result = body (body_data);
987 else
988 t->result = scm_internal_catch (SCM_BOOL_T,
989 body, body_data,
990 handler, handler_data);
991
992 return 0;
993 }
994
995 static void *
996 spawn_thread (void *d)
997 {
998 spawn_data *data = (spawn_data *)d;
999 scm_i_pthread_detach (scm_i_pthread_self ());
1000 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1001 return NULL;
1002 }
1003
1004 SCM
1005 scm_spawn_thread (scm_t_catch_body body, void *body_data,
1006 scm_t_catch_handler handler, void *handler_data)
1007 {
1008 spawn_data data;
1009 scm_i_pthread_t id;
1010 int err;
1011
1012 data.parent = scm_current_dynamic_state ();
1013 data.body = body;
1014 data.body_data = body_data;
1015 data.handler = handler;
1016 data.handler_data = handler_data;
1017 data.thread = SCM_BOOL_F;
1018 scm_i_pthread_mutex_init (&data.mutex, NULL);
1019 scm_i_pthread_cond_init (&data.cond, NULL);
1020
1021 scm_i_scm_pthread_mutex_lock (&data.mutex);
1022 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1023 if (err)
1024 {
1025 scm_i_pthread_mutex_unlock (&data.mutex);
1026 errno = err;
1027 scm_syserror (NULL);
1028 }
1029 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1030 scm_i_pthread_mutex_unlock (&data.mutex);
1031
1032 return data.thread;
1033 }
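/* A short sketch (not compiled) of spawning a thread from C with
   scm_spawn_thread and collecting its result; worker and on_error are
   hypothetical. */
#if 0
static SCM
worker (void *data)
{
  return scm_from_int (*(int *) data * 2);
}

static SCM
on_error (void *data, SCM key, SCM args)
{
  return SCM_BOOL_F;
}

void
example (void)
{
  int n = 21;
  SCM t = scm_spawn_thread (worker, &n, on_error, NULL);
  SCM result = scm_join_thread (t);   /* => 42 */
}
#endif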
1034
1035 SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1036 (),
1037 "Move the calling thread to the end of the scheduling queue.")
1038 #define FUNC_NAME s_scm_yield
1039 {
1040 return scm_from_bool (scm_i_sched_yield ());
1041 }
1042 #undef FUNC_NAME
1043
1044 SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1045 (SCM thread),
1046 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1047 "cannot be the current thread, and if @var{thread} has already terminated or "
1048 "been signaled to terminate, this function is a no-op.")
1049 #define FUNC_NAME s_scm_cancel_thread
1050 {
1051 scm_i_thread *t = NULL;
1052
1053 SCM_VALIDATE_THREAD (1, thread);
1054 t = SCM_I_THREAD_DATA (thread);
1055 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1056 if (!t->canceled)
1057 {
1058 t->canceled = 1;
1059 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1060 scm_i_pthread_cancel (t->pthread);
1061 }
1062 else
1063 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1064
1065 return SCM_UNSPECIFIED;
1066 }
1067 #undef FUNC_NAME
1068
1069 SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1070 (SCM thread, SCM proc),
1071 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1072 "This handler will be called when the thread exits.")
1073 #define FUNC_NAME s_scm_set_thread_cleanup_x
1074 {
1075 scm_i_thread *t;
1076
1077 SCM_VALIDATE_THREAD (1, thread);
1078 if (!scm_is_false (proc))
1079 SCM_VALIDATE_THUNK (2, proc);
1080
1081 t = SCM_I_THREAD_DATA (thread);
1082 scm_i_pthread_mutex_lock (&t->admin_mutex);
1083
1084 if (!(t->exited || t->canceled))
1085 t->cleanup_handler = proc;
1086
1087 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1088
1089 return SCM_UNSPECIFIED;
1090 }
1091 #undef FUNC_NAME
1092
1093 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1094 (SCM thread),
1095 "Return the cleanup handler installed for the thread @var{thread}.")
1096 #define FUNC_NAME s_scm_thread_cleanup
1097 {
1098 scm_i_thread *t;
1099 SCM ret;
1100
1101 SCM_VALIDATE_THREAD (1, thread);
1102
1103 t = SCM_I_THREAD_DATA (thread);
1104 scm_i_pthread_mutex_lock (&t->admin_mutex);
1105 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1106 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1107
1108 return ret;
1109 }
1110 #undef FUNC_NAME
1111
1112 SCM scm_join_thread (SCM thread)
1113 {
1114 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1115 }
1116
1117 SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1118 (SCM thread, SCM timeout, SCM timeoutval),
1119 "Suspend execution of the calling thread until the target @var{thread} "
1120 "terminates, unless the target @var{thread} has already terminated. ")
1121 #define FUNC_NAME s_scm_join_thread_timed
1122 {
1123 scm_i_thread *t;
1124 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1125 SCM res = SCM_BOOL_F;
1126
1127 if (! (SCM_UNBNDP (timeoutval)))
1128 res = timeoutval;
1129
1130 SCM_VALIDATE_THREAD (1, thread);
1131 if (scm_is_eq (scm_current_thread (), thread))
1132 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
1133
1134 t = SCM_I_THREAD_DATA (thread);
1135 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1136
1137 if (! SCM_UNBNDP (timeout))
1138 {
1139 to_timespec (timeout, &ctimeout);
1140 timeout_ptr = &ctimeout;
1141 }
1142
1143 if (t->exited)
1144 res = t->result;
1145 else
1146 {
1147 while (1)
1148 {
1149 int err = block_self (t->join_queue, thread, &t->admin_mutex,
1150 timeout_ptr);
1151 if (err == 0)
1152 {
1153 if (t->exited)
1154 {
1155 res = t->result;
1156 break;
1157 }
1158 }
1159 else if (err == ETIMEDOUT)
1160 break;
1161
1162 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1163 SCM_TICK;
1164 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1165
1166 /* Check for exit again, since we just released and
1167 reacquired the admin mutex, before the next block_self
1168 call (which would block forever if t has already
1169 exited). */
1170 if (t->exited)
1171 {
1172 res = t->result;
1173 break;
1174 }
1175 }
1176 }
1177
1178 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1179
1180 return res;
1181 }
1182 #undef FUNC_NAME
1183
1184 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1185 (SCM obj),
1186 "Return @code{#t} if @var{obj} is a thread.")
1187 #define FUNC_NAME s_scm_thread_p
1188 {
1189 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1190 }
1191 #undef FUNC_NAME
1192
1193 static SCM
1194 fat_mutex_mark (SCM mx)
1195 {
1196 fat_mutex *m = SCM_MUTEX_DATA (mx);
1197 scm_gc_mark (m->owner);
1198 return m->waiting;
1199 }
1200
1201 static size_t
1202 fat_mutex_free (SCM mx)
1203 {
1204 fat_mutex *m = SCM_MUTEX_DATA (mx);
1205 scm_i_pthread_mutex_destroy (&m->lock);
1206 scm_gc_free (m, sizeof (fat_mutex), "mutex");
1207 return 0;
1208 }
1209
1210 static int
1211 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1212 {
1213 fat_mutex *m = SCM_MUTEX_DATA (mx);
1214 scm_puts ("#<mutex ", port);
1215 scm_uintprint ((scm_t_bits)m, 16, port);
1216 scm_puts (">", port);
1217 return 1;
1218 }
1219
1220 static SCM
1221 make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
1222 {
1223 fat_mutex *m;
1224 SCM mx;
1225
1226 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1227 scm_i_pthread_mutex_init (&m->lock, NULL);
1228 m->owner = SCM_BOOL_F;
1229 m->level = 0;
1230
1231 m->recursive = recursive;
1232 m->unchecked_unlock = unchecked_unlock;
1233 m->allow_external_unlock = external_unlock;
1234
1235 m->waiting = SCM_EOL;
1236 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1237 m->waiting = make_queue ();
1238 return mx;
1239 }
1240
1241 SCM scm_make_mutex (void)
1242 {
1243 return scm_make_mutex_with_flags (SCM_EOL);
1244 }
1245
1246 SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1247 SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1248 SCM_SYMBOL (recursive_sym, "recursive");
1249
1250 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1251 (SCM flags),
1252 "Create a new mutex. ")
1253 #define FUNC_NAME s_scm_make_mutex_with_flags
1254 {
1255 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1256
1257 SCM ptr = flags;
1258 while (! scm_is_null (ptr))
1259 {
1260 SCM flag = SCM_CAR (ptr);
1261 if (scm_is_eq (flag, unchecked_unlock_sym))
1262 unchecked_unlock = 1;
1263 else if (scm_is_eq (flag, allow_external_unlock_sym))
1264 external_unlock = 1;
1265 else if (scm_is_eq (flag, recursive_sym))
1266 recursive = 1;
1267 else
1268 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1269 ptr = SCM_CDR (ptr);
1270 }
1271 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1272 }
1273 #undef FUNC_NAME
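/* A sketch (not compiled) of requesting the flags from C; the symbol
   names mirror the SCM_SYMBOLs above. */
#if 0
{
  SCM m = scm_make_mutex_with_flags
    (scm_list_2 (scm_from_locale_symbol ("recursive"),
                 scm_from_locale_symbol ("allow-external-unlock")));
  scm_lock_mutex (m);
  scm_lock_mutex (m);     /* recursive: level goes to 2, no error */
  scm_unlock_mutex (m);
  scm_unlock_mutex (m);
}
#endif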
1274
1275 SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1276 (void),
1277 "Create a new recursive mutex. ")
1278 #define FUNC_NAME s_scm_make_recursive_mutex
1279 {
1280 return make_fat_mutex (1, 0, 0);
1281 }
1282 #undef FUNC_NAME
1283
1284 SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1285
1286 static SCM
1287 fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
1288 {
1289 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1290
1291 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
1292 SCM err = SCM_BOOL_F;
1293
1294 struct timeval current_time;
1295
1296 scm_i_scm_pthread_mutex_lock (&m->lock);
1297
1298 while (1)
1299 {
1300 if (m->level == 0)
1301 {
1302 m->owner = new_owner;
1303 m->level++;
1304
1305 if (SCM_I_IS_THREAD (new_owner))
1306 {
1307 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
1308 scm_i_pthread_mutex_lock (&t->admin_mutex);
1309 t->mutexes = scm_cons (mutex, t->mutexes);
1310 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1311 }
1312 *ret = 1;
1313 break;
1314 }
1315 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1316 {
1317 m->owner = new_owner;
1318 err = scm_cons (scm_abandoned_mutex_error_key,
1319 scm_from_locale_string ("lock obtained on abandoned "
1320 "mutex"));
1321 *ret = 1;
1322 break;
1323 }
1324 else if (scm_is_eq (m->owner, new_owner))
1325 {
1326 if (m->recursive)
1327 {
1328 m->level++;
1329 *ret = 1;
1330 }
1331 else
1332 {
1333 err = scm_cons (scm_misc_error_key,
1334 scm_from_locale_string ("mutex already locked "
1335 "by thread"));
1336 *ret = 0;
1337 }
1338 break;
1339 }
1340 else
1341 {
1342 if (timeout != NULL)
1343 {
1344 gettimeofday (&current_time, NULL);
1345 if (current_time.tv_sec > timeout->tv_sec ||
1346 (current_time.tv_sec == timeout->tv_sec &&
1347 current_time.tv_usec * 1000 > timeout->tv_nsec))
1348 {
1349 *ret = 0;
1350 break;
1351 }
1352 }
1353 block_self (m->waiting, mutex, &m->lock, timeout);
1354 scm_i_pthread_mutex_unlock (&m->lock);
1355 SCM_TICK;
1356 scm_i_scm_pthread_mutex_lock (&m->lock);
1357 }
1358 }
1359 scm_i_pthread_mutex_unlock (&m->lock);
1360 return err;
1361 }
1362
1363 SCM scm_lock_mutex (SCM mx)
1364 {
1365 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1366 }
1367
1368 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1369 (SCM m, SCM timeout, SCM owner),
1370 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1371 "blocks until the mutex becomes available. The function returns when "
1372 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1373 "a thread already owns will succeed right away and will not block the "
1374 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1375 #define FUNC_NAME s_scm_lock_mutex_timed
1376 {
1377 SCM exception;
1378 int ret = 0;
1379 scm_t_timespec cwaittime, *waittime = NULL;
1380
1381 SCM_VALIDATE_MUTEX (1, m);
1382
1383 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1384 {
1385 to_timespec (timeout, &cwaittime);
1386 waittime = &cwaittime;
1387 }
1388
1389 exception = fat_mutex_lock (m, waittime, owner, &ret);
1390 if (!scm_is_false (exception))
1391 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1392 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1393 }
1394 #undef FUNC_NAME
1395
1396 void
1397 scm_dynwind_lock_mutex (SCM mutex)
1398 {
1399 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
1400 SCM_F_WIND_EXPLICITLY);
1401 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
1402 SCM_F_WIND_EXPLICITLY);
1403 }
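/* A usage sketch (not compiled): the mutex is released however the
   dynwind context is left, including by a non-local exit. */
#if 0
{
  SCM m = scm_make_mutex ();
  scm_dynwind_begin (0);
  scm_dynwind_lock_mutex (m);
  /* ... code that may throw; the mutex is unlocked either way ... */
  scm_dynwind_end ();
}
#endif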
1404
1405 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1406 (SCM mutex),
1407 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1408 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1409 #define FUNC_NAME s_scm_try_mutex
1410 {
1411 SCM exception;
1412 int ret = 0;
1413 scm_t_timespec cwaittime, *waittime = NULL;
1414
1415 SCM_VALIDATE_MUTEX (1, mutex);
1416
1417 to_timespec (scm_from_int(0), &cwaittime);
1418 waittime = &cwaittime;
1419
1420 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1421 if (!scm_is_false (exception))
1422 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1423 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1424 }
1425 #undef FUNC_NAME
1426
1427 /*** Fat condition variables */
1428
1429 typedef struct {
1430 scm_i_pthread_mutex_t lock;
1431 SCM waiting; /* the threads waiting for this condition. */
1432 } fat_cond;
1433
1434 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1435 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1436
1437 static int
1438 fat_mutex_unlock (SCM mutex, SCM cond,
1439 const scm_t_timespec *waittime, int relock)
1440 {
1441 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1442 fat_cond *c = NULL;
1443 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1444 int err = 0, ret = 0;
1445
1446 scm_i_scm_pthread_mutex_lock (&m->lock);
1447
1448 SCM owner = m->owner;
1449
1450 if (!scm_is_eq (owner, scm_current_thread ()))
1451 {
1452 if (m->level == 0)
1453 {
1454 if (!m->unchecked_unlock)
1455 {
1456 scm_i_pthread_mutex_unlock (&m->lock);
1457 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1458 }
1459 owner = scm_current_thread ();
1460 }
1461 else if (!m->allow_external_unlock)
1462 {
1463 scm_i_pthread_mutex_unlock (&m->lock);
1464 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1465 }
1466 }
1467
1468 if (! (SCM_UNBNDP (cond)))
1469 {
1470 c = SCM_CONDVAR_DATA (cond);
1471 while (1)
1472 {
1473 int brk = 0;
1474
1475 if (m->level > 0)
1476 m->level--;
1477 if (m->level == 0)
1478 m->owner = unblock_from_queue (m->waiting);
1479
1480 t->block_asyncs++;
1481
1482 err = block_self (c->waiting, cond, &m->lock, waittime);
1483 scm_i_pthread_mutex_unlock (&m->lock);
1484
1485 if (err == 0)
1486 {
1487 ret = 1;
1488 brk = 1;
1489 }
1490 else if (err == ETIMEDOUT)
1491 {
1492 ret = 0;
1493 brk = 1;
1494 }
1495 else if (err != EINTR)
1496 {
1497 errno = err;
1498 scm_syserror (NULL);
1499 }
1500
1501 if (brk)
1502 {
1503 if (relock)
1504 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
1505 t->block_asyncs--;
1506 break;
1507 }
1508
1509 t->block_asyncs--;
1510 scm_async_click ();
1511
1512 scm_remember_upto_here_2 (cond, mutex);
1513
1514 scm_i_scm_pthread_mutex_lock (&m->lock);
1515 }
1516 }
1517 else
1518 {
1519 if (m->level > 0)
1520 m->level--;
1521 if (m->level == 0)
1522 m->owner = unblock_from_queue (m->waiting);
1523
1524 scm_i_pthread_mutex_unlock (&m->lock);
1525 ret = 1;
1526 }
1527
1528 return ret;
1529 }
1530
1531 SCM scm_unlock_mutex (SCM mx)
1532 {
1533 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1534 }
1535
1536 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1537 (SCM mx, SCM cond, SCM timeout),
1538 "Unlocks @var{mutex} if the calling thread owns the lock on "
1539 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1540 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1541 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1542 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1543 "with a call to @code{unlock-mutex}. Only the last call to "
1544 "@code{unlock-mutex} will actually unlock the mutex. ")
1545 #define FUNC_NAME s_scm_unlock_mutex_timed
1546 {
1547 scm_t_timespec cwaittime, *waittime = NULL;
1548
1549 SCM_VALIDATE_MUTEX (1, mx);
1550 if (! (SCM_UNBNDP (cond)))
1551 {
1552 SCM_VALIDATE_CONDVAR (2, cond);
1553
1554 if (! (SCM_UNBNDP (timeout)))
1555 {
1556 to_timespec (timeout, &cwaittime);
1557 waittime = &cwaittime;
1558 }
1559 }
1560
1561 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1562 }
1563 #undef FUNC_NAME
1564
1565 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1566 (SCM obj),
1567 "Return @code{#t} if @var{obj} is a mutex.")
1568 #define FUNC_NAME s_scm_mutex_p
1569 {
1570 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1571 }
1572 #undef FUNC_NAME
1573
1574 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1575 (SCM mx),
1576 "Return the thread owning @var{mx}, or @code{#f}.")
1577 #define FUNC_NAME s_scm_mutex_owner
1578 {
1579 SCM owner;
1580 fat_mutex *m = NULL;
1581
1582 SCM_VALIDATE_MUTEX (1, mx);
1583 m = SCM_MUTEX_DATA (mx);
1584 scm_i_pthread_mutex_lock (&m->lock);
1585 owner = m->owner;
1586 scm_i_pthread_mutex_unlock (&m->lock);
1587
1588 return owner;
1589 }
1590 #undef FUNC_NAME
1591
1592 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1593 (SCM mx),
1594 "Return the lock level of mutex @var{mx}.")
1595 #define FUNC_NAME s_scm_mutex_level
1596 {
1597 SCM_VALIDATE_MUTEX (1, mx);
1598 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1599 }
1600 #undef FUNC_NAME
1601
1602 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1603 (SCM mx),
1604 "Returns @code{#t} if the mutex @var{mx} is locked.")
1605 #define FUNC_NAME s_scm_mutex_locked_p
1606 {
1607 SCM_VALIDATE_MUTEX (1, mx);
1608 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1609 }
1610 #undef FUNC_NAME
1611
1612 static SCM
1613 fat_cond_mark (SCM cv)
1614 {
1615 fat_cond *c = SCM_CONDVAR_DATA (cv);
1616 return c->waiting;
1617 }
1618
1619 static size_t
1620 fat_cond_free (SCM mx)
1621 {
1622 fat_cond *c = SCM_CONDVAR_DATA (mx);
1623 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1624 return 0;
1625 }
1626
1627 static int
1628 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1629 {
1630 fat_cond *c = SCM_CONDVAR_DATA (cv);
1631 scm_puts ("#<condition-variable ", port);
1632 scm_uintprint ((scm_t_bits)c, 16, port);
1633 scm_puts (">", port);
1634 return 1;
1635 }
1636
1637 SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1638 (void),
1639 "Make a new condition variable.")
1640 #define FUNC_NAME s_scm_make_condition_variable
1641 {
1642 fat_cond *c;
1643 SCM cv;
1644
1645 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
1646 c->waiting = SCM_EOL;
1647 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1648 c->waiting = make_queue ();
1649 return cv;
1650 }
1651 #undef FUNC_NAME
1652
1653 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1654 (SCM cv, SCM mx, SCM t),
1655 "Wait until @var{cond-var} has been signalled. While waiting, "
1656 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1657 "is locked again when this function returns. When @var{time} is given, "
1658 "it specifies a point in time where the waiting should be aborted. It "
1659 "can be either a integer as returned by @code{current-time} or a pair "
1660 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1661 "mutex is locked and @code{#f} is returned. When the condition "
1662 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1663 "is returned. ")
1664 #define FUNC_NAME s_scm_timed_wait_condition_variable
1665 {
1666 scm_t_timespec waittime, *waitptr = NULL;
1667
1668 SCM_VALIDATE_CONDVAR (1, cv);
1669 SCM_VALIDATE_MUTEX (2, mx);
1670
1671 if (!SCM_UNBNDP (t))
1672 {
1673 to_timespec (t, &waittime);
1674 waitptr = &waittime;
1675 }
1676
1677 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1678 }
1679 #undef FUNC_NAME
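/* A sketch (not compiled) of the classic predicate loop around
   wait-condition-variable, driven from C; `ready' is a hypothetical
   shared flag protected by the mutex. */
#if 0
{
  SCM m = scm_make_mutex ();
  SCM c = scm_make_condition_variable ();

  scm_lock_mutex (m);
  while (!ready)
    /* Unlocks M, sleeps, and relocks M before returning. */
    scm_timed_wait_condition_variable (c, m, SCM_UNDEFINED);
  scm_unlock_mutex (m);
}
#endif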
1680
1681 static void
1682 fat_cond_signal (fat_cond *c)
1683 {
1684 unblock_from_queue (c->waiting);
1685 }
1686
1687 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1688 (SCM cv),
1689 "Wake up one thread that is waiting for @var{cv}")
1690 #define FUNC_NAME s_scm_signal_condition_variable
1691 {
1692 SCM_VALIDATE_CONDVAR (1, cv);
1693 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1694 return SCM_BOOL_T;
1695 }
1696 #undef FUNC_NAME
1697
1698 static void
1699 fat_cond_broadcast (fat_cond *c)
1700 {
1701 while (scm_is_true (unblock_from_queue (c->waiting)))
1702 ;
1703 }
1704
1705 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1706 (SCM cv),
1707 "Wake up all threads that are waiting for @var{cv}. ")
1708 #define FUNC_NAME s_scm_broadcast_condition_variable
1709 {
1710 SCM_VALIDATE_CONDVAR (1, cv);
1711 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1712 return SCM_BOOL_T;
1713 }
1714 #undef FUNC_NAME
1715
1716 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1717 (SCM obj),
1718 "Return @code{#t} if @var{obj} is a condition variable.")
1719 #define FUNC_NAME s_scm_condition_variable_p
1720 {
1721 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1722 }
1723 #undef FUNC_NAME
1724
1725 /*** Marking stacks */
1726
1727 /* XXX - what to do with this? Do we need to handle this for blocked
1728 threads as well?
1729 */
1730 #ifdef __ia64__
1731 # define SCM_MARK_BACKING_STORE() do { \
1732 ucontext_t ctx; \
1733 SCM_STACKITEM * top, * bot; \
1734 getcontext (&ctx); \
1735 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1736 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1737 / sizeof (SCM_STACKITEM))); \
1738 bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
1739 top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
1740 scm_mark_locations (bot, top - bot); } while (0)
1741 #else
1742 # define SCM_MARK_BACKING_STORE()
1743 #endif
1744
1745 void
1746 scm_threads_mark_stacks (void)
1747 {
1748 scm_i_thread *t;
1749 for (t = all_threads; t; t = t->next_thread)
1750 {
1751 /* Check that thread has indeed been suspended.
1752 */
1753 assert (t->top);
1754
1755 scm_gc_mark (t->handle);
1756
1757 #if SCM_STACK_GROWS_UP
1758 scm_mark_locations (t->base, t->top - t->base);
1759 #else
1760 scm_mark_locations (t->top, t->base - t->top);
1761 #endif
1762 scm_mark_locations ((void *) &t->regs,
1763 ((size_t) sizeof(t->regs)
1764 / sizeof (SCM_STACKITEM)));
1765 }
1766
1767 SCM_MARK_BACKING_STORE ();
1768 }
1769
1770 /*** Select */
1771
1772 int
1773 scm_std_select (int nfds,
1774 SELECT_TYPE *readfds,
1775 SELECT_TYPE *writefds,
1776 SELECT_TYPE *exceptfds,
1777 struct timeval *timeout)
1778 {
1779 fd_set my_readfds;
1780 int res, eno, wakeup_fd;
1781 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1782 scm_t_guile_ticket ticket;
1783
1784 if (readfds == NULL)
1785 {
1786 FD_ZERO (&my_readfds);
1787 readfds = &my_readfds;
1788 }
1789
1790 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1791 SCM_TICK;
1792
1793 wakeup_fd = t->sleep_pipe[0];
1794 ticket = scm_leave_guile ();
1795 FD_SET (wakeup_fd, readfds);
1796 if (wakeup_fd >= nfds)
1797 nfds = wakeup_fd+1;
1798 res = select (nfds, readfds, writefds, exceptfds, timeout);
1799 t->sleep_fd = -1;
1800 eno = errno;
1801 scm_enter_guile (ticket);
1802
1803 scm_i_reset_sleep (t);
1804
1805 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1806 {
1807 char dummy;
1808 full_read (wakeup_fd, &dummy, 1);
1809
1810 FD_CLR (wakeup_fd, readfds);
1811 res -= 1;
1812 if (res == 0)
1813 {
1814 eno = EINTR;
1815 res = -1;
1816 }
1817 }
1818 errno = eno;
1819 return res;
1820 }
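/* A sketch (not compiled) of calling scm_std_select like plain select;
   an EINTR result here may also mean a system async arrived via the
   wakeup pipe. some_fd is a hypothetical descriptor. */
#if 0
{
  fd_set fds;
  struct timeval tv = { 1, 0 };   /* one second */
  int res;

  FD_ZERO (&fds);
  FD_SET (some_fd, &fds);
  res = scm_std_select (some_fd + 1, &fds, NULL, NULL, &tv);
  if (res < 0 && errno == EINTR)
    ;  /* interrupted; caller typically runs SCM_TICK and retries */
}
#endif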
1821
1822 /* Convenience API for blocking while in guile mode. */
1823
1824 #if SCM_USE_PTHREAD_THREADS
1825
1826 int
1827 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1828 {
1829 if (scm_i_pthread_mutex_trylock (mutex) == 0)
1830 return 0;
1831 else
1832 {
1833 scm_t_guile_ticket t = scm_leave_guile ();
1834 int res = scm_i_pthread_mutex_lock (mutex);
1835 scm_enter_guile (t);
1836 return res;
1837 }
1838 }
1839
1840 static void
1841 do_unlock (void *data)
1842 {
1843 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1844 }
1845
1846 void
1847 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1848 {
1849 scm_i_scm_pthread_mutex_lock (mutex);
1850 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
1851 }
1852
1853 int
1854 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1855 {
1856 scm_t_guile_ticket t = scm_leave_guile ();
1857 ((scm_i_thread *)t)->held_mutex = mutex;
1858 int res = scm_i_pthread_cond_wait (cond, mutex);
1859 ((scm_i_thread *)t)->held_mutex = NULL;
1860 scm_enter_guile (t);
1861 return res;
1862 }
1863
1864 int
1865 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1866 scm_i_pthread_mutex_t *mutex,
1867 const scm_t_timespec *wt)
1868 {
1869 scm_t_guile_ticket t = scm_leave_guile ();
1870 ((scm_i_thread *)t)->held_mutex = mutex;
1871 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1872 ((scm_i_thread *)t)->held_mutex = NULL;
1873 scm_enter_guile (t);
1874 return res;
1875 }
1876
1877 #endif
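/* A sketch (not compiled, and assuming SCM_USE_PTHREAD_THREADS) of the
   convenience API above: these behave like the raw pthread calls but
   drop out of guile mode while blocking, so GC in other threads is not
   stalled. state_is_ready is a hypothetical predicate over data the
   lock protects. */
#if 0
{
  static scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
  static scm_i_pthread_cond_t cond;   /* assume initialized elsewhere */

  scm_pthread_mutex_lock (&lock);     /* fast path never leaves guile mode */
  while (!state_is_ready ())
    scm_pthread_cond_wait (&cond, &lock);
  scm_i_pthread_mutex_unlock (&lock);
}
#endif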
1878
1879 unsigned long
1880 scm_std_usleep (unsigned long usecs)
1881 {
1882 struct timeval tv;
1883 tv.tv_usec = usecs % 1000000;
1884 tv.tv_sec = usecs / 1000000;
1885 scm_std_select (0, NULL, NULL, NULL, &tv);
1886 return tv.tv_sec * 1000000 + tv.tv_usec;
1887 }
1888
1889 unsigned int
1890 scm_std_sleep (unsigned int secs)
1891 {
1892 struct timeval tv;
1893 tv.tv_usec = 0;
1894 tv.tv_sec = secs;
1895 scm_std_select (0, NULL, NULL, NULL, &tv);
1896 return tv.tv_sec;
1897 }
1898
1899 /*** Misc */
1900
1901 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1902 (void),
1903 "Return the thread that called this function.")
1904 #define FUNC_NAME s_scm_current_thread
1905 {
1906 return SCM_I_CURRENT_THREAD->handle;
1907 }
1908 #undef FUNC_NAME
1909
1910 static SCM
1911 scm_c_make_list (size_t n, SCM fill)
1912 {
1913 SCM res = SCM_EOL;
1914 while (n-- > 0)
1915 res = scm_cons (fill, res);
1916 return res;
1917 }
1918
1919 SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
1920 (void),
1921 "Return a list of all threads.")
1922 #define FUNC_NAME s_scm_all_threads
1923 {
1924 /* We cannot allocate while holding the thread_admin_mutex because
1925 of the way GC is done.
1926 */
1927 int n = thread_count;
1928 scm_i_thread *t;
1929 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
1930
1931 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1932 l = &list;
1933 for (t = all_threads; t && n > 0; t = t->next_thread)
1934 {
1935 if (t != scm_i_signal_delivery_thread)
1936 {
1937 SCM_SETCAR (*l, t->handle);
1938 l = SCM_CDRLOC (*l);
1939 }
1940 n--;
1941 }
1942 *l = SCM_EOL;
1943 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1944 return list;
1945 }
1946 #undef FUNC_NAME
1947
1948 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1949 (SCM thread),
1950 "Return @code{#t} iff @var{thread} has exited.\n")
1951 #define FUNC_NAME s_scm_thread_exited_p
1952 {
1953 return scm_from_bool (scm_c_thread_exited_p (thread));
1954 }
1955 #undef FUNC_NAME
1956
1957 int
1958 scm_c_thread_exited_p (SCM thread)
1959 #define FUNC_NAME s_scm_thread_exited_p
1960 {
1961 scm_i_thread *t;
1962 SCM_VALIDATE_THREAD (1, thread);
1963 t = SCM_I_THREAD_DATA (thread);
1964 return t->exited;
1965 }
1966 #undef FUNC_NAME
1967
1968 static scm_i_pthread_cond_t wake_up_cond;
1969 int scm_i_thread_go_to_sleep;
1970 static int threads_initialized_p = 0;
1971
1972 void
1973 scm_i_thread_put_to_sleep ()
1974 {
1975 if (threads_initialized_p)
1976 {
1977 scm_i_thread *t;
1978
1979 scm_leave_guile ();
1980 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1981
1982 /* Signal all threads to go to sleep
1983 */
1984 scm_i_thread_go_to_sleep = 1;
1985 for (t = all_threads; t; t = t->next_thread)
1986 scm_i_pthread_mutex_lock (&t->heap_mutex);
1987 scm_i_thread_go_to_sleep = 0;
1988 }
1989 }
1990
1991 void
1992 scm_i_thread_invalidate_freelists ()
1993 {
1994 /* thread_admin_mutex is already locked. */
1995
1996 scm_i_thread *t;
1997 for (t = all_threads; t; t = t->next_thread)
1998 if (t != SCM_I_CURRENT_THREAD)
1999 t->clear_freelists_p = 1;
2000 }
2001
2002 void
2003 scm_i_thread_wake_up ()
2004 {
2005 if (threads_initialized_p)
2006 {
2007 scm_i_thread *t;
2008
2009 scm_i_pthread_cond_broadcast (&wake_up_cond);
2010 for (t = all_threads; t; t = t->next_thread)
2011 scm_i_pthread_mutex_unlock (&t->heap_mutex);
2012 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
2013 scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
2014 }
2015 }
2016
2017 void
2018 scm_i_thread_sleep_for_gc ()
2019 {
2020 scm_i_thread *t = suspend ();
2021
2022 /* Don't put t->heap_mutex in t->held_mutex here, because if the
2023 thread is cancelled during the cond wait, the thread's cleanup
2024 function (scm_leave_guile_cleanup) will handle unlocking the
2025 heap_mutex, so we don't need to do that again in on_thread_exit.
2026 */
2027 scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
2028
2029 resume (t);
2030 }
2031
2032 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2033 */
2034 scm_i_pthread_mutex_t scm_i_critical_section_mutex;
2035 int scm_i_critical_section_level = 0;
2036
2037 static SCM dynwind_critical_section_mutex;
2038
2039 void
2040 scm_dynwind_critical_section (SCM mutex)
2041 {
2042 if (scm_is_false (mutex))
2043 mutex = dynwind_critical_section_mutex;
2044 scm_dynwind_lock_mutex (mutex);
2045 scm_dynwind_block_asyncs ();
2046 }
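/* A usage sketch (not compiled): passing #f selects the shared recursive
   mutex below, and asyncs stay blocked until the dynwind context ends. */
#if 0
{
  scm_dynwind_begin (0);
  scm_dynwind_critical_section (SCM_BOOL_F);
  /* ... touch shared state; no asyncs can run here ... */
  scm_dynwind_end ();
}
#endif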
2047
2048 /*** Initialization */
2049
2050 scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
2051 scm_i_pthread_mutex_t scm_i_misc_mutex;
2052
2053 #if SCM_USE_PTHREAD_THREADS
2054 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2055 #endif
2056
2057 void
2058 scm_threads_prehistory (SCM_STACKITEM *base)
2059 {
2060 #if SCM_USE_PTHREAD_THREADS
2061 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
2062 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
2063 PTHREAD_MUTEX_RECURSIVE);
2064 #endif
2065
2066 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
2067 scm_i_pthread_mutexattr_recursive);
2068 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
2069 scm_i_pthread_cond_init (&wake_up_cond, NULL);
2070 scm_i_pthread_key_create (&scm_i_freelist, NULL);
2071 scm_i_pthread_key_create (&scm_i_freelist2, NULL);
2072
2073 guilify_self_1 (base);
2074 }
2075
2076 scm_t_bits scm_tc16_thread;
2077 scm_t_bits scm_tc16_mutex;
2078 scm_t_bits scm_tc16_condvar;
2079
2080 void
2081 scm_init_threads ()
2082 {
2083 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
2084 scm_set_smob_mark (scm_tc16_thread, thread_mark);
2085 scm_set_smob_print (scm_tc16_thread, thread_print);
2086 scm_set_smob_free (scm_tc16_thread, thread_free);
2087
2088 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
2089 scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
2090 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
2091 scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
2092
2093 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
2094 sizeof (fat_cond));
2095 scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
2096 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
2097 scm_set_smob_free (scm_tc16_condvar, fat_cond_free);
2098
2099 scm_i_default_dynamic_state = SCM_BOOL_F;
2100 guilify_self_2 (SCM_BOOL_F);
2101 threads_initialized_p = 1;
2102
2103 dynwind_critical_section_mutex =
2104 scm_permanent_object (scm_make_recursive_mutex ());
2105 }
2106
2107 void
2108 scm_init_threads_default_dynamic_state ()
2109 {
2110 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2111 scm_i_default_dynamic_state = scm_permanent_object (state);
2112 }
2113
2114 void
2115 scm_init_thread_procs ()
2116 {
2117 #include "libguile/threads.x"
2118 }
2119
2120 /*
2121 Local Variables:
2122 c-file-style: "gnu"
2123 End:
2124 */