fix deadlock in before-gc-hook on certain 7.2alpha gc versions
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
27
28 #if HAVE_UNISTD_H
29 #include <unistd.h>
30 #endif
31 #include <stdio.h>
32
33 #ifdef HAVE_STRING_H
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35 #endif
36
37 #if HAVE_SYS_TIME_H
38 #include <sys/time.h>
39 #endif
40
41 #include <assert.h>
42 #include <fcntl.h>
43 #include <nproc.h>
44
45 #include "libguile/validate.h"
46 #include "libguile/root.h"
47 #include "libguile/eval.h"
48 #include "libguile/async.h"
49 #include "libguile/ports.h"
50 #include "libguile/threads.h"
51 #include "libguile/dynwind.h"
52 #include "libguile/iselect.h"
53 #include "libguile/fluids.h"
54 #include "libguile/continuations.h"
55 #include "libguile/gc.h"
56 #include "libguile/init.h"
57 #include "libguile/scmsigs.h"
58 #include "libguile/strings.h"
59 #include "libguile/weaks.h"
60
61 #include <full-read.h>
62
63
64 \f
65
66 /* First some libgc shims. */
67
68 /* Make sure GC_fn_type is defined; it is missing from the public
69 headers of GC 7.1 and earlier. */
70 #ifndef HAVE_GC_FN_TYPE
71 typedef void * (* GC_fn_type) (void *);
72 #endif
73
74
75 #ifndef GC_SUCCESS
76 #define GC_SUCCESS 0
77 #endif
78
79 #ifndef GC_UNIMPLEMENTED
80 #define GC_UNIMPLEMENTED 3
81 #endif
82
83 /* Likewise struct GC_stack_base is missing before 7.1. */
84 #ifndef HAVE_GC_STACK_BASE
struct GC_stack_base {
  void * mem_base; /* Base of memory stack.  */
#ifdef __ia64__
  void * reg_base; /* Base of the separate ia64 register backing store.  */
#endif
};
91
92 static int
93 GC_register_my_thread (struct GC_stack_base *stack_base)
94 {
95 return GC_UNIMPLEMENTED;
96 }
97
/* Stub for libgc < 7.1: nothing to undo when a thread goes away.
   Declared with (void) so the prototype matches how it is called.  */
static void
GC_unregister_my_thread (void)
{
}
102
103 #if !SCM_USE_PTHREAD_THREADS
104 /* No threads; we can just use GC_stackbottom. */
static void *
get_thread_stack_base ()
{
  /* Single-threaded build: the collector's recorded stack bottom is our
     stack base.  */
  return GC_stackbottom;
}
110
111 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
112 && defined PTHREAD_ATTR_GETSTACK_WORKS
113 /* This method for GNU/Linux and perhaps some other systems.
114 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
115 available on them. */
/* Return the stack base of the calling thread: its lowest stack address
   when the stack grows up, else one past its highest address.  */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  /* pthread_getattr_np allocates resources inside ATTR that must be
     released, otherwise they leak on every call.  */
  pthread_attr_destroy (&attr);
  end = (char *) start + size;

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}
133
134 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
135 /* This method for MacOS X.
136 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
137 but as of 2006 there's nothing obvious at apple.com. */
static void *
get_thread_stack_base ()
{
  /* Darwin-specific call returning the stack base of the calling
     thread.  */
  return pthread_get_stackaddr_np (pthread_self ());
}
143
144 #else
145 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
146 #endif
147
/* Shim for libgc < 7.1: fill in STACK_BASE for the calling thread.
   Always reports GC_SUCCESS; the platform-specific work is done by
   get_thread_stack_base above.  */
static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    /* Clamp: the RBS base can never lie above the current RBS top.  */
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}
170
171 static void *
172 GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
173 {
174 struct GC_stack_base stack_base;
175
176 stack_base.mem_base = (void*)&stack_base;
177 #ifdef __ia64__
178 /* FIXME: Untested. */
179 {
180 ucontext_t ctx;
181 getcontext (&ctx);
182 stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
183 }
184 #endif
185
186 return fn (&stack_base, arg);
187 }
188 #endif /* HAVE_GC_STACK_BASE */
189
190
191 /* Now define with_gc_active and with_gc_inactive. */
192
193 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
194
195 /* We have a sufficiently new libgc (7.2 or newer). */
196
/* Run FUNC while this thread is "blocked" from the collector's point of
   view (libgc >= 7.2): FUNC must not touch GC-managed memory.  */
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}
202
/* Run FUNC with the collector scanning this thread again, undoing a
   surrounding with_gc_inactive (libgc >= 7.2).  */
static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}
208
209 #else
210
211 /* libgc not new enough, so never actually deactivate GC.
212
213 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
214 GC_call_with_gc_active. */
215
/* Fallback for older libgc: we cannot tell the collector to ignore this
   thread, so just call FUNC directly.  */
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}
221
/* Fallback for older libgc: GC was never deactivated, so nothing to
   reactivate — call FUNC directly.  */
static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}
227
228 #endif /* HAVE_GC_DO_BLOCKING */
229
230
231 \f
232 static void
233 to_timespec (SCM t, scm_t_timespec *waittime)
234 {
235 if (scm_is_pair (t))
236 {
237 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
238 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
239 }
240 else
241 {
242 double time = scm_to_double (t);
243 double sec = scm_c_truncate (time);
244
245 waittime->tv_sec = (long) sec;
246 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
247 }
248 }
249
250 \f
251 /*** Queues */
252
253 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
254 the risk of false references leading to unbounded retained space as
255 described in "Bounding Space Usage of Conservative Garbage Collectors",
256 H.J. Boehm, 2001. */
257
258 /* Make an empty queue data structure.
259 */
static SCM
make_queue ()
{
  /* car: last cell of the element list (or '() when empty);
     cdr: the element list itself, front first.  */
  return scm_cons (SCM_EOL, SCM_EOL);
}
265
266 /* Put T at the back of Q and return a handle that can be used with
267 remqueue to remove T from Q again.
268 */
269 static SCM
270 enqueue (SCM q, SCM t)
271 {
272 SCM c = scm_cons (t, SCM_EOL);
273 SCM_CRITICAL_SECTION_START;
274 if (scm_is_null (SCM_CDR (q)))
275 SCM_SETCDR (q, c);
276 else
277 SCM_SETCDR (SCM_CAR (q), c);
278 SCM_SETCAR (q, c);
279 SCM_CRITICAL_SECTION_END;
280 return c;
281 }
282
283 /* Remove the element that the handle C refers to from the queue Q. C
284 must have been returned from a call to enqueue. The return value
285 is zero when the element referred to by C has already been removed.
286 Otherwise, 1 is returned.
287 */
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  /* Walk the element list looking for the exact cell C.  */
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
	{
	  /* If C was the last cell, the tail pointer must follow it back.  */
	  if (scm_is_eq (c, SCM_CAR (q)))
	    SCM_SETCAR (q, SCM_CDR (c));
	  SCM_SETCDR (prev, SCM_CDR (c));

	  /* GC-robust: drop C's link so it cannot retain the rest of the
	     queue if something still points at it.  */
	  SCM_SETCDR (c, SCM_EOL);

	  SCM_CRITICAL_SECTION_END;
	  return 1;
	}
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}
312
313 /* Remove the front-most element from the queue Q and return it.
314 Return SCM_BOOL_F when Q is empty.
315 */
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      /* Empty queue.  */
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      /* If that was the only element, reset the tail pointer too.  */
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust: sever the removed cell's link into the queue.  */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
340
341 /*** Thread smob routines */
342
343
344 static int
345 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
346 {
347 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
348 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
349 the struct case, hence we go via a union, and extract according to the
350 size of pthread_t. */
351 union {
352 scm_i_pthread_t p;
353 unsigned short us;
354 unsigned int ui;
355 unsigned long ul;
356 scm_t_uintmax um;
357 } u;
358 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
359 scm_i_pthread_t p = t->pthread;
360 scm_t_uintmax id;
361 u.p = p;
362 if (sizeof (p) == sizeof (unsigned short))
363 id = u.us;
364 else if (sizeof (p) == sizeof (unsigned int))
365 id = u.ui;
366 else if (sizeof (p) == sizeof (unsigned long))
367 id = u.ul;
368 else
369 id = u.um;
370
371 scm_puts ("#<thread ", port);
372 scm_uintprint (id, 10, port);
373 scm_puts (" (", port);
374 scm_uintprint ((scm_t_bits)t, 16, port);
375 scm_puts (")>", port);
376 return 1;
377 }
378
379 \f
380 /*** Blocking on queues. */
381
/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/
385
386 /* Put the current thread on QUEUE and go to sleep, waiting for it to
387 be woken up by a call to 'unblock_from_queue', or to be
388 interrupted. Upon return of this function, the current thread is
389 no longer on QUEUE, even when the sleep has been interrupted.
390
391 The caller of block_self must hold MUTEX. It will be atomically
392 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
393
394 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
395 as MUTEX is needed.
396
397 When WAITTIME is not NULL, the sleep will be aborted at that time.
398
399 The return value of block_self is an errno value. It will be zero
400 when the sleep has been successfully completed by a call to
401 unblock_from_queue, EINTR when it has been interrupted by the
402 delivery of a system async, and ETIMEDOUT when the timeout has
403 expired.
404
405 The system asyncs themselves are not executed by block_self.
406 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* scm_i_setup_sleep returns nonzero when an async is already pending;
     in that case we do not sleep at all and report interruption.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block async delivery while asleep; interruption is reported via
	 EINTR instead of running the async here.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
438
439 /* Wake up the first thread on QUEUE, if any. The awoken thread is
440 returned, or #f if the queue was empty.
441 */
442 static SCM
443 unblock_from_queue (SCM queue)
444 {
445 SCM thread = dequeue (queue);
446 if (scm_is_true (thread))
447 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
448 return thread;
449 }
450
451 \f
452 /* Getting into and out of guile mode.
453 */
454
455 /* Key used to attach a cleanup handler to a given thread. Also, if
456 thread-local storage is unavailable, this key is used to retrieve the
457 current thread with `pthread_getspecific ()'. */
458 scm_i_pthread_key_t scm_i_thread_key;
459
460
461 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
462
463 /* When thread-local storage (TLS) is available, a pointer to the
464 current-thread object is kept in TLS. Note that storing the thread-object
465 itself in TLS (rather than a pointer to some malloc'd memory) is not
466 possible since thread objects may live longer than the actual thread they
467 represent. */
468 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
469
470 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
471
472
473 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
474 static scm_i_thread *all_threads = NULL;
475 static int thread_count;
476
477 static SCM scm_i_default_dynamic_state;
478
479 /* Run when a fluid is collected. */
void
scm_i_reset_fluid (size_t n)
{
  scm_i_thread *t;

  /* Clear fluid slot N in every thread's dynamic state so the slot can
     be reused without leaking the collected fluid's old values.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (t = all_threads; t; t = t->next_thread)
    if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
      {
	SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);

	/* A thread's fluid vector may be shorter than N; only clear
	   slots that actually exist.  */
	if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
	  SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
      }
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
496
497 /* Perform first stage of thread initialisation, in non-guile mode.
498 */
499 static void
500 guilify_self_1 (struct GC_stack_base *base)
501 {
502 scm_i_thread t;
503
504 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
505 before allocating anything in this thread, because allocation could
506 cause GC to run, and GC could cause finalizers, which could invoke
507 Scheme functions, which need the current thread to be set. */
508
509 t.pthread = scm_i_pthread_self ();
510 t.handle = SCM_BOOL_F;
511 t.result = SCM_BOOL_F;
512 t.cleanup_handler = SCM_BOOL_F;
513 t.mutexes = SCM_EOL;
514 t.held_mutex = NULL;
515 t.join_queue = SCM_EOL;
516 t.dynamic_state = SCM_BOOL_F;
517 t.dynwinds = SCM_EOL;
518 t.active_asyncs = SCM_EOL;
519 t.block_asyncs = 1;
520 t.pending_asyncs = 1;
521 t.critical_section_level = 0;
522 t.base = base->mem_base;
523 #ifdef __ia64__
524 t.register_backing_store_base = base->reg-base;
525 #endif
526 t.continuation_root = SCM_EOL;
527 t.continuation_base = t.base;
528 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
529 t.sleep_mutex = NULL;
530 t.sleep_object = SCM_BOOL_F;
531 t.sleep_fd = -1;
532
533 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
534 /* FIXME: Error conditions during the initialization phase are handled
535 gracelessly since public functions such as `scm_init_guile ()'
536 currently have type `void'. */
537 abort ();
538
539 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
540 t.current_mark_stack_ptr = NULL;
541 t.current_mark_stack_limit = NULL;
542 t.canceled = 0;
543 t.exited = 0;
544 t.guile_mode = 0;
545
546 /* The switcheroo. */
547 {
548 scm_i_thread *t_ptr = &t;
549
550 GC_disable ();
551 t_ptr = GC_malloc (sizeof (scm_i_thread));
552 memcpy (t_ptr, &t, sizeof t);
553
554 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
555
556 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
557 /* Cache the current thread in TLS for faster lookup. */
558 scm_i_current_thread = t_ptr;
559 #endif
560
561 scm_i_pthread_mutex_lock (&thread_admin_mutex);
562 t_ptr->next_thread = all_threads;
563 all_threads = t_ptr;
564 thread_count++;
565 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
566
567 GC_enable ();
568 }
569 }
570
571 /* Perform second stage of thread initialisation, in guile mode.
572 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  /* Now that allocation is safe, wrap the thread struct in its smob.  */
  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  /* Inherit the dynamic state from PARENT, or start from the initial
     one when PARENT is false.  */
  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Asyncs were blocked during stage 1; allow them from here on.  */
  t->block_asyncs = 0;
}
594
595 \f
596 /*** Fat mutexes */
597
598 /* We implement our own mutex type since we want them to be 'fair', we
599 want to do fancy things while waiting for them (like running
600 asyncs) and we might want to add things that are nice for
601 debugging.
602 */
603
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects all fields below */
  SCM owner;                    /* handle of the owning thread */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;
616
617 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
618 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
619
620 static SCM
621 call_cleanup (void *data)
622 {
623 SCM *proc_p = data;
624 return scm_call_0 (*proc_p);
625 }
626
627 /* Perform thread tear-down, in guile mode.
628 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  This needs to be done in Guile mode.  */
  scm_i_ensure_signal_delivery_thread ();

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice; its result (or
	 the error message fallback) becomes the thread's exit value.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      call_cleanup, &ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* Wake waiters on every mutex we still hold; T->mutexes is a weak
     list, so entries may have been collected (SCM_UNDEFINED).  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      if (!SCM_UNBNDP (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Since MUTEX is in `t->mutexes', T must be its owner.  */
	  assert (scm_is_eq (m->owner, t->handle));

	  unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
681
682 static void *
683 do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
684 {
685 /* Won't hurt if we are already registered. */
686 #if SCM_USE_PTHREAD_THREADS
687 GC_register_my_thread (sb);
688 #endif
689
690 return scm_with_guile (do_thread_exit, v);
691 }
692
/* pthread key destructor: runs when a guilified thread terminates.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If we were canceled, we were unable to clear `t->guile_mode', so do
     it here.  */
  t->guile_mode = 0;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}
751
752 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
753
static void
init_thread_key (void)
{
  /* Create the per-thread key; ON_THREAD_EXIT runs as its destructor
     whenever a guilified thread terminates.  */
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
759
760 /* Perform any initializations necessary to make the current thread
761 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
762 if necessary.
763
764 BASE is the stack base to use with GC.
765
   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
767 which case the default dynamic state is used.
768
769 Returns zero when the thread was known to guile already; otherwise
770 return 1.
771
772 Note that it could be the case that the thread was known
773 to Guile, but not in guile mode (because we are within a
774 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
775 be sure. New threads are put into guile mode implicitly. */
776
static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	   */
	  scm_i_init_guile (base);

#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
	  /* Allow other threads to come in later.  */
	  GC_allow_register_threads ();
#endif

	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	   */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

	  /* Register this thread with libgc.  */
#if SCM_USE_PTHREAD_THREADS
	  GC_register_my_thread (base);
#endif

	  /* Two-stage setup: stage 1 runs in non-guile mode, stage 2 in
	     guile mode once allocation is safe.  */
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
826
827 void
828 scm_init_guile ()
829 {
830 struct GC_stack_base stack_base;
831
832 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
833 scm_i_init_thread_for_guile (&stack_base,
834 scm_i_default_dynamic_state);
835 else
836 {
837 fprintf (stderr, "Failed to get stack base for current thread.\n");
838 exit (1);
839 }
840 }
841
842 struct with_guile_args
843 {
844 GC_fn_type func;
845 void *data;
846 SCM parent;
847 };
848
849 static void *
850 with_guile_trampoline (void *data)
851 {
852 struct with_guile_args *args = data;
853
854 return scm_c_with_continuation_barrier (args->func, args->data);
855 }
856
static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
	 scm_with_guile, or because we are within a scm_without_guile.

	 This call to scm_with_guile() could happen from anywhere on the
	 stack, and in particular lower on the stack than when it was
	 when this thread was first guilified.  Thus, `base' must be
	 updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
	t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
	t->base = SCM_STACK_PTR (base->mem_base);
#endif

      /* Re-enter guile mode for the duration of the call.  */
      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
905
906 static void *
907 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
908 {
909 struct with_guile_args args;
910
911 args.func = func;
912 args.data = data;
913 args.parent = parent;
914
915 return GC_call_with_stack_base (with_guile_and_parent, &args);
916 }
917
/* Public API: run FUNC in guile mode with the default dynamic state.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
924
925 void *
926 scm_without_guile (void *(*func)(void *), void *data)
927 {
928 void *result;
929 scm_i_thread *t = SCM_I_CURRENT_THREAD;
930
931 if (t->guile_mode)
932 {
933 SCM_I_CURRENT_THREAD->guile_mode = 0;
934 result = with_gc_inactive (func, data);
935 SCM_I_CURRENT_THREAD->guile_mode = 1;
936 }
937 else
938 /* Otherwise we're not in guile mode, so nothing to do. */
939 result = func (data);
940
941 return result;
942 }
943
944 \f
945 /*** Thread creation */
946
/* Arguments passed from `call-with-new-thread' to the child thread.
   The mutex/cond pair lets the parent wait until the child has stored
   its thread object into THREAD.  */
typedef struct {
  SCM parent;                   /* dynamic state to inherit */
  SCM thunk;                    /* thunk to run in the new thread */
  SCM handler;                  /* catch handler, or SCM_UNDEFINED */
  SCM thread;                   /* filled in by the child */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
955
956 static void *
957 really_launch (void *d)
958 {
959 launch_data *data = (launch_data *)d;
960 SCM thunk = data->thunk, handler = data->handler;
961 scm_i_thread *t;
962
963 t = SCM_I_CURRENT_THREAD;
964
965 scm_i_scm_pthread_mutex_lock (&data->mutex);
966 data->thread = scm_current_thread ();
967 scm_i_pthread_cond_signal (&data->cond);
968 scm_i_pthread_mutex_unlock (&data->mutex);
969
970 if (SCM_UNBNDP (handler))
971 t->result = scm_call_0 (thunk);
972 else
973 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
974
975 return 0;
976 }
977
978 static void *
979 launch_thread (void *d)
980 {
981 launch_data *data = (launch_data *)d;
982 scm_i_pthread_detach (scm_i_pthread_self ());
983 scm_i_with_guile_and_parent (really_launch, d, data->parent);
984 return NULL;
985 }
986
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  /* NOTE(review): presumably lets the collector make progress before we
     spawn; confirm intent against upstream history.  */
  GC_collect_a_little ();
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex across pthread_create so the child cannot signal
     before we are waiting; then block until it reports its thread.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
1031
/* Arguments passed from `scm_spawn_thread' to the child thread — the
   C-level analogue of launch_data, with a C body and handler.  */
typedef struct {
  SCM parent;                   /* dynamic state to inherit */
  scm_t_catch_body body;        /* C function to run */
  void *body_data;
  scm_t_catch_handler handler;  /* catch handler, or NULL */
  void *handler_data;
  SCM thread;                   /* filled in by the child */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
1042
1043 static void *
1044 really_spawn (void *d)
1045 {
1046 spawn_data *data = (spawn_data *)d;
1047 scm_t_catch_body body = data->body;
1048 void *body_data = data->body_data;
1049 scm_t_catch_handler handler = data->handler;
1050 void *handler_data = data->handler_data;
1051 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1052
1053 scm_i_scm_pthread_mutex_lock (&data->mutex);
1054 data->thread = scm_current_thread ();
1055 scm_i_pthread_cond_signal (&data->cond);
1056 scm_i_pthread_mutex_unlock (&data->mutex);
1057
1058 if (handler == NULL)
1059 t->result = body (body_data);
1060 else
1061 t->result = scm_internal_catch (SCM_BOOL_T,
1062 body, body_data,
1063 handler, handler_data);
1064
1065 return 0;
1066 }
1067
1068 static void *
1069 spawn_thread (void *d)
1070 {
1071 spawn_data *data = (spawn_data *)d;
1072 scm_i_pthread_detach (scm_i_pthread_self ());
1073 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1074 return NULL;
1075 }
1076
/* C-level analogue of `call-with-new-thread': spawn a detached guile
   thread running BODY (with HANDLER catching throws when non-NULL) and
   return its thread object.  Blocks until the child has started up and
   reported its thread object back.  Signals a system error (and leaks
   nothing) if the pthread cannot be created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex across pthread_create so the child cannot signal
     before we are waiting.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
1107
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Delegate to the platform scheduler; returns #t on success.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1116
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  /* The canceled flag makes cancellation idempotent; note that the
     admin mutex is released before calling pthread_cancel.  */
  if (!t->canceled)
    {
      t->canceled = 1;
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1141
1142 SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1143 (SCM thread, SCM proc),
1144 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1145 "This handler will be called when the thread exits.")
1146 #define FUNC_NAME s_scm_set_thread_cleanup_x
1147 {
1148 scm_i_thread *t;
1149
1150 SCM_VALIDATE_THREAD (1, thread);
1151 if (!scm_is_false (proc))
1152 SCM_VALIDATE_THUNK (2, proc);
1153
1154 t = SCM_I_THREAD_DATA (thread);
1155 scm_i_pthread_mutex_lock (&t->admin_mutex);
1156
1157 if (!(t->exited || t->canceled))
1158 t->cleanup_handler = proc;
1159
1160 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1161
1162 return SCM_UNSPECIFIED;
1163 }
1164 #undef FUNC_NAME
1165
1166 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1167 (SCM thread),
1168 "Return the cleanup handler installed for the thread @var{thread}.")
1169 #define FUNC_NAME s_scm_thread_cleanup
1170 {
1171 scm_i_thread *t;
1172 SCM ret;
1173
1174 SCM_VALIDATE_THREAD (1, thread);
1175
1176 t = SCM_I_THREAD_DATA (thread);
1177 scm_i_pthread_mutex_lock (&t->admin_mutex);
1178 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1179 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1180
1181 return ret;
1182 }
1183 #undef FUNC_NAME
1184
1185 SCM scm_join_thread (SCM thread)
1186 {
1187 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1188 }
1189
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
"Suspend execution of the calling thread until the target @var{thread} "
"terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* TIMEOUTVAL, when supplied, is what we return on timeout instead of
     #f.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  /* Joining oneself would block forever; reject it up front.  */
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  /* Sleep on the target's join queue; T's admin mutex is
	     released while we are blocked and re-held on return.  */
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Woken without the target having exited (e.g. an async is
	     pending): drop the lock, run asyncs, and retry.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited). */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1256
1257 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1258 (SCM obj),
1259 "Return @code{#t} if @var{obj} is a thread.")
1260 #define FUNC_NAME s_scm_thread_p
1261 {
1262 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1263 }
1264 #undef FUNC_NAME
1265
1266
/* Smob free function for mutexes: destroy the underlying pthread mutex.
   The fat_mutex struct itself is GC-allocated (see make_fat_mutex), so
   only the OS-level lock needs explicit teardown.  Returns 0 as no
   GC-tracked bytes are released here.  */
static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  return 0;
}
1274
1275 static int
1276 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1277 {
1278 fat_mutex *m = SCM_MUTEX_DATA (mx);
1279 scm_puts ("#<mutex ", port);
1280 scm_uintprint ((scm_t_bits)m, 16, port);
1281 scm_puts (">", port);
1282 return 1;
1283 }
1284
/* Allocate and initialize a fat_mutex and wrap it in a smob.  The three
   flags select the behaviors checked in fat_mutex_lock/unlock.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Set `waiting' to a valid SCM before creating the smob, and only
     call make_queue (which allocates, and can thus trigger GC) after
     the smob exists, so the collector never scans an uninitialized
     field.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1305
1306 SCM scm_make_mutex (void)
1307 {
1308 return scm_make_mutex_with_flags (SCM_EOL);
1309 }
1310
/* Option symbols accepted by `make-mutex' (see
   scm_make_mutex_with_flags).  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1314
1315 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1316 (SCM flags),
1317 "Create a new mutex. ")
1318 #define FUNC_NAME s_scm_make_mutex_with_flags
1319 {
1320 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1321
1322 SCM ptr = flags;
1323 while (! scm_is_null (ptr))
1324 {
1325 SCM flag = SCM_CAR (ptr);
1326 if (scm_is_eq (flag, unchecked_unlock_sym))
1327 unchecked_unlock = 1;
1328 else if (scm_is_eq (flag, allow_external_unlock_sym))
1329 external_unlock = 1;
1330 else if (scm_is_eq (flag, recursive_sym))
1331 recursive = 1;
1332 else
1333 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1334 ptr = SCM_CDR (ptr);
1335 }
1336 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1337 }
1338 #undef FUNC_NAME
1339
1340 SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1341 (void),
1342 "Create a new recursive mutex. ")
1343 #define FUNC_NAME s_scm_make_recursive_mutex
1344 {
1345 return make_fat_mutex (1, 0, 0);
1346 }
1347 #undef FUNC_NAME
1348
/* Error key thrown when locking succeeds on a mutex whose previous owner
   thread exited without unlocking it (see fat_mutex_lock).  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1350
/* Core locking routine.  Try to make OWNER (default: the current
   thread) the owner of MUTEX, optionally giving up at absolute time
   TIMEOUT.  On return, *RET is 1 if the lock was acquired (or the level
   bumped, for recursive mutexes) and 0 otherwise.  The return value is
   #f on success, or a (key . message) pair describing an error to be
   thrown by the caller.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unowned: take it.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

	      /* FIXME: The order in which `t->admin_mutex' and
		 `m->lock' are taken differs from that in
		 `on_thread_exit', potentially leading to deadlocks. */
	      scm_i_pthread_mutex_lock (&t->admin_mutex);

	      /* Only keep a weak reference to MUTEX so that it's not
		 retained when not referenced elsewhere (bug #27450).
		 The weak pair itself is eventually removed when MUTEX
		 is unlocked.  Note that `t->mutexes' lists mutexes
		 currently held by T, so it should be small.  */
	      t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* Owner died while holding the mutex: steal it, but report
	     the abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Relock by the current owner: allowed only for recursive
	     mutexes.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by somebody else: check the deadline, then block on
	     the mutex's wait queue and retry.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      /* TIMEOUT is an absolute timespec; compare seconds, then
		 sub-second parts (usec scaled to nsec).  */
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1438
1439 SCM scm_lock_mutex (SCM mx)
1440 {
1441 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1442 }
1443
1444 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1445 (SCM m, SCM timeout, SCM owner),
1446 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1447 "blocks until the mutex becomes available. The function returns when "
1448 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1449 "a thread already owns will succeed right away and will not block the "
1450 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1451 #define FUNC_NAME s_scm_lock_mutex_timed
1452 {
1453 SCM exception;
1454 int ret = 0;
1455 scm_t_timespec cwaittime, *waittime = NULL;
1456
1457 SCM_VALIDATE_MUTEX (1, m);
1458
1459 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1460 {
1461 to_timespec (timeout, &cwaittime);
1462 waittime = &cwaittime;
1463 }
1464
1465 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1466 SCM_VALIDATE_THREAD (3, owner);
1467
1468 exception = fat_mutex_lock (m, waittime, owner, &ret);
1469 if (!scm_is_false (exception))
1470 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1471 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1472 }
1473 #undef FUNC_NAME
1474
1475 static void
1476 lock_mutex_return_void (SCM mx)
1477 {
1478 (void) scm_lock_mutex (mx);
1479 }
1480
1481 static void
1482 unlock_mutex_return_void (SCM mx)
1483 {
1484 (void) scm_unlock_mutex (mx);
1485 }
1486
/* Within the current dynwind context, hold MUTEX: it is locked on entry
   (and on rewind) and unlocked on exit (and on unwind).  The unwind
   handler is registered before the rewind handler so that if locking
   itself throws, the unlock side is already in place.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1495
1496 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1497 (SCM mutex),
1498 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1499 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1500 #define FUNC_NAME s_scm_try_mutex
1501 {
1502 SCM exception;
1503 int ret = 0;
1504 scm_t_timespec cwaittime, *waittime = NULL;
1505
1506 SCM_VALIDATE_MUTEX (1, mutex);
1507
1508 to_timespec (scm_from_int(0), &cwaittime);
1509 waittime = &cwaittime;
1510
1511 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1512 if (!scm_is_false (exception))
1513 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1514 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1515 }
1516 #undef FUNC_NAME
1517
/*** Fat condition variables */

/* A condition variable built on Guile's own wait queues rather than a
   raw pthread condvar, so waiters can be woken by asyncs.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects `waiting' */
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)         SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)     ((fat_cond *) SCM_SMOB_DATA (x))
1527
/* Core unlocking routine, also the engine behind `wait-condition-
   variable'.  Decrement MUTEX's lock level (waking one waiter when it
   reaches zero).  If COND is given, additionally wait on COND with
   MUTEX released, until signalled or until the absolute time WAITTIME;
   when RELOCK is non-zero, MUTEX is re-acquired before returning.
   Returns 1 on success, 0 on timeout; other wait errors raise a system
   error.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  /* Enforce the unlock policy chosen at make-mutex time when the
     caller is not the owner.  */
  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
	{
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = t->handle;
	}
      else if (!m->allow_external_unlock)
	{
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    {
	      /* Change the owner of MUTEX. */
	      t->mutexes = scm_delq_x (mutex, t->mutexes);
	      m->owner = unblock_from_queue (m->waiting);
	    }

	  /* Hold off asyncs across the wait so the condvar protocol
	     isn't interrupted mid-step.  */
	  t->block_asyncs++;

	  /* Wait on the condvar's queue; M->lock is released while
	     blocked.  */
	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: run pending asyncs, then loop and wait again.  */
	  t->block_asyncs--;
	  scm_async_click ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock, no condition variable involved.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	{
	  /* Change the owner of MUTEX. */
	  t->mutexes = scm_delq_x (mutex, t->mutexes);
	  m->owner = unblock_from_queue (m->waiting);
	}

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1630
1631 SCM scm_unlock_mutex (SCM mx)
1632 {
1633 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1634 }
1635
1636 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1637 (SCM mx, SCM cond, SCM timeout),
1638 "Unlocks @var{mutex} if the calling thread owns the lock on "
1639 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1640 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1641 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1642 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1643 "with a call to @code{unlock-mutex}. Only the last call to "
1644 "@code{unlock-mutex} will actually unlock the mutex. ")
1645 #define FUNC_NAME s_scm_unlock_mutex_timed
1646 {
1647 scm_t_timespec cwaittime, *waittime = NULL;
1648
1649 SCM_VALIDATE_MUTEX (1, mx);
1650 if (! (SCM_UNBNDP (cond)))
1651 {
1652 SCM_VALIDATE_CONDVAR (2, cond);
1653
1654 if (! (SCM_UNBNDP (timeout)))
1655 {
1656 to_timespec (timeout, &cwaittime);
1657 waittime = &cwaittime;
1658 }
1659 }
1660
1661 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1662 }
1663 #undef FUNC_NAME
1664
1665 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1666 (SCM obj),
1667 "Return @code{#t} if @var{obj} is a mutex.")
1668 #define FUNC_NAME s_scm_mutex_p
1669 {
1670 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1671 }
1672 #undef FUNC_NAME
1673
1674 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1675 (SCM mx),
1676 "Return the thread owning @var{mx}, or @code{#f}.")
1677 #define FUNC_NAME s_scm_mutex_owner
1678 {
1679 SCM owner;
1680 fat_mutex *m = NULL;
1681
1682 SCM_VALIDATE_MUTEX (1, mx);
1683 m = SCM_MUTEX_DATA (mx);
1684 scm_i_pthread_mutex_lock (&m->lock);
1685 owner = m->owner;
1686 scm_i_pthread_mutex_unlock (&m->lock);
1687
1688 return owner;
1689 }
1690 #undef FUNC_NAME
1691
SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  /* NOTE(review): `level' is read without taking m->lock (unlike
     `mutex-owner'), so the result may be stale under concurrent
     lock/unlock activity.  */
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME
1701
1702 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1703 (SCM mx),
1704 "Returns @code{#t} if the mutex @var{mx} is locked.")
1705 #define FUNC_NAME s_scm_mutex_locked_p
1706 {
1707 SCM_VALIDATE_MUTEX (1, mx);
1708 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1709 }
1710 #undef FUNC_NAME
1711
1712 static int
1713 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1714 {
1715 fat_cond *c = SCM_CONDVAR_DATA (cv);
1716 scm_puts ("#<condition-variable ", port);
1717 scm_uintprint ((scm_t_bits)c, 16, port);
1718 scm_puts (">", port);
1719 return 1;
1720 }
1721
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* As in make_fat_mutex: give `waiting' a valid SCM before creating
     the smob, and only allocate the real queue afterwards, so GC never
     sees an uninitialized field.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1737
1738 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1739 (SCM cv, SCM mx, SCM t),
1740 "Wait until @var{cond-var} has been signalled. While waiting, "
1741 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1742 "is locked again when this function returns. When @var{time} is given, "
1743 "it specifies a point in time where the waiting should be aborted. It "
1744 "can be either a integer as returned by @code{current-time} or a pair "
1745 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1746 "mutex is locked and @code{#f} is returned. When the condition "
1747 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1748 "is returned. ")
1749 #define FUNC_NAME s_scm_timed_wait_condition_variable
1750 {
1751 scm_t_timespec waittime, *waitptr = NULL;
1752
1753 SCM_VALIDATE_CONDVAR (1, cv);
1754 SCM_VALIDATE_MUTEX (2, mx);
1755
1756 if (!SCM_UNBNDP (t))
1757 {
1758 to_timespec (t, &waittime);
1759 waitptr = &waittime;
1760 }
1761
1762 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1763 }
1764 #undef FUNC_NAME
1765
/* Wake at most one thread blocked on condition variable C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1771
1772 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1773 (SCM cv),
1774 "Wake up one thread that is waiting for @var{cv}")
1775 #define FUNC_NAME s_scm_signal_condition_variable
1776 {
1777 SCM_VALIDATE_CONDVAR (1, cv);
1778 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1779 return SCM_BOOL_T;
1780 }
1781 #undef FUNC_NAME
1782
1783 static void
1784 fat_cond_broadcast (fat_cond *c)
1785 {
1786 while (scm_is_true (unblock_from_queue (c->waiting)))
1787 ;
1788 }
1789
1790 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1791 (SCM cv),
1792 "Wake up all threads that are waiting for @var{cv}. ")
1793 #define FUNC_NAME s_scm_broadcast_condition_variable
1794 {
1795 SCM_VALIDATE_CONDVAR (1, cv);
1796 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1797 return SCM_BOOL_T;
1798 }
1799 #undef FUNC_NAME
1800
1801 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1802 (SCM obj),
1803 "Return @code{#t} if @var{obj} is a condition variable.")
1804 #define FUNC_NAME s_scm_condition_variable_p
1805 {
1806 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1807 }
1808 #undef FUNC_NAME
1809
1810
1811 \f
1812 /*** Select */
1813
/* Argument/result bundle for a select(2) call, so the call can be
   performed by do_std_select outside of Guile mode (see
   scm_std_select).  */
struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  /* Out parameters: select's return value and the errno it set.  */
  int result;
  int errno_value;
};
1825
1826 static void *
1827 do_std_select (void *args)
1828 {
1829 struct select_args *select_args;
1830
1831 select_args = (struct select_args *) args;
1832
1833 select_args->result =
1834 select (select_args->nfds,
1835 select_args->read_fds, select_args->write_fds,
1836 select_args->except_fds, select_args->timeout);
1837 select_args->errno_value = errno;
1838
1839 return NULL;
1840 }
1841
/* A select(2) variant that cooperates with Guile: the thread's sleep
   pipe is added to the read set so other threads can interrupt the
   wait, and the actual select runs outside of Guile mode so it does
   not block GC.  Returns like select(2); a wakeup with no other ready
   descriptor is reported as -1/EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set to put the wakeup fd in.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register this thread as sleeping on its pipe; retry (running
     asyncs) until the registration succeeds.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  /* Watch the read end of the sleep pipe, widening nfds if needed.  */
  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC. */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  /* If we were woken via the pipe, consume the wakeup byte and hide
     the pipe fd from the caller; a pipe-only wakeup becomes EINTR.  */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1899
1900 /* Convenience API for blocking while in guile mode. */
1901
1902 #if SCM_USE_PTHREAD_THREADS
1903
1904 /* It seems reasonable to not run procedures related to mutex and condition
1905 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1906 without it, and (ii) the only potential gain would be GC latency. See
1907 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1908 for a discussion of the pros and cons. */
1909
1910 int
1911 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1912 {
1913 int res = scm_i_pthread_mutex_lock (mutex);
1914 return res;
1915 }
1916
1917 static void
1918 do_unlock (void *data)
1919 {
1920 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1921 }
1922
/* Lock MUTEX and arrange for it to be unlocked when the current dynwind
   context is left (normally or non-locally).  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1929
/* Wait on COND with MUTEX held, recording MUTEX in the thread's
   `held_mutex' field for the duration of the wait so that other parts
   of the runtime can see which mutex this thread is parked on.
   Returns the pthread_cond_wait result.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1942
/* Like scm_pthread_cond_wait, but give up at the absolute time WT.
   Returns the pthread_cond_timedwait result (e.g. ETIMEDOUT).  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Publish the mutex we are parked on, as in scm_pthread_cond_wait.  */
  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
1957
1958 #endif
1959
1960 unsigned long
1961 scm_std_usleep (unsigned long usecs)
1962 {
1963 struct timeval tv;
1964 tv.tv_usec = usecs % 1000000;
1965 tv.tv_sec = usecs / 1000000;
1966 scm_std_select (0, NULL, NULL, NULL, &tv);
1967 return tv.tv_sec * 1000000 + tv.tv_usec;
1968 }
1969
1970 unsigned int
1971 scm_std_sleep (unsigned int secs)
1972 {
1973 struct timeval tv;
1974 tv.tv_usec = 0;
1975 tv.tv_sec = secs;
1976 scm_std_select (0, NULL, NULL, NULL, &tv);
1977 return tv.tv_sec;
1978 }
1979
1980 /*** Misc */
1981
1982 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1983 (void),
1984 "Return the thread that called this function.")
1985 #define FUNC_NAME s_scm_current_thread
1986 {
1987 return SCM_I_CURRENT_THREAD->handle;
1988 }
1989 #undef FUNC_NAME
1990
1991 static SCM
1992 scm_c_make_list (size_t n, SCM fill)
1993 {
1994 SCM res = SCM_EOL;
1995 while (n-- > 0)
1996 res = scm_cons (fill, res);
1997 return res;
1998 }
1999
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.  So pre-allocate a list of THREAD_COUNT
     cells first, then fill it in under the lock, destructively
     trimming the unused tail.  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is not exposed.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Cut off any cells left over (skipped or count shrank).  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
2028
2029 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
2030 (SCM thread),
2031 "Return @code{#t} iff @var{thread} has exited.\n")
2032 #define FUNC_NAME s_scm_thread_exited_p
2033 {
2034 return scm_from_bool (scm_c_thread_exited_p (thread));
2035 }
2036 #undef FUNC_NAME
2037
2038 int
2039 scm_c_thread_exited_p (SCM thread)
2040 #define FUNC_NAME s_scm_thread_exited_p
2041 {
2042 scm_i_thread *t;
2043 SCM_VALIDATE_THREAD (1, thread);
2044 t = SCM_I_THREAD_DATA (thread);
2045 return t->exited;
2046 }
2047 #undef FUNC_NAME
2048
SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
	    (void),
	    "Return the total number of processors of the machine, which\n"
	    "is guaranteed to be at least 1.  A ``processor'' here is a\n"
	    "thread execution unit, which can be either:\n\n"
	    "@itemize\n"
	    "@item an execution core in a (possibly multi-core) chip, in a\n"
	    "  (possibly multi- chip) module, in a single computer, or\n"
	    "@item a thread execution unit inside a core in the case of\n"
	    "  @dfn{hyper-threaded} CPUs.\n"
	    "@end itemize\n\n"
	    "Which of the two definitions is used, is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  /* Delegate to gnulib's nproc module; NPROC_ALL counts all installed
     processors.  */
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME
2066
SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
	    (void),
	    "Like @code{total-processor-count}, but return the number of\n"
	    "processors available to the current process.  See\n"
	    "@code{setaffinity} and @code{getaffinity} for more\n"
	    "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  /* NPROC_CURRENT restricts the count to this process's affinity
     mask.  */
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
2078
2079
2080 \f
2081
/* Condition variable initialized in scm_threads_prehistory;
   NOTE(review): presumably used to wake sleeping threads — its waiters
   are outside this chunk, confirm against the rest of the file.  */
static scm_i_pthread_cond_t wake_up_cond;

/* Set to 1 at the end of scm_init_threads.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Default mutex used by scm_dynwind_critical_section when passed #f;
   created in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;
2091
/* Enter a critical section for the current dynwind context: hold MUTEX
   (or the shared default when MUTEX is #f) and block asyncs until the
   context is left.  Locking first and then blocking asyncs mirrors the
   unwind order set up by the two dynwind calls.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
2100
/*** Initialization */

/* Mutex protecting miscellaneous small global state.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Shared recursive mutex attribute, set up in scm_threads_prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
2108
/* Earliest thread-system setup, run before the rest of Guile is
   initialized: create the global locks and the wake-up condition
   variable, then register the main thread (guilify phase 1).  BASE is
   the GC stack base of the main thread.  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* The critical-section mutex is created with the recursive
     attribute.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}
2125
/* Smob type tags for threads, mutexes and condition variables, filled
   in by scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
2129
/* Register the thread/mutex/condvar smob types, finish guilifying the
   main thread (phase 2), and create the default critical-section
   mutex.  Marks the thread system as initialized.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2150
2151 void
2152 scm_init_threads_default_dynamic_state ()
2153 {
2154 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2155 scm_i_default_dynamic_state = state;
2156 }
2157
/* Register the Scheme procedures defined in this file; threads.x is the
   snarfed table generated from the SCM_DEFINE forms above.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2163
2164 \f
/* IA64-specific things.  */

/* IA64 keeps a second, register backing store stack that the GC must
   scan in addition to the ordinary stack; these helpers locate it.  */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* On HP-UX, find the RSE backing store by scanning the process's VM
   regions for the one typed PS_RSESTACK.  Aborts if none is found.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Extract the ar.bsp register (top of the backing store) from a
   ucontext CTX.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* On GNU/Linux, glibc exports the backing store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Extract ar.bsp from the Linux ucontext OPAQUE.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */
2205
2206
2207 /*
2208 Local Variables:
2209 c-file-style: "gnu"
2210 End:
2211 */