libguile/threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
27
28 #if HAVE_UNISTD_H
29 #include <unistd.h>
30 #endif
31 #include <stdio.h>
32
33 #ifdef HAVE_STRING_H
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35 #endif
36
37 #if HAVE_SYS_TIME_H
38 #include <sys/time.h>
39 #endif
40
41 #include <assert.h>
42 #include <fcntl.h>
43 #include <nproc.h>
44
45 #include "libguile/validate.h"
46 #include "libguile/root.h"
47 #include "libguile/eval.h"
48 #include "libguile/async.h"
49 #include "libguile/ports.h"
50 #include "libguile/threads.h"
51 #include "libguile/dynwind.h"
52 #include "libguile/iselect.h"
53 #include "libguile/fluids.h"
54 #include "libguile/continuations.h"
55 #include "libguile/gc.h"
56 #include "libguile/init.h"
57 #include "libguile/scmsigs.h"
58 #include "libguile/strings.h"
59 #include "libguile/weaks.h"
60
61 #include <full-read.h>
62
63
64 \f
65
66 /* First some libgc shims. */
67
68 /* Make sure GC_fn_type is defined; it is missing from the public
69 headers of GC 7.1 and earlier. */
70 #ifndef HAVE_GC_FN_TYPE
71 typedef void * (* GC_fn_type) (void *);
72 #endif
73
74
75 #ifndef GC_SUCCESS
76 #define GC_SUCCESS 0
77 #endif
78
79 #ifndef GC_UNIMPLEMENTED
80 #define GC_UNIMPLEMENTED 3
81 #endif
82
83 /* Likewise struct GC_stack_base is missing before 7.1. */
84 #ifndef HAVE_GC_STACK_BASE
85 struct GC_stack_base {
86 void * mem_base; /* Base of memory stack. */
87 #ifdef __ia64__
88 void * reg_base; /* Base of separate register stack. */
89 #endif
90 };
91
92 static int
93 GC_register_my_thread (struct GC_stack_base *stack_base)
94 {
95 return GC_UNIMPLEMENTED;
96 }
97
98 static void
99 GC_unregister_my_thread ()
100 {
101 }
102
103 #if !SCM_USE_PTHREAD_THREADS
104 /* No threads; we can just use GC_stackbottom. */
105 static void *
106 get_thread_stack_base ()
107 {
108 return GC_stackbottom;
109 }
110
111 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
112 && defined PTHREAD_ATTR_GETSTACK_WORKS
113 /* This method works for GNU/Linux and perhaps some other systems.
114    It's not for Mac OS X or Solaris 10, since pthread_getattr_np is not
115 available on them. */
116 static void *
117 get_thread_stack_base ()
118 {
119 pthread_attr_t attr;
120 void *start, *end;
121 size_t size;
122
123 pthread_getattr_np (pthread_self (), &attr);
124 pthread_attr_getstack (&attr, &start, &size);
125 end = (char *)start + size;
126
127 #if SCM_STACK_GROWS_UP
128 return start;
129 #else
130 return end;
131 #endif
132 }
133
134 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
135 /* This method is for Mac OS X.
136    It'd be nice if there were some documentation on pthread_get_stackaddr_np,
137 but as of 2006 there's nothing obvious at apple.com. */
138 static void *
139 get_thread_stack_base ()
140 {
141 return pthread_get_stackaddr_np (pthread_self ());
142 }
143
144 #else
145 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
146 #endif
147
148 static int
149 GC_get_stack_base (struct GC_stack_base *stack_base)
150 {
151 stack_base->mem_base = get_thread_stack_base ();
152 #ifdef __ia64__
153 /* Calculate and store off the base of this thread's register
154 backing store (RBS). Unfortunately our implementation(s) of
155 scm_ia64_register_backing_store_base are only reliable for the
156 main thread. For other threads, therefore, find out the current
157 top of the RBS, and use that as a maximum. */
158 stack_base->reg_base = scm_ia64_register_backing_store_base ();
159 {
160 ucontext_t ctx;
161 void *bsp;
162 getcontext (&ctx);
163 bsp = scm_ia64_ar_bsp (&ctx);
164 if (stack_base->reg_base > bsp)
165 stack_base->reg_base = bsp;
166 }
167 #endif
168 return GC_SUCCESS;
169 }
170
171 static void *
172 GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
173 {
174 struct GC_stack_base stack_base;
175
176 stack_base.mem_base = (void*)&stack_base;
177 #ifdef __ia64__
178 /* FIXME: Untested. */
179 {
180 ucontext_t ctx;
181 getcontext (&ctx);
182 stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
183 }
184 #endif
185
186 return fn (&stack_base, arg);
187 }
188 #endif /* HAVE_GC_STACK_BASE */
189
190
191 /* Now define with_gc_active and with_gc_inactive. */
192
193 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
194
195 /* We have a sufficiently new libgc (7.2 or newer). */
196
197 static void*
198 with_gc_inactive (GC_fn_type func, void *data)
199 {
200 return GC_do_blocking (func, data);
201 }
202
203 static void*
204 with_gc_active (GC_fn_type func, void *data)
205 {
206 return GC_call_with_gc_active (func, data);
207 }
208
209 #else
210
211 /* libgc not new enough, so never actually deactivate GC.
212
213 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
214 GC_call_with_gc_active. */
215
216 static void*
217 with_gc_inactive (GC_fn_type func, void *data)
218 {
219 return func (data);
220 }
221
222 static void*
223 with_gc_active (GC_fn_type func, void *data)
224 {
225 return func (data);
226 }
227
228 #endif /* HAVE_GC_DO_BLOCKING */
229
230
231 \f
232 static void
233 to_timespec (SCM t, scm_t_timespec *waittime)
234 {
235 if (scm_is_pair (t))
236 {
237 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
238 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
239 }
240 else
241 {
242 double time = scm_to_double (t);
243 double sec = scm_c_truncate (time);
244
245 waittime->tv_sec = (long) sec;
246 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
247 }
248 }
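
/* Illustrative examples of the two forms accepted above (a sketch;
   the Scheme expressions are assumptions about typical callers):

     (gettimeofday)          a (secs . usecs) pair
     (+ (current-time) 5)    an integer, whole seconds
     2.5                     a real, fractional seconds

   each of which ends up as tv_sec/tv_nsec in the scm_t_timespec.  */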
249
250 \f
251 /*** Queues */
252
253 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
254 the risk of false references leading to unbounded retained space as
255 described in "Bounding Space Usage of Conservative Garbage Collectors",
256 H.J. Boehm, 2001. */
257
258 /* Make an empty queue data structure.
259 */
260 static SCM
261 make_queue ()
262 {
263 return scm_cons (SCM_EOL, SCM_EOL);
264 }
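
/* An illustrative sketch of the representation created above: a queue
   holding the elements a, b and c looks like

       q        = (last . elements)
       elements = (a b c)
       last     = the final pair of that list, i.e. (c)

   The cdr of Q heads the list of elements, and the car of Q caches
   the list's last pair so that enqueue below can append in constant
   time.  */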
265
266 /* Put T at the back of Q and return a handle that can be used with
267 remqueue to remove T from Q again.
268 */
269 static SCM
270 enqueue (SCM q, SCM t)
271 {
272 SCM c = scm_cons (t, SCM_EOL);
273 SCM_CRITICAL_SECTION_START;
274 if (scm_is_null (SCM_CDR (q)))
275 SCM_SETCDR (q, c);
276 else
277 SCM_SETCDR (SCM_CAR (q), c);
278 SCM_SETCAR (q, c);
279 SCM_CRITICAL_SECTION_END;
280 return c;
281 }
282
283 /* Remove the element that the handle C refers to from the queue Q. C
284 must have been returned from a call to enqueue. The return value
285 is zero when the element referred to by C has already been removed.
286 Otherwise, 1 is returned.
287 */
288 static int
289 remqueue (SCM q, SCM c)
290 {
291 SCM p, prev = q;
292 SCM_CRITICAL_SECTION_START;
293 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
294 {
295 if (scm_is_eq (p, c))
296 {
297 if (scm_is_eq (c, SCM_CAR (q)))
298 SCM_SETCAR (q, SCM_CDR (c));
299 SCM_SETCDR (prev, SCM_CDR (c));
300
301 /* GC-robust */
302 SCM_SETCDR (c, SCM_EOL);
303
304 SCM_CRITICAL_SECTION_END;
305 return 1;
306 }
307 prev = p;
308 }
309 SCM_CRITICAL_SECTION_END;
310 return 0;
311 }
312
313 /* Remove the front-most element from the queue Q and return it.
314 Return SCM_BOOL_F when Q is empty.
315 */
316 static SCM
317 dequeue (SCM q)
318 {
319 SCM c;
320 SCM_CRITICAL_SECTION_START;
321 c = SCM_CDR (q);
322 if (scm_is_null (c))
323 {
324 SCM_CRITICAL_SECTION_END;
325 return SCM_BOOL_F;
326 }
327 else
328 {
329 SCM_SETCDR (q, SCM_CDR (c));
330 if (scm_is_null (SCM_CDR (q)))
331 SCM_SETCAR (q, SCM_EOL);
332 SCM_CRITICAL_SECTION_END;
333
334 /* GC-robust */
335 SCM_SETCDR (c, SCM_EOL);
336
337 return SCM_CAR (c);
338 }
339 }
340
341 /*** Thread smob routines */
342
343
344 static int
345 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
346 {
347 /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
348 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
349 the struct case, hence we go via a union, and extract according to the
350 size of pthread_t. */
351 union {
352 scm_i_pthread_t p;
353 unsigned short us;
354 unsigned int ui;
355 unsigned long ul;
356 scm_t_uintmax um;
357 } u;
358 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
359 scm_i_pthread_t p = t->pthread;
360 scm_t_uintmax id;
361 u.p = p;
362 if (sizeof (p) == sizeof (unsigned short))
363 id = u.us;
364 else if (sizeof (p) == sizeof (unsigned int))
365 id = u.ui;
366 else if (sizeof (p) == sizeof (unsigned long))
367 id = u.ul;
368 else
369 id = u.um;
370
371 scm_puts ("#<thread ", port);
372 scm_uintprint (id, 10, port);
373 scm_puts (" (", port);
374 scm_uintprint ((scm_t_bits)t, 16, port);
375 scm_puts (")>", port);
376 return 1;
377 }
378
379 \f
380 /*** Blocking on queues. */
381
382 /* See also scm_i_queue_async_cell for how such a block is
383    interrupted.
384 */
385
386 /* Put the current thread on QUEUE and go to sleep, waiting for it to
387 be woken up by a call to 'unblock_from_queue', or to be
388 interrupted. Upon return of this function, the current thread is
389 no longer on QUEUE, even when the sleep has been interrupted.
390
391 The caller of block_self must hold MUTEX. It will be atomically
392 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
393
394 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
395 as MUTEX is needed.
396
397 When WAITTIME is not NULL, the sleep will be aborted at that time.
398
399 The return value of block_self is an errno value. It will be zero
400 when the sleep has been successfully completed by a call to
401 unblock_from_queue, EINTR when it has been interrupted by the
402 delivery of a system async, and ETIMEDOUT when the timeout has
403 expired.
404
405 The system asyncs themselves are not executed by block_self.
406 */
407 static int
408 block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
409 const scm_t_timespec *waittime)
410 {
411 scm_i_thread *t = SCM_I_CURRENT_THREAD;
412 SCM q_handle;
413 int err;
414
415 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
416 err = EINTR;
417 else
418 {
419 t->block_asyncs++;
420 q_handle = enqueue (queue, t->handle);
421 if (waittime == NULL)
422 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
423 else
424 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
425
426 /* When we are still on QUEUE, we have been interrupted. We
427 report this only when no other error (such as a timeout) has
428 happened above.
429 */
430 if (remqueue (queue, q_handle) && err == 0)
431 err = EINTR;
432 t->block_asyncs--;
433 scm_i_reset_sleep (t);
434 }
435
436 return err;
437 }
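
/* An illustrative calling pattern (a sketch, not code from this file;
   it is essentially what fat_mutex_lock does further below):

     scm_i_scm_pthread_mutex_lock (&m->lock);
     err = block_self (m->waiting, mutex, &m->lock, waittime);
     scm_i_pthread_mutex_unlock (&m->lock);
     if (err == EINTR)
       SCM_TICK;   (this is where the pending asyncs get to run)
     scm_i_scm_pthread_mutex_lock (&m->lock);
*/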
438
439 /* Wake up the first thread on QUEUE, if any. The awoken thread is
440 returned, or #f if the queue was empty.
441 */
442 static SCM
443 unblock_from_queue (SCM queue)
444 {
445 SCM thread = dequeue (queue);
446 if (scm_is_true (thread))
447 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
448 return thread;
449 }
450
451 \f
452 /* Getting into and out of guile mode.
453 */
454
455 /* Key used to attach a cleanup handler to a given thread. Also, if
456 thread-local storage is unavailable, this key is used to retrieve the
457 current thread with `pthread_getspecific ()'. */
458 scm_i_pthread_key_t scm_i_thread_key;
459
460
461 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
462
463 /* When thread-local storage (TLS) is available, a pointer to the
464 current-thread object is kept in TLS. Note that storing the thread-object
465 itself in TLS (rather than a pointer to some malloc'd memory) is not
466 possible since thread objects may live longer than the actual thread they
467 represent. */
468 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
469
470 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
471
472
473 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
474 static scm_i_thread *all_threads = NULL;
475 static int thread_count;
476
477 static SCM scm_i_default_dynamic_state;
478
479 /* Run when a fluid is collected. */
480 void
481 scm_i_reset_fluid (size_t n, SCM val)
482 {
483 scm_i_thread *t;
484
485 scm_i_pthread_mutex_lock (&thread_admin_mutex);
486 for (t = all_threads; t; t = t->next_thread)
487 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
488 {
489 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
490
491 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
492 SCM_SIMPLE_VECTOR_SET (v, n, val);
493 }
494 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
495 }
496
497 /* Perform first stage of thread initialisation, in non-guile mode.
498 */
499 static void
500 guilify_self_1 (struct GC_stack_base *base)
501 {
502 scm_i_thread t;
503
504 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
505 before allocating anything in this thread, because allocation could
506 cause GC to run, and GC could cause finalizers, which could invoke
507 Scheme functions, which need the current thread to be set. */
508
509 t.pthread = scm_i_pthread_self ();
510 t.handle = SCM_BOOL_F;
511 t.result = SCM_BOOL_F;
512 t.cleanup_handler = SCM_BOOL_F;
513 t.mutexes = SCM_EOL;
514 t.held_mutex = NULL;
515 t.join_queue = SCM_EOL;
516 t.dynamic_state = SCM_BOOL_F;
517 t.dynwinds = SCM_EOL;
518 t.active_asyncs = SCM_EOL;
519 t.block_asyncs = 1;
520 t.pending_asyncs = 1;
521 t.critical_section_level = 0;
522 t.base = base->mem_base;
523 #ifdef __ia64__
524   t.register_backing_store_base = base->reg_base;
525 #endif
526 t.continuation_root = SCM_EOL;
527 t.continuation_base = t.base;
528 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
529 t.sleep_mutex = NULL;
530 t.sleep_object = SCM_BOOL_F;
531 t.sleep_fd = -1;
532
533 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
534 /* FIXME: Error conditions during the initialization phase are handled
535 gracelessly since public functions such as `scm_init_guile ()'
536 currently have type `void'. */
537 abort ();
538
539 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
540 t.current_mark_stack_ptr = NULL;
541 t.current_mark_stack_limit = NULL;
542 t.canceled = 0;
543 t.exited = 0;
544 t.guile_mode = 0;
545
546 /* The switcheroo. */
547 {
548 scm_i_thread *t_ptr = &t;
549
550 GC_disable ();
551 t_ptr = GC_malloc (sizeof (scm_i_thread));
552 memcpy (t_ptr, &t, sizeof t);
553
554 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
555
556 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
557 /* Cache the current thread in TLS for faster lookup. */
558 scm_i_current_thread = t_ptr;
559 #endif
560
561 scm_i_pthread_mutex_lock (&thread_admin_mutex);
562 t_ptr->next_thread = all_threads;
563 all_threads = t_ptr;
564 thread_count++;
565 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
566
567 GC_enable ();
568 }
569 }
570
571 /* Perform second stage of thread initialisation, in guile mode.
572 */
573 static void
574 guilify_self_2 (SCM parent)
575 {
576 scm_i_thread *t = SCM_I_CURRENT_THREAD;
577
578 t->guile_mode = 1;
579
580 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
581
582 t->continuation_root = scm_cons (t->handle, SCM_EOL);
583 t->continuation_base = t->base;
584 t->vm = SCM_BOOL_F;
585
586 if (scm_is_true (parent))
587 t->dynamic_state = scm_make_dynamic_state (parent);
588 else
589 t->dynamic_state = scm_i_make_initial_dynamic_state ();
590
591 t->join_queue = make_queue ();
592 t->block_asyncs = 0;
593 }
594
595 \f
596 /*** Fat mutexes */
597
598 /* We implement our own mutex type since we want them to be 'fair', we
599 want to do fancy things while waiting for them (like running
600 asyncs) and we might want to add things that are nice for
601 debugging.
602 */
603
604 typedef struct {
605 scm_i_pthread_mutex_t lock;
606 SCM owner;
607 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
608
609 int recursive; /* allow recursive locking? */
610   int unchecked_unlock; /* is unlocking an already-unlocked mutex allowed? */
611   int allow_external_unlock; /* may a thread other than the owner unlock
612                                  this mutex? */
613
614 SCM waiting; /* the threads waiting for this mutex. */
615 } fat_mutex;
616
617 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
618 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
619
620 static SCM
621 call_cleanup (void *data)
622 {
623 SCM *proc_p = data;
624 return scm_call_0 (*proc_p);
625 }
626
627 /* Perform thread tear-down, in guile mode.
628 */
629 static void *
630 do_thread_exit (void *v)
631 {
632 scm_i_thread *t = (scm_i_thread *) v;
633
634 /* Ensure the signal handling thread has been launched, because we might be
635 shutting it down. This needs to be done in Guile mode. */
636 scm_i_ensure_signal_delivery_thread ();
637
638 if (!scm_is_false (t->cleanup_handler))
639 {
640 SCM ptr = t->cleanup_handler;
641
642 t->cleanup_handler = SCM_BOOL_F;
643 t->result = scm_internal_catch (SCM_BOOL_T,
644 call_cleanup, &ptr,
645 scm_handle_by_message_noexit, NULL);
646 }
647
648 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
649
650 t->exited = 1;
651 close (t->sleep_pipe[0]);
652 close (t->sleep_pipe[1]);
653 while (scm_is_true (unblock_from_queue (t->join_queue)))
654 ;
655
656 while (!scm_is_null (t->mutexes))
657 {
658 SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);
659
660 if (!SCM_UNBNDP (mutex))
661 {
662 fat_mutex *m = SCM_MUTEX_DATA (mutex);
663
664 scm_i_pthread_mutex_lock (&m->lock);
665
666 /* Since MUTEX is in `t->mutexes', T must be its owner. */
667 assert (scm_is_eq (m->owner, t->handle));
668
669 unblock_from_queue (m->waiting);
670
671 scm_i_pthread_mutex_unlock (&m->lock);
672 }
673
674 t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
675 }
676
677 scm_i_pthread_mutex_unlock (&t->admin_mutex);
678
679 return NULL;
680 }
681
682 static void *
683 do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
684 {
685 /* Won't hurt if we are already registered. */
686 #if SCM_USE_PTHREAD_THREADS
687 GC_register_my_thread (sb);
688 #endif
689
690 return scm_with_guile (do_thread_exit, v);
691 }
692
693 static void
694 on_thread_exit (void *v)
695 {
696 /* This handler is executed in non-guile mode. */
697 scm_i_thread *t = (scm_i_thread *) v, **tp;
698
699 /* If this thread was cancelled while doing a cond wait, it will
700 still have a mutex locked, so we unlock it here. */
701 if (t->held_mutex)
702 {
703 scm_i_pthread_mutex_unlock (t->held_mutex);
704 t->held_mutex = NULL;
705 }
706
707 /* Reinstate the current thread for purposes of scm_with_guile
708 guile-mode cleanup handlers. Only really needed in the non-TLS
709 case but it doesn't hurt to be consistent. */
710 scm_i_pthread_setspecific (scm_i_thread_key, t);
711
712 /* Scheme-level thread finalizers and other cleanup needs to happen in
713 guile mode. */
714 GC_call_with_stack_base (do_thread_exit_trampoline, t);
715
716   /* Removing ourselves from the list of all threads needs to happen in
717 non-guile mode since all SCM values on our stack become
718 unprotected once we are no longer in the list. */
719 scm_i_pthread_mutex_lock (&thread_admin_mutex);
720 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
721 if (*tp == t)
722 {
723 *tp = t->next_thread;
724
725 /* GC-robust */
726 t->next_thread = NULL;
727
728 break;
729 }
730 thread_count--;
731
732 /* If there's only one other thread, it could be the signal delivery
733 thread, so we need to notify it to shut down by closing its read pipe.
734 If it's not the signal delivery thread, then closing the read pipe isn't
735 going to hurt. */
736 if (thread_count <= 1)
737 scm_i_close_signal_pipe ();
738
739 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
740
741 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
742
743 #if SCM_USE_PTHREAD_THREADS
744 GC_unregister_my_thread ();
745 #endif
746 }
747
748 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
749
750 static void
751 init_thread_key (void)
752 {
753 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
754 }
755
756 /* Perform any initializations necessary to make the current thread
757 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
758 if necessary.
759
760 BASE is the stack base to use with GC.
761
762    PARENT is the dynamic state to use as the parent, or SCM_BOOL_F, in
763 which case the default dynamic state is used.
764
765    Return zero when the thread was already known to Guile; otherwise
766    return 1.
767
768 Note that it could be the case that the thread was known
769 to Guile, but not in guile mode (because we are within a
770 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
771 be sure. New threads are put into guile mode implicitly. */
772
773 static int
774 scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
775 {
776 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
777
778 if (SCM_I_CURRENT_THREAD)
779 {
780 /* Thread is already known to Guile.
781 */
782 return 0;
783 }
784 else
785 {
786 /* This thread has not been guilified yet.
787 */
788
789 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
790 if (scm_initialized_p == 0)
791 {
792 /* First thread ever to enter Guile. Run the full
793 initialization.
794 */
795 scm_i_init_guile (base);
796
797 #if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
798 /* Allow other threads to come in later. */
799 GC_allow_register_threads ();
800 #endif
801
802 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
803 }
804 else
805 {
806 /* Guile is already initialized, but this thread enters it for
807 the first time. Only initialize this thread.
808 */
809 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
810
811 /* Register this thread with libgc. */
812 #if SCM_USE_PTHREAD_THREADS
813 GC_register_my_thread (base);
814 #endif
815
816 guilify_self_1 (base);
817 guilify_self_2 (parent);
818 }
819 return 1;
820 }
821 }
822
823 void
824 scm_init_guile ()
825 {
826 struct GC_stack_base stack_base;
827
828 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
829 scm_i_init_thread_for_guile (&stack_base,
830 scm_i_default_dynamic_state);
831 else
832 {
833 fprintf (stderr, "Failed to get stack base for current thread.\n");
834 exit (EXIT_FAILURE);
835 }
836 }
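
/* An illustrative embedding sketch (an assumption of this example is
   the public scm_c_eval_string API; this code is not part of the
   file):

     int
     main ()
     {
       scm_init_guile ();
       scm_c_eval_string ("(display \"hello from Guile\\n\")");
       return 0;
     }
*/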
837
838 SCM_UNUSED static void
839 scm_leave_guile_cleanup (void *x)
840 {
841 on_thread_exit (SCM_I_CURRENT_THREAD);
842 }
843
844 struct with_guile_args
845 {
846 GC_fn_type func;
847 void *data;
848 SCM parent;
849 };
850
851 static void *
852 with_guile_trampoline (void *data)
853 {
854 struct with_guile_args *args = data;
855
856 return scm_c_with_continuation_barrier (args->func, args->data);
857 }
858
859 static void *
860 with_guile_and_parent (struct GC_stack_base *base, void *data)
861 {
862 void *res;
863 int new_thread;
864 scm_i_thread *t;
865 struct with_guile_args *args = data;
866
867 new_thread = scm_i_init_thread_for_guile (base, args->parent);
868 t = SCM_I_CURRENT_THREAD;
869 if (new_thread)
870 {
871 /* We are in Guile mode. */
872 assert (t->guile_mode);
873
874 res = scm_c_with_continuation_barrier (args->func, args->data);
875
876 /* Leave Guile mode. */
877 t->guile_mode = 0;
878 }
879 else if (t->guile_mode)
880 {
881 /* Already in Guile mode. */
882 res = scm_c_with_continuation_barrier (args->func, args->data);
883 }
884 else
885 {
886 /* We are not in Guile mode, either because we are not within a
887 scm_with_guile, or because we are within a scm_without_guile.
888
889 This call to scm_with_guile() could happen from anywhere on the
890          stack, and in particular lower on the stack than it was when
891          this thread was first guilified. Thus, `base' must be
892 updated. */
893 #if SCM_STACK_GROWS_UP
894 if (SCM_STACK_PTR (base->mem_base) < t->base)
895 t->base = SCM_STACK_PTR (base->mem_base);
896 #else
897 if (SCM_STACK_PTR (base->mem_base) > t->base)
898 t->base = SCM_STACK_PTR (base->mem_base);
899 #endif
900
901 t->guile_mode = 1;
902 res = with_gc_active (with_guile_trampoline, args);
903 t->guile_mode = 0;
904 }
905 return res;
906 }
907
908 static void *
909 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
910 {
911 struct with_guile_args args;
912
913 args.func = func;
914 args.data = data;
915 args.parent = parent;
916
917 return GC_call_with_stack_base (with_guile_and_parent, &args);
918 }
919
920 void *
921 scm_with_guile (void *(*func)(void *), void *data)
922 {
923 return scm_i_with_guile_and_parent (func, data,
924 scm_i_default_dynamic_state);
925 }
926
927 void *
928 scm_without_guile (void *(*func)(void *), void *data)
929 {
930 void *result;
931 scm_i_thread *t = SCM_I_CURRENT_THREAD;
932
933 if (t->guile_mode)
934 {
935 SCM_I_CURRENT_THREAD->guile_mode = 0;
936 result = with_gc_inactive (func, data);
937 SCM_I_CURRENT_THREAD->guile_mode = 1;
938 }
939 else
940 /* Otherwise we're not in guile mode, so nothing to do. */
941 result = func (data);
942
943 return result;
944 }
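
/* An illustrative use of scm_without_guile (a sketch; `do_sleep' is a
   hypothetical helper): wrap a call that may block for a long time so
   that the collector need not stop this thread while it sleeps.

     static void *
     do_sleep (void *data)
     {
       sleep (* (unsigned int *) data);
       return NULL;
     }

     unsigned int secs = 5;
     scm_without_guile (do_sleep, &secs);
*/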
945
946 \f
947 /*** Thread creation */
948
949 typedef struct {
950 SCM parent;
951 SCM thunk;
952 SCM handler;
953 SCM thread;
954 scm_i_pthread_mutex_t mutex;
955 scm_i_pthread_cond_t cond;
956 } launch_data;
957
958 static void *
959 really_launch (void *d)
960 {
961 launch_data *data = (launch_data *)d;
962 SCM thunk = data->thunk, handler = data->handler;
963 scm_i_thread *t;
964
965 t = SCM_I_CURRENT_THREAD;
966
967 scm_i_scm_pthread_mutex_lock (&data->mutex);
968 data->thread = scm_current_thread ();
969 scm_i_pthread_cond_signal (&data->cond);
970 scm_i_pthread_mutex_unlock (&data->mutex);
971
972 if (SCM_UNBNDP (handler))
973 t->result = scm_call_0 (thunk);
974 else
975 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
976
977 return 0;
978 }
979
980 static void *
981 launch_thread (void *d)
982 {
983 launch_data *data = (launch_data *)d;
984 scm_i_pthread_detach (scm_i_pthread_self ());
985 scm_i_with_guile_and_parent (really_launch, d, data->parent);
986 return NULL;
987 }
988
989 SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
990 (SCM thunk, SCM handler),
991 	    "Call @var{thunk} in a new thread and with a new dynamic state,\n"
992 "returning a new thread object representing the thread. The procedure\n"
993 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
994 "\n"
995 "When @var{handler} is specified, then @var{thunk} is called from\n"
996 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
997 "handler. This catch is established inside the continuation barrier.\n"
998 "\n"
999 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
1000 "the @emph{exit value} of the thread and the thread is terminated.")
1001 #define FUNC_NAME s_scm_call_with_new_thread
1002 {
1003 launch_data data;
1004 scm_i_pthread_t id;
1005 int err;
1006
1007 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
1008 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
1009 handler, SCM_ARG2, FUNC_NAME);
1010
1011 data.parent = scm_current_dynamic_state ();
1012 data.thunk = thunk;
1013 data.handler = handler;
1014 data.thread = SCM_BOOL_F;
1015 scm_i_pthread_mutex_init (&data.mutex, NULL);
1016 scm_i_pthread_cond_init (&data.cond, NULL);
1017
1018 scm_i_scm_pthread_mutex_lock (&data.mutex);
1019 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
1020 if (err)
1021 {
1022 scm_i_pthread_mutex_unlock (&data.mutex);
1023 errno = err;
1024 scm_syserror (NULL);
1025 }
1026 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1027 scm_i_pthread_mutex_unlock (&data.mutex);
1028
1029 return data.thread;
1030 }
1031 #undef FUNC_NAME
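
/* An illustrative C-level use (a sketch; building the thunk with
   scm_c_eval_string is an assumption of this example):

     SCM thunk = scm_c_eval_string ("(lambda () (+ 1 2))");
     SCM t = scm_call_with_new_thread (thunk, SCM_UNDEFINED);
     SCM three = scm_join_thread (t);
*/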
1032
1033 typedef struct {
1034 SCM parent;
1035 scm_t_catch_body body;
1036 void *body_data;
1037 scm_t_catch_handler handler;
1038 void *handler_data;
1039 SCM thread;
1040 scm_i_pthread_mutex_t mutex;
1041 scm_i_pthread_cond_t cond;
1042 } spawn_data;
1043
1044 static void *
1045 really_spawn (void *d)
1046 {
1047 spawn_data *data = (spawn_data *)d;
1048 scm_t_catch_body body = data->body;
1049 void *body_data = data->body_data;
1050 scm_t_catch_handler handler = data->handler;
1051 void *handler_data = data->handler_data;
1052 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1053
1054 scm_i_scm_pthread_mutex_lock (&data->mutex);
1055 data->thread = scm_current_thread ();
1056 scm_i_pthread_cond_signal (&data->cond);
1057 scm_i_pthread_mutex_unlock (&data->mutex);
1058
1059 if (handler == NULL)
1060 t->result = body (body_data);
1061 else
1062 t->result = scm_internal_catch (SCM_BOOL_T,
1063 body, body_data,
1064 handler, handler_data);
1065
1066 return 0;
1067 }
1068
1069 static void *
1070 spawn_thread (void *d)
1071 {
1072 spawn_data *data = (spawn_data *)d;
1073 scm_i_pthread_detach (scm_i_pthread_self ());
1074 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1075 return NULL;
1076 }
1077
1078 SCM
1079 scm_spawn_thread (scm_t_catch_body body, void *body_data,
1080 scm_t_catch_handler handler, void *handler_data)
1081 {
1082 spawn_data data;
1083 scm_i_pthread_t id;
1084 int err;
1085
1086 data.parent = scm_current_dynamic_state ();
1087 data.body = body;
1088 data.body_data = body_data;
1089 data.handler = handler;
1090 data.handler_data = handler_data;
1091 data.thread = SCM_BOOL_F;
1092 scm_i_pthread_mutex_init (&data.mutex, NULL);
1093 scm_i_pthread_cond_init (&data.cond, NULL);
1094
1095 scm_i_scm_pthread_mutex_lock (&data.mutex);
1096 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1097 if (err)
1098 {
1099 scm_i_pthread_mutex_unlock (&data.mutex);
1100 errno = err;
1101 scm_syserror (NULL);
1102 }
1103 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1104 scm_i_pthread_mutex_unlock (&data.mutex);
1105
1106 return data.thread;
1107 }
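
/* An illustrative use of scm_spawn_thread (a sketch; `work' is a
   hypothetical helper, and passing a NULL handler skips the catch):

     static SCM
     work (void *data)
     {
       return scm_from_int (42);
     }

     SCM t = scm_spawn_thread (work, NULL, NULL, NULL);
     SCM res = scm_join_thread (t);
*/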
1108
1109 SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1110 (),
1111 "Move the calling thread to the end of the scheduling queue.")
1112 #define FUNC_NAME s_scm_yield
1113 {
1114 return scm_from_bool (scm_i_sched_yield ());
1115 }
1116 #undef FUNC_NAME
1117
1118 SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1119 (SCM thread),
1120 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1121 "cannot be the current thread, and if @var{thread} has already terminated or "
1122 "been signaled to terminate, this function is a no-op.")
1123 #define FUNC_NAME s_scm_cancel_thread
1124 {
1125 scm_i_thread *t = NULL;
1126
1127 SCM_VALIDATE_THREAD (1, thread);
1128 t = SCM_I_THREAD_DATA (thread);
1129 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1130 if (!t->canceled)
1131 {
1132 t->canceled = 1;
1133 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1134 scm_i_pthread_cancel (t->pthread);
1135 }
1136 else
1137 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1138
1139 return SCM_UNSPECIFIED;
1140 }
1141 #undef FUNC_NAME
1142
1143 SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1144 (SCM thread, SCM proc),
1145 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1146 "This handler will be called when the thread exits.")
1147 #define FUNC_NAME s_scm_set_thread_cleanup_x
1148 {
1149 scm_i_thread *t;
1150
1151 SCM_VALIDATE_THREAD (1, thread);
1152 if (!scm_is_false (proc))
1153 SCM_VALIDATE_THUNK (2, proc);
1154
1155 t = SCM_I_THREAD_DATA (thread);
1156 scm_i_pthread_mutex_lock (&t->admin_mutex);
1157
1158 if (!(t->exited || t->canceled))
1159 t->cleanup_handler = proc;
1160
1161 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1162
1163 return SCM_UNSPECIFIED;
1164 }
1165 #undef FUNC_NAME
1166
1167 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1168 (SCM thread),
1169 "Return the cleanup handler installed for the thread @var{thread}.")
1170 #define FUNC_NAME s_scm_thread_cleanup
1171 {
1172 scm_i_thread *t;
1173 SCM ret;
1174
1175 SCM_VALIDATE_THREAD (1, thread);
1176
1177 t = SCM_I_THREAD_DATA (thread);
1178 scm_i_pthread_mutex_lock (&t->admin_mutex);
1179 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1180 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1181
1182 return ret;
1183 }
1184 #undef FUNC_NAME
1185
1186 SCM scm_join_thread (SCM thread)
1187 {
1188 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1189 }
1190
1191 SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1192 (SCM thread, SCM timeout, SCM timeoutval),
1193 "Suspend execution of the calling thread until the target @var{thread} "
1194 	    "terminates, unless it has already terminated. When @var{timeout} is given, wait at most until then; on timeout, return @var{timeoutval} if given, or @code{#f}. ")
1195 #define FUNC_NAME s_scm_join_thread_timed
1196 {
1197 scm_i_thread *t;
1198 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1199 SCM res = SCM_BOOL_F;
1200
1201 if (! (SCM_UNBNDP (timeoutval)))
1202 res = timeoutval;
1203
1204 SCM_VALIDATE_THREAD (1, thread);
1205 if (scm_is_eq (scm_current_thread (), thread))
1206 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
1207
1208 t = SCM_I_THREAD_DATA (thread);
1209 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1210
1211 if (! SCM_UNBNDP (timeout))
1212 {
1213 to_timespec (timeout, &ctimeout);
1214 timeout_ptr = &ctimeout;
1215 }
1216
1217 if (t->exited)
1218 res = t->result;
1219 else
1220 {
1221 while (1)
1222 {
1223 int err = block_self (t->join_queue, thread, &t->admin_mutex,
1224 timeout_ptr);
1225 if (err == 0)
1226 {
1227 if (t->exited)
1228 {
1229 res = t->result;
1230 break;
1231 }
1232 }
1233 else if (err == ETIMEDOUT)
1234 break;
1235
1236 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1237 SCM_TICK;
1238 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1239
1240 /* Check for exit again, since we just released and
1241 reacquired the admin mutex, before the next block_self
1242 call (which would block forever if t has already
1243 exited). */
1244 if (t->exited)
1245 {
1246 res = t->result;
1247 break;
1248 }
1249 }
1250 }
1251
1252 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1253
1254 return res;
1255 }
1256 #undef FUNC_NAME
1257
1258 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1259 (SCM obj),
1260 "Return @code{#t} if @var{obj} is a thread.")
1261 #define FUNC_NAME s_scm_thread_p
1262 {
1263 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1264 }
1265 #undef FUNC_NAME
1266
1267
1268 static size_t
1269 fat_mutex_free (SCM mx)
1270 {
1271 fat_mutex *m = SCM_MUTEX_DATA (mx);
1272 scm_i_pthread_mutex_destroy (&m->lock);
1273 return 0;
1274 }
1275
1276 static int
1277 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1278 {
1279 fat_mutex *m = SCM_MUTEX_DATA (mx);
1280 scm_puts ("#<mutex ", port);
1281 scm_uintprint ((scm_t_bits)m, 16, port);
1282 scm_puts (">", port);
1283 return 1;
1284 }
1285
1286 static SCM
1287 make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
1288 {
1289 fat_mutex *m;
1290 SCM mx;
1291
1292 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1293 scm_i_pthread_mutex_init (&m->lock, NULL);
1294 m->owner = SCM_BOOL_F;
1295 m->level = 0;
1296
1297 m->recursive = recursive;
1298 m->unchecked_unlock = unchecked_unlock;
1299 m->allow_external_unlock = external_unlock;
1300
1301 m->waiting = SCM_EOL;
1302 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1303 m->waiting = make_queue ();
1304 return mx;
1305 }
1306
1307 SCM scm_make_mutex (void)
1308 {
1309 return scm_make_mutex_with_flags (SCM_EOL);
1310 }
1311
1312 SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1313 SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1314 SCM_SYMBOL (recursive_sym, "recursive");
1315
1316 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1317 (SCM flags),
1318 "Create a new mutex. ")
1319 #define FUNC_NAME s_scm_make_mutex_with_flags
1320 {
1321 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1322
1323 SCM ptr = flags;
1324 while (! scm_is_null (ptr))
1325 {
1326 SCM flag = SCM_CAR (ptr);
1327 if (scm_is_eq (flag, unchecked_unlock_sym))
1328 unchecked_unlock = 1;
1329 else if (scm_is_eq (flag, allow_external_unlock_sym))
1330 external_unlock = 1;
1331 else if (scm_is_eq (flag, recursive_sym))
1332 recursive = 1;
1333 else
1334 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1335 ptr = SCM_CDR (ptr);
1336 }
1337 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1338 }
1339 #undef FUNC_NAME
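
/* An illustrative C-level use (a sketch): create a recursive mutex
   through the flags interface.

     SCM m = scm_make_mutex_with_flags
       (scm_list_1 (scm_from_latin1_symbol ("recursive")));
*/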
1340
1341 SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1342 (void),
1343 "Create a new recursive mutex. ")
1344 #define FUNC_NAME s_scm_make_recursive_mutex
1345 {
1346 return make_fat_mutex (1, 0, 0);
1347 }
1348 #undef FUNC_NAME
1349
1350 SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1351
1352 static SCM
1353 fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
1354 {
1355 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1356
1357 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
1358 SCM err = SCM_BOOL_F;
1359
1360 struct timeval current_time;
1361
1362 scm_i_scm_pthread_mutex_lock (&m->lock);
1363
1364 while (1)
1365 {
1366 if (m->level == 0)
1367 {
1368 m->owner = new_owner;
1369 m->level++;
1370
1371 if (SCM_I_IS_THREAD (new_owner))
1372 {
1373 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
1374 scm_i_pthread_mutex_lock (&t->admin_mutex);
1375
1376 /* Only keep a weak reference to MUTEX so that it's not
1377 retained when not referenced elsewhere (bug #27450).
1378 The weak pair itself is eventually removed when MUTEX
1379 is unlocked. Note that `t->mutexes' lists mutexes
1380 currently held by T, so it should be small. */
1381 t->mutexes = scm_weak_car_pair (mutex, t->mutexes);
1382
1383 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1384 }
1385 *ret = 1;
1386 break;
1387 }
1388 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1389 {
1390 m->owner = new_owner;
1391 err = scm_cons (scm_abandoned_mutex_error_key,
1392 scm_from_locale_string ("lock obtained on abandoned "
1393 "mutex"));
1394 *ret = 1;
1395 break;
1396 }
1397 else if (scm_is_eq (m->owner, new_owner))
1398 {
1399 if (m->recursive)
1400 {
1401 m->level++;
1402 *ret = 1;
1403 }
1404 else
1405 {
1406 err = scm_cons (scm_misc_error_key,
1407 scm_from_locale_string ("mutex already locked "
1408 "by thread"));
1409 *ret = 0;
1410 }
1411 break;
1412 }
1413 else
1414 {
1415 if (timeout != NULL)
1416 {
1417 gettimeofday (&current_time, NULL);
1418 if (current_time.tv_sec > timeout->tv_sec ||
1419 (current_time.tv_sec == timeout->tv_sec &&
1420 current_time.tv_usec * 1000 > timeout->tv_nsec))
1421 {
1422 *ret = 0;
1423 break;
1424 }
1425 }
1426 block_self (m->waiting, mutex, &m->lock, timeout);
1427 scm_i_pthread_mutex_unlock (&m->lock);
1428 SCM_TICK;
1429 scm_i_scm_pthread_mutex_lock (&m->lock);
1430 }
1431 }
1432 scm_i_pthread_mutex_unlock (&m->lock);
1433 return err;
1434 }
1435
1436 SCM scm_lock_mutex (SCM mx)
1437 {
1438 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1439 }
1440
1441 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1442 (SCM m, SCM timeout, SCM owner),
1443 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1444 "blocks until the mutex becomes available. The function returns when "
1445 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1446 	    "the calling thread already owns succeeds right away if the mutex was "
1447 	    "created with the @code{recursive} flag; otherwise an error is signalled. ")
1448 #define FUNC_NAME s_scm_lock_mutex_timed
1449 {
1450 SCM exception;
1451 int ret = 0;
1452 scm_t_timespec cwaittime, *waittime = NULL;
1453
1454 SCM_VALIDATE_MUTEX (1, m);
1455
1456 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1457 {
1458 to_timespec (timeout, &cwaittime);
1459 waittime = &cwaittime;
1460 }
1461
1462 exception = fat_mutex_lock (m, waittime, owner, &ret);
1463 if (!scm_is_false (exception))
1464 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1465 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1466 }
1467 #undef FUNC_NAME
1468
1469 static void
1470 lock_mutex_return_void (SCM mx)
1471 {
1472 (void) scm_lock_mutex (mx);
1473 }
1474
1475 static void
1476 unlock_mutex_return_void (SCM mx)
1477 {
1478 (void) scm_unlock_mutex (mx);
1479 }
1480
1481 void
1482 scm_dynwind_lock_mutex (SCM mutex)
1483 {
1484 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
1485 SCM_F_WIND_EXPLICITLY);
1486 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
1487 SCM_F_WIND_EXPLICITLY);
1488 }
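
/* An illustrative use (a sketch; `might_throw' stands for any code
   that can exit non-locally):

     scm_dynwind_begin (0);
     scm_dynwind_lock_mutex (mx);
     might_throw ();
     scm_dynwind_end ();

   MX is unlocked again on normal return and on non-local exit
   alike.  */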
1489
1490 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1491 (SCM mutex),
1492 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1493 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1494 #define FUNC_NAME s_scm_try_mutex
1495 {
1496 SCM exception;
1497 int ret = 0;
1498 scm_t_timespec cwaittime, *waittime = NULL;
1499
1500 SCM_VALIDATE_MUTEX (1, mutex);
1501
1502 to_timespec (scm_from_int(0), &cwaittime);
1503 waittime = &cwaittime;
1504
1505 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1506 if (!scm_is_false (exception))
1507 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1508 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1509 }
1510 #undef FUNC_NAME
1511
1512 /*** Fat condition variables */
1513
1514 typedef struct {
1515 scm_i_pthread_mutex_t lock;
1516 SCM waiting; /* the threads waiting for this condition. */
1517 } fat_cond;
1518
1519 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1520 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1521
1522 static int
1523 fat_mutex_unlock (SCM mutex, SCM cond,
1524 const scm_t_timespec *waittime, int relock)
1525 {
1526 SCM owner;
1527 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1528 fat_cond *c = NULL;
1529 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1530 int err = 0, ret = 0;
1531
1532 scm_i_scm_pthread_mutex_lock (&m->lock);
1533
1534 owner = m->owner;
1535
1536 if (!scm_is_eq (owner, t->handle))
1537 {
1538 if (m->level == 0)
1539 {
1540 if (!m->unchecked_unlock)
1541 {
1542 scm_i_pthread_mutex_unlock (&m->lock);
1543 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1544 }
1545 owner = t->handle;
1546 }
1547 else if (!m->allow_external_unlock)
1548 {
1549 scm_i_pthread_mutex_unlock (&m->lock);
1550 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1551 }
1552 }
1553
1554 if (! (SCM_UNBNDP (cond)))
1555 {
1556 c = SCM_CONDVAR_DATA (cond);
1557 while (1)
1558 {
1559 int brk = 0;
1560
1561 if (m->level > 0)
1562 m->level--;
1563 if (m->level == 0)
1564 {
1565 /* Change the owner of MUTEX. */
1566 t->mutexes = scm_delq_x (mutex, t->mutexes);
1567 m->owner = unblock_from_queue (m->waiting);
1568 }
1569
1570 t->block_asyncs++;
1571
1572 err = block_self (c->waiting, cond, &m->lock, waittime);
1573 scm_i_pthread_mutex_unlock (&m->lock);
1574
1575 if (err == 0)
1576 {
1577 ret = 1;
1578 brk = 1;
1579 }
1580 else if (err == ETIMEDOUT)
1581 {
1582 ret = 0;
1583 brk = 1;
1584 }
1585 else if (err != EINTR)
1586 {
1587 errno = err;
1588 scm_syserror (NULL);
1589 }
1590
1591 if (brk)
1592 {
1593 if (relock)
1594 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
1595 t->block_asyncs--;
1596 break;
1597 }
1598
1599 t->block_asyncs--;
1600 scm_async_tick ();
1601
1602 scm_remember_upto_here_2 (cond, mutex);
1603
1604 scm_i_scm_pthread_mutex_lock (&m->lock);
1605 }
1606 }
1607 else
1608 {
1609 if (m->level > 0)
1610 m->level--;
1611 if (m->level == 0)
1612 {
1613 /* Change the owner of MUTEX. */
1614 t->mutexes = scm_delq_x (mutex, t->mutexes);
1615 m->owner = unblock_from_queue (m->waiting);
1616 }
1617
1618 scm_i_pthread_mutex_unlock (&m->lock);
1619 ret = 1;
1620 }
1621
1622 return ret;
1623 }
1624
1625 SCM scm_unlock_mutex (SCM mx)
1626 {
1627 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1628 }
1629
1630 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1631 (SCM mx, SCM cond, SCM timeout),
1632 "Unlocks @var{mutex} if the calling thread owns the lock on "
1633 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1634 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1635 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1636 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1637 "with a call to @code{unlock-mutex}. Only the last call to "
1638 "@code{unlock-mutex} will actually unlock the mutex. ")
1639 #define FUNC_NAME s_scm_unlock_mutex_timed
1640 {
1641 scm_t_timespec cwaittime, *waittime = NULL;
1642
1643 SCM_VALIDATE_MUTEX (1, mx);
1644 if (! (SCM_UNBNDP (cond)))
1645 {
1646 SCM_VALIDATE_CONDVAR (2, cond);
1647
1648 if (! (SCM_UNBNDP (timeout)))
1649 {
1650 to_timespec (timeout, &cwaittime);
1651 waittime = &cwaittime;
1652 }
1653 }
1654
1655 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1656 }
1657 #undef FUNC_NAME
1658
1659 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1660 (SCM obj),
1661 "Return @code{#t} if @var{obj} is a mutex.")
1662 #define FUNC_NAME s_scm_mutex_p
1663 {
1664 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1665 }
1666 #undef FUNC_NAME
1667
1668 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1669 (SCM mx),
1670 "Return the thread owning @var{mx}, or @code{#f}.")
1671 #define FUNC_NAME s_scm_mutex_owner
1672 {
1673 SCM owner;
1674 fat_mutex *m = NULL;
1675
1676 SCM_VALIDATE_MUTEX (1, mx);
1677 m = SCM_MUTEX_DATA (mx);
1678 scm_i_pthread_mutex_lock (&m->lock);
1679 owner = m->owner;
1680 scm_i_pthread_mutex_unlock (&m->lock);
1681
1682 return owner;
1683 }
1684 #undef FUNC_NAME
1685
1686 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1687 (SCM mx),
1688 "Return the lock level of mutex @var{mx}.")
1689 #define FUNC_NAME s_scm_mutex_level
1690 {
1691 SCM_VALIDATE_MUTEX (1, mx);
1692 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1693 }
1694 #undef FUNC_NAME
1695
1696 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1697 (SCM mx),
1698 "Returns @code{#t} if the mutex @var{mx} is locked.")
1699 #define FUNC_NAME s_scm_mutex_locked_p
1700 {
1701 SCM_VALIDATE_MUTEX (1, mx);
1702 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1703 }
1704 #undef FUNC_NAME
1705
1706 static int
1707 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1708 {
1709 fat_cond *c = SCM_CONDVAR_DATA (cv);
1710 scm_puts ("#<condition-variable ", port);
1711 scm_uintprint ((scm_t_bits)c, 16, port);
1712 scm_puts (">", port);
1713 return 1;
1714 }
1715
1716 SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1717 (void),
1718 "Make a new condition variable.")
1719 #define FUNC_NAME s_scm_make_condition_variable
1720 {
1721 fat_cond *c;
1722 SCM cv;
1723
1724 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
1725 c->waiting = SCM_EOL;
1726 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1727 c->waiting = make_queue ();
1728 return cv;
1729 }
1730 #undef FUNC_NAME
1731
1732 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1733 (SCM cv, SCM mx, SCM t),
1734 "Wait until @var{cond-var} has been signalled. While waiting, "
1735 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1736 "is locked again when this function returns. When @var{time} is given, "
1737 "it specifies a point in time where the waiting should be aborted. It "
1738 	    "can be either an integer as returned by @code{current-time} or a pair "
1739 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1740 "mutex is locked and @code{#f} is returned. When the condition "
1741 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1742 "is returned. ")
1743 #define FUNC_NAME s_scm_timed_wait_condition_variable
1744 {
1745 scm_t_timespec waittime, *waitptr = NULL;
1746
1747 SCM_VALIDATE_CONDVAR (1, cv);
1748 SCM_VALIDATE_MUTEX (2, mx);
1749
1750 if (!SCM_UNBNDP (t))
1751 {
1752 to_timespec (t, &waittime);
1753 waitptr = &waittime;
1754 }
1755
1756 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1757 }
1758 #undef FUNC_NAME
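
/* An illustrative wait loop (a sketch; `ready_p' is a hypothetical
   application flag protected by MX):

     scm_lock_mutex (mx);
     while (!ready_p)
       scm_timed_wait_condition_variable (cv, mx, SCM_UNDEFINED);
     scm_unlock_mutex (mx);
*/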
1759
1760 static void
1761 fat_cond_signal (fat_cond *c)
1762 {
1763 unblock_from_queue (c->waiting);
1764 }
1765
1766 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1767 (SCM cv),
1768 	    "Wake up one thread that is waiting for @var{cv}.")
1769 #define FUNC_NAME s_scm_signal_condition_variable
1770 {
1771 SCM_VALIDATE_CONDVAR (1, cv);
1772 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1773 return SCM_BOOL_T;
1774 }
1775 #undef FUNC_NAME
1776
1777 static void
1778 fat_cond_broadcast (fat_cond *c)
1779 {
1780 while (scm_is_true (unblock_from_queue (c->waiting)))
1781 ;
1782 }
1783
1784 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1785 (SCM cv),
1786 "Wake up all threads that are waiting for @var{cv}. ")
1787 #define FUNC_NAME s_scm_broadcast_condition_variable
1788 {
1789 SCM_VALIDATE_CONDVAR (1, cv);
1790 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1791 return SCM_BOOL_T;
1792 }
1793 #undef FUNC_NAME
1794
1795 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1796 (SCM obj),
1797 "Return @code{#t} if @var{obj} is a condition variable.")
1798 #define FUNC_NAME s_scm_condition_variable_p
1799 {
1800 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1801 }
1802 #undef FUNC_NAME
1803
1804
1805 \f
1806 /*** Select */
1807
1808 struct select_args
1809 {
1810 int nfds;
1811 SELECT_TYPE *read_fds;
1812 SELECT_TYPE *write_fds;
1813 SELECT_TYPE *except_fds;
1814 struct timeval *timeout;
1815
1816 int result;
1817 int errno_value;
1818 };
1819
1820 static void *
1821 do_std_select (void *args)
1822 {
1823 struct select_args *select_args;
1824
1825 select_args = (struct select_args *) args;
1826
1827 select_args->result =
1828 select (select_args->nfds,
1829 select_args->read_fds, select_args->write_fds,
1830 select_args->except_fds, select_args->timeout);
1831 select_args->errno_value = errno;
1832
1833 return NULL;
1834 }
1835
1836 int
1837 scm_std_select (int nfds,
1838 SELECT_TYPE *readfds,
1839 SELECT_TYPE *writefds,
1840 SELECT_TYPE *exceptfds,
1841 struct timeval *timeout)
1842 {
1843 fd_set my_readfds;
1844 int res, eno, wakeup_fd;
1845 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1846 struct select_args args;
1847
1848 if (readfds == NULL)
1849 {
1850 FD_ZERO (&my_readfds);
1851 readfds = &my_readfds;
1852 }
1853
1854 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1855 SCM_TICK;
1856
1857 wakeup_fd = t->sleep_pipe[0];
1858 FD_SET (wakeup_fd, readfds);
1859 if (wakeup_fd >= nfds)
1860 nfds = wakeup_fd+1;
1861
1862 args.nfds = nfds;
1863 args.read_fds = readfds;
1864 args.write_fds = writefds;
1865 args.except_fds = exceptfds;
1866 args.timeout = timeout;
1867
1868 /* Explicitly cooperate with the GC. */
1869 scm_without_guile (do_std_select, &args);
1870
1871 res = args.result;
1872 eno = args.errno_value;
1873
1874 t->sleep_fd = -1;
1875 scm_i_reset_sleep (t);
1876
1877 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1878 {
1879 char dummy;
1880 full_read (wakeup_fd, &dummy, 1);
1881
1882 FD_CLR (wakeup_fd, readfds);
1883 res -= 1;
1884 if (res == 0)
1885 {
1886 eno = EINTR;
1887 res = -1;
1888 }
1889 }
1890 errno = eno;
1891 return res;
1892 }
1893
1894 /* Convenience API for blocking while in guile mode. */
1895
1896 #if SCM_USE_PTHREAD_THREADS
1897
1898 /* It seems reasonable to not run procedures related to mutex and condition
1899    variables within `GC_do_blocking ()' since (i) the GC can operate even
1900 without it, and (ii) the only potential gain would be GC latency. See
1901 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1902 for a discussion of the pros and cons. */
1903
1904 int
1905 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1906 {
1907 int res = scm_i_pthread_mutex_lock (mutex);
1908 return res;
1909 }
1910
1911 static void
1912 do_unlock (void *data)
1913 {
1914 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1915 }
1916
1917 void
1918 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1919 {
1920 scm_i_scm_pthread_mutex_lock (mutex);
1921 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
1922 }
1923
1924 int
1925 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1926 {
1927 int res;
1928 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1929
1930 t->held_mutex = mutex;
1931 res = scm_i_pthread_cond_wait (cond, mutex);
1932 t->held_mutex = NULL;
1933
1934 return res;
1935 }
1936
1937 int
1938 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1939 scm_i_pthread_mutex_t *mutex,
1940 const scm_t_timespec *wt)
1941 {
1942 int res;
1943 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1944
1945 t->held_mutex = mutex;
1946 res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1947 t->held_mutex = NULL;
1948
1949 return res;
1950 }
1951
1952 #endif
1953
1954 unsigned long
1955 scm_std_usleep (unsigned long usecs)
1956 {
1957 struct timeval tv;
1958 tv.tv_usec = usecs % 1000000;
1959 tv.tv_sec = usecs / 1000000;
1960 scm_std_select (0, NULL, NULL, NULL, &tv);
1961 return tv.tv_sec * 1000000 + tv.tv_usec;
1962 }
1963
1964 unsigned int
1965 scm_std_sleep (unsigned int secs)
1966 {
1967 struct timeval tv;
1968 tv.tv_usec = 0;
1969 tv.tv_sec = secs;
1970 scm_std_select (0, NULL, NULL, NULL, &tv);
1971 return tv.tv_sec;
1972 }
1973
1974 /*** Misc */
1975
1976 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1977 (void),
1978 "Return the thread that called this function.")
1979 #define FUNC_NAME s_scm_current_thread
1980 {
1981 return SCM_I_CURRENT_THREAD->handle;
1982 }
1983 #undef FUNC_NAME
1984
1985 static SCM
1986 scm_c_make_list (size_t n, SCM fill)
1987 {
1988 SCM res = SCM_EOL;
1989 while (n-- > 0)
1990 res = scm_cons (fill, res);
1991 return res;
1992 }
1993
1994 SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
1995 (void),
1996 "Return a list of all threads.")
1997 #define FUNC_NAME s_scm_all_threads
1998 {
1999   /* We cannot allocate while holding the thread_admin_mutex because
2000 of the way GC is done.
2001 */
2002 int n = thread_count;
2003 scm_i_thread *t;
2004 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
2005
2006 scm_i_pthread_mutex_lock (&thread_admin_mutex);
2007 l = &list;
2008 for (t = all_threads; t && n > 0; t = t->next_thread)
2009 {
2010 if (t != scm_i_signal_delivery_thread)
2011 {
2012 SCM_SETCAR (*l, t->handle);
2013 l = SCM_CDRLOC (*l);
2014 }
2015 n--;
2016 }
2017 *l = SCM_EOL;
2018 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
2019 return list;
2020 }
2021 #undef FUNC_NAME
2022
2023 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
2024 (SCM thread),
2025 "Return @code{#t} iff @var{thread} has exited.\n")
2026 #define FUNC_NAME s_scm_thread_exited_p
2027 {
2028 return scm_from_bool (scm_c_thread_exited_p (thread));
2029 }
2030 #undef FUNC_NAME
2031
2032 int
2033 scm_c_thread_exited_p (SCM thread)
2034 #define FUNC_NAME s_scm_thread_exited_p
2035 {
2036 scm_i_thread *t;
2037 SCM_VALIDATE_THREAD (1, thread);
2038 t = SCM_I_THREAD_DATA (thread);
2039 return t->exited;
2040 }
2041 #undef FUNC_NAME
2042
2043 SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
2044 (void),
2045 "Return the total number of processors of the machine, which\n"
2046 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2047 "thread execution unit, which can be either:\n\n"
2048 "@itemize\n"
2049 "@item an execution core in a (possibly multi-core) chip, in a\n"
2050 	    "  (possibly multi-chip) module, in a single computer, or\n"
2051 "@item a thread execution unit inside a core in the case of\n"
2052 " @dfn{hyper-threaded} CPUs.\n"
2053 "@end itemize\n\n"
2054 	    "Which of the two definitions is used is unspecified.\n")
2055 #define FUNC_NAME s_scm_total_processor_count
2056 {
2057 return scm_from_ulong (num_processors (NPROC_ALL));
2058 }
2059 #undef FUNC_NAME
2060
2061 SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
2062 (void),
2063 "Like @code{total-processor-count}, but return the number of\n"
2064 "processors available to the current process. See\n"
2065 "@code{setaffinity} and @code{getaffinity} for more\n"
2066 "information.\n")
2067 #define FUNC_NAME s_scm_current_processor_count
2068 {
2069 return scm_from_ulong (num_processors (NPROC_CURRENT));
2070 }
2071 #undef FUNC_NAME
2072
2073
2074 \f
2075
2076 static scm_i_pthread_cond_t wake_up_cond;
2077 static int threads_initialized_p = 0;
2078
2079
2080 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2081 */
2082 scm_i_pthread_mutex_t scm_i_critical_section_mutex;
2083
2084 static SCM dynwind_critical_section_mutex;
2085
2086 void
2087 scm_dynwind_critical_section (SCM mutex)
2088 {
2089 if (scm_is_false (mutex))
2090 mutex = dynwind_critical_section_mutex;
2091 scm_dynwind_lock_mutex (mutex);
2092 scm_dynwind_block_asyncs ();
2093 }
2094
2095 /*** Initialization */
2096
2097 scm_i_pthread_mutex_t scm_i_misc_mutex;
2098
2099 #if SCM_USE_PTHREAD_THREADS
2100 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2101 #endif
2102
2103 void
2104 scm_threads_prehistory (void *base)
2105 {
2106 #if SCM_USE_PTHREAD_THREADS
2107 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
2108 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
2109 PTHREAD_MUTEX_RECURSIVE);
2110 #endif
2111
2112 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
2113 scm_i_pthread_mutexattr_recursive);
2114 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
2115 scm_i_pthread_cond_init (&wake_up_cond, NULL);
2116
2117 guilify_self_1 ((struct GC_stack_base *) base);
2118 }
2119
2120 scm_t_bits scm_tc16_thread;
2121 scm_t_bits scm_tc16_mutex;
2122 scm_t_bits scm_tc16_condvar;
2123
2124 void
2125 scm_init_threads ()
2126 {
2127 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
2128 scm_set_smob_print (scm_tc16_thread, thread_print);
2129
2130 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
2131 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
2132 scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
2133
2134 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
2135 sizeof (fat_cond));
2136 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
2137
2138 scm_i_default_dynamic_state = SCM_BOOL_F;
2139 guilify_self_2 (SCM_BOOL_F);
2140 threads_initialized_p = 1;
2141
2142 dynwind_critical_section_mutex = scm_make_recursive_mutex ();
2143 }
2144
2145 void
2146 scm_init_threads_default_dynamic_state ()
2147 {
2148 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2149 scm_i_default_dynamic_state = state;
2150 }
2151
2152 void
2153 scm_init_thread_procs ()
2154 {
2155 #include "libguile/threads.x"
2156 }
2157
2158 \f
2159 /* IA64-specific things. */
2160
2161 #ifdef __ia64__
2162 # ifdef __hpux
2163 # include <sys/param.h>
2164 # include <sys/pstat.h>
2165 void *
2166 scm_ia64_register_backing_store_base (void)
2167 {
2168 struct pst_vm_status vm_status;
2169 int i = 0;
2170 while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
2171 if (vm_status.pst_type == PS_RSESTACK)
2172 return (void *) vm_status.pst_vaddr;
2173 abort ();
2174 }
2175 void *
2176 scm_ia64_ar_bsp (const void *ctx)
2177 {
2178 uint64_t bsp;
2179 __uc_get_ar_bsp (ctx, &bsp);
2180 return (void *) bsp;
2181 }
2182 # endif /* hpux */
2183 # ifdef linux
2184 # include <ucontext.h>
2185 void *
2186 scm_ia64_register_backing_store_base (void)
2187 {
2188 extern void *__libc_ia64_register_backing_store_base;
2189 return __libc_ia64_register_backing_store_base;
2190 }
2191 void *
2192 scm_ia64_ar_bsp (const void *opaque)
2193 {
2194 const ucontext_t *ctx = opaque;
2195 return (void *) ctx->uc_mcontext.sc_ar_bsp;
2196 }
2197 # endif /* linux */
2198 #endif /* __ia64__ */
2199
2200
2201 /*
2202 Local Variables:
2203 c-file-style: "gnu"
2204 End:
2205 */