Merge remote-tracking branch 'origin/stable-2.0'
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
27
28 #if HAVE_UNISTD_H
29 #include <unistd.h>
30 #endif
31 #include <stdio.h>
32
33 #ifdef HAVE_STRING_H
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35 #endif
36
37 #if HAVE_SYS_TIME_H
38 #include <sys/time.h>
39 #endif
40
41 #include <assert.h>
42 #include <fcntl.h>
43 #include <nproc.h>
44
45 #include "libguile/validate.h"
46 #include "libguile/root.h"
47 #include "libguile/eval.h"
48 #include "libguile/async.h"
49 #include "libguile/ports.h"
50 #include "libguile/threads.h"
51 #include "libguile/dynwind.h"
52 #include "libguile/iselect.h"
53 #include "libguile/fluids.h"
54 #include "libguile/continuations.h"
55 #include "libguile/gc.h"
56 #include "libguile/init.h"
57 #include "libguile/scmsigs.h"
58 #include "libguile/strings.h"
59
60 #include <full-read.h>
61
62
63 \f
64
65 /* First some libgc shims. */
66
67 /* Make sure GC_fn_type is defined; it is missing from the public
68 headers of GC 7.1 and earlier. */
69 #ifndef HAVE_GC_FN_TYPE
70 typedef void * (* GC_fn_type) (void *);
71 #endif
72
73
74 #ifndef GC_SUCCESS
75 #define GC_SUCCESS 0
76 #endif
77
78 #ifndef GC_UNIMPLEMENTED
79 #define GC_UNIMPLEMENTED 3
80 #endif
81
82 /* Likewise struct GC_stack_base is missing before 7.1. */
83 #ifndef HAVE_GC_STACK_BASE
84 struct GC_stack_base {
85 void * mem_base; /* Base of memory stack. */
86 #ifdef __ia64__
87 void * reg_base; /* Base of separate register stack. */
88 #endif
89 };
90
/* Fallback for libgc <= 7.0: thread registration is unavailable, so
   report GC_UNIMPLEMENTED and ignore STACK_BASE.  Callers treat the
   registration as best-effort.  */
static int
GC_register_my_thread (struct GC_stack_base *stack_base)
{
  return GC_UNIMPLEMENTED;
}
96
/* Fallback for libgc <= 7.0: since the matching registration above is
   a no-op, there is nothing to undo here.  Declared with (void) rather
   than an old-style empty parameter list so it has a real prototype.  */
static void
GC_unregister_my_thread (void)
{
}
101
#if !SCM_USE_PTHREAD_THREADS
/* No threads; we can just use GC_stackbottom. */
static void *
get_thread_stack_base ()
{
  return GC_stackbottom;
}

#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
  && defined PTHREAD_ATTR_GETSTACK_WORKS
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them. */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  /* NOTE(review): return values of pthread_getattr_np and
     pthread_attr_getstack are unchecked here -- presumably they cannot
     fail for the calling thread on the targeted platforms; confirm.  */
  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* The "base" is the end of the stack region that the stack grows
     away from.  */
#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com. */
static void *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#else
#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
#endif
146
/* Fallback for libgc <= 7.0: compute the stack base for the calling
   thread using the platform-specific helper above.  Always reports
   GC_SUCCESS.  */
static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum. */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}
169
/* Fallback for libgc <= 7.0: call FN with a stack base approximated by
   the address of a local variable in this frame, which bounds the
   portion of the stack FN and its callees will use.  */
static void *
GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
{
  struct GC_stack_base stack_base;

  stack_base.mem_base = (void*)&stack_base;
#ifdef __ia64__
  /* FIXME: Untested. */
  {
    ucontext_t ctx;
    getcontext (&ctx);
    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
  }
#endif

  return fn (&stack_base, arg);
}
187 #endif /* HAVE_GC_STACK_BASE */
188
189
190 /* Now define with_gc_active and with_gc_inactive. */
191
#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer). */

/* Run FUNC via GC_do_blocking, which tells the collector this thread
   may block and need not be waited for during collections.  */
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

/* Run FUNC via GC_call_with_gc_active, re-marking this thread as
   active for the collector.  */
static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active. */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */
228
229
230 \f
231 static void
232 to_timespec (SCM t, scm_t_timespec *waittime)
233 {
234 if (scm_is_pair (t))
235 {
236 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
237 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
238 }
239 else
240 {
241 double time = scm_to_double (t);
242 double sec = scm_c_truncate (time);
243
244 waittime->tv_sec = (long) sec;
245 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
246 }
247 }
248
249 \f
250 /*** Queues */
251
252 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
253 the risk of false references leading to unbounded retained space as
254 described in "Bounding Space Usage of Conservative Garbage Collectors",
255 H.J. Boehm, 2001. */
256
257 /* Make an empty queue data structure.
258 */
259 static SCM
260 make_queue ()
261 {
262 return scm_cons (SCM_EOL, SCM_EOL);
263 }
264
265 /* Put T at the back of Q and return a handle that can be used with
266 remqueue to remove T from Q again.
267 */
268 static SCM
269 enqueue (SCM q, SCM t)
270 {
271 SCM c = scm_cons (t, SCM_EOL);
272 SCM_CRITICAL_SECTION_START;
273 if (scm_is_null (SCM_CDR (q)))
274 SCM_SETCDR (q, c);
275 else
276 SCM_SETCDR (SCM_CAR (q), c);
277 SCM_SETCAR (q, c);
278 SCM_CRITICAL_SECTION_END;
279 return c;
280 }
281
282 /* Remove the element that the handle C refers to from the queue Q. C
283 must have been returned from a call to enqueue. The return value
284 is zero when the element referred to by C has already been removed.
285 Otherwise, 1 is returned.
286 */
287 static int
288 remqueue (SCM q, SCM c)
289 {
290 SCM p, prev = q;
291 SCM_CRITICAL_SECTION_START;
292 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
293 {
294 if (scm_is_eq (p, c))
295 {
296 if (scm_is_eq (c, SCM_CAR (q)))
297 SCM_SETCAR (q, SCM_CDR (c));
298 SCM_SETCDR (prev, SCM_CDR (c));
299
300 /* GC-robust */
301 SCM_SETCDR (c, SCM_EOL);
302
303 SCM_CRITICAL_SECTION_END;
304 return 1;
305 }
306 prev = p;
307 }
308 SCM_CRITICAL_SECTION_END;
309 return 0;
310 }
311
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      /* If the queue is now empty, clear the tail pointer too.  */
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
339
340 /*** Thread smob routines */
341
342
/* Smob printer for thread objects: writes "#<thread ID (ADDR)>" to
   PORT, where ID is the pthread identifier and ADDR the address of the
   scm_i_thread structure.  Returns 1 (printed successfully).  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t. */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the unsigned type whose size matches pthread_t's.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts_unlocked ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts_unlocked (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts_unlocked (")>", port);
  return 1;
}
377
378 \f
379 /*** Blocking on queues. */
380
381 /* See also scm_i_queue_async_cell for how such a block is
382 interrputed.
383 */
384
385 /* Put the current thread on QUEUE and go to sleep, waiting for it to
386 be woken up by a call to 'unblock_from_queue', or to be
387 interrupted. Upon return of this function, the current thread is
388 no longer on QUEUE, even when the sleep has been interrupted.
389
390 The caller of block_self must hold MUTEX. It will be atomically
391 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
392
393 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
394 as MUTEX is needed.
395
396 When WAITTIME is not NULL, the sleep will be aborted at that time.
397
398 The return value of block_self is an errno value. It will be zero
399 when the sleep has been successfully completed by a call to
400 unblock_from_queue, EINTR when it has been interrupted by the
401 delivery of a system async, and ETIMEDOUT when the timeout has
402 expired.
403
404 The system asyncs themselves are not executed by block_self.
405 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* scm_i_setup_sleep returns nonzero when an async is already
     pending, in which case we must not sleep at all.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block asyncs while queued so an async cannot slip in between
         enqueue and the cond wait.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
437
438 /* Wake up the first thread on QUEUE, if any. The awoken thread is
439 returned, or #f if the queue was empty.
440 */
441 static SCM
442 unblock_from_queue (SCM queue)
443 {
444 SCM thread = dequeue (queue);
445 if (scm_is_true (thread))
446 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
447 return thread;
448 }
449
450 \f
451 /* Getting into and out of guile mode.
452 */
453
454 /* Key used to attach a cleanup handler to a given thread. Also, if
455 thread-local storage is unavailable, this key is used to retrieve the
456 current thread with `pthread_getspecific ()'. */
457 scm_i_pthread_key_t scm_i_thread_key;
458
459
460 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
461
462 /* When thread-local storage (TLS) is available, a pointer to the
463 current-thread object is kept in TLS. Note that storing the thread-object
464 itself in TLS (rather than a pointer to some malloc'd memory) is not
465 possible since thread objects may live longer than the actual thread they
466 represent. */
467 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
468
469 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
470
471
472 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
473 static scm_i_thread *all_threads = NULL;
474 static int thread_count;
475
476 static SCM scm_i_default_dynamic_state;
477
/* Run when a fluid is collected.  Clear slot N of every thread's
   dynamic-state fluid vector so the dead fluid's value cannot keep
   other objects alive.  Walks the global thread list under
   thread_admin_mutex.  */
void
scm_i_reset_fluid (size_t n)
{
  scm_i_thread *t;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (t = all_threads; t; t = t->next_thread)
    if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
      {
        SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);

        /* Threads created before the fluid may have a shorter vector;
           only clear when the slot exists.  */
        if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
          SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
      }
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
495
496 /* Perform first stage of thread initialisation, in non-guile mode.
497 */
498 static void
499 guilify_self_1 (struct GC_stack_base *base)
500 {
501 scm_i_thread t;
502
503 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
504 before allocating anything in this thread, because allocation could
505 cause GC to run, and GC could cause finalizers, which could invoke
506 Scheme functions, which need the current thread to be set. */
507
508 t.pthread = scm_i_pthread_self ();
509 t.handle = SCM_BOOL_F;
510 t.result = SCM_BOOL_F;
511 t.cleanup_handler = SCM_BOOL_F;
512 t.mutexes = SCM_EOL;
513 t.held_mutex = NULL;
514 t.join_queue = SCM_EOL;
515 t.dynamic_state = SCM_BOOL_F;
516 t.dynwinds = SCM_EOL;
517 t.active_asyncs = SCM_EOL;
518 t.block_asyncs = 1;
519 t.pending_asyncs = 1;
520 t.critical_section_level = 0;
521 t.base = base->mem_base;
522 #ifdef __ia64__
523 t.register_backing_store_base = base->reg-base;
524 #endif
525 t.continuation_root = SCM_EOL;
526 t.continuation_base = t.base;
527 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
528 t.sleep_mutex = NULL;
529 t.sleep_object = SCM_BOOL_F;
530 t.sleep_fd = -1;
531
532 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
533 /* FIXME: Error conditions during the initialization phase are handled
534 gracelessly since public functions such as `scm_init_guile ()'
535 currently have type `void'. */
536 abort ();
537
538 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
539 t.current_mark_stack_ptr = NULL;
540 t.current_mark_stack_limit = NULL;
541 t.canceled = 0;
542 t.exited = 0;
543 t.guile_mode = 0;
544
545 /* The switcheroo. */
546 {
547 scm_i_thread *t_ptr = &t;
548
549 GC_disable ();
550 t_ptr = GC_malloc (sizeof (scm_i_thread));
551 memcpy (t_ptr, &t, sizeof t);
552
553 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
554
555 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
556 /* Cache the current thread in TLS for faster lookup. */
557 scm_i_current_thread = t_ptr;
558 #endif
559
560 scm_i_pthread_mutex_lock (&thread_admin_mutex);
561 t_ptr->next_thread = all_threads;
562 all_threads = t_ptr;
563 thread_count++;
564 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
565
566 GC_enable ();
567 }
568 }
569
/* Perform second stage of thread initialisation, in guile mode.
   Creates the Scheme-visible thread object, the dynamic state
   (inherited from PARENT when given) and the join queue, then enables
   async delivery.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Asyncs were blocked during stage 1; allow them now.  */
  t->block_asyncs = 0;
}
593
594 \f
595 /*** Fat mutexes */
596
597 /* We implement our own mutex type since we want them to be 'fair', we
598 want to do fancy things while waiting for them (like running
599 asyncs) and we might want to add things that are nice for
600 debugging.
601 */
602
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects all fields below */
  SCM owner;                    /* thread holding the mutex, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

/* Accessors for the fat-mutex smob.  */
#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
618
619 static SCM
620 call_cleanup (void *data)
621 {
622 SCM *proc_p = data;
623 return scm_call_0 (*proc_p);
624 }
625
/* Perform thread tear-down, in guile mode.  Runs the cleanup handler
   (if any), marks the thread exited, wakes all joiners, and wakes
   threads waiting on any mutexes still owned by this thread.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice; catch any
         error so tear-down always completes.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      call_cleanup, &ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* Wake up waiters on each mutex we still own; the weak reference may
     be gone if the mutex itself was collected.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);

      if (scm_is_true (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Since MUTEX is in `t->mutexes', T must be its owner.  */
	  assert (scm_is_eq (m->owner, t->handle));

	  unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = scm_cdr (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
676
/* Trampoline for do_thread_exit: re-register the thread with libgc
   (needed after pthread cancellation paths) and re-enter guile mode
   for the actual tear-down.  */
static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered. */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}
687
/* Thread-specific-key destructor: runs when a thread exits or is
   cancelled.  V is the thread's scm_i_thread.  Order matters
   throughout: guile-mode cleanup must happen before the thread is
   unlinked from all_threads, since stack SCM values lose GC protection
   once the thread leaves the list.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If we were canceled, we were unable to clear `t->guile_mode', so do
     it here.  */
  t->guile_mode = 0;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here. */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}
746
747 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
748
/* One-time initialiser: create the thread-specific key whose
   destructor (on_thread_exit) performs per-thread tear-down.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
754
755 /* Perform any initializations necessary to make the current thread
756 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
757 if necessary.
758
759 BASE is the stack base to use with GC.
760
761 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
762 which case the default dynamic state is used.
763
764 Returns zero when the thread was known to guile already; otherwise
765 return 1.
766
767 Note that it could be the case that the thread was known
768 to Guile, but not in guile mode (because we are within a
769 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
770 be sure. New threads are put into guile mode implicitly. */
771
static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);

#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later.  */
          GC_allow_register_threads ();
#endif

	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc. */
#if SCM_USE_PTHREAD_THREADS
          GC_register_my_thread (base);
#endif

	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
821
822 void
823 scm_init_guile ()
824 {
825 struct GC_stack_base stack_base;
826
827 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
828 scm_i_init_thread_for_guile (&stack_base,
829 scm_i_default_dynamic_state);
830 else
831 {
832 fprintf (stderr, "Failed to get stack base for current thread.\n");
833 exit (EXIT_FAILURE);
834 }
835 }
836
/* Bundle of arguments threaded through GC_call_with_stack_base into
   with_guile_and_parent.  */
struct with_guile_args
{
  GC_fn_type func;   /* user function to run in guile mode */
  void *data;        /* opaque argument for FUNC */
  SCM parent;        /* parent dynamic state for new threads */
};
843
844 static void *
845 with_guile_trampoline (void *data)
846 {
847 struct with_guile_args *args = data;
848
849 return scm_c_with_continuation_barrier (args->func, args->data);
850 }
851
/* Run ARGS->func in guile mode, entering guile (and registering the
   thread) first when necessary.  BASE is the stack base of this
   call.  */
static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
         scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than when it was
         when this thread was first guilified.  Thus, `base' must be
         updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
900
901 static void *
902 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
903 {
904 struct with_guile_args args;
905
906 args.func = func;
907 args.data = data;
908 args.parent = parent;
909
910 return GC_call_with_stack_base (with_guile_and_parent, &args);
911 }
912
/* Public API: run FUNC(DATA) in guile mode, using the default dynamic
   state as parent for threads new to Guile.  Returns FUNC's result.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
919
920 void *
921 scm_without_guile (void *(*func)(void *), void *data)
922 {
923 void *result;
924 scm_i_thread *t = SCM_I_CURRENT_THREAD;
925
926 if (t->guile_mode)
927 {
928 SCM_I_CURRENT_THREAD->guile_mode = 0;
929 result = with_gc_inactive (func, data);
930 SCM_I_CURRENT_THREAD->guile_mode = 1;
931 }
932 else
933 /* Otherwise we're not in guile mode, so nothing to do. */
934 result = func (data);
935
936 return result;
937 }
938
939 \f
940 /*** Thread creation */
941
/* Arguments passed from call-with-new-thread to the child thread.
   MUTEX/COND implement the handshake: the parent waits on COND until
   the child has stored its thread object in THREAD.  */
typedef struct {
  SCM parent;      /* parent dynamic state */
  SCM thunk;       /* thunk to run in the new thread */
  SCM handler;     /* catch handler, or SCM_UNDEFINED */
  SCM thread;      /* out: the child's thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
950
/* Body of a new Scheme thread, run in guile mode.  Publishes the
   thread object to the waiting parent, then runs the thunk (inside a
   catch when a handler was supplied) and stores its value as the
   thread result.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Handshake: tell the parent our thread object is ready.  DATA lives
     on the parent's stack, so it must not be touched after this.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
972
/* pthread start routine: detach (threads are joined at the Scheme
   level, not via pthread_join) and enter guile mode for the real
   launch.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
981
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread. The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler. This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  /* Let the collector do a little work before spawning.  */
  GC_collect_a_little ();
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex before pthread_create so the child's signal cannot
     be missed; then wait until it has stored its thread object.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
1026
/* C-level analogue of launch_data, for scm_spawn_thread: the body and
   handler are C callbacks rather than Scheme procedures.  */
typedef struct {
  SCM parent;                   /* parent dynamic state */
  scm_t_catch_body body;        /* C body to run in the new thread */
  void *body_data;              /* argument for BODY */
  scm_t_catch_handler handler;  /* C catch handler, or NULL */
  void *handler_data;           /* argument for HANDLER */
  SCM thread;                   /* out: the child's thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
1037
/* Body of a C-spawned thread, run in guile mode.  Mirrors
   really_launch: hand the thread object to the waiting parent, then
   run BODY (under scm_internal_catch when HANDLER is non-NULL).  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Handshake with the parent; DATA is on the parent's stack and must
     not be used after the unlock.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
1062
/* pthread start routine for scm_spawn_thread: detach and enter guile
   mode.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
1071
/* Public C API: spawn a new Guile thread running BODY(BODY_DATA),
   optionally guarded by HANDLER(HANDLER_DATA).  Blocks until the child
   has created its thread object and returns it.  Signals a system
   error when the underlying thread cannot be created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Same handshake as call-with-new-thread: hold the mutex across
     creation, then wait for the child to publish its thread object.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
1102
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Delegates to the platform's sched_yield equivalent; returns #t on
     success.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1111
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  /* The canceled flag is checked and set under the admin mutex so a
     thread is cancelled at most once.  */
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Drop the mutex before pthread_cancel: the dying thread's
         cleanup also takes this mutex.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1136
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f clears the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Silently ignore threads that are already gone or being torn
     down.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1160
1161 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1162 (SCM thread),
1163 "Return the cleanup handler installed for the thread @var{thread}.")
1164 #define FUNC_NAME s_scm_thread_cleanup
1165 {
1166 scm_i_thread *t;
1167 SCM ret;
1168
1169 SCM_VALIDATE_THREAD (1, thread);
1170
1171 t = SCM_I_THREAD_DATA (thread);
1172 scm_i_pthread_mutex_lock (&t->admin_mutex);
1173 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1174 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1175
1176 return ret;
1177 }
1178 #undef FUNC_NAME
1179
/* Join THREAD with no timeout: simply defer to the timed variant with
   both optional arguments left unspecified.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1184
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* If a timeout value was supplied it becomes the result returned on
     timeout; otherwise #f is returned.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  /* Joining oneself would deadlock; reject it up front.  */
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      /* Wait on the target's join queue, re-checking the exit flag
	 after every wakeup: block_self can return spuriously or on
	 interruption (EINTR), in which case we loop.  */
      while (1)
	{
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Give asyncs a chance to run between waits; the admin mutex
	     must be released around SCM_TICK.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited). */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1251
1252 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1253 (SCM obj),
1254 "Return @code{#t} if @var{obj} is a thread.")
1255 #define FUNC_NAME s_scm_thread_p
1256 {
1257 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1258 }
1259 #undef FUNC_NAME
1260
1261
1262 static size_t
1263 fat_mutex_free (SCM mx)
1264 {
1265 fat_mutex *m = SCM_MUTEX_DATA (mx);
1266 scm_i_pthread_mutex_destroy (&m->lock);
1267 return 0;
1268 }
1269
1270 static int
1271 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1272 {
1273 fat_mutex *m = SCM_MUTEX_DATA (mx);
1274 scm_puts_unlocked ("#<mutex ", port);
1275 scm_uintprint ((scm_t_bits)m, 16, port);
1276 scm_puts_unlocked (">", port);
1277 return 1;
1278 }
1279
/* Allocate and initialize a fat mutex with the given option flags and
   wrap it in a SMOB.  RECURSIVE allows re-locking by the owner;
   UNCHECKED_UNLOCK permits unlocking an unlocked mutex;
   EXTERNAL_UNLOCK permits unlocking from a non-owner thread.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* `waiting' is first set to a trivially valid value before the SMOB
     is created, and only then to the real queue: make_queue allocates,
     which can trigger GC, so the SMOB must already be fully
     initialized at that point.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1300
/* C-level convenience: create a standard (non-recursive) mutex with no
   option flags.  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1305
/* Option symbols accepted by make-mutex (see
   scm_make_mutex_with_flags below).  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1309
1310 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1311 (SCM flags),
1312 "Create a new mutex. ")
1313 #define FUNC_NAME s_scm_make_mutex_with_flags
1314 {
1315 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1316
1317 SCM ptr = flags;
1318 while (! scm_is_null (ptr))
1319 {
1320 SCM flag = SCM_CAR (ptr);
1321 if (scm_is_eq (flag, unchecked_unlock_sym))
1322 unchecked_unlock = 1;
1323 else if (scm_is_eq (flag, allow_external_unlock_sym))
1324 external_unlock = 1;
1325 else if (scm_is_eq (flag, recursive_sym))
1326 recursive = 1;
1327 else
1328 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1329 ptr = SCM_CDR (ptr);
1330 }
1331 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1332 }
1333 #undef FUNC_NAME
1334
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1343
/* Error key thrown when a lock is obtained on a mutex whose owner
   thread has exited (see fat_mutex_lock).  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1345
/* Core locking routine.  Try to acquire MUTEX on behalf of OWNER
   (defaulting to the current thread), optionally bounded by TIMEOUT
   (absolute time; NULL means wait forever).  On return, *RET is 1 if
   the lock was acquired and 0 otherwise.  The return value is #f on
   success, or a (key . message) pair describing an error to be thrown
   by the caller.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Mutex is free: take it.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

	      /* FIXME: The order in which `t->admin_mutex' and
		 `m->lock' are taken differs from that in
		 `on_thread_exit', potentially leading to deadlocks. */
	      scm_i_pthread_mutex_lock (&t->admin_mutex);

	      /* Only keep a weak reference to MUTEX so that it's not
		 retained when not referenced elsewhere (bug #27450).
		 The weak pair itself is eventually removed when MUTEX
		 is unlocked.  Note that `t->mutexes' lists mutexes
		 currently held by T, so it should be small. */
	      t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
				     t->mutexes);

	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* Previous owner died while holding the mutex: steal it and
	     report the abandonment to the caller via ERR.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Re-entrant lock: allowed only for recursive mutexes.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by someone else: check for timeout expiry, then block
	     on the mutex's wait queue and retry.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1434
/* C-level convenience: lock MX with no timeout and the current thread
   as owner.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1439
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
	    (SCM m, SCM timeout, SCM owner),
	    "Lock @var{mutex}. If the mutex is already locked, the calling thread "
	    "blocks until the mutex becomes available. The function returns when "
	    "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
	    "a thread already owns will succeed right away and will not block the "
	    "thread. That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  /* An unset or #f timeout means block indefinitely.  */
  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
    SCM_VALIDATE_THREAD (3, owner);

  /* fat_mutex_lock reports errors as a (key . message) pair rather
     than throwing with its lock held; we re-throw here.  */
  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1470
/* void-returning adapters around scm_lock_mutex/scm_unlock_mutex, with
   the signature required by the dynwind handler registration below.  */
static void
lock_mutex_return_void (SCM mx)
{
  (void) scm_lock_mutex (mx);
}

static void
unlock_mutex_return_void (SCM mx)
{
  (void) scm_unlock_mutex (mx);
}
1482
/* Lock MUTEX for the duration of the current dynwind context: it is
   locked on (re)entry and unlocked on exit or nonlocal escape.  The
   unwind handler is registered first so that an interrupt between the
   two registrations cannot leave the mutex locked.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1491
1492 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1493 (SCM mutex),
1494 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1495 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1496 #define FUNC_NAME s_scm_try_mutex
1497 {
1498 SCM exception;
1499 int ret = 0;
1500 scm_t_timespec cwaittime, *waittime = NULL;
1501
1502 SCM_VALIDATE_MUTEX (1, mutex);
1503
1504 to_timespec (scm_from_int(0), &cwaittime);
1505 waittime = &cwaittime;
1506
1507 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1508 if (!scm_is_false (exception))
1509 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1510 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1511 }
1512 #undef FUNC_NAME
1513
/*** Fat condition variables */

/* Payload of a condition-variable SMOB.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects `waiting' */
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

/* SMOB type test and payload accessor for condition variables.  */
#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1523
1524 static void
1525 remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
1526 {
1527 SCM walk, prev;
1528
1529 for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
1530 walk = SCM_CDR (walk))
1531 {
1532 if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
1533 {
1534 if (scm_is_pair (prev))
1535 SCM_SETCDR (prev, SCM_CDR (walk));
1536 else
1537 t->mutexes = SCM_CDR (walk);
1538 break;
1539 }
1540 }
1541 }
1542
/* Core unlocking routine, shared by unlock-mutex and
   wait-condition-variable.  Decrement MUTEX's lock level; when COND is
   given, atomically release the mutex and wait on COND (bounded by
   WAITTIME if non-NULL), re-locking the mutex afterwards iff RELOCK.
   Returns 1 on success, 0 on timeout.  Signals an error for an unlock
   that the mutex's option flags do not permit.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      /* Caller does not own the mutex: allowed only when the
	 corresponding option flag was set at creation time.  */
      if (m->level == 0)
	{
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = t->handle;
	}
      else if (!m->allow_external_unlock)
	{
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      /* Condition-variable wait: release the mutex, wait on COND, and
	 loop on EINTR so asyncs can run between waits.  */
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    {
	      /* Change the owner of MUTEX.  */
	      remove_mutex_from_thread (mutex, t);
	      m->owner = unblock_from_queue (m->waiting);
	    }

	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      /* Re-acquire on behalf of the original owner so a
		 recursive lock level is restored correctly.  */
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  t->block_asyncs--;
	  scm_async_tick ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock: drop one level and hand the mutex to the next
	 waiter (or #f) when fully released.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	{
	  /* Change the owner of MUTEX.  */
	  remove_mutex_from_thread (mutex, t);
	  m->owner = unblock_from_queue (m->waiting);
	}

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1645
/* C-level convenience: plain unlock with no condition variable.  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1650
SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
	    (SCM mx, SCM cond, SCM timeout),
	    "Unlocks @var{mutex} if the calling thread owns the lock on "
	    "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
	    "thread results in undefined behaviour.  Once a mutex has been unlocked, "
	    "one thread blocked on @var{mutex} is awakened and grabs the mutex "
	    "lock.  Every call to @code{lock-mutex} by this thread must be matched "
	    "with a call to @code{unlock-mutex}.  Only the last call to "
	    "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  /* The optional COND turns this into a condition-variable wait; the
     TIMEOUT is only meaningful in that case.  */
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! (SCM_UNBNDP (timeout)))
	{
	  to_timespec (timeout, &cwaittime);
	  waittime = &cwaittime;
	}
    }

  /* relock = 0: the mutex stays released after a condvar wait here.  */
  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1679
1680 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1681 (SCM obj),
1682 "Return @code{#t} if @var{obj} is a mutex.")
1683 #define FUNC_NAME s_scm_mutex_p
1684 {
1685 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1686 }
1687 #undef FUNC_NAME
1688
1689 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1690 (SCM mx),
1691 "Return the thread owning @var{mx}, or @code{#f}.")
1692 #define FUNC_NAME s_scm_mutex_owner
1693 {
1694 SCM owner;
1695 fat_mutex *m = NULL;
1696
1697 SCM_VALIDATE_MUTEX (1, mx);
1698 m = SCM_MUTEX_DATA (mx);
1699 scm_i_pthread_mutex_lock (&m->lock);
1700 owner = m->owner;
1701 scm_i_pthread_mutex_unlock (&m->lock);
1702
1703 return owner;
1704 }
1705 #undef FUNC_NAME
1706
1707 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1708 (SCM mx),
1709 "Return the lock level of mutex @var{mx}.")
1710 #define FUNC_NAME s_scm_mutex_level
1711 {
1712 SCM_VALIDATE_MUTEX (1, mx);
1713 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1714 }
1715 #undef FUNC_NAME
1716
1717 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1718 (SCM mx),
1719 "Returns @code{#t} if the mutex @var{mx} is locked.")
1720 #define FUNC_NAME s_scm_mutex_locked_p
1721 {
1722 SCM_VALIDATE_MUTEX (1, mx);
1723 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1724 }
1725 #undef FUNC_NAME
1726
1727 static int
1728 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1729 {
1730 fat_cond *c = SCM_CONDVAR_DATA (cv);
1731 scm_puts_unlocked ("#<condition-variable ", port);
1732 scm_uintprint ((scm_t_bits)c, 16, port);
1733 scm_puts_unlocked (">", port);
1734 return 1;
1735 }
1736
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* As in make_fat_mutex: `waiting' gets a trivially valid value
     before the SMOB exists, and the real queue only afterwards, since
     make_queue allocates and may trigger GC.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1752
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
	    (SCM cv, SCM mx, SCM t),
	    "Wait until @var{cond-var} has been signalled.  While waiting, "
	    "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
	    "is locked again when this function returns.  When @var{time} is given, "
	    "it specifies a point in time where the waiting should be aborted.  It "
	    "can be either a integer as returned by @code{current-time} or a pair "
	    "as returned by @code{gettimeofday}.  When the waiting is aborted the "
	    "mutex is locked and @code{#f} is returned.  When the condition "
	    "variable is in fact signalled, the mutex is also locked and @code{#t} "
	    "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  /* relock = 1: the mutex is re-acquired before returning, whether the
     wait succeeded or timed out.  */
  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1780
/* Wake one thread (if any) waiting on condition C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1786
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  /* Always returns #t, whether or not a waiter was woken.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1797
1798 static void
1799 fat_cond_broadcast (fat_cond *c)
1800 {
1801 while (scm_is_true (unblock_from_queue (c->waiting)))
1802 ;
1803 }
1804
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  /* Always returns #t.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1815
1816 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1817 (SCM obj),
1818 "Return @code{#t} if @var{obj} is a condition variable.")
1819 #define FUNC_NAME s_scm_condition_variable_p
1820 {
1821 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1822 }
1823 #undef FUNC_NAME
1824
1825
1826 \f
1827 /*** Select */
1828
/* Argument/result bundle passed through scm_without_guile to
   do_std_select below.  */
struct select_args
{
  int             nfds;        /* highest fd + 1, as for select(2) */
  SELECT_TYPE    *read_fds;
  SELECT_TYPE    *write_fds;
  SELECT_TYPE    *except_fds;
  struct timeval *timeout;

  int             result;      /* return value of select(2) */
  int             errno_value; /* errno captured right after the call */
};
1840
1841 static void *
1842 do_std_select (void *args)
1843 {
1844 struct select_args *select_args;
1845
1846 select_args = (struct select_args *) args;
1847
1848 select_args->result =
1849 select (select_args->nfds,
1850 select_args->read_fds, select_args->write_fds,
1851 select_args->except_fds, select_args->timeout);
1852 select_args->errno_value = errno;
1853
1854 return NULL;
1855 }
1856
/* select(2) replacement that cooperates with Guile's thread wakeup
   machinery: the calling thread's sleep pipe is added to the read set
   so that another thread can interrupt the wait, and the actual select
   runs outside guile mode so the GC is not blocked.  Returns like
   select(2); a wakeup with no other ready fd reports EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set to put the wakeup fd in.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register this sleep so other threads can interrupt it; retry if an
     interruption is already pending.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  /* If the wakeup pipe fired, consume the byte and hide that fd from
     the caller; a wakeup with nothing else ready becomes EINTR.  */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1914
1915 /* Convenience API for blocking while in guile mode. */
1916
1917 #if SCM_USE_PTHREAD_THREADS
1918
1919 /* It seems reasonable to not run procedures related to mutex and condition
1920 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1921 without it, and (ii) the only potential gain would be GC latency. See
1922 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1923 for a discussion of the pros and cons. */
1924
1925 int
1926 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1927 {
1928 int res = scm_i_pthread_mutex_lock (mutex);
1929 return res;
1930 }
1931
/* Dynwind unwind handler: unlock the pthread mutex passed as DATA.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1937
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is exited, including by nonlocal exit.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1944
/* Condition wait in guile mode.  The mutex is recorded in
   `held_mutex' around the wait so that other machinery (e.g. thread
   teardown) can see which mutex this thread is blocked holding.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1957
/* Timed condition wait in guile mode; WT is an absolute timespec.
   Mirrors scm_pthread_cond_wait including the held_mutex protocol.  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
1972
1973 #endif
1974
1975 unsigned long
1976 scm_std_usleep (unsigned long usecs)
1977 {
1978 struct timeval tv;
1979 tv.tv_usec = usecs % 1000000;
1980 tv.tv_sec = usecs / 1000000;
1981 scm_std_select (0, NULL, NULL, NULL, &tv);
1982 return tv.tv_sec * 1000000 + tv.tv_usec;
1983 }
1984
1985 unsigned int
1986 scm_std_sleep (unsigned int secs)
1987 {
1988 struct timeval tv;
1989 tv.tv_usec = 0;
1990 tv.tv_sec = secs;
1991 scm_std_select (0, NULL, NULL, NULL, &tv);
1992 return tv.tv_sec;
1993 }
1994
1995 /*** Misc */
1996
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The thread's Scheme-visible handle lives in its scm_i_thread.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
2005
2006 static SCM
2007 scm_c_make_list (size_t n, SCM fill)
2008 {
2009 SCM res = SCM_EOL;
2010 while (n-- > 0)
2011 res = scm_cons (fill, res);
2012 return res;
2013 }
2014
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate the result list (one cell per thread counted above),
     then fill it in place under the mutex.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is hidden from users.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Truncate any unused pre-allocated cells.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
2043
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
2052
/* C-level version of thread-exited?; validates THREAD and reads its
   `exited' flag.  Reuses the Scheme procedure's FUNC_NAME so
   validation errors are reported under "thread-exited?".  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
2063
SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
	    (void),
	    "Return the total number of processors of the machine, which\n"
	    "is guaranteed to be at least 1.  A ``processor'' here is a\n"
	    "thread execution unit, which can be either:\n\n"
	    "@itemize\n"
	    "@item an execution core in a (possibly multi-core) chip, in a\n"
	    "  (possibly multi- chip) module, in a single computer, or\n"
	    "@item a thread execution unit inside a core in the case of\n"
	    "  @dfn{hyper-threaded} CPUs.\n"
	    "@end itemize\n\n"
	    "Which of the two definitions is used, is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  /* Delegates to gnulib's nproc module.  */
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME
2081
SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
	    (void),
	    "Like @code{total-processor-count}, but return the number of\n"
	    "processors available to the current process.  See\n"
	    "@code{setaffinity} and @code{getaffinity} for more\n"
	    "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  /* NPROC_CURRENT restricts the count to this process's affinity.  */
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
2093
2094
2095 \f
2096
/* Condition variable used to wake sleeping threads.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Becomes 1 once scm_init_threads has run.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Fallback mutex used by scm_dynwind_critical_section when the caller
   passes #f.  */
static SCM dynwind_critical_section_mutex;
2106
/* Enter a critical section for the current dynwind context: lock MUTEX
   (or the shared recursive default when MUTEX is #f) and block asyncs
   until the context is exited.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
2115
2116 /*** Initialization */
2117
/* General-purpose internal mutex for miscellaneous global state.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Attribute used to create recursive pthread mutexes.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
2123
/* Earliest threads setup, called before the heap exists.  Initializes
   the global mutexes and wakeup condition, then registers the main
   thread (BASE is its stack base for the GC).  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex must be recursive: critical sections
     may nest.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}
2140
/* SMOB type tags for threads, mutexes and condition variables,
   assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
2144
/* Register the thread/mutex/condvar SMOB types and finish setting up
   the main thread.  Runs once during boot, after the heap exists.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2165
/* Install the default dynamic state that newly created threads will
   inherit, derived from the current one.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}
2172
/* Register all Scheme procedures defined in this file (the .x file is
   generated from the SCM_DEFINE forms above).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2178
2179 \f
2180 /* IA64-specific things. */
2181
#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* HP-UX: locate the base of the IA64 register backing store by
   scanning the process's VM regions for the RSE stack.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* HP-UX: read the ar.bsp register out of a ucontext.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* Linux: glibc exports the backing store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Linux: the backing store pointer is stored in the machine context.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */
2220
2221
2222 /*
2223 Local Variables:
2224 c-file-style: "gnu"
2225 End:
2226 */