Merge remote-tracking branch 'origin/stable-2.0'
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 * Free Software Foundation, Inc.
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 */
20
21
22 \f
23 #ifdef HAVE_CONFIG_H
24 # include <config.h>
25 #endif
26
27 #include "libguile/bdw-gc.h"
28 #include "libguile/_scm.h"
29
30 #include <stdlib.h>
31 #if HAVE_UNISTD_H
32 #include <unistd.h>
33 #endif
34 #include <stdio.h>
35
36 #ifdef HAVE_STRING_H
37 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
38 #endif
39
40 #if HAVE_SYS_TIME_H
41 #include <sys/time.h>
42 #endif
43
44 #if HAVE_PTHREAD_NP_H
45 # include <pthread_np.h>
46 #endif
47
48 #include <assert.h>
49 #include <fcntl.h>
50 #include <nproc.h>
51
52 #include "libguile/validate.h"
53 #include "libguile/root.h"
54 #include "libguile/eval.h"
55 #include "libguile/async.h"
56 #include "libguile/ports.h"
57 #include "libguile/threads.h"
58 #include "libguile/dynwind.h"
59 #include "libguile/iselect.h"
60 #include "libguile/fluids.h"
61 #include "libguile/continuations.h"
62 #include "libguile/gc.h"
63 #include "libguile/init.h"
64 #include "libguile/scmsigs.h"
65 #include "libguile/strings.h"
66
67 #include <full-read.h>
68
69
70 \f
71
72 /* First some libgc shims. */
73
74 /* Make sure GC_fn_type is defined; it is missing from the public
75 headers of GC 7.1 and earlier. */
76 #ifndef HAVE_GC_FN_TYPE
77 typedef void * (* GC_fn_type) (void *);
78 #endif
79
80
81 #ifndef GC_SUCCESS
82 #define GC_SUCCESS 0
83 #endif
84
85 #ifndef GC_UNIMPLEMENTED
86 #define GC_UNIMPLEMENTED 3
87 #endif
88
89 /* Likewise struct GC_stack_base is missing before 7.1. */
90 #ifndef HAVE_GC_STACK_BASE
91 struct GC_stack_base {
92 void * mem_base; /* Base of memory stack. */
93 #ifdef __ia64__
94 void * reg_base; /* Base of separate register stack. */
95 #endif
96 };
97
98 static int
99 GC_register_my_thread (struct GC_stack_base *stack_base)
100 {
101 return GC_UNIMPLEMENTED;
102 }
103
104 static void
105 GC_unregister_my_thread ()
106 {
107 }
108
109 #if !SCM_USE_PTHREAD_THREADS
110 /* No threads; we can just use GC_stackbottom. */
111 static void *
112 get_thread_stack_base ()
113 {
114 return GC_stackbottom;
115 }
116
117 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
118 && defined PTHREAD_ATTR_GETSTACK_WORKS
119 /* This method for GNU/Linux and perhaps some other systems.
120 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
121 available on them. */
122 static void *
123 get_thread_stack_base ()
124 {
125 pthread_attr_t attr;
126 void *start, *end;
127 size_t size;
128
129 pthread_getattr_np (pthread_self (), &attr);
130 pthread_attr_getstack (&attr, &start, &size);
131 end = (char *)start + size;
132
133 #if SCM_STACK_GROWS_UP
134 return start;
135 #else
136 return end;
137 #endif
138 }
139
140 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
141 /* This method for MacOS X.
142 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
143 but as of 2006 there's nothing obvious at apple.com. */
144 static void *
145 get_thread_stack_base ()
146 {
147 return pthread_get_stackaddr_np (pthread_self ());
148 }
149
150 #elif HAVE_PTHREAD_ATTR_GET_NP
151 /* This one is for FreeBSD 9. */
152 static void *
153 get_thread_stack_base ()
154 {
155 pthread_attr_t attr;
156 void *start, *end;
157 size_t size;
158
159 pthread_attr_init (&attr);
160 pthread_attr_get_np (pthread_self (), &attr);
161 pthread_attr_getstack (&attr, &start, &size);
162 pthread_attr_destroy (&attr);
163
164 end = (char *)start + size;
165
166 #if SCM_STACK_GROWS_UP
167 return start;
168 #else
169 return end;
170 #endif
171 }
172
173 #else
174 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
175 #endif
176
/* Fallback implementation of GC_get_stack_base for libgc < 7.1: fill
   in STACK_BASE for the calling thread.  Always returns GC_SUCCESS.  */
static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}
199
200 static void *
201 GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
202 {
203 struct GC_stack_base stack_base;
204
205 stack_base.mem_base = (void*)&stack_base;
206 #ifdef __ia64__
207 /* FIXME: Untested. */
208 {
209 ucontext_t ctx;
210 getcontext (&ctx);
211 stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
212 }
213 #endif
214
215 return fn (&stack_base, arg);
216 }
217 #endif /* HAVE_GC_STACK_BASE */
218
219
220 /* Now define with_gc_active and with_gc_inactive. */
221
#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer).  */

/* Run FUNC (DATA) with this thread's stack excluded from GC scanning,
   so the collector need not stop us while we block.  */
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

/* Run FUNC (DATA) with GC scanning of this thread's stack re-enabled;
   inverse of with_gc_inactive.  */
static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active.  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */
258
259
260 \f
261 static void
262 to_timespec (SCM t, scm_t_timespec *waittime)
263 {
264 if (scm_is_pair (t))
265 {
266 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
267 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
268 }
269 else
270 {
271 double time = scm_to_double (t);
272 double sec = scm_c_truncate (time);
273
274 waittime->tv_sec = (long) sec;
275 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
276 }
277 }
278
279 \f
280 /*** Queues */
281
282 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
283 the risk of false references leading to unbounded retained space as
284 described in "Bounding Space Usage of Conservative Garbage Collectors",
285 H.J. Boehm, 2001. */
286
287 /* Make an empty queue data structure.
288 */
289 static SCM
290 make_queue ()
291 {
292 return scm_cons (SCM_EOL, SCM_EOL);
293 }
294
295 /* Put T at the back of Q and return a handle that can be used with
296 remqueue to remove T from Q again.
297 */
298 static SCM
299 enqueue (SCM q, SCM t)
300 {
301 SCM c = scm_cons (t, SCM_EOL);
302 SCM_CRITICAL_SECTION_START;
303 if (scm_is_null (SCM_CDR (q)))
304 SCM_SETCDR (q, c);
305 else
306 SCM_SETCDR (SCM_CAR (q), c);
307 SCM_SETCAR (q, c);
308 SCM_CRITICAL_SECTION_END;
309 return c;
310 }
311
312 /* Remove the element that the handle C refers to from the queue Q. C
313 must have been returned from a call to enqueue. The return value
314 is zero when the element referred to by C has already been removed.
315 Otherwise, 1 is returned.
316 */
317 static int
318 remqueue (SCM q, SCM c)
319 {
320 SCM p, prev = q;
321 SCM_CRITICAL_SECTION_START;
322 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
323 {
324 if (scm_is_eq (p, c))
325 {
326 if (scm_is_eq (c, SCM_CAR (q)))
327 SCM_SETCAR (q, SCM_CDR (c));
328 SCM_SETCDR (prev, SCM_CDR (c));
329
330 /* GC-robust */
331 SCM_SETCDR (c, SCM_EOL);
332
333 SCM_CRITICAL_SECTION_END;
334 return 1;
335 }
336 prev = p;
337 }
338 SCM_CRITICAL_SECTION_END;
339 return 0;
340 }
341
342 /* Remove the front-most element from the queue Q and return it.
343 Return SCM_BOOL_F when Q is empty.
344 */
345 static SCM
346 dequeue (SCM q)
347 {
348 SCM c;
349 SCM_CRITICAL_SECTION_START;
350 c = SCM_CDR (q);
351 if (scm_is_null (c))
352 {
353 SCM_CRITICAL_SECTION_END;
354 return SCM_BOOL_F;
355 }
356 else
357 {
358 SCM_SETCDR (q, SCM_CDR (c));
359 if (scm_is_null (SCM_CDR (q)))
360 SCM_SETCAR (q, SCM_EOL);
361 SCM_CRITICAL_SECTION_END;
362
363 /* GC-robust */
364 SCM_SETCDR (c, SCM_EOL);
365
366 return SCM_CAR (c);
367 }
368 }
369
370 /*** Thread smob routines */
371
372
373 static int
374 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
375 {
376 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
377 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
378 the struct case, hence we go via a union, and extract according to the
379 size of pthread_t. */
380 union {
381 scm_i_pthread_t p;
382 unsigned short us;
383 unsigned int ui;
384 unsigned long ul;
385 scm_t_uintmax um;
386 } u;
387 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
388 scm_i_pthread_t p = t->pthread;
389 scm_t_uintmax id;
390 u.p = p;
391 if (sizeof (p) == sizeof (unsigned short))
392 id = u.us;
393 else if (sizeof (p) == sizeof (unsigned int))
394 id = u.ui;
395 else if (sizeof (p) == sizeof (unsigned long))
396 id = u.ul;
397 else
398 id = u.um;
399
400 scm_puts_unlocked ("#<thread ", port);
401 scm_uintprint (id, 10, port);
402 scm_puts_unlocked (" (", port);
403 scm_uintprint ((scm_t_bits)t, 16, port);
404 scm_puts_unlocked (")>", port);
405 return 1;
406 }
407
408 \f
409 /*** Blocking on queues. */
410
411 /* See also scm_i_queue_async_cell for how such a block is
412 interrputed.
413 */
414
415 /* Put the current thread on QUEUE and go to sleep, waiting for it to
416 be woken up by a call to 'unblock_from_queue', or to be
417 interrupted. Upon return of this function, the current thread is
418 no longer on QUEUE, even when the sleep has been interrupted.
419
420 The caller of block_self must hold MUTEX. It will be atomically
421 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
422
423 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
424 as MUTEX is needed.
425
426 When WAITTIME is not NULL, the sleep will be aborted at that time.
427
428 The return value of block_self is an errno value. It will be zero
429 when the sleep has been successfully completed by a call to
430 unblock_from_queue, EINTR when it has been interrupted by the
431 delivery of a system async, and ETIMEDOUT when the timeout has
432 expired.
433
434 The system asyncs themselves are not executed by block_self.
435 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* NOTE(review): scm_i_setup_sleep appears to return nonzero when an
     interrupt is already pending, in which case we do not sleep at all
     -- confirm against async.c.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Asyncs must not run between enqueueing ourselves and waiting,
	 or we could miss the wakeup.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
467
468 /* Wake up the first thread on QUEUE, if any. The awoken thread is
469 returned, or #f if the queue was empty.
470 */
471 static SCM
472 unblock_from_queue (SCM queue)
473 {
474 SCM thread = dequeue (queue);
475 if (scm_is_true (thread))
476 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
477 return thread;
478 }
479
480 \f
481 /* Getting into and out of guile mode.
482 */
483
484 /* Key used to attach a cleanup handler to a given thread. Also, if
485 thread-local storage is unavailable, this key is used to retrieve the
486 current thread with `pthread_getspecific ()'. */
487 scm_i_pthread_key_t scm_i_thread_key;
488
489
490 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
491
492 /* When thread-local storage (TLS) is available, a pointer to the
493 current-thread object is kept in TLS. Note that storing the thread-object
494 itself in TLS (rather than a pointer to some malloc'd memory) is not
495 possible since thread objects may live longer than the actual thread they
496 represent. */
497 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
498
499 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
500
501
502 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
503 static scm_i_thread *all_threads = NULL;
504 static int thread_count;
505
506 static SCM scm_i_default_dynamic_state;
507
508 /* Run when a fluid is collected. */
509 void
510 scm_i_reset_fluid (size_t n)
511 {
512 scm_i_thread *t;
513
514 scm_i_pthread_mutex_lock (&thread_admin_mutex);
515 for (t = all_threads; t; t = t->next_thread)
516 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
517 {
518 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
519
520 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
521 SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
522 }
523 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
524 }
525
526 /* Perform first stage of thread initialisation, in non-guile mode.
527 */
static void
guilify_self_1 (struct GC_stack_base *base)
{
  scm_i_thread t;

  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
     before allocating anything in this thread, because allocation could
     cause GC to run, and GC could cause finalizers, which could invoke
     Scheme functions, which need the current thread to be set.  */

  t.pthread = scm_i_pthread_self ();
  t.handle = SCM_BOOL_F;
  t.result = SCM_BOOL_F;
  t.cleanup_handler = SCM_BOOL_F;
  t.mutexes = SCM_EOL;
  t.held_mutex = NULL;
  t.join_queue = SCM_EOL;
  t.dynamic_state = SCM_BOOL_F;
  t.dynstack.base = NULL;
  t.dynstack.top = NULL;
  t.dynstack.limit = NULL;
  t.active_asyncs = SCM_EOL;
  /* Keep asyncs blocked until stage 2 (guilify_self_2) finishes setting
     up the thread and resets block_asyncs to 0.  */
  t.block_asyncs = 1;
  t.pending_asyncs = 1;
  t.critical_section_level = 0;
  t.base = base->mem_base;
#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
#endif
  t.continuation_root = SCM_EOL;
  t.continuation_base = t.base;
  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
  t.sleep_mutex = NULL;
  t.sleep_object = SCM_BOOL_F;
  t.sleep_fd = -1;

  if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
  t.current_mark_stack_ptr = NULL;
  t.current_mark_stack_limit = NULL;
  t.canceled = 0;
  t.exited = 0;
  t.guile_mode = 0;

  /* The switcheroo: move the fully-initialized descriptor from the
     stack into GC-managed memory, with collection disabled so the
     heap copy cannot be scanned in a half-copied state.  */
  {
    scm_i_thread *t_ptr = &t;

    GC_disable ();
    t_ptr = GC_malloc (sizeof (scm_i_thread));
    memcpy (t_ptr, &t, sizeof t);

    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
    /* Cache the current thread in TLS for faster lookup.  */
    scm_i_current_thread = t_ptr;
#endif

    /* Link the new descriptor into the global thread list.  */
    scm_i_pthread_mutex_lock (&thread_admin_mutex);
    t_ptr->next_thread = all_threads;
    all_threads = t_ptr;
    thread_count++;
    scm_i_pthread_mutex_unlock (&thread_admin_mutex);

    GC_enable ();
  }
}
601
602 /* Perform second stage of thread initialisation, in guile mode.
603 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  /* Create the Scheme-visible thread object wrapping this descriptor.  */
  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  /* PARENT is a dynamic state to inherit from, or #f for the initial
     dynamic state.  */
  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->dynstack.base = scm_gc_malloc (16 * sizeof (scm_t_bits), "dynstack");
  t->dynstack.limit = t->dynstack.base + 16;
  t->dynstack.top = t->dynstack.base + SCM_DYNSTACK_HEADER_LEN;

  t->join_queue = make_queue ();
  /* Asyncs were blocked during stage 1 (guilify_self_1); enable them
     now that the thread is fully set up.  */
  t->block_asyncs = 0;

  /* See note in finalizers.c:queue_finalizer_async().  */
  GC_invoke_finalizers ();
}
632
633 \f
634 /*** Fat mutexes */
635
636 /* We implement our own mutex type since we want them to be 'fair', we
637 want to do fancy things while waiting for them (like running
638 asyncs) and we might want to add things that are nice for
639 debugging.
640 */
641
/* A "fat" mutex: a plain pthread mutex plus the bookkeeping needed to
   implement fairness, recursive locking and unlock checking on top of
   it.  All fields except LOCK are protected by LOCK itself.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;                 /* thread handle of the owner, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
657
658 static SCM
659 call_cleanup (void *data)
660 {
661 SCM *proc_p = data;
662 return scm_call_0 (*proc_p);
663 }
664
665 /* Perform thread tear-down, in guile mode.
666 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Run the user-installed cleanup handler at most once, catching any
     error so tear-down can continue; its result becomes the thread's
     exit value.  */
  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      call_cleanup, &ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on this thread.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* t->mutexes holds weak references to mutexes this thread locked;
     wake one waiter of each so abandoned mutexes are not stuck.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);

      if (scm_is_true (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Check whether T owns MUTEX.  This is usually the case, unless
	     T abandoned MUTEX; in that case, T is no longer its owner (see
	     `fat_mutex_lock') but MUTEX is still in `t->mutexes'.  */
	  if (scm_is_eq (m->owner, t->handle))
	    unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = scm_cdr (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
716
717 static void *
718 do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
719 {
720 /* Won't hurt if we are already registered. */
721 #if SCM_USE_PTHREAD_THREADS
722 GC_register_my_thread (sb);
723 #endif
724
725 return scm_with_guile (do_thread_exit, v);
726 }
727
/* pthread key destructor: runs when a thread with scm_i_thread_key set
   terminates.  V is the thread's scm_i_thread.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If we were canceled, we were unable to clear `t->guile_mode', so do
     it here.  */
  t->guile_mode = 0;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here. */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  /* Clear the key so the destructor does not run again for this
     thread.  */
  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}
786
787 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
788
/* One-time initializer: create the pthread key whose destructor,
   on_thread_exit, performs per-thread tear-down.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
794
795 /* Perform any initializations necessary to make the current thread
796 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
797 if necessary.
798
799 BASE is the stack base to use with GC.
800
801 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
802 which case the default dynamic state is used.
803
804 Returns zero when the thread was known to guile already; otherwise
805 return 1.
806
807 Note that it could be the case that the thread was known
808 to Guile, but not in guile mode (because we are within a
809 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
810 be sure. New threads are put into guile mode implicitly. */
811
static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
      */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);

#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later.  */
          GC_allow_register_threads ();
#endif

	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc. */
#if SCM_USE_PTHREAD_THREADS
          GC_register_my_thread (base);
#endif

	  /* Stage 1 runs in non-guile mode; stage 2 needs guile mode
	     (it allocates), which stage 1 establishes.  */
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
861
862 void
863 scm_init_guile ()
864 {
865 struct GC_stack_base stack_base;
866
867 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
868 scm_i_init_thread_for_guile (&stack_base,
869 scm_i_default_dynamic_state);
870 else
871 {
872 fprintf (stderr, "Failed to get stack base for current thread.\n");
873 exit (EXIT_FAILURE);
874 }
875 }
876
/* Argument bundle threaded through GC_call_with_stack_base down to
   with_guile_and_parent.  */
struct with_guile_args
{
  GC_fn_type func;
  void *data;
  SCM parent;
};

/* Invoke the user function behind a continuation barrier; used when
   re-entering guile mode from with_guile_and_parent.  */
static void *
with_guile_trampoline (void *data)
{
  struct with_guile_args *args = data;

  return scm_c_with_continuation_barrier (args->func, args->data);
}
891
/* Core of scm_with_guile: ensure the calling thread is in guile mode,
   run the user function behind a continuation barrier, and restore the
   previous mode.  BASE is the stack base at the point of the call.  */
static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
         scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than when it was
         when this thread was first guilified.  Thus, `base' must be
         updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      /* Re-enable GC scanning of this thread's stack while the user
	 function runs, then drop back out of guile mode.  */
      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
940
941 static void *
942 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
943 {
944 struct with_guile_args args;
945
946 args.func = func;
947 args.data = data;
948 args.parent = parent;
949
950 return GC_call_with_stack_base (with_guile_and_parent, &args);
951 }
952
953 void *
954 scm_with_guile (void *(*func)(void *), void *data)
955 {
956 return scm_i_with_guile_and_parent (func, data,
957 scm_i_default_dynamic_state);
958 }
959
960 void *
961 scm_without_guile (void *(*func)(void *), void *data)
962 {
963 void *result;
964 scm_i_thread *t = SCM_I_CURRENT_THREAD;
965
966 if (t->guile_mode)
967 {
968 SCM_I_CURRENT_THREAD->guile_mode = 0;
969 result = with_gc_inactive (func, data);
970 SCM_I_CURRENT_THREAD->guile_mode = 1;
971 }
972 else
973 /* Otherwise we're not in guile mode, so nothing to do. */
974 result = func (data);
975
976 return result;
977 }
978
979 \f
980 /*** Thread creation */
981
/* Handshake area between the caller of `call-with-new-thread' and the
   newborn thread.  It lives on the caller's stack; MUTEX and COND
   guard it until the child has stored its thread object in THREAD
   (see really_launch and scm_call_with_new_thread).  */
typedef struct {
  SCM parent;     /* dynamic state to inherit */
  SCM thunk;      /* thunk to run in the new thread */
  SCM handler;    /* catch handler, or SCM_UNDEFINED */
  SCM thread;     /* out: the new thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
990
991 static void *
992 really_launch (void *d)
993 {
994 launch_data *data = (launch_data *)d;
995 SCM thunk = data->thunk, handler = data->handler;
996 scm_i_thread *t;
997
998 t = SCM_I_CURRENT_THREAD;
999
1000 scm_i_scm_pthread_mutex_lock (&data->mutex);
1001 data->thread = scm_current_thread ();
1002 scm_i_pthread_cond_signal (&data->cond);
1003 scm_i_pthread_mutex_unlock (&data->mutex);
1004
1005 if (SCM_UNBNDP (handler))
1006 t->result = scm_call_0 (thunk);
1007 else
1008 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
1009
1010 return 0;
1011 }
1012
1013 static void *
1014 launch_thread (void *d)
1015 {
1016 launch_data *data = (launch_data *)d;
1017 scm_i_pthread_detach (scm_i_pthread_self ());
1018 scm_i_with_guile_and_parent (really_launch, d, data->parent);
1019 return NULL;
1020 }
1021
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  /* NOTE(review): presumably nudges the collector to reclaim dead
     threads' resources before creating another -- confirm.  */
  GC_collect_a_little ();
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the child has stored its thread object in DATA.thread;
     DATA is on our stack, so the child must be done with it before we
     return.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
1066
/* C-level analogue of launch_data, used by scm_spawn_thread: the same
   parent/thread/mutex/cond handshake, but with a C body and handler
   instead of Scheme procedures.  */
typedef struct {
  SCM parent;                  /* dynamic state to inherit */
  scm_t_catch_body body;       /* C function to run in the new thread */
  void *body_data;
  scm_t_catch_handler handler; /* C catch handler, or NULL */
  void *handler_data;
  SCM thread;                  /* out: the new thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
1077
1078 static void *
1079 really_spawn (void *d)
1080 {
1081 spawn_data *data = (spawn_data *)d;
1082 scm_t_catch_body body = data->body;
1083 void *body_data = data->body_data;
1084 scm_t_catch_handler handler = data->handler;
1085 void *handler_data = data->handler_data;
1086 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1087
1088 scm_i_scm_pthread_mutex_lock (&data->mutex);
1089 data->thread = scm_current_thread ();
1090 scm_i_pthread_cond_signal (&data->cond);
1091 scm_i_pthread_mutex_unlock (&data->mutex);
1092
1093 if (handler == NULL)
1094 t->result = body (body_data);
1095 else
1096 t->result = scm_internal_catch (SCM_BOOL_T,
1097 body, body_data,
1098 handler, handler_data);
1099
1100 return 0;
1101 }
1102
1103 static void *
1104 spawn_thread (void *d)
1105 {
1106 spawn_data *data = (spawn_data *)d;
1107 scm_i_pthread_detach (scm_i_pthread_self ());
1108 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1109 return NULL;
1110 }
1111
/* C-level thread creation: run BODY (BODY_DATA) in a new guile-mode
   thread, optionally under scm_internal_catch with HANDLER.  Returns
   the new thread object.  Signals a system error if the underlying
   pthread cannot be created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait for the child to publish its thread object; DATA is on our
     stack, so the child must be done with it before we return.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  assert (SCM_I_IS_THREAD (data.thread));

  return data.thread;
}
1144
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* NOTE(review): scm_i_sched_yield presumably wraps the platform's
     sched_yield and returns nonzero on success -- confirm.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1153
1154 SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1155 (SCM thread),
1156 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1157 "cannot be the current thread, and if @var{thread} has already terminated or "
1158 "been signaled to terminate, this function is a no-op.")
1159 #define FUNC_NAME s_scm_cancel_thread
1160 {
1161 scm_i_thread *t = NULL;
1162
1163 SCM_VALIDATE_THREAD (1, thread);
1164 t = SCM_I_THREAD_DATA (thread);
1165 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1166 if (!t->canceled)
1167 {
1168 t->canceled = 1;
1169 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1170 scm_i_pthread_cancel (t->pthread);
1171 }
1172 else
1173 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1174
1175 return SCM_UNSPECIFIED;
1176 }
1177 #undef FUNC_NAME
1178
1179 SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1180 (SCM thread, SCM proc),
1181 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1182 "This handler will be called when the thread exits.")
1183 #define FUNC_NAME s_scm_set_thread_cleanup_x
1184 {
1185 scm_i_thread *t;
1186
1187 SCM_VALIDATE_THREAD (1, thread);
1188 if (!scm_is_false (proc))
1189 SCM_VALIDATE_THUNK (2, proc);
1190
1191 t = SCM_I_THREAD_DATA (thread);
1192 scm_i_pthread_mutex_lock (&t->admin_mutex);
1193
1194 if (!(t->exited || t->canceled))
1195 t->cleanup_handler = proc;
1196
1197 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1198
1199 return SCM_UNSPECIFIED;
1200 }
1201 #undef FUNC_NAME
1202
1203 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1204 (SCM thread),
1205 "Return the cleanup handler installed for the thread @var{thread}.")
1206 #define FUNC_NAME s_scm_thread_cleanup
1207 {
1208 scm_i_thread *t;
1209 SCM ret;
1210
1211 SCM_VALIDATE_THREAD (1, thread);
1212
1213 t = SCM_I_THREAD_DATA (thread);
1214 scm_i_pthread_mutex_lock (&t->admin_mutex);
1215 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1216 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1217
1218 return ret;
1219 }
1220 #undef FUNC_NAME
1221
1222 SCM scm_join_thread (SCM thread)
1223 {
1224 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1225 }
1226
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* The value to return on timeout defaults to #f.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  /* TIMEOUT, when given, is an absolute point in time (see
     to_timespec).  */
  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      /* Repeatedly block on the thread's join queue until it exits or
         the timeout elapses; between waits, release the admin mutex to
         let asyncs run (SCM_TICK).  */
      while (1)
	{
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited). */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1293
1294 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1295 (SCM obj),
1296 "Return @code{#t} if @var{obj} is a thread.")
1297 #define FUNC_NAME s_scm_thread_p
1298 {
1299 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1300 }
1301 #undef FUNC_NAME
1302
1303
1304 static int
1305 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1306 {
1307 fat_mutex *m = SCM_MUTEX_DATA (mx);
1308 scm_puts_unlocked ("#<mutex ", port);
1309 scm_uintprint ((scm_t_bits)m, 16, port);
1310 scm_puts_unlocked (">", port);
1311 return 1;
1312 }
1313
/* Allocate and initialize a new fat_mutex smob.  RECURSIVE allows the
   owner to lock it repeatedly; UNCHECKED_UNLOCK makes unlocking an
   unlocked mutex a no-op rather than an error; EXTERNAL_UNLOCK lets a
   thread other than the owner unlock it.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;
  scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
     and so we can just copy it. */
  memcpy (&m->lock, &lock, sizeof (m->lock));
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* `waiting' is set to a valid SCM before the smob is created, and
     make_queue (which allocates and so may trigger GC) is called only
     afterwards — presumably so the collector never traces an
     uninitialized field.  TODO(review): confirm this ordering
     requirement.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1337
1338 SCM scm_make_mutex (void)
1339 {
1340 return scm_make_mutex_with_flags (SCM_EOL);
1341 }
1342
1343 SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1344 SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1345 SCM_SYMBOL (recursive_sym, "recursive");
1346
1347 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1348 (SCM flags),
1349 "Create a new mutex. ")
1350 #define FUNC_NAME s_scm_make_mutex_with_flags
1351 {
1352 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1353
1354 SCM ptr = flags;
1355 while (! scm_is_null (ptr))
1356 {
1357 SCM flag = SCM_CAR (ptr);
1358 if (scm_is_eq (flag, unchecked_unlock_sym))
1359 unchecked_unlock = 1;
1360 else if (scm_is_eq (flag, allow_external_unlock_sym))
1361 external_unlock = 1;
1362 else if (scm_is_eq (flag, recursive_sym))
1363 recursive = 1;
1364 else
1365 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1366 ptr = SCM_CDR (ptr);
1367 }
1368 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1369 }
1370 #undef FUNC_NAME
1371
1372 SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1373 (void),
1374 "Create a new recursive mutex. ")
1375 #define FUNC_NAME s_scm_make_recursive_mutex
1376 {
1377 return make_fat_mutex (1, 0, 0);
1378 }
1379 #undef FUNC_NAME
1380
1381 SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1382
/* Acquire MUTEX on behalf of OWNER (defaulting to the current thread),
   optionally giving up at absolute time TIMEOUT.  On success *RET is
   set to 1; on timeout, or on a non-recursive relock attempt, it is
   set to 0.  Returns #f on normal completion, or a (key . message)
   pair describing an exception the caller should throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unlocked: take it.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

	      /* FIXME: The order in which `t->admin_mutex' and
		 `m->lock' are taken differs from that in
		 `on_thread_exit', potentially leading to deadlocks. */
	      scm_i_pthread_mutex_lock (&t->admin_mutex);

	      /* Only keep a weak reference to MUTEX so that it's not
		 retained when not referenced elsewhere (bug #27450).
		 The weak pair itself is eventually removed when MUTEX
		 is unlocked.  Note that `t->mutexes' lists mutexes
		 currently held by T, so it should be small.  */
	      t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
				     t->mutexes);

	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* The owner died without unlocking: steal the lock, but
	     report the abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Relock by the current owner: only recursive mutexes allow
	     this; otherwise signal an error and report failure.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by another thread: fail if the absolute TIMEOUT has
	     already passed (tv_usec * 1000 converts microseconds to
	     nanoseconds for the comparison), otherwise block on the
	     wait queue, let asyncs run, and retry.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1471
1472 SCM scm_lock_mutex (SCM mx)
1473 {
1474 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1475 }
1476
1477 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1478 (SCM m, SCM timeout, SCM owner),
1479 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1480 "thread blocks until the mutex becomes available. The function\n"
1481 "returns when the calling thread owns the lock on @var{m}.\n"
1482 "Locking a mutex that a thread already owns will succeed right\n"
1483 "away and will not block the thread. That is, Guile's mutexes\n"
1484 "are @emph{recursive}.")
1485 #define FUNC_NAME s_scm_lock_mutex_timed
1486 {
1487 SCM exception;
1488 int ret = 0;
1489 scm_t_timespec cwaittime, *waittime = NULL;
1490
1491 SCM_VALIDATE_MUTEX (1, m);
1492
1493 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1494 {
1495 to_timespec (timeout, &cwaittime);
1496 waittime = &cwaittime;
1497 }
1498
1499 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1500 SCM_VALIDATE_THREAD (3, owner);
1501
1502 exception = fat_mutex_lock (m, waittime, owner, &ret);
1503 if (!scm_is_false (exception))
1504 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1505 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1506 }
1507 #undef FUNC_NAME
1508
1509 static void
1510 lock_mutex_return_void (SCM mx)
1511 {
1512 (void) scm_lock_mutex (mx);
1513 }
1514
1515 static void
1516 unlock_mutex_return_void (SCM mx)
1517 {
1518 (void) scm_unlock_mutex (mx);
1519 }
1520
/* Lock MUTEX for the extent of the current dynwind context: leaving
   the context (normally or non-locally) unlocks it, and re-entering it
   (via a continuation) locks it again.  The unwind handler is
   registered first, then the rewind handler — which, with
   SCM_F_WIND_EXPLICITLY, also performs the initial lock now.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1529
1530 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1531 (SCM mutex),
1532 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1533 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1534 #define FUNC_NAME s_scm_try_mutex
1535 {
1536 SCM exception;
1537 int ret = 0;
1538 scm_t_timespec cwaittime, *waittime = NULL;
1539
1540 SCM_VALIDATE_MUTEX (1, mutex);
1541
1542 to_timespec (scm_from_int(0), &cwaittime);
1543 waittime = &cwaittime;
1544
1545 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1546 if (!scm_is_false (exception))
1547 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1548 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1549 }
1550 #undef FUNC_NAME
1551
1552 /*** Fat condition variables */
1553
/* A "fat" condition variable: a queue of waiting threads.  (The lock
   field is present but the code in this file synchronizes waits via
   the associated mutex's lock — see fat_mutex_unlock.)  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)     SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1561
1562 static void
1563 remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
1564 {
1565 SCM walk, prev;
1566
1567 for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
1568 walk = SCM_CDR (walk))
1569 {
1570 if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
1571 {
1572 if (scm_is_pair (prev))
1573 SCM_SETCDR (prev, SCM_CDR (walk));
1574 else
1575 t->mutexes = SCM_CDR (walk);
1576 break;
1577 }
1578 }
1579 }
1580
/* Unlock MUTEX; when COND is bound, atomically start waiting on the
   condition variable COND as the mutex is released, until signalled or
   until the absolute time WAITTIME.  When RELOCK is non-zero the mutex
   is re-acquired on behalf of its previous owner before returning
   (wait-condition-variable semantics).  Returns 1 on success, 0 on
   timeout; raises a Scheme error on misuse or system error.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
	{
	  /* Unlocking an unlocked mutex: an error unless the mutex was
	     created with the `unchecked-unlock' flag.  */
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = t->handle;
	}
      else if (!m->allow_external_unlock)
	{
	  /* Locked by some other thread, and external unlock was not
	     enabled at creation time.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  /* Drop one lock level; at level 0 hand the mutex over to the
	     next queued waiter (or #f) and forget that we hold it.  */
	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    {
	      /* Change the owner of MUTEX.  */
	      remove_mutex_from_thread (mutex, t);
	      m->owner = unblock_from_queue (m->waiting);
	    }

	  /* Defer asyncs while we are parked on the cond's queue.  */
	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      /* Signalled.  */
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      /* Re-acquire the mutex for the previous owner before
		 returning, when requested.  */
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: let pending asyncs run, then loop to wait again.  */
	  t->block_asyncs--;
	  scm_async_tick ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock, with no condition variable involved.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	{
	  /* Change the owner of MUTEX.  */
	  remove_mutex_from_thread (mutex, t);
	  m->owner = unblock_from_queue (m->waiting);
	}

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1683
1684 SCM scm_unlock_mutex (SCM mx)
1685 {
1686 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1687 }
1688
1689 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1690 (SCM mx, SCM cond, SCM timeout),
1691 "Unlocks @var{mutex} if the calling thread owns the lock on "
1692 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1693 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1694 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1695 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1696 "with a call to @code{unlock-mutex}. Only the last call to "
1697 "@code{unlock-mutex} will actually unlock the mutex. ")
1698 #define FUNC_NAME s_scm_unlock_mutex_timed
1699 {
1700 scm_t_timespec cwaittime, *waittime = NULL;
1701
1702 SCM_VALIDATE_MUTEX (1, mx);
1703 if (! (SCM_UNBNDP (cond)))
1704 {
1705 SCM_VALIDATE_CONDVAR (2, cond);
1706
1707 if (! (SCM_UNBNDP (timeout)))
1708 {
1709 to_timespec (timeout, &cwaittime);
1710 waittime = &cwaittime;
1711 }
1712 }
1713
1714 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1715 }
1716 #undef FUNC_NAME
1717
1718 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1719 (SCM obj),
1720 "Return @code{#t} if @var{obj} is a mutex.")
1721 #define FUNC_NAME s_scm_mutex_p
1722 {
1723 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1724 }
1725 #undef FUNC_NAME
1726
1727 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1728 (SCM mx),
1729 "Return the thread owning @var{mx}, or @code{#f}.")
1730 #define FUNC_NAME s_scm_mutex_owner
1731 {
1732 SCM owner;
1733 fat_mutex *m = NULL;
1734
1735 SCM_VALIDATE_MUTEX (1, mx);
1736 m = SCM_MUTEX_DATA (mx);
1737 scm_i_pthread_mutex_lock (&m->lock);
1738 owner = m->owner;
1739 scm_i_pthread_mutex_unlock (&m->lock);
1740
1741 return owner;
1742 }
1743 #undef FUNC_NAME
1744
1745 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1746 (SCM mx),
1747 "Return the lock level of mutex @var{mx}.")
1748 #define FUNC_NAME s_scm_mutex_level
1749 {
1750 SCM_VALIDATE_MUTEX (1, mx);
1751 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1752 }
1753 #undef FUNC_NAME
1754
1755 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1756 (SCM mx),
1757 "Returns @code{#t} if the mutex @var{mx} is locked.")
1758 #define FUNC_NAME s_scm_mutex_locked_p
1759 {
1760 SCM_VALIDATE_MUTEX (1, mx);
1761 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1762 }
1763 #undef FUNC_NAME
1764
1765 static int
1766 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1767 {
1768 fat_cond *c = SCM_CONDVAR_DATA (cv);
1769 scm_puts_unlocked ("#<condition-variable ", port);
1770 scm_uintprint ((scm_t_bits)c, 16, port);
1771 scm_puts_unlocked (">", port);
1772 return 1;
1773 }
1774
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Give `waiting' a valid SCM value before creating the smob, and
     call make_queue (which allocates, and so may trigger GC) only once
     CV exists — the same initialization pattern as make_fat_mutex.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1790
1791 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1792 (SCM cv, SCM mx, SCM t),
1793 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1794 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1795 "is locked again when this function returns. When @var{t} is given, "
1796 "it specifies a point in time where the waiting should be aborted. It "
1797 "can be either a integer as returned by @code{current-time} or a pair "
1798 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1799 "mutex is locked and @code{#f} is returned. When the condition "
1800 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1801 "is returned. ")
1802 #define FUNC_NAME s_scm_timed_wait_condition_variable
1803 {
1804 scm_t_timespec waittime, *waitptr = NULL;
1805
1806 SCM_VALIDATE_CONDVAR (1, cv);
1807 SCM_VALIDATE_MUTEX (2, mx);
1808
1809 if (!SCM_UNBNDP (t))
1810 {
1811 to_timespec (t, &waittime);
1812 waitptr = &waittime;
1813 }
1814
1815 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1816 }
1817 #undef FUNC_NAME
1818
1819 static void
1820 fat_cond_signal (fat_cond *c)
1821 {
1822 unblock_from_queue (c->waiting);
1823 }
1824
1825 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1826 (SCM cv),
1827 "Wake up one thread that is waiting for @var{cv}")
1828 #define FUNC_NAME s_scm_signal_condition_variable
1829 {
1830 SCM_VALIDATE_CONDVAR (1, cv);
1831 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1832 return SCM_BOOL_T;
1833 }
1834 #undef FUNC_NAME
1835
1836 static void
1837 fat_cond_broadcast (fat_cond *c)
1838 {
1839 while (scm_is_true (unblock_from_queue (c->waiting)))
1840 ;
1841 }
1842
1843 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1844 (SCM cv),
1845 "Wake up all threads that are waiting for @var{cv}. ")
1846 #define FUNC_NAME s_scm_broadcast_condition_variable
1847 {
1848 SCM_VALIDATE_CONDVAR (1, cv);
1849 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1850 return SCM_BOOL_T;
1851 }
1852 #undef FUNC_NAME
1853
1854 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1855 (SCM obj),
1856 "Return @code{#t} if @var{obj} is a condition variable.")
1857 #define FUNC_NAME s_scm_condition_variable_p
1858 {
1859 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1860 }
1861 #undef FUNC_NAME
1862
1863
1864 \f
1865 /*** Select */
1866
/* Argument/result bundle for do_std_select, funnelled through
   scm_without_guile's single void* parameter.  */
struct select_args
{
  /* Inputs: the usual select(2) arguments.  */
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  /* Outputs: select's return value and the errno it left behind.  */
  int result;
  int errno_value;
};
1878
1879 static void *
1880 do_std_select (void *args)
1881 {
1882 struct select_args *select_args;
1883
1884 select_args = (struct select_args *) args;
1885
1886 select_args->result =
1887 select (select_args->nfds,
1888 select_args->read_fds, select_args->write_fds,
1889 select_args->except_fds, select_args->timeout);
1890 select_args->errno_value = errno;
1891
1892 return NULL;
1893 }
1894
/* A select(2) variant that is safe to call in Guile mode: the actual
   select runs outside Guile (cooperating with the GC), and the
   thread's sleep pipe is added to the read set so that other threads
   can interrupt the call (see scm_i_setup_sleep).  Returns like
   select; a pure wakeup with no other ready descriptor is reported as
   -1 with errno = EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set, since the wakeup fd is watched through
     it.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  /* Watch the read end of our own sleep pipe alongside the caller's
     descriptors, widening nfds if necessary.  */
  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC. */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  /* If the sleep pipe fired, consume the wakeup byte and hide the
     wakeup fd from the caller; adjust the result count accordingly.  */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1952
1953 /* Convenience API for blocking while in guile mode. */
1954
1955 #if SCM_USE_PTHREAD_THREADS
1956
1957 /* It seems reasonable to not run procedures related to mutex and condition
1958 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1959 without it, and (ii) the only potential gain would be GC latency. See
1960 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1961 for a discussion of the pros and cons. */
1962
1963 int
1964 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1965 {
1966 int res = scm_i_pthread_mutex_lock (mutex);
1967 return res;
1968 }
1969
1970 static void
1971 do_unlock (void *data)
1972 {
1973 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1974 }
1975
/* Lock the raw pthread mutex MUTEX and arrange for it to be unlocked
   when the current dynwind context is left, normally or non-locally.
   Unlike scm_dynwind_lock_mutex, no rewind handler is installed, so
   the mutex is not re-locked on context reentry.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1982
/* Like scm_i_pthread_cond_wait, but records MUTEX in the current
   thread's `held_mutex' field for the duration of the wait.
   NOTE(review): presumably this allows other parts of the runtime to
   release the mutex if the wait is aborted (e.g. on thread exit); the
   consumer of `held_mutex' is not visible in this part of the file —
   confirm before relying on it.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1995
/* Timed variant of scm_pthread_cond_wait: wait on COND/MUTEX until the
   absolute time WT, recording MUTEX in `held_mutex' while blocked (see
   scm_pthread_cond_wait above).  Returns pthread_cond_timedwait's
   result (0, ETIMEDOUT, ...).  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
2010
2011 #endif
2012
2013 unsigned long
2014 scm_std_usleep (unsigned long usecs)
2015 {
2016 struct timeval tv;
2017 tv.tv_usec = usecs % 1000000;
2018 tv.tv_sec = usecs / 1000000;
2019 scm_std_select (0, NULL, NULL, NULL, &tv);
2020 return tv.tv_sec * 1000000 + tv.tv_usec;
2021 }
2022
2023 unsigned int
2024 scm_std_sleep (unsigned int secs)
2025 {
2026 struct timeval tv;
2027 tv.tv_usec = 0;
2028 tv.tv_sec = secs;
2029 scm_std_select (0, NULL, NULL, NULL, &tv);
2030 return tv.tv_sec;
2031 }
2032
2033 /*** Misc */
2034
2035 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
2036 (void),
2037 "Return the thread that called this function.")
2038 #define FUNC_NAME s_scm_current_thread
2039 {
2040 return SCM_I_CURRENT_THREAD->handle;
2041 }
2042 #undef FUNC_NAME
2043
2044 static SCM
2045 scm_c_make_list (size_t n, SCM fill)
2046 {
2047 SCM res = SCM_EOL;
2048 while (n-- > 0)
2049 res = scm_cons (fill, res);
2050 return res;
2051 }
2052
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  /* Fill the pre-allocated spine in place.  N caps the traversal in
     case the thread list grew after thread_count was sampled; the
     internal signal-delivery thread is not exposed to Scheme.  */
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Terminate the result, dropping any unused pre-allocated cells.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
2081
2082 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
2083 (SCM thread),
2084 "Return @code{#t} iff @var{thread} has exited.\n")
2085 #define FUNC_NAME s_scm_thread_exited_p
2086 {
2087 return scm_from_bool (scm_c_thread_exited_p (thread));
2088 }
2089 #undef FUNC_NAME
2090
2091 int
2092 scm_c_thread_exited_p (SCM thread)
2093 #define FUNC_NAME s_scm_thread_exited_p
2094 {
2095 scm_i_thread *t;
2096 SCM_VALIDATE_THREAD (1, thread);
2097 t = SCM_I_THREAD_DATA (thread);
2098 return t->exited;
2099 }
2100 #undef FUNC_NAME
2101
2102 SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
2103 (void),
2104 "Return the total number of processors of the machine, which\n"
2105 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2106 "thread execution unit, which can be either:\n\n"
2107 "@itemize\n"
2108 "@item an execution core in a (possibly multi-core) chip, in a\n"
2109 " (possibly multi- chip) module, in a single computer, or\n"
2110 "@item a thread execution unit inside a core in the case of\n"
2111 " @dfn{hyper-threaded} CPUs.\n"
2112 "@end itemize\n\n"
2113 "Which of the two definitions is used, is unspecified.\n")
2114 #define FUNC_NAME s_scm_total_processor_count
2115 {
2116 return scm_from_ulong (num_processors (NPROC_ALL));
2117 }
2118 #undef FUNC_NAME
2119
2120 SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
2121 (void),
2122 "Like @code{total-processor-count}, but return the number of\n"
2123 "processors available to the current process. See\n"
2124 "@code{setaffinity} and @code{getaffinity} for more\n"
2125 "information.\n")
2126 #define FUNC_NAME s_scm_current_processor_count
2127 {
2128 return scm_from_ulong (num_processors (NPROC_CURRENT));
2129 }
2130 #undef FUNC_NAME
2131
2132
2133 \f
2134
2135 static scm_i_pthread_cond_t wake_up_cond;
2136 static int threads_initialized_p = 0;
2137
2138
2139 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2140 */
2141 scm_i_pthread_mutex_t scm_i_critical_section_mutex;
2142
2143 static SCM dynwind_critical_section_mutex;
2144
2145 void
2146 scm_dynwind_critical_section (SCM mutex)
2147 {
2148 if (scm_is_false (mutex))
2149 mutex = dynwind_critical_section_mutex;
2150 scm_dynwind_lock_mutex (mutex);
2151 scm_dynwind_block_asyncs ();
2152 }
2153
2154 /*** Initialization */
2155
2156 scm_i_pthread_mutex_t scm_i_misc_mutex;
2157
2158 #if SCM_USE_PTHREAD_THREADS
2159 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2160 #endif
2161
/* Very early thread-system setup, run before the rest of libguile is
   initialized: create the global mutexes and wake-up condition
   variable, and register the calling (main) thread.  BASE is the
   stack base handed to the GC via guilify_self_1.  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* Set up a recursive mutex attribute, used below for the
     critical-section mutex and exported for use elsewhere.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}
2178
2179 scm_t_bits scm_tc16_thread;
2180 scm_t_bits scm_tc16_mutex;
2181 scm_t_bits scm_tc16_condvar;
2182
/* Register the thread, mutex, and condition-variable smob types and
   finish initializing the first thread.  Called once during libguile
   startup.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The real default dynamic state is installed later, by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2202
2203 void
2204 scm_init_threads_default_dynamic_state ()
2205 {
2206 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2207 scm_i_default_dynamic_state = state;
2208 }
2209
/* Define the Scheme procedures declared by the SCM_DEFINE forms in
   this file; threads.x is generated from it by the snarfer.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2215
2216 \f
/* IA64-specific things.  */

/* On IA64 the register backing store (RSE stack) must be scanned by
   the GC in addition to the ordinary stack; these per-OS helpers
   locate its base and its current top (ar.bsp).  */
#ifdef __ia64__
# ifdef __hpux
# include <sys/param.h>
# include <sys/pstat.h>
/* Find the backing-store base by scanning the process's VM regions
   for the RSE stack region.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Extract ar.bsp (top of the register backing store) from a
   ucontext.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
# include <ucontext.h>
/* glibc exports the backing-store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
# include <ucontext.h>
/* On FreeBSD the backing-store base is a fixed address.  */
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  /* bspstore plus the amount of register state not yet flushed to
     the backing store.  */
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
		  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */
2272
2273
2274 /*
2275 Local Variables:
2276 c-file-style: "gnu"
2277 End:
2278 */