/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
 *   2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013,
 *   2014 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */


\f
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "libguile/bdw-gc.h"
#include "libguile/_scm.h"

#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>

#ifdef HAVE_STRING_H
#include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
#endif

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#if HAVE_PTHREAD_NP_H
# include <pthread_np.h>
#endif

#include <sys/select.h>

#include <assert.h>
#include <fcntl.h>
#include <nproc.h>

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"
#include "libguile/weaks.h"

#include <full-read.h>


\f

/* First some libgc shims. */

/* Make sure GC_fn_type is defined; it is missing from the public
   headers of GC 7.1 and earlier.  */
#ifndef HAVE_GC_FN_TYPE
typedef void * (* GC_fn_type) (void *);
#endif


#ifndef GC_SUCCESS
#define GC_SUCCESS 0
#endif

#ifndef GC_UNIMPLEMENTED
#define GC_UNIMPLEMENTED 3
#endif

/* Likewise struct GC_stack_base is missing before 7.1.  */
#ifndef HAVE_GC_STACK_BASE
struct GC_stack_base {
  void * mem_base; /* Base of memory stack. */
#ifdef __ia64__
  void * reg_base; /* Base of separate register stack. */
#endif
};

static int
GC_register_my_thread (struct GC_stack_base *stack_base)
{
  return GC_UNIMPLEMENTED;
}

static void
GC_unregister_my_thread ()
{
}

#if !SCM_USE_PTHREAD_THREADS
/* No threads; we can just use GC_stackbottom.  */
static void *
get_thread_stack_base ()
{
  return GC_stackbottom;
}

#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
  && defined PTHREAD_ATTR_GETSTACK_WORKS
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them.  */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com.  */
static void *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#elif HAVE_PTHREAD_ATTR_GET_NP
/* This one is for FreeBSD 9.  */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_attr_init (&attr);
  pthread_attr_get_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  pthread_attr_destroy (&attr);

  end = (char *)start + size;

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#else
#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl.  Please upgrade to libgc >= 7.1.
#endif

static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}

static void *
GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
{
  struct GC_stack_base stack_base;

  stack_base.mem_base = (void*)&stack_base;
#ifdef __ia64__
  /* FIXME: Untested.  */
  {
    ucontext_t ctx;
    getcontext (&ctx);
    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
  }
#endif

  return fn (&stack_base, arg);
}
#endif /* HAVE_GC_STACK_BASE */


/* Now define with_gc_active and with_gc_inactive.  */

#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer).  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active.  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */


\f
static void
to_timespec (SCM t, scm_t_timespec *waittime)
{
  if (scm_is_pair (t))
    {
      waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
      waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
    }
  else
    {
      double time = scm_to_double (t);
      double sec = scm_c_truncate (time);

      waittime->tv_sec = (long) sec;
      waittime->tv_nsec = (long) ((time - sec) * 1000000000);
    }
}
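
/* Illustrative sketch (not part of the library): to_timespec accepts
   either a (seconds . microseconds) pair, as returned by gettimeofday,
   or a real number of seconds:

     scm_t_timespec ts;

     to_timespec (scm_cons (scm_from_ulong (5), scm_from_ulong (500000)),
                  &ts);
     after which ts is { 5, 500000000 };

     to_timespec (scm_from_double (1.25), &ts);
     after which ts is { 1, 250000000 }.  */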

\f
/*** Queues */

/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
   the risk of false references leading to unbounded retained space as
   described in "Bounding Space Usage of Conservative Garbage Collectors",
   H.J. Boehm, 2001.  */

/* Make an empty queue data structure.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}

/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, scm_is_eq (prev, q) ? SCM_EOL : prev);
          SCM_SETCDR (prev, SCM_CDR (c));

          /* GC-robust */
          SCM_SETCDR (c, SCM_EOL);

          SCM_CRITICAL_SECTION_END;
          return 1;
        }
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}

/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
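
/* Illustrative sketch (not part of the library): how the queue
   primitives above fit together.  Q is a pair of (last cell . head of
   list), so every operation is a short list manipulation inside the
   critical section:

     SCM q = make_queue ();
     SCM h1 = enqueue (q, scm_from_int (1));
     SCM h2 = enqueue (q, scm_from_int (2));
     remqueue (q, h2);     returns 1 and unlinks the second element
     dequeue (q);          returns the SCM integer 1
     dequeue (q);          returns SCM_BOOL_F, the queue is now empty

   The handles h1 and h2 are the cons cells created by enqueue.  */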

/*** Thread smob routines */


static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}

\f
/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
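
/* Illustrative sketch (not part of the library): the canonical caller
   of block_self loops until the condition it waits for holds, treating
   EINTR as "drop the lock, run asyncs, retry" and ETIMEDOUT as "give
   up".  Roughly, following the same discipline as join-thread below:

     scm_i_scm_pthread_mutex_lock (&some_mutex);
     while (!condition_holds)
       {
         int err = block_self (wait_queue, protector, &some_mutex,
                               timeout_ptr);
         if (err == ETIMEDOUT)
           break;
         scm_i_pthread_mutex_unlock (&some_mutex);
         SCM_TICK;      runs pending asyncs and may throw
         scm_i_scm_pthread_mutex_lock (&some_mutex);
       }
     scm_i_pthread_mutex_unlock (&some_mutex);

   Here some_mutex, wait_queue, protector, timeout_ptr and
   condition_holds are hypothetical names for the caller's state.  */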
468
469 /* Wake up the first thread on QUEUE, if any. The awoken thread is
470 returned, or #f if the queue was empty.
471 */
472 static SCM
473 unblock_from_queue (SCM queue)
474 {
475 SCM thread = dequeue (queue);
476 if (scm_is_true (thread))
477 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
478 return thread;
479 }
480
481 \f
482 /* Getting into and out of guile mode.
483 */
484
485 /* Key used to attach a cleanup handler to a given thread. Also, if
486 thread-local storage is unavailable, this key is used to retrieve the
487 current thread with `pthread_getspecific ()'. */
488 scm_i_pthread_key_t scm_i_thread_key;
489
490
491 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
492
493 /* When thread-local storage (TLS) is available, a pointer to the
494 current-thread object is kept in TLS. Note that storing the thread-object
495 itself in TLS (rather than a pointer to some malloc'd memory) is not
496 possible since thread objects may live longer than the actual thread they
497 represent. */
498 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
499
500 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
501
502
503 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
504 static scm_i_thread *all_threads = NULL;
505 static int thread_count;
506
507 static SCM scm_i_default_dynamic_state;
508
509 /* Run when a fluid is collected. */
510 void
511 scm_i_reset_fluid (size_t n)
512 {
513 scm_i_thread *t;
514
515 scm_i_pthread_mutex_lock (&thread_admin_mutex);
516 for (t = all_threads; t; t = t->next_thread)
517 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
518 {
519 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
520
521 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
522 SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
523 }
524 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
525 }
526
527 /* Perform first stage of thread initialisation, in non-guile mode.
528 */
529 static void
530 guilify_self_1 (struct GC_stack_base *base)
531 {
532 scm_i_thread t;
533
534 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
535 before allocating anything in this thread, because allocation could
536 cause GC to run, and GC could cause finalizers, which could invoke
537 Scheme functions, which need the current thread to be set. */
538
539 t.pthread = scm_i_pthread_self ();
540 t.handle = SCM_BOOL_F;
541 t.result = SCM_BOOL_F;
542 t.cleanup_handler = SCM_BOOL_F;
543 t.mutexes = SCM_EOL;
544 t.held_mutex = NULL;
545 t.join_queue = SCM_EOL;
546 t.dynamic_state = SCM_BOOL_F;
547 t.dynwinds = SCM_EOL;
548 t.active_asyncs = SCM_EOL;
549 t.block_asyncs = 1;
550 t.pending_asyncs = 1;
551 t.critical_section_level = 0;
552 t.base = base->mem_base;
553 #ifdef __ia64__
554 t.register_backing_store_base = base->reg_base;
555 #endif
556 t.continuation_root = SCM_EOL;
557 t.continuation_base = t.base;
558 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
559 t.sleep_mutex = NULL;
560 t.sleep_object = SCM_BOOL_F;
561 t.sleep_fd = -1;
562
563 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
564 /* FIXME: Error conditions during the initialization phase are handled
565 gracelessly since public functions such as `scm_init_guile ()'
566 currently have type `void'. */
567 abort ();
568
569 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
570 t.current_mark_stack_ptr = NULL;
571 t.current_mark_stack_limit = NULL;
572 t.canceled = 0;
573 t.exited = 0;
574 t.guile_mode = 0;
575
576 /* The switcheroo. */
577 {
578 scm_i_thread *t_ptr = &t;
579
580 GC_disable ();
581 t_ptr = GC_malloc (sizeof (scm_i_thread));
582 memcpy (t_ptr, &t, sizeof t);
583
584 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
585
586 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
587 /* Cache the current thread in TLS for faster lookup. */
588 scm_i_current_thread = t_ptr;
589 #endif
590
591 scm_i_pthread_mutex_lock (&thread_admin_mutex);
592 t_ptr->next_thread = all_threads;
593 all_threads = t_ptr;
594 thread_count++;
595 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
596
597 GC_enable ();
598 }
599 }
600
601 /* Perform second stage of thread initialisation, in guile mode.
602 */
603 static void
604 guilify_self_2 (SCM parent)
605 {
606 scm_i_thread *t = SCM_I_CURRENT_THREAD;
607
608 t->guile_mode = 1;
609
610 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
611
612 t->continuation_root = scm_cons (t->handle, SCM_EOL);
613 t->continuation_base = t->base;
614 t->vm = SCM_BOOL_F;
615
616 if (scm_is_true (parent))
617 t->dynamic_state = scm_make_dynamic_state (parent);
618 else
619 t->dynamic_state = scm_i_make_initial_dynamic_state ();
620
621 t->join_queue = make_queue ();
622 t->block_asyncs = 0;
623
624 /* See note in finalizers.c:queue_finalizer_async(). */
625 GC_invoke_finalizers ();
626 }

\f
/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
                                owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))

static SCM
call_cleanup (void *data)
{
  SCM *proc_p = data;
  return scm_call_0 (*proc_p);
}

/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  This needs to be done in Guile mode.  */
  scm_i_ensure_signal_delivery_thread ();

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
                                      call_cleanup, &ptr,
                                      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      if (!SCM_UNBNDP (mutex))
        {
          fat_mutex *m = SCM_MUTEX_DATA (mutex);

          scm_i_pthread_mutex_lock (&m->lock);

          /* Check whether T owns MUTEX.  This is usually the case, unless
             T abandoned MUTEX; in that case, T is no longer its owner (see
             `fat_mutex_lock') but MUTEX is still in `t->mutexes'.  */
          if (scm_is_eq (m->owner, t->handle))
            unblock_from_queue (m->waiting);

          scm_i_pthread_mutex_unlock (&m->lock);
        }

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}

static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered.  */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}

static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If we were canceled, we were unable to clear `t->guile_mode', so do
     it here.  */
  t->guile_mode = 0;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;

        /* GC-robust */
        t->next_thread = NULL;

        break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}

static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}

/* Perform any initializations necessary to make the current thread
   known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
   if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Returns zero when the thread was known to guile already; otherwise
   return 1.

   Note that it could be the case that the thread was known
   to Guile, but not in guile mode (because we are within a
   scm_without_guile call).  Check SCM_I_CURRENT_THREAD->guile_mode to
   be sure.  New threads are put into guile mode implicitly.  */

static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
        {
          /* First thread ever to enter Guile.  Run the full
             initialization.
           */
          scm_i_init_guile (base);

#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later.  */
          GC_allow_register_threads ();
#endif

          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
        }
      else
        {
          /* Guile is already initialized, but this thread enters it for
             the first time.  Only initialize this thread.
           */
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc.  */
#if SCM_USE_PTHREAD_THREADS
          GC_register_my_thread (base);
#endif

          guilify_self_1 (base);
          guilify_self_2 (parent);
        }
      return 1;
    }
}

void
scm_init_guile ()
{
  struct GC_stack_base stack_base;

  if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
    scm_i_init_thread_for_guile (&stack_base,
                                 scm_i_default_dynamic_state);
  else
    {
      fprintf (stderr, "Failed to get stack base for current thread.\n");
      exit (EXIT_FAILURE);
    }
}
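
/* Illustrative sketch (not part of the library): the typical way an
   embedding program enters Guile via scm_init_guile, which derives the
   stack base from the current frame.  EXIT_SUCCESS comes from
   <stdlib.h>:

     #include <libguile.h>

     int
     main (int argc, char **argv)
     {
       scm_init_guile ();
       scm_c_eval_string ("(display \"hello\\n\")");
       return EXIT_SUCCESS;
     }

   When the stack base cannot be determined reliably (e.g. when entering
   Guile from a deeply nested frame), scm_with_guile below is the safer
   entry point.  */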

struct with_guile_args
{
  GC_fn_type func;
  void *data;
  SCM parent;
};

static void *
with_guile_trampoline (void *data)
{
  struct with_guile_args *args = data;

  return scm_c_with_continuation_barrier (args->func, args->data);
}

static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
         scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than when it was
         when this thread was first guilified.  Thus, `base' must be
         updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}

static void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  struct with_guile_args args;

  args.func = func;
  args.data = data;
  args.parent = parent;

  return GC_call_with_stack_base (with_guile_and_parent, &args);
}

void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
                                      scm_i_default_dynamic_state);
}

void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *result;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  if (t->guile_mode)
    {
      SCM_I_CURRENT_THREAD->guile_mode = 0;
      result = with_gc_inactive (func, data);
      SCM_I_CURRENT_THREAD->guile_mode = 1;
    }
  else
    /* Otherwise we're not in guile mode, so nothing to do.  */
    result = func (data);

  return result;
}
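
/* Illustrative sketch (not part of the library): wrapping a blocking
   system call in scm_without_guile so the collector is not held up by
   it.  The helper and its argument struct are hypothetical:

     struct read_args { int fd; void *buf; size_t len; ssize_t ret; };

     static void *
     do_blocking_read (void *data)
     {
       struct read_args *a = data;
       a->ret = read (a->fd, a->buf, a->len);
       return NULL;
     }

   and then, from guile mode:

     struct read_args a = { fd, buf, buflen, 0 };
     scm_without_guile (do_blocking_read, &a);
*/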

\f
/*** Thread creation */

typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;

static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}

static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}

SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
            (SCM thunk, SCM handler),
            "Call @code{thunk} in a new thread and with a new dynamic state,\n"
            "returning a new thread object representing the thread.  The procedure\n"
            "@var{thunk} is called via @code{with-continuation-barrier}.\n"
            "\n"
            "When @var{handler} is specified, then @var{thunk} is called from\n"
            "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
            "handler.  This catch is established inside the continuation barrier.\n"
            "\n"
            "Once @var{thunk} or @var{handler} returns, the return value is made\n"
            "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
              handler, SCM_ARG2, FUNC_NAME);

  GC_collect_a_little ();
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME

typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;

static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}

static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}

SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  assert (SCM_I_IS_THREAD (data.thread));

  return data.thread;
}
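
/* Illustrative sketch (not part of the library): spawning a Guile
   thread from C with scm_spawn_thread.  The body and handler are
   hypothetical; their signatures match scm_t_catch_body and
   scm_t_catch_handler:

     static SCM
     my_body (void *data)
     {
       return scm_from_int (42);
     }

     static SCM
     my_handler (void *data, SCM key, SCM args)
     {
       return SCM_BOOL_F;
     }

   and then:

     SCM thread = scm_spawn_thread (my_body, NULL, my_handler, NULL);
     SCM result = scm_join_thread (thread);
*/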

SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
            (),
            "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME

/* Some systems, notably Android, lack 'pthread_cancel'.  Don't provide
   'cancel-thread' on these systems.  */

#if !SCM_USE_PTHREAD_THREADS || defined HAVE_PTHREAD_CANCEL

SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
            (SCM thread),
            "Asynchronously force the target @var{thread} to terminate.  @var{thread} "
            "cannot be the current thread, and if @var{thread} has already terminated or "
            "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif

SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
            (SCM thread, SCM proc),
            "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}.  "
            "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
            (SCM thread),
            "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME

SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
            (SCM thread, SCM timeout, SCM timeoutval),
            "Suspend execution of the calling thread until the target @var{thread} "
            "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
        {
          int err = block_self (t->join_queue, thread, &t->admin_mutex,
                                timeout_ptr);
          if (err == 0)
            {
              if (t->exited)
                {
                  res = t->result;
                  break;
                }
            }
          else if (err == ETIMEDOUT)
            break;

          scm_i_pthread_mutex_unlock (&t->admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

          /* Check for exit again, since we just released and
             reacquired the admin mutex, before the next block_self
             call (which would block forever if t has already
             exited).  */
          if (t->exited)
            {
              res = t->result;
              break;
            }
        }
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a thread.")
#define FUNC_NAME s_scm_thread_p
{
  return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME


static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  return 0;
}

static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}

static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}

SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}

SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");

SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
            (SCM flags),
            "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex_with_flags
{
  int unchecked_unlock = 0, external_unlock = 0, recursive = 0;

  SCM ptr = flags;
  while (! scm_is_null (ptr))
    {
      SCM flag = SCM_CAR (ptr);
      if (scm_is_eq (flag, unchecked_unlock_sym))
        unchecked_unlock = 1;
      else if (scm_is_eq (flag, allow_external_unlock_sym))
        external_unlock = 1;
      else if (scm_is_eq (flag, recursive_sym))
        recursive = 1;
      else
        SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
      ptr = SCM_CDR (ptr);
    }
  return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
}
#undef FUNC_NAME

SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
            (void),
            "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME

SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");

static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
        {
          m->owner = new_owner;
          m->level++;

          if (SCM_I_IS_THREAD (new_owner))
            {
              scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

              /* FIXME: The order in which `t->admin_mutex' and
                 `m->lock' are taken differs from that in
                 `on_thread_exit', potentially leading to deadlocks.  */
              scm_i_pthread_mutex_lock (&t->admin_mutex);

              /* Only keep a weak reference to MUTEX so that it's not
                 retained when not referenced elsewhere (bug #27450).
                 The weak pair itself is eventually removed when MUTEX
                 is unlocked.  Note that `t->mutexes' lists mutexes
                 currently held by T, so it should be small.  */
              t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

              scm_i_pthread_mutex_unlock (&t->admin_mutex);
            }
          *ret = 1;
          break;
        }
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
        {
          m->owner = new_owner;
          err = scm_cons (scm_abandoned_mutex_error_key,
                          scm_from_locale_string ("lock obtained on abandoned "
                                                  "mutex"));
          *ret = 1;
          break;
        }
      else if (scm_is_eq (m->owner, new_owner))
        {
          if (m->recursive)
            {
              m->level++;
              *ret = 1;
            }
          else
            {
              err = scm_cons (scm_misc_error_key,
                              scm_from_locale_string ("mutex already locked "
                                                      "by thread"));
              *ret = 0;
            }
          break;
        }
      else
        {
          if (timeout != NULL)
            {
              gettimeofday (&current_time, NULL);
              if (current_time.tv_sec > timeout->tv_sec ||
                  (current_time.tv_sec == timeout->tv_sec &&
                   current_time.tv_usec * 1000 > timeout->tv_nsec))
                {
                  *ret = 0;
                  break;
                }
            }
          block_self (m->waiting, mutex, &m->lock, timeout);
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}

SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
            (SCM m, SCM timeout, SCM owner),
            "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
            "thread blocks until the mutex becomes available. The function\n"
            "returns when the calling thread owns the lock on @var{m}.\n"
            "Locking a mutex that a thread already owns will succeed right\n"
            "away and will not block the thread. That is, Guile's mutexes\n"
            "are @emph{recursive}.")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
    SCM_VALIDATE_THREAD (3, owner);

  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

static void
lock_mutex_return_void (SCM mx)
{
  (void) scm_lock_mutex (mx);
}

static void
unlock_mutex_return_void (SCM mx)
{
  (void) scm_unlock_mutex (mx);
}

void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
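
/* Illustrative sketch (not part of the library): using
   scm_dynwind_lock_mutex inside a dynwind context, which guarantees
   the mutex is released even if the body exits non-locally.  MUTEX
   here is any mutex obtained from scm_make_mutex:

     scm_dynwind_begin (0);
     scm_dynwind_lock_mutex (mutex);
     ... code that may throw ...
     scm_dynwind_end ();
*/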

SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
            (SCM mutex),
            "Try to lock @var{mutex}. If the mutex is already locked by someone "
            "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

/*** Fat condition variables */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))

static int
fat_mutex_unlock (SCM mutex, SCM cond,
                  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
        {
          if (!m->unchecked_unlock)
            {
              scm_i_pthread_mutex_unlock (&m->lock);
              scm_misc_error (NULL, "mutex not locked", SCM_EOL);
            }
          owner = t->handle;
        }
      else if (!m->allow_external_unlock)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
        }
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
        {
          int brk = 0;

          if (m->level > 0)
            m->level--;
          if (m->level == 0)
            {
              /* Change the owner of MUTEX.  */
              t->mutexes = scm_delq_x (mutex, t->mutexes);
              m->owner = unblock_from_queue (m->waiting);
            }

          t->block_asyncs++;

          err = block_self (c->waiting, cond, &m->lock, waittime);
          scm_i_pthread_mutex_unlock (&m->lock);

          if (err == 0)
            {
              ret = 1;
              brk = 1;
            }
          else if (err == ETIMEDOUT)
            {
              ret = 0;
              brk = 1;
            }
          else if (err != EINTR)
            {
              errno = err;
              scm_syserror (NULL);
            }

          if (brk)
            {
              if (relock)
                scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
              t->block_asyncs--;
              break;
            }

          t->block_asyncs--;
          scm_async_click ();

          scm_remember_upto_here_2 (cond, mutex);

          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  else
    {
      if (m->level > 0)
        m->level--;
      if (m->level == 0)
        {
          /* Change the owner of MUTEX.  */
          t->mutexes = scm_delq_x (mutex, t->mutexes);
          m->owner = unblock_from_queue (m->waiting);
        }

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}

SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
            (SCM mx, SCM cond, SCM timeout),
            "Unlocks @var{mx} if the calling thread owns the lock on "
            "@var{mx}.  Calling unlock-mutex on a mutex not owned by the current "
            "thread results in undefined behaviour.  Once a mutex has been unlocked, "
            "one thread blocked on @var{mx} is awakened and grabs the mutex "
            "lock.  Every call to @code{lock-mutex} by this thread must be matched "
            "with a call to @code{unlock-mutex}.  Only the last call to "
            "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
        {
          to_timespec (timeout, &cwaittime);
          waittime = &cwaittime;
        }
    }

  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a mutex.")
#define FUNC_NAME s_scm_mutex_p
{
  return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
            (SCM mx),
            "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM owner;
  fat_mutex *m = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_lock (&m->lock);
  owner = m->owner;
  scm_i_pthread_mutex_unlock (&m->lock);

  return owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
            (SCM mx),
            "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
            (SCM mx),
            "Returns @code{#t} if the mutex @var{mx} is locked.")
#define FUNC_NAME s_scm_mutex_locked_p
{
  SCM_VALIDATE_MUTEX (1, mx);
  return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}

SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME

SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
            (SCM cv, SCM mx, SCM t),
            "Wait until condition variable @var{cv} has been signalled.  While waiting, "
            "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
            "is locked again when this function returns.  When @var{t} is given, "
            "it specifies a point in time where the waiting should be aborted.  It "
            "can be either an integer as returned by @code{current-time} or a pair "
            "as returned by @code{gettimeofday}.  When the waiting is aborted the "
            "mutex is locked and @code{#f} is returned.  When the condition "
            "variable is in fact signalled, the mutex is also locked and @code{#t} "
            "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
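
/* Illustrative sketch (not part of the library): the standard
   wait-in-a-loop pattern from C, using the public API defined above.
   The predicate ready_p is hypothetical:

     SCM mx = scm_make_mutex ();
     SCM cv = scm_make_condition_variable ();

     scm_lock_mutex (mx);
     while (!ready_p ())
       scm_timed_wait_condition_variable (cv, mx, SCM_UNDEFINED);
     scm_unlock_mutex (mx);

   Re-checking the predicate guards against spurious and stolen
   wakeups, just as with plain pthreads condition variables.  */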

static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}

SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

static void
fat_cond_broadcast (fat_cond *c)
{
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
}

SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a condition variable.")
#define FUNC_NAME s_scm_condition_variable_p
{
  return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME


\f
/*** Select */

struct select_args
{
  int nfds;
  fd_set *read_fds;
  fd_set *write_fds;
  fd_set *except_fds;
  struct timeval *timeout;

  int result;
  int errno_value;
};

static void *
do_std_select (void *args)
{
  struct select_args *select_args;

  select_args = (struct select_args *) args;

  select_args->result =
    select (select_args->nfds,
            select_args->read_fds, select_args->write_fds,
            select_args->except_fds, select_args->timeout);
  select_args->errno_value = errno;

  return NULL;
}

int
scm_std_select (int nfds,
                fd_set *readfds,
                fd_set *writefds,
                fd_set *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}
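
/* Illustrative sketch (not part of the library): scm_std_select is a
   drop-in replacement for select(2) for code running in guile mode.
   Unlike a raw select, it cooperates with the GC and can be woken up
   early by system asyncs, in which case it returns -1 with errno set
   to EINTR.  FD is a hypothetical descriptor:

     fd_set fds;
     struct timeval tv = { 5, 0 };

     FD_ZERO (&fds);
     FD_SET (fd, &fds);
     if (scm_std_select (fd + 1, &fds, NULL, NULL, &tv) < 0
         && errno == EINTR)
       ;   interrupted: let asyncs run, then retry
*/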

/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

/* It seems reasonable to not run procedures related to mutex and condition
   variables within `GC_do_blocking ()' since, (i) the GC can operate even
   without it, and (ii) the only potential gain would be GC latency.  See
   http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
   for a discussion of the pros and cons.  */

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  int res = scm_i_pthread_mutex_lock (mutex);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}

#endif

static void
do_unlock_with_asyncs (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
  SCM_I_CURRENT_THREAD->block_asyncs--;
}

void
scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t *mutex)
{
  SCM_I_CURRENT_THREAD->block_asyncs++;
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock_with_asyncs, mutex,
                              SCM_F_WIND_EXPLICITLY);
}

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
            (void),
            "Return the total number of processors of the machine, which\n"
            "is guaranteed to be at least 1.  A ``processor'' here is a\n"
            "thread execution unit, which can be either:\n\n"
            "@itemize\n"
            "@item an execution core in a (possibly multi-core) chip, in a\n"
            "  (possibly multi-chip) module, in a single computer, or\n"
            "@item a thread execution unit inside a core in the case of\n"
            "  @dfn{hyper-threaded} CPUs.\n"
            "@end itemize\n\n"
            "Which of the two definitions is used, is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME

SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
            (void),
            "Like @code{total-processor-count}, but return the number of\n"
            "processors available to the current process.  See\n"
            "@code{setaffinity} and @code{getaffinity} for more\n"
            "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME


\f

static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

/*** Initialization */

scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

\f
/* IA64-specific things.  */

#ifdef __ia64__
# ifdef __hpux
# include <sys/param.h>
# include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
# include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
# include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
                  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */


/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/