Merge remote-tracking branch 'origin/stable-2.0'
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
3 * Free Software Foundation, Inc.
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 */
20
21
22 \f
23 #ifdef HAVE_CONFIG_H
24 # include <config.h>
25 #endif
26
27 #include "libguile/bdw-gc.h"
28 #include "libguile/_scm.h"
29
30 #include <stdlib.h>
31 #if HAVE_UNISTD_H
32 #include <unistd.h>
33 #endif
34 #include <stdio.h>
35
36 #ifdef HAVE_STRING_H
37 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
38 #endif
39
40 #if HAVE_SYS_TIME_H
41 #include <sys/time.h>
42 #endif
43
44 #if HAVE_PTHREAD_NP_H
45 # include <pthread_np.h>
46 #endif
47
48 #include <sys/select.h>
49
50 #include <assert.h>
51 #include <fcntl.h>
52 #include <nproc.h>
53
54 #include "libguile/validate.h"
55 #include "libguile/root.h"
56 #include "libguile/eval.h"
57 #include "libguile/async.h"
58 #include "libguile/ports.h"
59 #include "libguile/threads.h"
60 #include "libguile/dynwind.h"
61 #include "libguile/iselect.h"
62 #include "libguile/fluids.h"
63 #include "libguile/continuations.h"
64 #include "libguile/gc.h"
65 #include "libguile/init.h"
66 #include "libguile/scmsigs.h"
67 #include "libguile/strings.h"
68
69 #include <full-read.h>
70
71
72 \f
73
/* First some libgc shims. */

/* Make sure GC_fn_type is defined; it is missing from the public
   headers of GC 7.1 and earlier. */
#ifndef HAVE_GC_FN_TYPE
typedef void * (* GC_fn_type) (void *);
#endif


#ifndef GC_SUCCESS
#define GC_SUCCESS 0
#endif

#ifndef GC_UNIMPLEMENTED
#define GC_UNIMPLEMENTED 3
#endif

/* Likewise struct GC_stack_base is missing before 7.1. */
#ifndef HAVE_GC_STACK_BASE
/* Replacement for the GC_stack_base type that newer libgc versions
   declare in their public headers.  */
struct GC_stack_base {
  void * mem_base; /* Base of memory stack. */
#ifdef __ia64__
  void * reg_base; /* Base of separate register stack. */
#endif
};

/* Stub: per-thread registration is not available with such an old
   libgc, so report that to the caller.  STACK_BASE is ignored.  */
static int
GC_register_my_thread (struct GC_stack_base *stack_base)
{
  return GC_UNIMPLEMENTED;
}

/* Stub counterpart of the above; nothing to undo.  */
static void
GC_unregister_my_thread ()
{
}
110
#if !SCM_USE_PTHREAD_THREADS
/* No threads; we can just use GC_stackbottom. */
static void *
get_thread_stack_base ()
{
  return GC_stackbottom;
}

#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
  && defined PTHREAD_ATTR_GETSTACK_WORKS
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them. */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  /* NOTE(review): return values of the pthread_* calls are not
     checked here; START/SIZE would be garbage on failure.  */
  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com. */
static void *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#elif HAVE_PTHREAD_ATTR_GET_NP
/* This one is for FreeBSD 9. */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_attr_init (&attr);
  pthread_attr_get_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  pthread_attr_destroy (&attr);

  end = (char *)start + size;

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#else
#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl.  Please upgrade to libgc >= 7.1.
#endif
178
/* Shim for libgc < 7.1: fill in *STACK_BASE for the calling thread
   using one of the get_thread_stack_base variants above.  Always
   reports GC_SUCCESS.  */
static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}
201
/* Shim for libgc < 7.1: call FN (&stack_base, ARG), where STACK_BASE
   describes the current stack position.  The address of the local
   STACK_BASE itself serves as the (approximate) memory stack base.  */
static void *
GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
{
  struct GC_stack_base stack_base;

  stack_base.mem_base = (void*)&stack_base;
#ifdef __ia64__
  /* FIXME: Untested. */
  {
    ucontext_t ctx;
    getcontext (&ctx);
    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
  }
#endif

  return fn (&stack_base, arg);
}
#endif /* HAVE_GC_STACK_BASE */
220
221
/* Now define with_gc_active and with_gc_inactive. */

#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer). */

/* Run FUNC (DATA) in "inactive" mode, i.e. via GC_do_blocking, so the
   collector does not need to stop this thread while FUNC blocks.  */
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

/* Inverse of the above: run FUNC (DATA) with GC active again, via
   GC_call_with_gc_active.  */
static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active. */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */
260
261
262 \f
263 static void
264 to_timespec (SCM t, scm_t_timespec *waittime)
265 {
266 if (scm_is_pair (t))
267 {
268 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
269 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
270 }
271 else
272 {
273 double time = scm_to_double (t);
274 double sec = scm_c_truncate (time);
275
276 waittime->tv_sec = (long) sec;
277 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
278 }
279 }
280
281 \f
282 /*** Queues */
283
284 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
285 the risk of false references leading to unbounded retained space as
286 described in "Bounding Space Usage of Conservative Garbage Collectors",
287 H.J. Boehm, 2001. */
288
289 /* Make an empty queue data structure.
290 */
291 static SCM
292 make_queue ()
293 {
294 return scm_cons (SCM_EOL, SCM_EOL);
295 }
296
297 /* Put T at the back of Q and return a handle that can be used with
298 remqueue to remove T from Q again.
299 */
300 static SCM
301 enqueue (SCM q, SCM t)
302 {
303 SCM c = scm_cons (t, SCM_EOL);
304 SCM_CRITICAL_SECTION_START;
305 if (scm_is_null (SCM_CDR (q)))
306 SCM_SETCDR (q, c);
307 else
308 SCM_SETCDR (SCM_CAR (q), c);
309 SCM_SETCAR (q, c);
310 SCM_CRITICAL_SECTION_END;
311 return c;
312 }
313
314 /* Remove the element that the handle C refers to from the queue Q. C
315 must have been returned from a call to enqueue. The return value
316 is zero when the element referred to by C has already been removed.
317 Otherwise, 1 is returned.
318 */
319 static int
320 remqueue (SCM q, SCM c)
321 {
322 SCM p, prev = q;
323 SCM_CRITICAL_SECTION_START;
324 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
325 {
326 if (scm_is_eq (p, c))
327 {
328 if (scm_is_eq (c, SCM_CAR (q)))
329 SCM_SETCAR (q, SCM_CDR (c));
330 SCM_SETCDR (prev, SCM_CDR (c));
331
332 /* GC-robust */
333 SCM_SETCDR (c, SCM_EOL);
334
335 SCM_CRITICAL_SECTION_END;
336 return 1;
337 }
338 prev = p;
339 }
340 SCM_CRITICAL_SECTION_END;
341 return 0;
342 }
343
344 /* Remove the front-most element from the queue Q and return it.
345 Return SCM_BOOL_F when Q is empty.
346 */
347 static SCM
348 dequeue (SCM q)
349 {
350 SCM c;
351 SCM_CRITICAL_SECTION_START;
352 c = SCM_CDR (q);
353 if (scm_is_null (c))
354 {
355 SCM_CRITICAL_SECTION_END;
356 return SCM_BOOL_F;
357 }
358 else
359 {
360 SCM_SETCDR (q, SCM_CDR (c));
361 if (scm_is_null (SCM_CDR (q)))
362 SCM_SETCAR (q, SCM_EOL);
363 SCM_CRITICAL_SECTION_END;
364
365 /* GC-robust */
366 SCM_SETCDR (c, SCM_EOL);
367
368 return SCM_CAR (c);
369 }
370 }
371
372 /*** Thread smob routines */
373
374
/* Smob print hook for thread objects: writes a representation like
   "#<thread ID (0xADDR)>" to PORT.  NOTE(review): the return value 1
   appears to follow the smob-print convention (non-zero meaning the
   object was printed) -- confirm against smob.h.  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct. A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t. */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the union member whose size matches pthread_t exactly; the
     compiler folds this chain to a single branch.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts_unlocked ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts_unlocked (" (", port);
  /* The thread struct's address, printed in hex, disambiguates
     threads whose pthread ids collide in the narrower type.  */
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts_unlocked (")>", port);
  return 1;
}
409
410 \f
411 /*** Blocking on queues. */
412
413 /* See also scm_i_queue_async_cell for how such a block is
414 interrputed.
415 */
416
417 /* Put the current thread on QUEUE and go to sleep, waiting for it to
418 be woken up by a call to 'unblock_from_queue', or to be
419 interrupted. Upon return of this function, the current thread is
420 no longer on QUEUE, even when the sleep has been interrupted.
421
422 The caller of block_self must hold MUTEX. It will be atomically
423 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
424
425 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
426 as MUTEX is needed.
427
428 When WAITTIME is not NULL, the sleep will be aborted at that time.
429
430 The return value of block_self is an errno value. It will be zero
431 when the sleep has been successfully completed by a call to
432 unblock_from_queue, EINTR when it has been interrupted by the
433 delivery of a system async, and ETIMEDOUT when the timeout has
434 expired.
435
436 The system asyncs themselves are not executed by block_self.
437 */
/* See the contract in the comment above: sleep on QUEUE until woken,
   interrupted (EINTR) or timed out (ETIMEDOUT), with MUTEX held by
   the caller and released atomically during the wait.  */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* If an async is already pending, don't sleep at all; report an
     interruption immediately.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Asyncs must not run between enqueueing ourselves and
         waiting, hence the block_asyncs bracket.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
469
470 /* Wake up the first thread on QUEUE, if any. The awoken thread is
471 returned, or #f if the queue was empty.
472 */
473 static SCM
474 unblock_from_queue (SCM queue)
475 {
476 SCM thread = dequeue (queue);
477 if (scm_is_true (thread))
478 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
479 return thread;
480 }
481
482 \f
483 /* Getting into and out of guile mode.
484 */
485
486 /* Key used to attach a cleanup handler to a given thread. Also, if
487 thread-local storage is unavailable, this key is used to retrieve the
488 current thread with `pthread_getspecific ()'. */
489 scm_i_pthread_key_t scm_i_thread_key;
490
491
492 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
493
494 /* When thread-local storage (TLS) is available, a pointer to the
495 current-thread object is kept in TLS. Note that storing the thread-object
496 itself in TLS (rather than a pointer to some malloc'd memory) is not
497 possible since thread objects may live longer than the actual thread they
498 represent. */
499 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
500
501 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
502
503
504 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
505 static scm_i_thread *all_threads = NULL;
506 static int thread_count;
507
508 static SCM scm_i_default_dynamic_state;
509
510 /* Run when a fluid is collected. */
511 void
512 scm_i_reset_fluid (size_t n)
513 {
514 scm_i_thread *t;
515
516 scm_i_pthread_mutex_lock (&thread_admin_mutex);
517 for (t = all_threads; t; t = t->next_thread)
518 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
519 {
520 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
521
522 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
523 SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
524 }
525 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
526 }
527
/* Perform first stage of thread initialisation, in non-guile mode.
 */
static void
guilify_self_1 (struct GC_stack_base *base)
{
  scm_i_thread t;

  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
     before allocating anything in this thread, because allocation could
     cause GC to run, and GC could cause finalizers, which could invoke
     Scheme functions, which need the current thread to be set. */

  t.pthread = scm_i_pthread_self ();
  t.handle = SCM_BOOL_F;
  t.result = SCM_BOOL_F;
  t.cleanup_handler = SCM_BOOL_F;
  t.mutexes = SCM_EOL;
  t.held_mutex = NULL;
  t.join_queue = SCM_EOL;
  t.dynamic_state = SCM_BOOL_F;
  t.dynstack.base = NULL;
  t.dynstack.top = NULL;
  t.dynstack.limit = NULL;
  t.active_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 resets this to 0.  */
  t.block_asyncs = 1;
  t.pending_asyncs = 1;
  t.critical_section_level = 0;
  t.base = base->mem_base;
#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
#endif
  t.continuation_root = SCM_EOL;
  t.continuation_base = t.base;
  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
  t.sleep_mutex = NULL;
  t.sleep_object = SCM_BOOL_F;
  t.sleep_fd = -1;

  if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
  t.current_mark_stack_ptr = NULL;
  t.current_mark_stack_limit = NULL;
  t.canceled = 0;
  t.exited = 0;
  t.guile_mode = 0;

  /* The switcheroo: copy the stack-allocated, fully initialised
     struct into a GC-visible heap allocation and publish that.  */
  {
    scm_i_thread *t_ptr = &t;

    /* GC stays disabled across the copy and list insertion,
       presumably so no collection can observe the half-published
       thread -- TODO confirm.  */
    GC_disable ();
    t_ptr = GC_malloc (sizeof (scm_i_thread));
    memcpy (t_ptr, &t, sizeof t);

    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
    /* Cache the current thread in TLS for faster lookup. */
    scm_i_current_thread = t_ptr;
#endif

    scm_i_pthread_mutex_lock (&thread_admin_mutex);
    t_ptr->next_thread = all_threads;
    all_threads = t_ptr;
    thread_count++;
    scm_i_pthread_mutex_unlock (&thread_admin_mutex);

    GC_enable ();
  }
}
603
/* Perform second stage of thread initialisation, in guile mode:
   allocate the Scheme-visible parts (handle smob, dynamic state,
   dynstack, join queue) that guilify_self_1 could not.  PARENT is the
   dynamic state to inherit from, or #f for the initial one.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  /* Initial dynwind stack: 16 words, with the header occupying the
     first SCM_DYNSTACK_HEADER_LEN of them.  */
  t->dynstack.base = scm_gc_malloc (16 * sizeof (scm_t_bits), "dynstack");
  t->dynstack.limit = t->dynstack.base + 16;
  t->dynstack.top = t->dynstack.base + SCM_DYNSTACK_HEADER_LEN;

  t->join_queue = make_queue ();
  /* Now that initialisation is complete, allow asyncs to run.  */
  t->block_asyncs = 0;

  /* See note in finalizers.c:queue_finalizer_async(). */
  GC_invoke_finalizers ();
}
634
635 \f
636 /*** Fat mutexes */
637
638 /* We implement our own mutex type since we want them to be 'fair', we
639 want to do fancy things while waiting for them (like running
640 asyncs) and we might want to add things that are nice for
641 debugging.
642 */
643
/* Representation of a Guile-level ("fat") mutex; see the rationale in
   the comment above.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects all fields below */
  SCM owner;                    /* owning thread's handle, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
659
660 static SCM
661 call_cleanup (void *data)
662 {
663 SCM *proc_p = data;
664 return scm_call_0 (*proc_p);
665 }
666
/* Perform thread tear-down, in guile mode: run the cleanup handler
   (if any), mark the thread exited, wake joiners, and release any fat
   mutexes still associated with the thread.  V is the scm_i_thread.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice; any error it
         raises is reported but does not escape.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      call_cleanup, &ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      /* t->mutexes holds weak references; a #f entry means the mutex
         itself was already collected.  */
      SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);

      if (scm_is_true (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Check whether T owns MUTEX.  This is usually the case, unless
	     T abandoned MUTEX; in that case, T is no longer its owner (see
	     `fat_mutex_lock') but MUTEX is still in `t->mutexes'.  */
	  if (scm_is_eq (m->owner, t->handle))
	    unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = scm_cdr (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
718
/* Trampoline into do_thread_exit via scm_with_guile, first making
   sure this thread is registered with libgc.  SB is the stack base
   supplied by GC_call_with_stack_base.  */
static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered. */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}
729
/* Thread-specific-key destructor: final tear-down of thread V
   (an scm_i_thread *), run by pthreads when the thread exits.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode. */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If we were canceled, we were unable to clear `t->guile_mode', so do
     it here. */
  t->guile_mode = 0;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here. */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent. */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode. */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list. */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt. */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  /* Clear the key so this destructor is not invoked again.  */
  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}
788
/* Guard so the thread key is created exactly once, lazily.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create scm_i_thread_key, installing on_thread_exit as the
   per-thread destructor.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
796
797 /* Perform any initializations necessary to make the current thread
798 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
799 if necessary.
800
801 BASE is the stack base to use with GC.
802
803 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
804 which case the default dynamic state is used.
805
806 Returns zero when the thread was known to guile already; otherwise
807 return 1.
808
809 Note that it could be the case that the thread was known
810 to Guile, but not in guile mode (because we are within a
811 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
812 be sure. New threads are put into guile mode implicitly. */
813
/* See the contract in the comment above: make the calling thread
   known to Guile, initialising Guile itself on first use.  Returns 0
   when the thread was already known, 1 otherwise.  */
static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);

#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later. */
          GC_allow_register_threads ();
#endif

	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc. */
#if SCM_USE_PTHREAD_THREADS
          GC_register_my_thread (base);
#endif

	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
863
864 void
865 scm_init_guile ()
866 {
867 struct GC_stack_base stack_base;
868
869 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
870 scm_i_init_thread_for_guile (&stack_base,
871 scm_i_default_dynamic_state);
872 else
873 {
874 fprintf (stderr, "Failed to get stack base for current thread.\n");
875 exit (EXIT_FAILURE);
876 }
877 }
878
/* Bundle of arguments threaded through GC_call_with_stack_base down
   to with_guile_and_parent.  */
struct with_guile_args
{
  GC_fn_type func;   /* user function to run in guile mode */
  void *data;        /* opaque argument for FUNC */
  SCM parent;        /* parent dynamic state for a new thread */
};
885
/* GC_fn_type adapter: unpack DATA (a struct with_guile_args) and run
   the user function inside a continuation barrier.  */
static void *
with_guile_trampoline (void *data)
{
  struct with_guile_args *args = data;

  return scm_c_with_continuation_barrier (args->func, args->data);
}
893
/* Run the user function from DATA (a struct with_guile_args) in guile
   mode, entering guile mode first if necessary.  BASE is the current
   stack base as determined by GC_call_with_stack_base.  */
static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
         scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than when it was
         when this thread was first guilified.  Thus, `base' must be
         updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      /* Enter guile mode (making the stack GC-scanned again) for the
         duration of the call.  */
      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
942
943 static void *
944 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
945 {
946 struct with_guile_args args;
947
948 args.func = func;
949 args.data = data;
950 args.parent = parent;
951
952 return GC_call_with_stack_base (with_guile_and_parent, &args);
953 }
954
/* Public entry point: run FUNC (DATA) in guile mode, with the default
   dynamic state as the parent for a thread new to Guile.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
961
962 void *
963 scm_without_guile (void *(*func)(void *), void *data)
964 {
965 void *result;
966 scm_i_thread *t = SCM_I_CURRENT_THREAD;
967
968 if (t->guile_mode)
969 {
970 SCM_I_CURRENT_THREAD->guile_mode = 0;
971 result = with_gc_inactive (func, data);
972 SCM_I_CURRENT_THREAD->guile_mode = 1;
973 }
974 else
975 /* Otherwise we're not in guile mode, so nothing to do. */
976 result = func (data);
977
978 return result;
979 }
980
981 \f
982 /*** Thread creation */
983
/* Arguments handed from call-with-new-thread to the new pthread.
   Lives on the creating thread's stack; MUTEX/COND implement the
   handshake that keeps it alive until THREAD has been filled in.  */
typedef struct {
  SCM parent;               /* parent dynamic state */
  SCM thunk;                /* thunk to run in the new thread */
  SCM handler;              /* catch handler, or SCM_UNDEFINED */
  SCM thread;               /* out: the new thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
992
/* Body of a new Scheme thread, run in guile mode.  D is a launch_data
   living on the creating thread's stack: signal back that we have
   started (after which D may vanish -- hence THUNK and HANDLER are
   copied to locals first), then run the thunk.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Handshake: publish our thread object and wake the creator, which
     is blocked in scm_call_with_new_thread.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
1014
1015 static void *
1016 launch_thread (void *d)
1017 {
1018 launch_data *data = (launch_data *)d;
1019 scm_i_pthread_detach (scm_i_pthread_self ());
1020 scm_i_with_guile_and_parent (really_launch, d, data->parent);
1021 return NULL;
1022 }
1023
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread. The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler. This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  /* NOTE(review): presumably nudges the collector so thread-creation
     bursts don't defer GC indefinitely -- confirm.  */
  GC_collect_a_little ();
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  /* Wait for really_launch to publish the thread object; DATA lives
     on our stack, so we must not return before that handshake.  */
  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
1071
/* C-level analogue of launch_data, for scm_spawn_thread: body and
   handler are C callbacks rather than Scheme procedures.  */
typedef struct {
  SCM parent;                    /* parent dynamic state */
  scm_t_catch_body body;         /* C function to run */
  void *body_data;
  scm_t_catch_handler handler;   /* C handler, or NULL */
  void *handler_data;
  SCM thread;                    /* out: the new thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
1082
/* Body of a thread created by scm_spawn_thread, run in guile mode.
   Mirrors really_launch: copy what we need out of D (which lives on
   the creator's stack), signal the creator, then run the body.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Handshake with scm_spawn_thread; after this D may disappear.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
1107
1108 static void *
1109 spawn_thread (void *d)
1110 {
1111 spawn_data *data = (spawn_data *)d;
1112 scm_i_pthread_detach (scm_i_pthread_self ());
1113 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1114 return NULL;
1115 }
1116
/* Public C API: spawn a new Guile thread running BODY (BODY_DATA),
   optionally guarded by HANDLER (HANDLER_DATA); returns the new
   thread object.  C-level counterpart of call-with-new-thread.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  /* DATA is on our stack; wait until really_spawn is done with it.  */
  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  assert (SCM_I_IS_THREAD (data.thread));

  return data.thread;
}
1152
/* Scheme `yield': hint the scheduler to run another thread.  Returns
   whatever scm_i_sched_yield reports, as a boolean.  */
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1161
1162 SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1163 (SCM thread),
1164 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1165 "cannot be the current thread, and if @var{thread} has already terminated or "
1166 "been signaled to terminate, this function is a no-op.")
1167 #define FUNC_NAME s_scm_cancel_thread
1168 {
1169 scm_i_thread *t = NULL;
1170
1171 SCM_VALIDATE_THREAD (1, thread);
1172 t = SCM_I_THREAD_DATA (thread);
1173 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1174 if (!t->canceled)
1175 {
1176 t->canceled = 1;
1177 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1178 scm_i_pthread_cancel (t->pthread);
1179 }
1180 else
1181 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1182
1183 return SCM_UNSPECIFIED;
1184 }
1185 #undef FUNC_NAME
1186
1187 SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1188 (SCM thread, SCM proc),
1189 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1190 "This handler will be called when the thread exits.")
1191 #define FUNC_NAME s_scm_set_thread_cleanup_x
1192 {
1193 scm_i_thread *t;
1194
1195 SCM_VALIDATE_THREAD (1, thread);
1196 if (!scm_is_false (proc))
1197 SCM_VALIDATE_THUNK (2, proc);
1198
1199 t = SCM_I_THREAD_DATA (thread);
1200 scm_i_pthread_mutex_lock (&t->admin_mutex);
1201
1202 if (!(t->exited || t->canceled))
1203 t->cleanup_handler = proc;
1204
1205 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1206
1207 return SCM_UNSPECIFIED;
1208 }
1209 #undef FUNC_NAME
1210
1211 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1212 (SCM thread),
1213 "Return the cleanup handler installed for the thread @var{thread}.")
1214 #define FUNC_NAME s_scm_thread_cleanup
1215 {
1216 scm_i_thread *t;
1217 SCM ret;
1218
1219 SCM_VALIDATE_THREAD (1, thread);
1220
1221 t = SCM_I_THREAD_DATA (thread);
1222 scm_i_pthread_mutex_lock (&t->admin_mutex);
1223 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1224 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1225
1226 return ret;
1227 }
1228 #undef FUNC_NAME
1229
/* Join THREAD with no timeout; defers to the timed variant.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1234
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* TIMEOUTVAL is what we return if the wait times out; default #f.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  /* Sleep on the thread's join queue until woken or until the
	     (absolute) timeout expires.  */
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Woken for another reason (e.g. an async): let pending
	     asyncs run outside the admin mutex, then retry.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited). */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1301
1302 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1303 (SCM obj),
1304 "Return @code{#t} if @var{obj} is a thread.")
1305 #define FUNC_NAME s_scm_thread_p
1306 {
1307 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1308 }
1309 #undef FUNC_NAME
1310
1311
1312 static int
1313 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1314 {
1315 fat_mutex *m = SCM_MUTEX_DATA (mx);
1316 scm_puts_unlocked ("#<mutex ", port);
1317 scm_uintprint ((scm_t_bits)m, 16, port);
1318 scm_puts_unlocked (">", port);
1319 return 1;
1320 }
1321
/* Allocate and initialize a new fat mutex with the given flag bits
   and return the SMOB wrapping it.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;
  scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
     and so we can just copy it. */
  memcpy (&m->lock, &lock, sizeof (m->lock));
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Give `waiting' a valid SCM value before creating the SMOB —
     presumably so the GC never scans an uninitialized field; the real
     queue (which itself allocates) is installed afterwards.
     NOTE(review): same pattern as scm_make_condition_variable.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1345
/* Create a standard mutex: non-recursive, checked unlock, owner-only
   unlock (i.e. no flags).  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1350
1351 SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1352 SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1353 SCM_SYMBOL (recursive_sym, "recursive");
1354
1355 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1356 (SCM flags),
1357 "Create a new mutex. ")
1358 #define FUNC_NAME s_scm_make_mutex_with_flags
1359 {
1360 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1361
1362 SCM ptr = flags;
1363 while (! scm_is_null (ptr))
1364 {
1365 SCM flag = SCM_CAR (ptr);
1366 if (scm_is_eq (flag, unchecked_unlock_sym))
1367 unchecked_unlock = 1;
1368 else if (scm_is_eq (flag, allow_external_unlock_sym))
1369 external_unlock = 1;
1370 else if (scm_is_eq (flag, recursive_sym))
1371 recursive = 1;
1372 else
1373 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1374 ptr = SCM_CDR (ptr);
1375 }
1376 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1377 }
1378 #undef FUNC_NAME
1379
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* Equivalent to (make-mutex 'recursive).  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1388
1389 SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1390
/* Lock MUTEX on behalf of OWNER (defaulting to the current thread),
   giving up at the absolute time TIMEOUT when non-NULL.  On return,
   *RET is 1 iff the lock was acquired.  The return value is #f on
   success, or a (key . message) pair describing an error that the
   caller is expected to throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unowned: take the mutex.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

	      /* FIXME: The order in which `t->admin_mutex' and
		 `m->lock' are taken differs from that in
		 `on_thread_exit', potentially leading to deadlocks. */
	      scm_i_pthread_mutex_lock (&t->admin_mutex);

	      /* Only keep a weak reference to MUTEX so that it's not
		 retained when not referenced elsewhere (bug #27450).
		 The weak pair itself is eventually removed when MUTEX
		 is unlocked.  Note that `t->mutexes' lists mutexes
		 currently held by T, so it should be small.  */
	      t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
				     t->mutexes);

	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* The previous owner died without unlocking: steal the
	     mutex, but report the abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Re-locking by the current owner: only recursive mutexes
	     allow this.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by someone else: check the deadline, then wait.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  /* block_self releases m->lock while sleeping on the wait
	     queue; run pending asyncs before retrying.  */
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1479
/* Lock MX with no timeout and the current thread as owner.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1484
1485 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1486 (SCM m, SCM timeout, SCM owner),
1487 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1488 "thread blocks until the mutex becomes available. The function\n"
1489 "returns when the calling thread owns the lock on @var{m}.\n"
1490 "Locking a mutex that a thread already owns will succeed right\n"
1491 "away and will not block the thread. That is, Guile's mutexes\n"
1492 "are @emph{recursive}.")
1493 #define FUNC_NAME s_scm_lock_mutex_timed
1494 {
1495 SCM exception;
1496 int ret = 0;
1497 scm_t_timespec cwaittime, *waittime = NULL;
1498
1499 SCM_VALIDATE_MUTEX (1, m);
1500
1501 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1502 {
1503 to_timespec (timeout, &cwaittime);
1504 waittime = &cwaittime;
1505 }
1506
1507 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1508 SCM_VALIDATE_THREAD (3, owner);
1509
1510 exception = fat_mutex_lock (m, waittime, owner, &ret);
1511 if (!scm_is_false (exception))
1512 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1513 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1514 }
1515 #undef FUNC_NAME
1516
/* void-returning wrapper around scm_lock_mutex, usable as a dynwind
   rewind handler.  */
static void
lock_mutex_return_void (SCM mx)
{
  (void) scm_lock_mutex (mx);
}
1522
/* void-returning wrapper around scm_unlock_mutex, usable as a dynwind
   unwind handler.  */
static void
unlock_mutex_return_void (SCM mx)
{
  (void) scm_unlock_mutex (mx);
}
1528
/* Hold MUTEX for the extent of the current dynwind context: locked on
   rewind, unlocked on unwind.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  /* Register the unwind handler before the rewind handler so the
     mutex is released even if entering the context throws.  */
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1537
1538 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1539 (SCM mutex),
1540 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1541 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1542 #define FUNC_NAME s_scm_try_mutex
1543 {
1544 SCM exception;
1545 int ret = 0;
1546 scm_t_timespec cwaittime, *waittime = NULL;
1547
1548 SCM_VALIDATE_MUTEX (1, mutex);
1549
1550 to_timespec (scm_from_int(0), &cwaittime);
1551 waittime = &cwaittime;
1552
1553 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1554 if (!scm_is_false (exception))
1555 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1556 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1557 }
1558 #undef FUNC_NAME
1559
1560 /*** Fat condition variables */
1561
/* A "fat" condition variable: a queue of waiting threads protected by
   its own pthread mutex.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)         SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)     ((fat_cond *) SCM_SMOB_DATA (x))
1569
1570 static void
1571 remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
1572 {
1573 SCM walk, prev;
1574
1575 for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
1576 walk = SCM_CDR (walk))
1577 {
1578 if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
1579 {
1580 if (scm_is_pair (prev))
1581 SCM_SETCDR (prev, SCM_CDR (walk));
1582 else
1583 t->mutexes = SCM_CDR (walk);
1584 break;
1585 }
1586 }
1587 }
1588
/* Unlock MUTEX.  When COND is given, additionally wait on it —
   atomically with the unlock — until signalled or until the absolute
   time WAITTIME elapses; when RELOCK is non-zero, re-acquire MUTEX
   before returning.  Returns 1 on plain unlock or signalled wait, 0
   on timeout; throws on unlock violations or system errors.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
	{
	  /* Unlocking an unlocked mutex is only allowed for mutexes
	     created with the `unchecked-unlock' flag.  */
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = t->handle;
	}
      else if (!m->allow_external_unlock)
	{
	  /* Unlocking another thread's mutex requires the
	     `allow-external-unlock' flag.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    {
	      /* Change the owner of MUTEX.  */
	      remove_mutex_from_thread (mutex, t);
	      m->owner = unblock_from_queue (m->waiting);
	    }

	  t->block_asyncs++;

	  /* Sleep on the condvar's queue; block_self releases m->lock
	     while blocked.  */
	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: run pending asyncs, then loop and wait again.  */
	  t->block_asyncs--;
	  scm_async_tick ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock, no condition variable involved.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	{
	  /* Change the owner of MUTEX.  */
	  remove_mutex_from_thread (mutex, t);
	  m->owner = unblock_from_queue (m->waiting);
	}

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1691
/* Unlock MX unconditionally (no condition variable, no timeout).  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1696
1697 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1698 (SCM mx, SCM cond, SCM timeout),
1699 "Unlocks @var{mutex} if the calling thread owns the lock on "
1700 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1701 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1702 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1703 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1704 "with a call to @code{unlock-mutex}. Only the last call to "
1705 "@code{unlock-mutex} will actually unlock the mutex. ")
1706 #define FUNC_NAME s_scm_unlock_mutex_timed
1707 {
1708 scm_t_timespec cwaittime, *waittime = NULL;
1709
1710 SCM_VALIDATE_MUTEX (1, mx);
1711 if (! (SCM_UNBNDP (cond)))
1712 {
1713 SCM_VALIDATE_CONDVAR (2, cond);
1714
1715 if (! (SCM_UNBNDP (timeout)))
1716 {
1717 to_timespec (timeout, &cwaittime);
1718 waittime = &cwaittime;
1719 }
1720 }
1721
1722 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1723 }
1724 #undef FUNC_NAME
1725
1726 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1727 (SCM obj),
1728 "Return @code{#t} if @var{obj} is a mutex.")
1729 #define FUNC_NAME s_scm_mutex_p
1730 {
1731 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1732 }
1733 #undef FUNC_NAME
1734
1735 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1736 (SCM mx),
1737 "Return the thread owning @var{mx}, or @code{#f}.")
1738 #define FUNC_NAME s_scm_mutex_owner
1739 {
1740 SCM owner;
1741 fat_mutex *m = NULL;
1742
1743 SCM_VALIDATE_MUTEX (1, mx);
1744 m = SCM_MUTEX_DATA (mx);
1745 scm_i_pthread_mutex_lock (&m->lock);
1746 owner = m->owner;
1747 scm_i_pthread_mutex_unlock (&m->lock);
1748
1749 return owner;
1750 }
1751 #undef FUNC_NAME
1752
1753 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1754 (SCM mx),
1755 "Return the lock level of mutex @var{mx}.")
1756 #define FUNC_NAME s_scm_mutex_level
1757 {
1758 SCM_VALIDATE_MUTEX (1, mx);
1759 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1760 }
1761 #undef FUNC_NAME
1762
1763 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1764 (SCM mx),
1765 "Returns @code{#t} if the mutex @var{mx} is locked.")
1766 #define FUNC_NAME s_scm_mutex_locked_p
1767 {
1768 SCM_VALIDATE_MUTEX (1, mx);
1769 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1770 }
1771 #undef FUNC_NAME
1772
1773 static int
1774 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1775 {
1776 fat_cond *c = SCM_CONDVAR_DATA (cv);
1777 scm_puts_unlocked ("#<condition-variable ", port);
1778 scm_uintprint ((scm_t_bits)c, 16, port);
1779 scm_puts_unlocked (">", port);
1780 return 1;
1781 }
1782
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Give `waiting' a valid SCM value before creating the SMOB —
     presumably so the GC never scans an uninitialized field; the real
     queue (which itself allocates) is installed afterwards.
     NOTE(review): same pattern as make_fat_mutex.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1798
1799 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1800 (SCM cv, SCM mx, SCM t),
1801 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1802 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1803 "is locked again when this function returns. When @var{t} is given, "
1804 "it specifies a point in time where the waiting should be aborted. It "
1805 "can be either a integer as returned by @code{current-time} or a pair "
1806 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1807 "mutex is locked and @code{#f} is returned. When the condition "
1808 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1809 "is returned. ")
1810 #define FUNC_NAME s_scm_timed_wait_condition_variable
1811 {
1812 scm_t_timespec waittime, *waitptr = NULL;
1813
1814 SCM_VALIDATE_CONDVAR (1, cv);
1815 SCM_VALIDATE_MUTEX (2, mx);
1816
1817 if (!SCM_UNBNDP (t))
1818 {
1819 to_timespec (t, &waittime);
1820 waitptr = &waittime;
1821 }
1822
1823 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1824 }
1825 #undef FUNC_NAME
1826
/* Wake up at most one thread waiting on C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1832
1833 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1834 (SCM cv),
1835 "Wake up one thread that is waiting for @var{cv}")
1836 #define FUNC_NAME s_scm_signal_condition_variable
1837 {
1838 SCM_VALIDATE_CONDVAR (1, cv);
1839 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1840 return SCM_BOOL_T;
1841 }
1842 #undef FUNC_NAME
1843
1844 static void
1845 fat_cond_broadcast (fat_cond *c)
1846 {
1847 while (scm_is_true (unblock_from_queue (c->waiting)))
1848 ;
1849 }
1850
1851 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1852 (SCM cv),
1853 "Wake up all threads that are waiting for @var{cv}. ")
1854 #define FUNC_NAME s_scm_broadcast_condition_variable
1855 {
1856 SCM_VALIDATE_CONDVAR (1, cv);
1857 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1858 return SCM_BOOL_T;
1859 }
1860 #undef FUNC_NAME
1861
1862 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1863 (SCM obj),
1864 "Return @code{#t} if @var{obj} is a condition variable.")
1865 #define FUNC_NAME s_scm_condition_variable_p
1866 {
1867 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1868 }
1869 #undef FUNC_NAME
1870
1871
1872 \f
1873 /*** Select */
1874
/* Argument bundle for do_std_select, passed through
   scm_without_guile.  */
struct select_args
{
  int nfds;
  fd_set *read_fds;
  fd_set *write_fds;
  fd_set *except_fds;
  struct timeval *timeout;

  /* Outputs: select's return value and the errno it left behind.  */
  int result;
  int errno_value;
};
1886
1887 static void *
1888 do_std_select (void *args)
1889 {
1890 struct select_args *select_args;
1891
1892 select_args = (struct select_args *) args;
1893
1894 select_args->result =
1895 select (select_args->nfds,
1896 select_args->read_fds, select_args->write_fds,
1897 select_args->except_fds, select_args->timeout);
1898 select_args->errno_value = errno;
1899
1900 return NULL;
1901 }
1902
1903 #if !SCM_HAVE_SYS_SELECT_H
1904 static int scm_std_select (int nfds,
1905 fd_set *readfds,
1906 fd_set *writefds,
1907 fd_set *exceptfds,
1908 struct timeval *timeout);
1909 #endif
1910
/* A guile-mode-safe select(2): it cooperates with the GC while
   blocking and can be woken early through the calling thread's sleep
   pipe (used by the async/signal machinery).  Returns like select; a
   wakeup via the pipe with no other ready descriptor is reported as
   -1 with errno set to EINTR.  */
int
scm_std_select (int nfds,
		fd_set *readfds,
		fd_set *writefds,
		fd_set *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set, if only to watch the wakeup pipe.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register the write end of our sleep pipe so other threads can
     interrupt this select.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* We were poked through the sleep pipe: drain the byte and hide
	 the wakeup descriptor from the caller.  */
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1968
1969 /* Convenience API for blocking while in guile mode. */
1970
1971 #if SCM_USE_PTHREAD_THREADS
1972
1973 /* It seems reasonable to not run procedures related to mutex and condition
1974 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1975 without it, and (ii) the only potential gain would be GC latency. See
1976 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1977 for a discussion of the pros and cons. */
1978
1979 int
1980 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1981 {
1982 int res = scm_i_pthread_mutex_lock (mutex);
1983 return res;
1984 }
1985
/* Dynwind unwind handler: unlock the pthread mutex passed as DATA.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1991
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is left.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1998
/* Like pthread_cond_wait, but records MUTEX in t->held_mutex for the
   duration of the wait.  NOTE(review): presumably so other machinery
   (e.g. thread exit/interruption) can release the mutex on our
   behalf — confirm against on_thread_exit.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
2011
/* Timed variant of scm_pthread_cond_wait; WT is an absolute time, as
   with pthread_cond_timedwait.  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
2026
2027 #endif
2028
2029 unsigned long
2030 scm_std_usleep (unsigned long usecs)
2031 {
2032 struct timeval tv;
2033 tv.tv_usec = usecs % 1000000;
2034 tv.tv_sec = usecs / 1000000;
2035 scm_std_select (0, NULL, NULL, NULL, &tv);
2036 return tv.tv_sec * 1000000 + tv.tv_usec;
2037 }
2038
2039 unsigned int
2040 scm_std_sleep (unsigned int secs)
2041 {
2042 struct timeval tv;
2043 tv.tv_usec = 0;
2044 tv.tv_sec = secs;
2045 scm_std_select (0, NULL, NULL, NULL, &tv);
2046 return tv.tv_sec;
2047 }
2048
/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The Scheme handle lives in the thread-local scm_i_thread.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
2059
2060 static SCM
2061 scm_c_make_list (size_t n, SCM fill)
2062 {
2063 SCM res = SCM_EOL;
2064 while (n-- > 0)
2065 res = scm_cons (fill, res);
2066 return res;
2067 }
2068
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate the list spine outside the lock; fill it in below.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is hidden from Scheme.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Cut the list here: the pre-allocated spine may be longer than the
     number of handles actually stored.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
2097
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  /* Thin Scheme wrapper over the C predicate below.  */
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
2106
2107 int
2108 scm_c_thread_exited_p (SCM thread)
2109 #define FUNC_NAME s_scm_thread_exited_p
2110 {
2111 scm_i_thread *t;
2112 SCM_VALIDATE_THREAD (1, thread);
2113 t = SCM_I_THREAD_DATA (thread);
2114 return t->exited;
2115 }
2116 #undef FUNC_NAME
2117
SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
	    (void),
	    "Return the total number of processors of the machine, which\n"
	    "is guaranteed to be at least 1.  A ``processor'' here is a\n"
	    "thread execution unit, which can be either:\n\n"
	    "@itemize\n"
	    "@item an execution core in a (possibly multi-core) chip, in a\n"
	    "  (possibly multi- chip) module, in a single computer, or\n"
	    "@item a thread execution unit inside a core in the case of\n"
	    "  @dfn{hyper-threaded} CPUs.\n"
	    "@end itemize\n\n"
	    "Which of the two definitions is used, is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  /* num_processors comes from gnulib's <nproc.h> module.  */
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME
2135
SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
	    (void),
	    "Like @code{total-processor-count}, but return the number of\n"
	    "processors available to the current process.  See\n"
	    "@code{setaffinity} and @code{getaffinity} for more\n"
	    "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  /* NPROC_CURRENT restricts the count to this process's affinity.  */
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
2147
2148
2149 \f
2150
2151 static scm_i_pthread_cond_t wake_up_cond;
2152 static int threads_initialized_p = 0;
2153
2154
2155 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2156 */
2157 scm_i_pthread_mutex_t scm_i_critical_section_mutex;
2158
2159 static SCM dynwind_critical_section_mutex;
2160
/* Enter a critical section for the current dynwind extent: lock MUTEX
   (or the global recursive default when MUTEX is #f) and block asyncs
   until the dynwind context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
2169
2170 /*** Initialization */
2171
2172 scm_i_pthread_mutex_t scm_i_misc_mutex;
2173
2174 #if SCM_USE_PTHREAD_THREADS
2175 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2176 #endif
2177
/* Earliest stage of thread-system initialization.  BASE is the stack
   base of the calling (main) thread.  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* Prepare the recursive mutex attribute used below for the critical
     section mutex.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  /* Stage-1 setup of the calling thread (see guilify_self_1).  */
  guilify_self_1 ((struct GC_stack_base *) base);
}
2194
2195 scm_t_bits scm_tc16_thread;
2196 scm_t_bits scm_tc16_mutex;
2197 scm_t_bits scm_tc16_condvar;
2198
/* Create the SMOB types for threads, mutexes, and condition
   variables, and finish setting up the current thread.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2218
/* Capture a default dynamic state derived from the current one and
   store it in scm_i_default_dynamic_state.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}
2225
/* Register the Scheme procedures declared with SCM_DEFINE in this
   file (the .x file is generated by the snarfer).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2231
2232 \f
2233 /* IA64-specific things. */
2234
#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* Find the base of the IA64 register backing store by scanning this
   process's memory regions for the RSE stack.  Aborts if none is
   found.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Extract the ar.bsp register from a ucontext.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* glibc exports the backing store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
#  include <ucontext.h>
/* NOTE(review): hard-coded backing store base on FreeBSD/ia64.  */
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
/* bspstore plus the number of dirty bytes gives the current ar.bsp.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
		  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */
2288
2289
2290 /*
2291 Local Variables:
2292 c-file-style: "gnu"
2293 End:
2294 */