/* libguile/threads.c — Guile thread support. */
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
27
28 #if HAVE_UNISTD_H
29 #include <unistd.h>
30 #endif
31 #include <stdio.h>
32
33 #ifdef HAVE_STRING_H
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35 #endif
36
37 #if HAVE_SYS_TIME_H
38 #include <sys/time.h>
39 #endif
40
41 #if HAVE_PTHREAD_NP_H
42 # include <pthread_np.h>
43 #endif
44
45 #include <assert.h>
46 #include <fcntl.h>
47 #include <nproc.h>
48
49 #include "libguile/validate.h"
50 #include "libguile/root.h"
51 #include "libguile/eval.h"
52 #include "libguile/async.h"
53 #include "libguile/ports.h"
54 #include "libguile/threads.h"
55 #include "libguile/dynwind.h"
56 #include "libguile/iselect.h"
57 #include "libguile/fluids.h"
58 #include "libguile/continuations.h"
59 #include "libguile/gc.h"
60 #include "libguile/init.h"
61 #include "libguile/scmsigs.h"
62 #include "libguile/strings.h"
63 #include "libguile/weaks.h"
64
65 #include <full-read.h>
66
67
68 \f
69
/* First some libgc shims. */

/* Make sure GC_fn_type is defined; it is missing from the public
   headers of GC 7.1 and earlier. */
#ifndef HAVE_GC_FN_TYPE
typedef void * (* GC_fn_type) (void *);
#endif


/* Fallback definitions of the GC_get_stack_base status codes, for
   libgc versions whose headers do not provide them.  */
#ifndef GC_SUCCESS
#define GC_SUCCESS 0
#endif

#ifndef GC_UNIMPLEMENTED
#define GC_UNIMPLEMENTED 3
#endif
86
/* Likewise struct GC_stack_base is missing before 7.1. */
#ifndef HAVE_GC_STACK_BASE
struct GC_stack_base {
  void * mem_base; /* Base of memory stack. */
#ifdef __ia64__
  void * reg_base; /* Base of separate register stack. */
#endif
};
95
/* Shim for libgc < 7.1: per-thread registration is not available
   there, so report that to the caller.  */
static int
GC_register_my_thread (struct GC_stack_base *stack_base)
{
  return GC_UNIMPLEMENTED;
}
101
/* Shim for libgc < 7.1: registration is unimplemented, so there is
   nothing to undo here.  */
static void
GC_unregister_my_thread ()
{
}
106
/* Return the base of the calling thread's stack, chosen per platform.
   Only compiled when libgc itself cannot tell us (pre-7.1).  */
#if !SCM_USE_PTHREAD_THREADS
/* No threads; we can just use GC_stackbottom. */
static void *
get_thread_stack_base ()
{
  return GC_stackbottom;
}

#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
  && defined PTHREAD_ATTR_GETSTACK_WORKS
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them. */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* The "base" is the end the stack grows away from.  */
#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com. */
static void *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#elif HAVE_PTHREAD_ATTR_GET_NP
/* This one is for FreeBSD 9. */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_attr_init (&attr);
  pthread_attr_get_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  pthread_attr_destroy (&attr);

  end = (char *)start + size;

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#else
#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl.  Please upgrade to libgc >= 7.1.
#endif
174
/* Shim for libgc < 7.1: fill in *STACK_BASE for the calling thread.
   Always reports GC_SUCCESS.  */
static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum. */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}
197
/* Shim for libgc < 7.1: call FN with ARG and an approximation of the
   current stack base (the address of a local variable).  */
static void *
GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
{
  struct GC_stack_base stack_base;

  stack_base.mem_base = (void*)&stack_base;
#ifdef __ia64__
  /* FIXME: Untested. */
  {
    ucontext_t ctx;
    getcontext (&ctx);
    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
  }
#endif

  return fn (&stack_base, arg);
}
#endif /* HAVE_GC_STACK_BASE */
216
217
/* Now define with_gc_active and with_gc_inactive. */

#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer). */

/* Run FUNC (DATA) with this thread treated as blocked by the
   collector, so collections can proceed while FUNC runs.  */
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

/* Run FUNC (DATA) with this thread visible to the collector again.  */
static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active. */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */
256
257
258 \f
259 static void
260 to_timespec (SCM t, scm_t_timespec *waittime)
261 {
262 if (scm_is_pair (t))
263 {
264 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
265 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
266 }
267 else
268 {
269 double time = scm_to_double (t);
270 double sec = scm_c_truncate (time);
271
272 waittime->tv_sec = (long) sec;
273 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
274 }
275 }
276
277 \f
278 /*** Queues */
279
280 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
281 the risk of false references leading to unbounded retained space as
282 described in "Bounding Space Usage of Conservative Garbage Collectors",
283 H.J. Boehm, 2001. */
284
285 /* Make an empty queue data structure.
286 */
287 static SCM
288 make_queue ()
289 {
290 return scm_cons (SCM_EOL, SCM_EOL);
291 }
292
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
   Q's cdr is the element list (front first); Q's car is the last cell
   of that list, or '() when the queue is empty (see make_queue).  */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Empty queue: the new cell becomes the whole list.  */
    SCM_SETCDR (q, c);
  else
    /* Otherwise append after the current tail cell.  */
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}
309
/* Remove the element that the handle C refers to from the queue Q. C
   must have been returned from a call to enqueue. The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
	{
	  /* When removing the tail cell, the queue's tail pointer
	     (its car) must be updated as well.  */
	  if (scm_is_eq (c, SCM_CAR (q)))
	    SCM_SETCAR (q, SCM_CDR (c));
	  SCM_SETCDR (prev, SCM_CDR (c));

	  /* GC-robust: sever the removed cell's link so it cannot
	     keep the rest of the list alive via a stale reference.  */
	  SCM_SETCDR (c, SCM_EOL);

	  SCM_CRITICAL_SECTION_END;
	  return 1;
	}
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}
339
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
	/* Queue is now empty: reset the tail pointer too.  */
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust: sever the dequeued cell's link into the rest of
	 the list.  */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
367
368 /*** Thread smob routines */
369
370
371 static int
372 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
373 {
374 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
375 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
376 the struct case, hence we go via a union, and extract according to the
377 size of pthread_t. */
378 union {
379 scm_i_pthread_t p;
380 unsigned short us;
381 unsigned int ui;
382 unsigned long ul;
383 scm_t_uintmax um;
384 } u;
385 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
386 scm_i_pthread_t p = t->pthread;
387 scm_t_uintmax id;
388 u.p = p;
389 if (sizeof (p) == sizeof (unsigned short))
390 id = u.us;
391 else if (sizeof (p) == sizeof (unsigned int))
392 id = u.ui;
393 else if (sizeof (p) == sizeof (unsigned long))
394 id = u.ul;
395 else
396 id = u.um;
397
398 scm_puts ("#<thread ", port);
399 scm_uintprint (id, 10, port);
400 scm_puts (" (", port);
401 scm_uintprint ((scm_t_bits)t, 16, port);
402 scm_puts (")>", port);
403 return 1;
404 }
405
406 \f
407 /*** Blocking on queues. */
408
/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* NOTE(review): scm_i_setup_sleep's nonzero return presumably means
     the sleep should not take place (an interruption is already
     pending) — confirm against scmsigs.c.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block asyncs while we are on the queue, so delivery cannot
	 disturb the wakeup protocol.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
465
466 /* Wake up the first thread on QUEUE, if any. The awoken thread is
467 returned, or #f if the queue was empty.
468 */
469 static SCM
470 unblock_from_queue (SCM queue)
471 {
472 SCM thread = dequeue (queue);
473 if (scm_is_true (thread))
474 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
475 return thread;
476 }
477
478 \f
479 /* Getting into and out of guile mode.
480 */
481
/* Key used to attach a cleanup handler to a given thread. Also, if
   thread-local storage is unavailable, this key is used to retrieve the
   current thread with `pthread_getspecific ()'. */
scm_i_pthread_key_t scm_i_thread_key;


#ifdef SCM_HAVE_THREAD_STORAGE_CLASS

/* When thread-local storage (TLS) is available, a pointer to the
   current-thread object is kept in TLS. Note that storing the thread-object
   itself in TLS (rather than a pointer to some malloc'd memory) is not
   possible since thread objects may live longer than the actual thread they
   represent. */
SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;

#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */


/* Protects the list of all threads and the count below.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Singly linked list of every thread known to Guile, and its length.  */
static scm_i_thread *all_threads = NULL;
static int thread_count;

/* Parent dynamic state for threads that enter Guile without one.  */
static SCM scm_i_default_dynamic_state;
505
506 /* Run when a fluid is collected. */
507 void
508 scm_i_reset_fluid (size_t n)
509 {
510 scm_i_thread *t;
511
512 scm_i_pthread_mutex_lock (&thread_admin_mutex);
513 for (t = all_threads; t; t = t->next_thread)
514 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
515 {
516 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
517
518 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
519 SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
520 }
521 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
522 }
523
524 /* Perform first stage of thread initialisation, in non-guile mode.
525 */
526 static void
527 guilify_self_1 (struct GC_stack_base *base)
528 {
529 scm_i_thread t;
530
531 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
532 before allocating anything in this thread, because allocation could
533 cause GC to run, and GC could cause finalizers, which could invoke
534 Scheme functions, which need the current thread to be set. */
535
536 t.pthread = scm_i_pthread_self ();
537 t.handle = SCM_BOOL_F;
538 t.result = SCM_BOOL_F;
539 t.cleanup_handler = SCM_BOOL_F;
540 t.mutexes = SCM_EOL;
541 t.held_mutex = NULL;
542 t.join_queue = SCM_EOL;
543 t.dynamic_state = SCM_BOOL_F;
544 t.dynwinds = SCM_EOL;
545 t.active_asyncs = SCM_EOL;
546 t.block_asyncs = 1;
547 t.pending_asyncs = 1;
548 t.critical_section_level = 0;
549 t.base = base->mem_base;
550 #ifdef __ia64__
551 t.register_backing_store_base = base->reg-base;
552 #endif
553 t.continuation_root = SCM_EOL;
554 t.continuation_base = t.base;
555 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
556 t.sleep_mutex = NULL;
557 t.sleep_object = SCM_BOOL_F;
558 t.sleep_fd = -1;
559
560 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
561 /* FIXME: Error conditions during the initialization phase are handled
562 gracelessly since public functions such as `scm_init_guile ()'
563 currently have type `void'. */
564 abort ();
565
566 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
567 t.current_mark_stack_ptr = NULL;
568 t.current_mark_stack_limit = NULL;
569 t.canceled = 0;
570 t.exited = 0;
571 t.guile_mode = 0;
572
573 /* The switcheroo. */
574 {
575 scm_i_thread *t_ptr = &t;
576
577 GC_disable ();
578 t_ptr = GC_malloc (sizeof (scm_i_thread));
579 memcpy (t_ptr, &t, sizeof t);
580
581 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
582
583 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
584 /* Cache the current thread in TLS for faster lookup. */
585 scm_i_current_thread = t_ptr;
586 #endif
587
588 scm_i_pthread_mutex_lock (&thread_admin_mutex);
589 t_ptr->next_thread = all_threads;
590 all_threads = t_ptr;
591 thread_count++;
592 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
593
594 GC_enable ();
595 }
596 }
597
/* Perform second stage of thread initialisation, in guile mode.
   PARENT is the parent dynamic state, or #f for the initial one.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  /* From here on allocation is allowed: give the thread its handle
     smob.  */
  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Asyncs were blocked in guilify_self_1; the thread is now fully
     set up, so allow them.  */
  t->block_asyncs = 0;
}
621
622 \f
623 /*** Fat mutexes */
624
625 /* We implement our own mutex type since we want them to be 'fair', we
626 want to do fancy things while waiting for them (like running
627 asyncs) and we might want to add things that are nice for
628 debugging.
629 */
630
/* A fat mutex: a plain pthread mutex plus the bookkeeping it
   protects.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;                    /* thread handle of the owner, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
646
647 static SCM
648 call_cleanup (void *data)
649 {
650 SCM *proc_p = data;
651 return scm_call_0 (*proc_p);
652 }
653
/* Perform thread tear-down, in guile mode: run the cleanup handler,
   wake joiners, and release any fat mutexes still held.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  This needs to be done in Guile mode.  */
  scm_i_ensure_signal_delivery_thread ();

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      /* Run the cleanup thunk under a catch-all so the exit can
	 proceed; its result becomes the thread's result.  */
      t->result = scm_internal_catch (SCM_BOOL_T,
				      call_cleanup, &ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      /* The pair is weak, so the mutex may already have been
	 collected, leaving SCM_UNDEFINED in the car.  */
      if (!SCM_UNBNDP (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Since MUTEX is in `t->mutexes', T must be its owner.  */
	  assert (scm_is_eq (m->owner, t->handle));

	  unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
708
/* Run do_thread_exit in guile mode, from a freshly determined stack
   base SB.  */
static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered. */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}
719
/* Thread-key destructor: runs when a thread that entered Guile
   terminates.  V is the thread's scm_i_thread.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode. */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If we were canceled, we were unable to clear `t->guile_mode', so do
     it here. */
  t->guile_mode = 0;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here. */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent. */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode. */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list. */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt. */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  /* We are done with T; drop its association with the thread key.  */
  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}
778
/* Guard ensuring the thread key is created exactly once.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create the per-thread key, with `on_thread_exit' registered to run
   for a thread's non-NULL key value when that thread terminates.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
786
/* Perform any initializations necessary to make the current thread
   known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
   if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Returns zero when the thread was known to guile already; otherwise
   return 1.

   Note that it could be the case that the thread was known
   to Guile, but not in guile mode (because we are within a
   scm_without_guile call).  Check SCM_I_CURRENT_THREAD->guile_mode to
   be sure.  New threads are put into guile mode implicitly. */

static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	   */
	  scm_i_init_guile (base);

#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later. */
          GC_allow_register_threads ();
#endif

	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc. */
#if SCM_USE_PTHREAD_THREADS
          GC_register_my_thread (base);
#endif

	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
853
854 void
855 scm_init_guile ()
856 {
857 struct GC_stack_base stack_base;
858
859 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
860 scm_i_init_thread_for_guile (&stack_base,
861 scm_i_default_dynamic_state);
862 else
863 {
864 fprintf (stderr, "Failed to get stack base for current thread.\n");
865 exit (1);
866 }
867 }
868
/* Arguments threaded through with_guile_and_parent and its
   trampoline.  */
struct with_guile_args
{
  GC_fn_type func;   /* user function to run in guile mode */
  void *data;        /* opaque argument for FUNC */
  SCM parent;        /* parent dynamic state, or SCM_BOOL_F */
};
875
876 static void *
877 with_guile_trampoline (void *data)
878 {
879 struct with_guile_args *args = data;
880
881 return scm_c_with_continuation_barrier (args->func, args->data);
882 }
883
/* Enter guile mode (if not already there) and run the user function
   from DATA behind a continuation barrier.  BASE is the stack base of
   this entry into Guile.  */
static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
	 scm_with_guile, or because we are within a scm_without_guile.

	 This call to scm_with_guile() could happen from anywhere on the
	 stack, and in particular lower on the stack than when it was
	 when this thread was first guilified.  Thus, `base' must be
	 updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
	t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
	t->base = SCM_STACK_PTR (base->mem_base);
#endif

      /* Make this thread visible to the collector again while the
	 user function runs (see with_gc_active).  */
      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
932
933 static void *
934 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
935 {
936 struct with_guile_args args;
937
938 args.func = func;
939 args.data = data;
940 args.parent = parent;
941
942 return GC_call_with_stack_base (with_guile_and_parent, &args);
943 }
944
/* Run FUNC (DATA) in guile mode, using the default dynamic state as
   the parent; see scm_i_with_guile_and_parent.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
951
952 void *
953 scm_without_guile (void *(*func)(void *), void *data)
954 {
955 void *result;
956 scm_i_thread *t = SCM_I_CURRENT_THREAD;
957
958 if (t->guile_mode)
959 {
960 SCM_I_CURRENT_THREAD->guile_mode = 0;
961 result = with_gc_inactive (func, data);
962 SCM_I_CURRENT_THREAD->guile_mode = 1;
963 }
964 else
965 /* Otherwise we're not in guile mode, so nothing to do. */
966 result = func (data);
967
968 return result;
969 }
970
971 \f
972 /*** Thread creation */
973
/* Handshake data between `call-with-new-thread' and the new thread.
   Lives on the creator's stack; MUTEX/COND synchronize the handoff of
   THREAD back to the creator.  */
typedef struct {
  SCM parent;    /* parent dynamic state for the child */
  SCM thunk;     /* thunk to run in the child */
  SCM handler;   /* catch handler, or SCM_UNDEFINED */
  SCM thread;    /* set by the child to its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
982
/* Body of a thread created by `call-with-new-thread', run in guile
   mode.  Reports the new thread object back to the creator, then runs
   the thunk; the result becomes the thread's exit value.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy out what we need: DATA lives on the creator's stack and must
     not be touched after the unlock below.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the creator and wake it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
1004
1005 static void *
1006 launch_thread (void *d)
1007 {
1008 launch_data *data = (launch_data *)d;
1009 scm_i_pthread_detach (scm_i_pthread_self ());
1010 scm_i_with_guile_and_parent (really_launch, d, data->parent);
1011 return NULL;
1012 }
1013
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread. The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler. This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  /* NOTE(review): presumably nudges the collector ahead of creating a
     new thread — confirm intent against upstream history.  */
  GC_collect_a_little ();
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Create the thread, then wait for it to report back its thread
     object through DATA.THREAD (see really_launch).  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
1058
/* Handshake data between scm_spawn_thread and the new thread; the C
   counterpart of launch_data.  Lives on the spawner's stack.  */
typedef struct {
  SCM parent;                   /* parent dynamic state for the child */
  scm_t_catch_body body;        /* C body to run in the child */
  void *body_data;
  scm_t_catch_handler handler;  /* catch handler, or NULL */
  void *handler_data;
  SCM thread;                   /* set by the child to its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
1069
/* Body of a thread created by scm_spawn_thread, run in guile mode.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy out what we need: DATA lives on the spawner's stack and must
     not be touched after the unlock below.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the spawner and wake it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
1094
1095 static void *
1096 spawn_thread (void *d)
1097 {
1098 spawn_data *data = (spawn_data *)d;
1099 scm_i_pthread_detach (scm_i_pthread_self ());
1100 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1101 return NULL;
1102 }
1103
/* Create a new thread running BODY (BODY_DATA); when HANDLER is
   non-NULL it is installed as a catch-all handler around BODY.
   Blocks until the new thread has reported back its thread object,
   which is returned.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Create the thread, then wait for it to report back its thread
     object through DATA.THREAD (see really_spawn).  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
1134
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Expose scm_i_sched_yield's truth value to Scheme as a boolean.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1143
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Release the admin mutex before cancelling: the dying thread's
	 tear-down (do_thread_exit) needs to take it.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1168
1169 SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1170 (SCM thread, SCM proc),
1171 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1172 "This handler will be called when the thread exits.")
1173 #define FUNC_NAME s_scm_set_thread_cleanup_x
1174 {
1175 scm_i_thread *t;
1176
1177 SCM_VALIDATE_THREAD (1, thread);
1178 if (!scm_is_false (proc))
1179 SCM_VALIDATE_THUNK (2, proc);
1180
1181 t = SCM_I_THREAD_DATA (thread);
1182 scm_i_pthread_mutex_lock (&t->admin_mutex);
1183
1184 if (!(t->exited || t->canceled))
1185 t->cleanup_handler = proc;
1186
1187 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1188
1189 return SCM_UNSPECIFIED;
1190 }
1191 #undef FUNC_NAME
1192
1193 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1194 (SCM thread),
1195 "Return the cleanup handler installed for the thread @var{thread}.")
1196 #define FUNC_NAME s_scm_thread_cleanup
1197 {
1198 scm_i_thread *t;
1199 SCM ret;
1200
1201 SCM_VALIDATE_THREAD (1, thread);
1202
1203 t = SCM_I_THREAD_DATA (thread);
1204 scm_i_pthread_mutex_lock (&t->admin_mutex);
1205 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1206 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1207
1208 return ret;
1209 }
1210 #undef FUNC_NAME
1211
1212 SCM scm_join_thread (SCM thread)
1213 {
1214 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1215 }
1216
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* TIMEOUTVAL, when given, is what we return if the join times
     out.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  /* Sleep on the target's join queue until woken, interrupted,
	     or timed out.  */
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      /* Woken normally: only report a result if the target
		 really exited (wakeups can be spurious).  */
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Interrupted: let pending asyncs run, then try again.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited). */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1283
1284 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1285 (SCM obj),
1286 "Return @code{#t} if @var{obj} is a thread.")
1287 #define FUNC_NAME s_scm_thread_p
1288 {
1289 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1290 }
1291 #undef FUNC_NAME
1292
1293
1294 static size_t
1295 fat_mutex_free (SCM mx)
1296 {
1297 fat_mutex *m = SCM_MUTEX_DATA (mx);
1298 scm_i_pthread_mutex_destroy (&m->lock);
1299 return 0;
1300 }
1301
1302 static int
1303 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1304 {
1305 fat_mutex *m = SCM_MUTEX_DATA (mx);
1306 scm_puts ("#<mutex ", port);
1307 scm_uintprint ((scm_t_bits)m, 16, port);
1308 scm_puts (">", port);
1309 return 1;
1310 }
1311
/* Allocate and initialize a fat mutex smob.  RECURSIVE allows repeated
   locking by the owner; UNCHECKED_UNLOCK permits unlocking an unlocked
   mutex; EXTERNAL_UNLOCK permits unlocking from a non-owner thread.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Set `waiting' to a valid SCM before creating the smob, and only
     build the real queue afterwards — presumably so the object is
     never traced in an inconsistent state if make_queue allocates
     (TODO confirm).  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1332
1333 SCM scm_make_mutex (void)
1334 {
1335 return scm_make_mutex_with_flags (SCM_EOL);
1336 }
1337
1338 SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1339 SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1340 SCM_SYMBOL (recursive_sym, "recursive");
1341
1342 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1343 (SCM flags),
1344 "Create a new mutex. ")
1345 #define FUNC_NAME s_scm_make_mutex_with_flags
1346 {
1347 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1348
1349 SCM ptr = flags;
1350 while (! scm_is_null (ptr))
1351 {
1352 SCM flag = SCM_CAR (ptr);
1353 if (scm_is_eq (flag, unchecked_unlock_sym))
1354 unchecked_unlock = 1;
1355 else if (scm_is_eq (flag, allow_external_unlock_sym))
1356 external_unlock = 1;
1357 else if (scm_is_eq (flag, recursive_sym))
1358 recursive = 1;
1359 else
1360 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1361 ptr = SCM_CDR (ptr);
1362 }
1363 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1364 }
1365 #undef FUNC_NAME
1366
1367 SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1368 (void),
1369 "Create a new recursive mutex. ")
1370 #define FUNC_NAME s_scm_make_recursive_mutex
1371 {
1372 return make_fat_mutex (1, 0, 0);
1373 }
1374 #undef FUNC_NAME
1375
1376 SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1377
/* Lock MUTEX on behalf of OWNER (default: the current thread),
   waiting at most until the absolute time *TIMEOUT when non-NULL.
   On return, *RET is 1 if the lock was acquired and 0 otherwise.
   Returns #f on success, or a (key . message) pair that the caller
   should throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unowned: take it.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

	      /* FIXME: The order in which `t->admin_mutex' and
		 `m->lock' are taken differs from that in
		 `on_thread_exit', potentially leading to deadlocks. */
	      scm_i_pthread_mutex_lock (&t->admin_mutex);

	      /* Only keep a weak reference to MUTEX so that it's not
		 retained when not referenced elsewhere (bug #27450).
		 The weak pair itself is eventually removed when MUTEX
		 is unlocked.  Note that `t->mutexes' lists mutexes
		 currently held by T, so it should be small. */
	      t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* The previous owner died without unlocking: steal the
	     mutex, but report the abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Relock by the same owner: only recursive mutexes allow
	     this.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by someone else: fail if the absolute timeout has
	     already passed, otherwise block on the wait queue.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  /* Drop m->lock around SCM_TICK so asyncs can run, then loop
	     and retry under the lock.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1465
1466 SCM scm_lock_mutex (SCM mx)
1467 {
1468 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1469 }
1470
1471 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1472 (SCM m, SCM timeout, SCM owner),
1473 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1474 "blocks until the mutex becomes available. The function returns when "
1475 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1476 "a thread already owns will succeed right away and will not block the "
1477 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1478 #define FUNC_NAME s_scm_lock_mutex_timed
1479 {
1480 SCM exception;
1481 int ret = 0;
1482 scm_t_timespec cwaittime, *waittime = NULL;
1483
1484 SCM_VALIDATE_MUTEX (1, m);
1485
1486 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1487 {
1488 to_timespec (timeout, &cwaittime);
1489 waittime = &cwaittime;
1490 }
1491
1492 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1493 SCM_VALIDATE_THREAD (3, owner);
1494
1495 exception = fat_mutex_lock (m, waittime, owner, &ret);
1496 if (!scm_is_false (exception))
1497 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1498 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1499 }
1500 #undef FUNC_NAME
1501
1502 static void
1503 lock_mutex_return_void (SCM mx)
1504 {
1505 (void) scm_lock_mutex (mx);
1506 }
1507
1508 static void
1509 unlock_mutex_return_void (SCM mx)
1510 {
1511 (void) scm_unlock_mutex (mx);
1512 }
1513
1514 void
1515 scm_dynwind_lock_mutex (SCM mutex)
1516 {
1517 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
1518 SCM_F_WIND_EXPLICITLY);
1519 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
1520 SCM_F_WIND_EXPLICITLY);
1521 }
1522
1523 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1524 (SCM mutex),
1525 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1526 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1527 #define FUNC_NAME s_scm_try_mutex
1528 {
1529 SCM exception;
1530 int ret = 0;
1531 scm_t_timespec cwaittime, *waittime = NULL;
1532
1533 SCM_VALIDATE_MUTEX (1, mutex);
1534
1535 to_timespec (scm_from_int(0), &cwaittime);
1536 waittime = &cwaittime;
1537
1538 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1539 if (!scm_is_false (exception))
1540 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1541 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1542 }
1543 #undef FUNC_NAME
1544
1545 /*** Fat condition variables */
1546
/* A fat condition variable, implemented on top of the
   block_self/unblock_from_queue machinery.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1554
/* Unlock MUTEX, which is normally expected to be held by the calling
   thread.  When COND is bound, atomically wait on it after releasing
   the mutex, until the absolute time *WAITTIME (NULL = wait forever);
   with RELOCK nonzero the mutex is re-acquired before returning from
   the wait.  Returns 1 on success, 0 on timeout.  Improper unlocks
   signal an error unless the mutex was created with the corresponding
   relaxation flag.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
	{
	  /* Unlocking an unlocked mutex: an error unless the
	     `unchecked-unlock' flag was given.  */
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = t->handle;
	}
      else if (!m->allow_external_unlock)
	{
	  /* Unlocking on behalf of another thread requires the
	     `allow-external-unlock' flag.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  /* Drop one lock level; at level 0, hand the mutex over to
	     the next waiter (or #f) and remove it from our held-mutex
	     list.  */
	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    {
	      /* Change the owner of MUTEX.  */
	      t->mutexes = scm_delq_x (mutex, t->mutexes);
	      m->owner = unblock_from_queue (m->waiting);
	    }

	  /* Defer asyncs while blocked on the condition.  */
	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      /* Signalled.  */
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: run pending asyncs, then loop and wait again.  */
	  t->block_asyncs--;
	  scm_async_click ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock, no condition variable involved.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	{
	  /* Change the owner of MUTEX.  */
	  t->mutexes = scm_delq_x (mutex, t->mutexes);
	  m->owner = unblock_from_queue (m->waiting);
	}

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1657
1658 SCM scm_unlock_mutex (SCM mx)
1659 {
1660 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1661 }
1662
1663 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1664 (SCM mx, SCM cond, SCM timeout),
1665 "Unlocks @var{mutex} if the calling thread owns the lock on "
1666 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1667 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1668 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1669 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1670 "with a call to @code{unlock-mutex}. Only the last call to "
1671 "@code{unlock-mutex} will actually unlock the mutex. ")
1672 #define FUNC_NAME s_scm_unlock_mutex_timed
1673 {
1674 scm_t_timespec cwaittime, *waittime = NULL;
1675
1676 SCM_VALIDATE_MUTEX (1, mx);
1677 if (! (SCM_UNBNDP (cond)))
1678 {
1679 SCM_VALIDATE_CONDVAR (2, cond);
1680
1681 if (! (SCM_UNBNDP (timeout)))
1682 {
1683 to_timespec (timeout, &cwaittime);
1684 waittime = &cwaittime;
1685 }
1686 }
1687
1688 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1689 }
1690 #undef FUNC_NAME
1691
1692 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1693 (SCM obj),
1694 "Return @code{#t} if @var{obj} is a mutex.")
1695 #define FUNC_NAME s_scm_mutex_p
1696 {
1697 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1698 }
1699 #undef FUNC_NAME
1700
1701 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1702 (SCM mx),
1703 "Return the thread owning @var{mx}, or @code{#f}.")
1704 #define FUNC_NAME s_scm_mutex_owner
1705 {
1706 SCM owner;
1707 fat_mutex *m = NULL;
1708
1709 SCM_VALIDATE_MUTEX (1, mx);
1710 m = SCM_MUTEX_DATA (mx);
1711 scm_i_pthread_mutex_lock (&m->lock);
1712 owner = m->owner;
1713 scm_i_pthread_mutex_unlock (&m->lock);
1714
1715 return owner;
1716 }
1717 #undef FUNC_NAME
1718
1719 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1720 (SCM mx),
1721 "Return the lock level of mutex @var{mx}.")
1722 #define FUNC_NAME s_scm_mutex_level
1723 {
1724 SCM_VALIDATE_MUTEX (1, mx);
1725 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1726 }
1727 #undef FUNC_NAME
1728
1729 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1730 (SCM mx),
1731 "Returns @code{#t} if the mutex @var{mx} is locked.")
1732 #define FUNC_NAME s_scm_mutex_locked_p
1733 {
1734 SCM_VALIDATE_MUTEX (1, mx);
1735 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1736 }
1737 #undef FUNC_NAME
1738
1739 static int
1740 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1741 {
1742 fat_cond *c = SCM_CONDVAR_DATA (cv);
1743 scm_puts ("#<condition-variable ", port);
1744 scm_uintprint ((scm_t_bits)c, 16, port);
1745 scm_puts (">", port);
1746 return 1;
1747 }
1748
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Initialize `waiting' to a valid SCM before the smob is created,
     and only build the real queue afterwards — presumably so the
     object is never traced in an inconsistent state if make_queue
     allocates (TODO confirm; make_fat_mutex follows the same
     pattern).  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1764
1765 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1766 (SCM cv, SCM mx, SCM t),
1767 "Wait until @var{cond-var} has been signalled. While waiting, "
1768 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1769 "is locked again when this function returns. When @var{time} is given, "
1770 "it specifies a point in time where the waiting should be aborted. It "
1771 "can be either a integer as returned by @code{current-time} or a pair "
1772 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1773 "mutex is locked and @code{#f} is returned. When the condition "
1774 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1775 "is returned. ")
1776 #define FUNC_NAME s_scm_timed_wait_condition_variable
1777 {
1778 scm_t_timespec waittime, *waitptr = NULL;
1779
1780 SCM_VALIDATE_CONDVAR (1, cv);
1781 SCM_VALIDATE_MUTEX (2, mx);
1782
1783 if (!SCM_UNBNDP (t))
1784 {
1785 to_timespec (t, &waittime);
1786 waitptr = &waittime;
1787 }
1788
1789 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1790 }
1791 #undef FUNC_NAME
1792
1793 static void
1794 fat_cond_signal (fat_cond *c)
1795 {
1796 unblock_from_queue (c->waiting);
1797 }
1798
1799 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1800 (SCM cv),
1801 "Wake up one thread that is waiting for @var{cv}")
1802 #define FUNC_NAME s_scm_signal_condition_variable
1803 {
1804 SCM_VALIDATE_CONDVAR (1, cv);
1805 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1806 return SCM_BOOL_T;
1807 }
1808 #undef FUNC_NAME
1809
1810 static void
1811 fat_cond_broadcast (fat_cond *c)
1812 {
1813 while (scm_is_true (unblock_from_queue (c->waiting)))
1814 ;
1815 }
1816
1817 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1818 (SCM cv),
1819 "Wake up all threads that are waiting for @var{cv}. ")
1820 #define FUNC_NAME s_scm_broadcast_condition_variable
1821 {
1822 SCM_VALIDATE_CONDVAR (1, cv);
1823 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1824 return SCM_BOOL_T;
1825 }
1826 #undef FUNC_NAME
1827
1828 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1829 (SCM obj),
1830 "Return @code{#t} if @var{obj} is a condition variable.")
1831 #define FUNC_NAME s_scm_condition_variable_p
1832 {
1833 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1834 }
1835 #undef FUNC_NAME
1836
1837
1838 \f
1839 /*** Select */
1840
/* Argument/result record for the raw select(2) call, which runs
   outside guile mode via scm_without_guile (see do_std_select).  */
struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  int result;        /* select's return value.  */
  int errno_value;   /* errno captured right after the call.  */
};
1852
1853 static void *
1854 do_std_select (void *args)
1855 {
1856 struct select_args *select_args;
1857
1858 select_args = (struct select_args *) args;
1859
1860 select_args->result =
1861 select (select_args->nfds,
1862 select_args->read_fds, select_args->write_fds,
1863 select_args->except_fds, select_args->timeout);
1864 select_args->errno_value = errno;
1865
1866 return NULL;
1867 }
1868
/* A select(2) variant that is safe to call from guile mode: the
   actual select runs outside guile mode (cooperating with the GC),
   and the per-thread sleep pipe is watched so the thread can be woken
   early — such a wakeup is reported to the caller as EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set, to watch the wakeup pipe.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Announce that we are about to sleep on the pipe; retry while
     something is pending.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  /* Add the read end of the sleep pipe to the watched set.  */
  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* We were poked through the pipe: consume the byte and hide the
	 wakeup fd from the caller.  If it was the only ready fd,
	 report the whole call as interrupted.  */
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1926
1927 /* Convenience API for blocking while in guile mode. */
1928
1929 #if SCM_USE_PTHREAD_THREADS
1930
1931 /* It seems reasonable to not run procedures related to mutex and condition
1932 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1933 without it, and (ii) the only potential gain would be GC latency. See
1934 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1935 for a discussion of the pros and cons. */
1936
1937 int
1938 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1939 {
1940 int res = scm_i_pthread_mutex_lock (mutex);
1941 return res;
1942 }
1943
1944 static void
1945 do_unlock (void *data)
1946 {
1947 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1948 }
1949
1950 void
1951 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1952 {
1953 scm_i_scm_pthread_mutex_lock (mutex);
1954 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
1955 }
1956
1957 int
1958 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1959 {
1960 int res;
1961 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1962
1963 t->held_mutex = mutex;
1964 res = scm_i_pthread_cond_wait (cond, mutex);
1965 t->held_mutex = NULL;
1966
1967 return res;
1968 }
1969
1970 int
1971 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1972 scm_i_pthread_mutex_t *mutex,
1973 const scm_t_timespec *wt)
1974 {
1975 int res;
1976 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1977
1978 t->held_mutex = mutex;
1979 res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1980 t->held_mutex = NULL;
1981
1982 return res;
1983 }
1984
1985 #endif
1986
1987 unsigned long
1988 scm_std_usleep (unsigned long usecs)
1989 {
1990 struct timeval tv;
1991 tv.tv_usec = usecs % 1000000;
1992 tv.tv_sec = usecs / 1000000;
1993 scm_std_select (0, NULL, NULL, NULL, &tv);
1994 return tv.tv_sec * 1000000 + tv.tv_usec;
1995 }
1996
1997 unsigned int
1998 scm_std_sleep (unsigned int secs)
1999 {
2000 struct timeval tv;
2001 tv.tv_usec = 0;
2002 tv.tv_sec = secs;
2003 scm_std_select (0, NULL, NULL, NULL, &tv);
2004 return tv.tv_sec;
2005 }
2006
2007 /*** Misc */
2008
2009 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
2010 (void),
2011 "Return the thread that called this function.")
2012 #define FUNC_NAME s_scm_current_thread
2013 {
2014 return SCM_I_CURRENT_THREAD->handle;
2015 }
2016 #undef FUNC_NAME
2017
2018 static SCM
2019 scm_c_make_list (size_t n, SCM fill)
2020 {
2021 SCM res = SCM_EOL;
2022 while (n-- > 0)
2023 res = scm_cons (fill, res);
2024 return res;
2025 }
2026
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  /* Fill the pre-allocated list in place; L always points at the
     location of the cell to fill next.  The internal signal-delivery
     thread is not exposed to Scheme.  */
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Truncate the list: it may have been over-allocated if the
     delivery thread was skipped or threads went away after
     thread_count was sampled.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
2055
2056 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
2057 (SCM thread),
2058 "Return @code{#t} iff @var{thread} has exited.\n")
2059 #define FUNC_NAME s_scm_thread_exited_p
2060 {
2061 return scm_from_bool (scm_c_thread_exited_p (thread));
2062 }
2063 #undef FUNC_NAME
2064
2065 int
2066 scm_c_thread_exited_p (SCM thread)
2067 #define FUNC_NAME s_scm_thread_exited_p
2068 {
2069 scm_i_thread *t;
2070 SCM_VALIDATE_THREAD (1, thread);
2071 t = SCM_I_THREAD_DATA (thread);
2072 return t->exited;
2073 }
2074 #undef FUNC_NAME
2075
2076 SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
2077 (void),
2078 "Return the total number of processors of the machine, which\n"
2079 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2080 "thread execution unit, which can be either:\n\n"
2081 "@itemize\n"
2082 "@item an execution core in a (possibly multi-core) chip, in a\n"
2083 " (possibly multi- chip) module, in a single computer, or\n"
2084 "@item a thread execution unit inside a core in the case of\n"
2085 " @dfn{hyper-threaded} CPUs.\n"
2086 "@end itemize\n\n"
2087 "Which of the two definitions is used, is unspecified.\n")
2088 #define FUNC_NAME s_scm_total_processor_count
2089 {
2090 return scm_from_ulong (num_processors (NPROC_ALL));
2091 }
2092 #undef FUNC_NAME
2093
2094 SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
2095 (void),
2096 "Like @code{total-processor-count}, but return the number of\n"
2097 "processors available to the current process. See\n"
2098 "@code{setaffinity} and @code{getaffinity} for more\n"
2099 "information.\n")
2100 #define FUNC_NAME s_scm_current_processor_count
2101 {
2102 return scm_from_ulong (num_processors (NPROC_CURRENT));
2103 }
2104 #undef FUNC_NAME
2105
2106
2107 \f
2108
2109 static scm_i_pthread_cond_t wake_up_cond;
2110 static int threads_initialized_p = 0;
2111
2112
2113 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2114 */
2115 scm_i_pthread_mutex_t scm_i_critical_section_mutex;
2116
2117 static SCM dynwind_critical_section_mutex;
2118
2119 void
2120 scm_dynwind_critical_section (SCM mutex)
2121 {
2122 if (scm_is_false (mutex))
2123 mutex = dynwind_critical_section_mutex;
2124 scm_dynwind_lock_mutex (mutex);
2125 scm_dynwind_block_asyncs ();
2126 }
2127
2128 /*** Initialization */
2129
2130 scm_i_pthread_mutex_t scm_i_misc_mutex;
2131
2132 #if SCM_USE_PTHREAD_THREADS
2133 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2134 #endif
2135
/* Very early thread-system initialization; BASE is the stack base of
   the main thread, handed on to the GC-side bookkeeping.  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* The critical-section mutex is recursive — NOTE(review):
     presumably because critical sections may nest; confirm against
     SCM_CRITICAL_SECTION_START users.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  /* Register the main thread with the thread system.  */
  guilify_self_1 ((struct GC_stack_base *) base);
}
2152
2153 scm_t_bits scm_tc16_thread;
2154 scm_t_bits scm_tc16_mutex;
2155 scm_t_bits scm_tc16_condvar;
2156
/* Register the smob types backing threads, mutexes and condition
   variables, and finish initializing the main thread.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* No default dynamic state yet; it is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  /* Must come after the mutex smob type exists.  */
  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2177
2178 void
2179 scm_init_threads_default_dynamic_state ()
2180 {
2181 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2182 scm_i_default_dynamic_state = state;
2183 }
2184
/* Register all SCM_DEFINE'd procedures of this file; threads.x is
   generated from this source by the snarfer.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2190
2191 \f
2192 /* IA64-specific things. */
2193
#ifdef __ia64__
/* IA64 keeps a second stack, the register backing store (RSE), which
   must be scanned too; these per-OS helpers locate it.  */
# ifdef __hpux
# include <sys/param.h>
# include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  /* Walk the process's VM regions until the RSE stack is found.  */
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  /* Extract the ar.bsp register from a ucontext.  */
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
# include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  /* glibc exports the backing-store base directly.  */
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
# include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  /* Fixed address on FreeBSD/ia64.  */
  return (void *)0x8000000000000000;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  /* bspstore plus the dirty registers not yet flushed to memory.  */
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
                  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */
2247
2248
2249 /*
2250 Local Variables:
2251 c-file-style: "gnu"
2252 End:
2253 */