Rely on Gnulib for 'select', 'lstat', and 'mkstemp'.
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
3 * Free Software Foundation, Inc.
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 */
20
21
22 \f
23 #ifdef HAVE_CONFIG_H
24 # include <config.h>
25 #endif
26
27 #include "libguile/bdw-gc.h"
28 #include "libguile/_scm.h"
29
30 #include <stdlib.h>
31 #if HAVE_UNISTD_H
32 #include <unistd.h>
33 #endif
34 #include <stdio.h>
35
36 #ifdef HAVE_STRING_H
37 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
38 #endif
39
40 #if HAVE_SYS_TIME_H
41 #include <sys/time.h>
42 #endif
43
44 #if HAVE_PTHREAD_NP_H
45 # include <pthread_np.h>
46 #endif
47
48 #include <sys/select.h>
49
50 #include <assert.h>
51 #include <fcntl.h>
52 #include <nproc.h>
53
54 #include "libguile/validate.h"
55 #include "libguile/root.h"
56 #include "libguile/eval.h"
57 #include "libguile/async.h"
58 #include "libguile/ports.h"
59 #include "libguile/threads.h"
60 #include "libguile/dynwind.h"
61 #include "libguile/iselect.h"
62 #include "libguile/fluids.h"
63 #include "libguile/continuations.h"
64 #include "libguile/gc.h"
65 #include "libguile/init.h"
66 #include "libguile/scmsigs.h"
67 #include "libguile/strings.h"
68 #include "libguile/weaks.h"
69
70 #include <full-read.h>
71
72
73 \f
74
75 /* First some libgc shims. */
76
77 /* Make sure GC_fn_type is defined; it is missing from the public
78 headers of GC 7.1 and earlier. */
#ifndef HAVE_GC_FN_TYPE
/* Signature of callbacks passed to libgc entry points such as
   GC_do_blocking and GC_call_with_gc_active.  */
typedef void * (* GC_fn_type) (void *);
#endif


/* Return codes for the GC_get_stack_base shim below; libgc headers
   older than 7.1 do not define them.  */
#ifndef GC_SUCCESS
#define GC_SUCCESS 0
#endif

#ifndef GC_UNIMPLEMENTED
#define GC_UNIMPLEMENTED 3
#endif
91
92 /* Likewise struct GC_stack_base is missing before 7.1. */
93 #ifndef HAVE_GC_STACK_BASE
struct GC_stack_base {
  void * mem_base; /* Base of memory stack. */
#ifdef __ia64__
  void * reg_base; /* Base of separate register stack. */
#endif
};

/* Stub for libgc < 7.1: a foreign thread cannot be registered with the
   collector, so report GC_UNIMPLEMENTED.  STACK_BASE is unused.  */
static int
GC_register_my_thread (struct GC_stack_base *stack_base)
{
  return GC_UNIMPLEMENTED;
}
106
/* Stub for libgc < 7.1: there is nothing to unregister.  Declared with
   an explicit (void) parameter list instead of the pre-C23 "unspecified
   arguments" form `()'.  */
static void
GC_unregister_my_thread (void)
{
}
111
112 #if !SCM_USE_PTHREAD_THREADS
113 /* No threads; we can just use GC_stackbottom. */
114 static void *
115 get_thread_stack_base ()
116 {
117 return GC_stackbottom;
118 }
119
120 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
121 && defined PTHREAD_ATTR_GETSTACK_WORKS
122 /* This method for GNU/Linux and perhaps some other systems.
123 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
124 available on them. */
125 static void *
126 get_thread_stack_base ()
127 {
128 pthread_attr_t attr;
129 void *start, *end;
130 size_t size;
131
132 pthread_getattr_np (pthread_self (), &attr);
133 pthread_attr_getstack (&attr, &start, &size);
134 end = (char *)start + size;
135
136 #if SCM_STACK_GROWS_UP
137 return start;
138 #else
139 return end;
140 #endif
141 }
142
143 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
144 /* This method for MacOS X.
145 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
146 but as of 2006 there's nothing obvious at apple.com. */
147 static void *
148 get_thread_stack_base ()
149 {
150 return pthread_get_stackaddr_np (pthread_self ());
151 }
152
153 #elif HAVE_PTHREAD_ATTR_GET_NP
154 /* This one is for FreeBSD 9. */
155 static void *
156 get_thread_stack_base ()
157 {
158 pthread_attr_t attr;
159 void *start, *end;
160 size_t size;
161
162 pthread_attr_init (&attr);
163 pthread_attr_get_np (pthread_self (), &attr);
164 pthread_attr_getstack (&attr, &start, &size);
165 pthread_attr_destroy (&attr);
166
167 end = (char *)start + size;
168
169 #if SCM_STACK_GROWS_UP
170 return start;
171 #else
172 return end;
173 #endif
174 }
175
176 #else
177 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
178 #endif
179
/* Shim for libgc < 7.1: fill in *STACK_BASE for the calling thread
   using the platform-specific helper above.  Always returns
   GC_SUCCESS.  */
static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}
202
/* Shim for libgc < 7.1: call FN with an approximation of this thread's
   stack base, taken as the address of a local variable (good enough
   since this frame is at the current top of the stack).  */
static void *
GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
{
  struct GC_stack_base stack_base;

  stack_base.mem_base = (void*)&stack_base;
#ifdef __ia64__
  /* FIXME: Untested. */
  {
    ucontext_t ctx;
    getcontext (&ctx);
    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
  }
#endif

  return fn (&stack_base, arg);
}
220 #endif /* HAVE_GC_STACK_BASE */
221
222
223 /* Now define with_gc_active and with_gc_inactive. */
224
#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer).  */

/* Run FUNC (DATA) via GC_do_blocking, so FUNC may block without
   stalling the collector.  */
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

/* Run FUNC (DATA) via GC_call_with_gc_active, re-entering normal
   collector operation inside a with_gc_inactive region.  */
static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active.  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */
261
262
263 \f
264 static void
265 to_timespec (SCM t, scm_t_timespec *waittime)
266 {
267 if (scm_is_pair (t))
268 {
269 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
270 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
271 }
272 else
273 {
274 double time = scm_to_double (t);
275 double sec = scm_c_truncate (time);
276
277 waittime->tv_sec = (long) sec;
278 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
279 }
280 }
281
282 \f
283 /*** Queues */
284
285 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
286 the risk of false references leading to unbounded retained space as
287 described in "Bounding Space Usage of Conservative Garbage Collectors",
288 H.J. Boehm, 2001. */
289
290 /* Make an empty queue data structure.
291 */
292 static SCM
293 make_queue ()
294 {
295 return scm_cons (SCM_EOL, SCM_EOL);
296 }
297
298 /* Put T at the back of Q and return a handle that can be used with
299 remqueue to remove T from Q again.
300 */
static SCM
enqueue (SCM q, SCM t)
{
  /* The new cell is allocated outside the critical section.  */
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Queue empty: the new cell becomes the head of the list.  */
    SCM_SETCDR (q, c);
  else
    /* Splice the new cell after the current tail, cached in the car.  */
    SCM_SETCDR (SCM_CAR (q), c);
  /* The car always tracks the tail cell, giving O(1) appends.  */
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}
314
315 /* Remove the element that the handle C refers to from the queue Q. C
316 must have been returned from a call to enqueue. The return value
317 is zero when the element referred to by C has already been removed.
318 Otherwise, 1 is returned.
319 */
static int
remqueue (SCM q, SCM c)
{
  /* Walk the list in the cdr of Q, keeping the previous pair so the
     cell C can be unlinked when found.  */
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
	{
	  /* If C was the tail, the tail cache in the car must be moved
	     back to PREV -- or cleared when the queue becomes empty.  */
	  if (scm_is_eq (c, SCM_CAR (q)))
	    SCM_SETCAR (q, scm_is_eq (prev, q) ? SCM_EOL : prev);
	  SCM_SETCDR (prev, SCM_CDR (c));

	  /* GC-robust */
	  SCM_SETCDR (c, SCM_EOL);

	  SCM_CRITICAL_SECTION_END;
	  return 1;
	}
      prev = p;
    }
  /* C was not found; it had already been removed.  */
  SCM_CRITICAL_SECTION_END;
  return 0;
}
344
345 /* Remove the front-most element from the queue Q and return it.
346 Return SCM_BOOL_F when Q is empty.
347 */
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      /* Queue is empty.  */
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      /* Pop the head cell; if that empties the queue, also clear the
	 tail cache in the car.  */
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
372
373 /*** Thread smob routines */
374
375
376 static int
377 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
378 {
379 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
380 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
381 the struct case, hence we go via a union, and extract according to the
382 size of pthread_t. */
383 union {
384 scm_i_pthread_t p;
385 unsigned short us;
386 unsigned int ui;
387 unsigned long ul;
388 scm_t_uintmax um;
389 } u;
390 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
391 scm_i_pthread_t p = t->pthread;
392 scm_t_uintmax id;
393 u.p = p;
394 if (sizeof (p) == sizeof (unsigned short))
395 id = u.us;
396 else if (sizeof (p) == sizeof (unsigned int))
397 id = u.ui;
398 else if (sizeof (p) == sizeof (unsigned long))
399 id = u.ul;
400 else
401 id = u.um;
402
403 scm_puts ("#<thread ", port);
404 scm_uintprint (id, 10, port);
405 scm_puts (" (", port);
406 scm_uintprint ((scm_t_bits)t, 16, port);
407 scm_puts (")>", port);
408 return 1;
409 }
410
411 \f
412 /*** Blocking on queues. */
413
414 /* See also scm_i_queue_async_cell for how such a block is
415 interrputed.
416 */
417
418 /* Put the current thread on QUEUE and go to sleep, waiting for it to
419 be woken up by a call to 'unblock_from_queue', or to be
420 interrupted. Upon return of this function, the current thread is
421 no longer on QUEUE, even when the sleep has been interrupted.
422
423 The caller of block_self must hold MUTEX. It will be atomically
424 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
425
426 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
427 as MUTEX is needed.
428
429 When WAITTIME is not NULL, the sleep will be aborted at that time.
430
431 The return value of block_self is an errno value. It will be zero
432 when the sleep has been successfully completed by a call to
433 unblock_from_queue, EINTR when it has been interrupted by the
434 delivery of a system async, and ETIMEDOUT when the timeout has
435 expired.
436
437 The system asyncs themselves are not executed by block_self.
438 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* A nonzero result from scm_i_setup_sleep means the sleep should not
     proceed (see scm_i_queue_async_cell); report an interruption.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Asyncs must not run while we hold the queue handle.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
470
471 /* Wake up the first thread on QUEUE, if any. The awoken thread is
472 returned, or #f if the queue was empty.
473 */
474 static SCM
475 unblock_from_queue (SCM queue)
476 {
477 SCM thread = dequeue (queue);
478 if (scm_is_true (thread))
479 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
480 return thread;
481 }
482
483 \f
484 /* Getting into and out of guile mode.
485 */
486
487 /* Key used to attach a cleanup handler to a given thread. Also, if
488 thread-local storage is unavailable, this key is used to retrieve the
489 current thread with `pthread_getspecific ()'. */
490 scm_i_pthread_key_t scm_i_thread_key;
491
492
493 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
494
495 /* When thread-local storage (TLS) is available, a pointer to the
496 current-thread object is kept in TLS. Note that storing the thread-object
497 itself in TLS (rather than a pointer to some malloc'd memory) is not
498 possible since thread objects may live longer than the actual thread they
499 represent. */
500 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
501
502 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
503
504
/* Protects ALL_THREADS and THREAD_COUNT below.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Singly-linked list (via next_thread) of every thread known to Guile.  */
static scm_i_thread *all_threads = NULL;
/* Number of entries in ALL_THREADS.  */
static int thread_count;

/* Dynamic state used as parent when a thread enters Guile without an
   explicit parent (see scm_with_guile).  */
static SCM scm_i_default_dynamic_state;
510
511 /* Run when a fluid is collected. */
512 void
513 scm_i_reset_fluid (size_t n)
514 {
515 scm_i_thread *t;
516
517 scm_i_pthread_mutex_lock (&thread_admin_mutex);
518 for (t = all_threads; t; t = t->next_thread)
519 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
520 {
521 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
522
523 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
524 SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
525 }
526 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
527 }
528
529 /* Perform first stage of thread initialisation, in non-guile mode.
530 */
/* First stage of thread initialisation: build the scm_i_thread record
   for the calling thread and publish it, all without allocating any
   Scheme objects.  Runs in non-guile mode.  */
static void
guilify_self_1 (struct GC_stack_base *base)
{
  /* T is first filled in on the stack, then copied to the GC heap (see
     "the switcheroo" below).  */
  scm_i_thread t;

  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
     before allocating anything in this thread, because allocation could
     cause GC to run, and GC could cause finalizers, which could invoke
     Scheme functions, which need the current thread to be set.  */

  t.pthread = scm_i_pthread_self ();
  t.handle = SCM_BOOL_F;
  t.result = SCM_BOOL_F;
  t.cleanup_handler = SCM_BOOL_F;
  t.mutexes = SCM_EOL;
  t.held_mutex = NULL;
  t.join_queue = SCM_EOL;
  t.dynamic_state = SCM_BOOL_F;
  t.dynwinds = SCM_EOL;
  t.active_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 finishes.  */
  t.block_asyncs = 1;
  t.pending_asyncs = 1;
  t.critical_section_level = 0;
  t.base = base->mem_base;
#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
#endif
  t.continuation_root = SCM_EOL;
  t.continuation_base = t.base;
  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
  t.sleep_mutex = NULL;
  t.sleep_object = SCM_BOOL_F;
  t.sleep_fd = -1;

  if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
  t.current_mark_stack_ptr = NULL;
  t.current_mark_stack_limit = NULL;
  t.canceled = 0;
  t.exited = 0;
  t.guile_mode = 0;

  /* The switcheroo.  Move T into GC-managed memory and publish it;
     collection is disabled for the window between allocating the copy
     and registering it as the current thread.  */
  {
    scm_i_thread *t_ptr = &t;

    GC_disable ();
    t_ptr = GC_malloc (sizeof (scm_i_thread));
    memcpy (t_ptr, &t, sizeof t);

    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
    /* Cache the current thread in TLS for faster lookup.  */
    scm_i_current_thread = t_ptr;
#endif

    /* Link the new thread into the global list.  */
    scm_i_pthread_mutex_lock (&thread_admin_mutex);
    t_ptr->next_thread = all_threads;
    all_threads = t_ptr;
    thread_count++;
    scm_i_pthread_mutex_unlock (&thread_admin_mutex);

    GC_enable ();
  }
}
602
603 /* Perform second stage of thread initialisation, in guile mode.
604 */
/* Second stage of thread initialisation: allocate the Scheme-level
   thread objects.  Runs in guile mode, so allocation is allowed.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  /* PARENT is the dynamic state to inherit from, or #f for the initial
     dynamic state.  */
  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Allow asyncs now that initialisation is complete (guilify_self_1
     started with them blocked).  */
  t->block_asyncs = 0;

  /* See note in finalizers.c:queue_finalizer_async().  */
  GC_invoke_finalizers ();
}
629
630 \f
631 /*** Fat mutexes */
632
633 /* We implement our own mutex type since we want them to be 'fair', we
634 want to do fancy things while waiting for them (like running
635 asyncs) and we might want to add things that are nice for
636 debugging.
637 */
638
/* The representation behind the mutex smob (see SCM_MUTEX_DATA).  */
typedef struct {
  scm_i_pthread_mutex_t lock;  /* protects all the fields below */
  SCM owner;                   /* thread handle of the owner, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
654
655 static SCM
656 call_cleanup (void *data)
657 {
658 SCM *proc_p = data;
659 return scm_call_0 (*proc_p);
660 }
661
662 /* Perform thread tear-down, in guile mode.
663 */
/* Perform thread tear-down, in guile mode: run the cleanup handler,
   wake joiners, and release any fat mutexes still attributed to the
   dying thread.  V is the thread's scm_i_thread pointer.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  This needs to be done in Guile mode.  */
  scm_i_ensure_signal_delivery_thread ();

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice; any errors it
	 raises are reported but do not escape.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      call_cleanup, &ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join on this thread.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* T->mutexes is a weak list; entries whose mutex was collected show
     up as SCM_UNDEFINED and are skipped.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      if (!SCM_UNBNDP (mutex))
	{
	  fat_mutex *m = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Check whether T owns MUTEX.  This is usually the case, unless
	     T abandoned MUTEX; in that case, T is no longer its owner (see
	     `fat_mutex_lock') but MUTEX is still in `t->mutexes'.  */
	  if (scm_is_eq (m->owner, t->handle))
	    unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
717
/* Re-enter guile mode from the pthread key destructor to run
   do_thread_exit.  SB is the current stack base; V is the dying
   thread's scm_i_thread pointer.  */
static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered.  */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}
728
/* Destructor attached to scm_i_thread_key (see init_thread_key): runs
   in the dying thread when it terminates, whether by normal exit or
   cancellation.  V is the thread's scm_i_thread pointer.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If we were canceled, we were unable to clear `t->guile_mode', so do
     it here.  */
  t->guile_mode = 0;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}
787
/* Ensures the thread key below is created exactly once.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create scm_i_thread_key, installing on_thread_exit as the key's
   destructor so per-thread cleanup runs when a thread terminates.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
795
796 /* Perform any initializations necessary to make the current thread
797 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
798 if necessary.
799
800 BASE is the stack base to use with GC.
801
802 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
803 which case the default dynamic state is used.
804
805 Returns zero when the thread was known to guile already; otherwise
806 return 1.
807
808 Note that it could be the case that the thread was known
809 to Guile, but not in guile mode (because we are within a
810 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
811 be sure. New threads are put into guile mode implicitly. */
812
static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);

#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
	  /* Allow other threads to come in later.  */
	  GC_allow_register_threads ();
#endif

	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

	  /* Register this thread with libgc.  */
#if SCM_USE_PTHREAD_THREADS
	  GC_register_my_thread (base);
#endif

	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
862
863 void
864 scm_init_guile ()
865 {
866 struct GC_stack_base stack_base;
867
868 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
869 scm_i_init_thread_for_guile (&stack_base,
870 scm_i_default_dynamic_state);
871 else
872 {
873 fprintf (stderr, "Failed to get stack base for current thread.\n");
874 exit (EXIT_FAILURE);
875 }
876 }
877
/* Arguments threaded through GC_call_with_stack_base down to
   with_guile_and_parent.  */
struct with_guile_args
{
  GC_fn_type func;  /* user function to run in guile mode */
  void *data;       /* opaque argument for FUNC */
  SCM parent;       /* parent dynamic state for a new thread */
};
884
885 static void *
886 with_guile_trampoline (void *data)
887 {
888 struct with_guile_args *args = data;
889
890 return scm_c_with_continuation_barrier (args->func, args->data);
891 }
892
/* Run ARGS->func in guile mode, guilifying the thread first when
   necessary.  BASE is the stack base of the current call.  */
static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
	 scm_with_guile, or because we are within a scm_without_guile.

	 This call to scm_with_guile() could happen from anywhere on the
	 stack, and in particular lower on the stack than when it was
	 when this thread was first guilified.  Thus, `base' must be
	 updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
	t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
	t->base = SCM_STACK_PTR (base->mem_base);
#endif

      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
941
942 static void *
943 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
944 {
945 struct with_guile_args args;
946
947 args.func = func;
948 args.data = data;
949 args.parent = parent;
950
951 return GC_call_with_stack_base (with_guile_and_parent, &args);
952 }
953
954 void *
955 scm_with_guile (void *(*func)(void *), void *data)
956 {
957 return scm_i_with_guile_and_parent (func, data,
958 scm_i_default_dynamic_state);
959 }
960
961 void *
962 scm_without_guile (void *(*func)(void *), void *data)
963 {
964 void *result;
965 scm_i_thread *t = SCM_I_CURRENT_THREAD;
966
967 if (t->guile_mode)
968 {
969 SCM_I_CURRENT_THREAD->guile_mode = 0;
970 result = with_gc_inactive (func, data);
971 SCM_I_CURRENT_THREAD->guile_mode = 1;
972 }
973 else
974 /* Otherwise we're not in guile mode, so nothing to do. */
975 result = func (data);
976
977 return result;
978 }
979
980 \f
981 /*** Thread creation */
982
/* Arguments and rendezvous state passed from scm_call_with_new_thread
   to the new thread.  Lives on the creating thread's stack, so the
   child must copy what it needs before signalling COND.  */
typedef struct {
  SCM parent;   /* dynamic state to inherit */
  SCM thunk;    /* procedure to run in the new thread */
  SCM handler;  /* catch handler, or SCM_UNDEFINED */
  SCM thread;   /* set by the child once it is running */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
991
/* Body of a thread created by scm_call_with_new_thread; runs in guile
   mode in the new thread.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy the thunk and handler out of DATA before signalling: DATA is
     on the creating thread's stack and goes away once the creator is
     released from its cond wait.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Rendezvous: publish our thread object and wake the creator.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
1013
1014 static void *
1015 launch_thread (void *d)
1016 {
1017 launch_data *data = (launch_data *)d;
1018 scm_i_pthread_detach (scm_i_pthread_self ());
1019 scm_i_with_guile_and_parent (really_launch, d, data->parent);
1020 return NULL;
1021 }
1022
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread. The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler. This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  /* DATA lives on this stack; the child copies out of it before we
     return (see really_launch).  */
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  GC_collect_a_little ();
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  /* Wait until the child has published its thread object.  */
  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
1070
/* Arguments and rendezvous state passed from scm_spawn_thread to the
   new thread; the C-level analogue of launch_data above.  Lives on the
   spawning thread's stack.  */
typedef struct {
  SCM parent;                  /* dynamic state to inherit */
  scm_t_catch_body body;       /* C function to run in the new thread */
  void *body_data;
  scm_t_catch_handler handler; /* catch handler, or NULL */
  void *handler_data;
  SCM thread;                  /* set by the child once it is running */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
1081
/* Body of a thread created by scm_spawn_thread; runs in guile mode in
   the new thread.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy everything out of DATA before signalling: DATA is on the
     spawning thread's stack and goes away once the spawner is released
     from its cond wait.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Rendezvous: publish our thread object and wake the spawner.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
1106
1107 static void *
1108 spawn_thread (void *d)
1109 {
1110 spawn_data *data = (spawn_data *)d;
1111 scm_i_pthread_detach (scm_i_pthread_self ());
1112 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1113 return NULL;
1114 }
1115
/* C-level thread creation: run BODY (BODY_DATA) in a new detached
   thread, optionally wrapped in a catch with HANDLER (HANDLER_DATA).
   Returns the new thread object once the child is running.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  /* DATA lives on this stack; the child copies out of it before we
     return (see really_spawn).  */
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  /* Wait until the child has published its thread object.  */
  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  assert (SCM_I_IS_THREAD (data.thread));

  return data.thread;
}
1151
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Forward to the platform yield primitive; its C result is exposed
     as a boolean.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1160
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  /* Test-and-set the `canceled' flag under the admin mutex so that at
     most one caller performs the actual pthread cancellation.  */
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Release the admin mutex before canceling T.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1185
1186 SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1187 (SCM thread, SCM proc),
1188 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1189 "This handler will be called when the thread exits.")
1190 #define FUNC_NAME s_scm_set_thread_cleanup_x
1191 {
1192 scm_i_thread *t;
1193
1194 SCM_VALIDATE_THREAD (1, thread);
1195 if (!scm_is_false (proc))
1196 SCM_VALIDATE_THUNK (2, proc);
1197
1198 t = SCM_I_THREAD_DATA (thread);
1199 scm_i_pthread_mutex_lock (&t->admin_mutex);
1200
1201 if (!(t->exited || t->canceled))
1202 t->cleanup_handler = proc;
1203
1204 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1205
1206 return SCM_UNSPECIFIED;
1207 }
1208 #undef FUNC_NAME
1209
1210 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1211 (SCM thread),
1212 "Return the cleanup handler installed for the thread @var{thread}.")
1213 #define FUNC_NAME s_scm_thread_cleanup
1214 {
1215 scm_i_thread *t;
1216 SCM ret;
1217
1218 SCM_VALIDATE_THREAD (1, thread);
1219
1220 t = SCM_I_THREAD_DATA (thread);
1221 scm_i_pthread_mutex_lock (&t->admin_mutex);
1222 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1223 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1224
1225 return ret;
1226 }
1227 #undef FUNC_NAME
1228
/* C-level join with no timeout: block until THREAD terminates and
   return its result.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1233
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* TIMEOUTVAL, when given, is what we return if the timeout
     expires; otherwise the default is #f.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  /* Sleep on T's join queue until woken or the timeout
	     expires; the admin mutex is released while blocked.  */
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Spurious wakeup or EINTR: run pending asyncs, then loop.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited).  */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1300
1301 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1302 (SCM obj),
1303 "Return @code{#t} if @var{obj} is a thread.")
1304 #define FUNC_NAME s_scm_thread_p
1305 {
1306 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1307 }
1308 #undef FUNC_NAME
1309
1310
1311 static size_t
1312 fat_mutex_free (SCM mx)
1313 {
1314 fat_mutex *m = SCM_MUTEX_DATA (mx);
1315 scm_i_pthread_mutex_destroy (&m->lock);
1316 return 0;
1317 }
1318
1319 static int
1320 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1321 {
1322 fat_mutex *m = SCM_MUTEX_DATA (mx);
1323 scm_puts ("#<mutex ", port);
1324 scm_uintprint ((scm_t_bits)m, 16, port);
1325 scm_puts (">", port);
1326 return 1;
1327 }
1328
/* Allocate and return a new fat mutex with the given flag bits.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* `waiting' is first set to a valid SCM, the smob created, and only
     then the real queue allocated — NOTE(review): presumably so the
     collector never traces an uninitialized field; confirm against
     make_queue's allocation behavior.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1349
/* C-level convenience: create a standard mutex (no option flags).  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1354
/* Option symbols accepted by `make-mutex'.  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1358
1359 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1360 (SCM flags),
1361 "Create a new mutex. ")
1362 #define FUNC_NAME s_scm_make_mutex_with_flags
1363 {
1364 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1365
1366 SCM ptr = flags;
1367 while (! scm_is_null (ptr))
1368 {
1369 SCM flag = SCM_CAR (ptr);
1370 if (scm_is_eq (flag, unchecked_unlock_sym))
1371 unchecked_unlock = 1;
1372 else if (scm_is_eq (flag, allow_external_unlock_sym))
1373 external_unlock = 1;
1374 else if (scm_is_eq (flag, recursive_sym))
1375 recursive = 1;
1376 else
1377 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1378 ptr = SCM_CDR (ptr);
1379 }
1380 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1381 }
1382 #undef FUNC_NAME
1383
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1392
/* Key of the error thrown when a lock is obtained on an abandoned
   mutex (one whose owner thread exited while holding it).  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");

/* Lock MUTEX on behalf of OWNER (defaulting to the current thread),
   waiting at most until the absolute time TIMEOUT (NULL means wait
   forever).  On return *RET is 1 if the lock was acquired (or its
   recursion level bumped), 0 otherwise.  Returns either SCM_BOOL_F or
   a (key . message) pair the caller should throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unlocked: take it.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

	      /* FIXME: The order in which `t->admin_mutex' and
		 `m->lock' are taken differs from that in
		 `on_thread_exit', potentially leading to deadlocks.  */
	      scm_i_pthread_mutex_lock (&t->admin_mutex);

	      /* Only keep a weak reference to MUTEX so that it's not
		 retained when not referenced elsewhere (bug #27450).
		 The weak pair itself is eventually removed when MUTEX
		 is unlocked.  Note that `t->mutexes' lists mutexes
		 currently held by T, so it should be small.  */
	      t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* The previous owner exited without unlocking: steal the
	     lock, but report the abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Relocking a mutex we already own succeeds only for
	     recursive mutexes.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Held by someone else: fail if the absolute TIMEOUT has
	     already passed, otherwise block and retry after waking.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1482
/* C-level lock: no timeout, owner defaults to the calling thread.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1487
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
	    (SCM m, SCM timeout, SCM owner),
	    "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
	    "thread blocks until the mutex becomes available. The function\n"
	    "returns when the calling thread owns the lock on @var{m}.\n"
	    "Locking a mutex that a thread already owns will succeed right\n"
	    "away and will not block the thread. That is, Guile's mutexes\n"
	    "are @emph{recursive}.")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  /* TIMEOUT, when given and not #f, is converted to a timespec (see
     to_timespec); NULL means block indefinitely.  */
  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
    SCM_VALIDATE_THREAD (3, owner);

  /* fat_mutex_lock reports failures as a (key . message) pair, which
     is rethrown here at the Scheme level.  */
  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1519
1520 static void
1521 lock_mutex_return_void (SCM mx)
1522 {
1523 (void) scm_lock_mutex (mx);
1524 }
1525
1526 static void
1527 unlock_mutex_return_void (SCM mx)
1528 {
1529 (void) scm_unlock_mutex (mx);
1530 }
1531
/* Hold MUTEX for the current dynwind context: it is unlocked whenever
   the context is left and (re)locked whenever it is entered.  The
   unwind handler is registered before the rewind handler runs, so the
   mutex is never left locked across a non-local exit.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1540
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
	    (SCM mutex),
	    "Try to lock @var{mutex}. If the mutex is already locked by someone "
	    "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  /* Pass an already-expired timeout (time 0) so that fat_mutex_lock
     never blocks.  */
  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1562
1563 /*** Fat condition variables */
1564
/* A fat condition variable: a pthread mutex protecting a queue of
   waiting threads.  Waiters are woken via unblock_from_queue rather
   than a pthread condition variable.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1572
/* Unlock MUTEX, which should normally be held by the calling thread
   (the `unchecked-unlock' and `allow-external-unlock' flags relax
   this).  When COND is given, atomically wait on it while MUTEX is
   unlocked, with WAITTIME as timeout (NULL means no timeout); when
   RELOCK is nonzero, relock MUTEX on behalf of its previous owner
   before returning.  Returns 1 on success, 0 on timeout.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
	{
	  /* Not locked at all: an error unless this mutex was created
	     with the `unchecked-unlock' flag.  */
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = t->handle;
	}
      else if (!m->allow_external_unlock)
	{
	  /* Locked by another thread, and external unlock was not
	     enabled at creation time.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  /* Drop one recursion level; when fully unlocked, hand the
	     mutex to the next waiter (if any).  */
	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    {
	      /* Change the owner of MUTEX.  */
	      t->mutexes = scm_delq_x (mutex, t->mutexes);
	      m->owner = unblock_from_queue (m->waiting);
	    }

	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      /* Signalled.  */
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: run any pending asyncs, then loop and wait again.  */
	  t->block_asyncs--;
	  scm_async_click ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock, no condition variable involved.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	{
	  /* Change the owner of MUTEX.  */
	  t->mutexes = scm_delq_x (mutex, t->mutexes);
	  m->owner = unblock_from_queue (m->waiting);
	}

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1675
/* C-level unlock: no condition variable, no timeout.  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1680
SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
	    (SCM mx, SCM cond, SCM timeout),
	    "Unlocks @var{mutex} if the calling thread owns the lock on "
	    "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
	    "thread results in undefined behaviour. Once a mutex has been unlocked, "
	    "one thread blocked on @var{mutex} is awakened and grabs the mutex "
	    "lock. Every call to @code{lock-mutex} by this thread must be matched "
	    "with a call to @code{unlock-mutex}. Only the last call to "
	    "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  /* COND, when given, is waited on while MX is unlocked; TIMEOUT (if
     given and not #f) bounds that wait.  */
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
	{
	  to_timespec (timeout, &cwaittime);
	  waittime = &cwaittime;
	}
    }

  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1709
1710 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1711 (SCM obj),
1712 "Return @code{#t} if @var{obj} is a mutex.")
1713 #define FUNC_NAME s_scm_mutex_p
1714 {
1715 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1716 }
1717 #undef FUNC_NAME
1718
1719 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1720 (SCM mx),
1721 "Return the thread owning @var{mx}, or @code{#f}.")
1722 #define FUNC_NAME s_scm_mutex_owner
1723 {
1724 SCM owner;
1725 fat_mutex *m = NULL;
1726
1727 SCM_VALIDATE_MUTEX (1, mx);
1728 m = SCM_MUTEX_DATA (mx);
1729 scm_i_pthread_mutex_lock (&m->lock);
1730 owner = m->owner;
1731 scm_i_pthread_mutex_unlock (&m->lock);
1732
1733 return owner;
1734 }
1735 #undef FUNC_NAME
1736
1737 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1738 (SCM mx),
1739 "Return the lock level of mutex @var{mx}.")
1740 #define FUNC_NAME s_scm_mutex_level
1741 {
1742 SCM_VALIDATE_MUTEX (1, mx);
1743 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1744 }
1745 #undef FUNC_NAME
1746
1747 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1748 (SCM mx),
1749 "Returns @code{#t} if the mutex @var{mx} is locked.")
1750 #define FUNC_NAME s_scm_mutex_locked_p
1751 {
1752 SCM_VALIDATE_MUTEX (1, mx);
1753 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1754 }
1755 #undef FUNC_NAME
1756
1757 static int
1758 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1759 {
1760 fat_cond *c = SCM_CONDVAR_DATA (cv);
1761 scm_puts ("#<condition-variable ", port);
1762 scm_uintprint ((scm_t_bits)c, 16, port);
1763 scm_puts (">", port);
1764 return 1;
1765 }
1766
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Give `waiting' a valid SCM before the smob is created and only
     allocate the real queue afterwards — same initialization pattern
     as make_fat_mutex.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1782
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
	    (SCM cv, SCM mx, SCM t),
	    "Wait until condition variable @var{cv} has been signalled.  While waiting, "
	    "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
	    "is locked again when this function returns.  When @var{t} is given, "
	    "it specifies a point in time where the waiting should be aborted.  It "
	    "can be either a integer as returned by @code{current-time} or a pair "
	    "as returned by @code{gettimeofday}.  When the waiting is aborted the "
	    "mutex is locked and @code{#f} is returned.  When the condition "
	    "variable is in fact signalled, the mutex is also locked and @code{#t} "
	    "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  /* The unlock/wait/relock sequence is a single operation in
     fat_mutex_unlock; relock=1 requests relocking before return.  */
  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1810
/* Wake at most one thread waiting on C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1816
1817 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1818 (SCM cv),
1819 "Wake up one thread that is waiting for @var{cv}")
1820 #define FUNC_NAME s_scm_signal_condition_variable
1821 {
1822 SCM_VALIDATE_CONDVAR (1, cv);
1823 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1824 return SCM_BOOL_T;
1825 }
1826 #undef FUNC_NAME
1827
1828 static void
1829 fat_cond_broadcast (fat_cond *c)
1830 {
1831 while (scm_is_true (unblock_from_queue (c->waiting)))
1832 ;
1833 }
1834
1835 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1836 (SCM cv),
1837 "Wake up all threads that are waiting for @var{cv}. ")
1838 #define FUNC_NAME s_scm_broadcast_condition_variable
1839 {
1840 SCM_VALIDATE_CONDVAR (1, cv);
1841 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1842 return SCM_BOOL_T;
1843 }
1844 #undef FUNC_NAME
1845
1846 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1847 (SCM obj),
1848 "Return @code{#t} if @var{obj} is a condition variable.")
1849 #define FUNC_NAME s_scm_condition_variable_p
1850 {
1851 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1852 }
1853 #undef FUNC_NAME
1854
1855
1856 \f
1857 /*** Select */
1858
/* Argument/result bundle for do_std_select, threaded through
   scm_without_guile's single void * parameter.  */
struct select_args
{
  int nfds;
  fd_set *read_fds;
  fd_set *write_fds;
  fd_set *except_fds;
  struct timeval *timeout;

  /* Outputs: select's return value and the errno it left behind.  */
  int result;
  int errno_value;
};
1870
1871 static void *
1872 do_std_select (void *args)
1873 {
1874 struct select_args *select_args;
1875
1876 select_args = (struct select_args *) args;
1877
1878 select_args->result =
1879 select (select_args->nfds,
1880 select_args->read_fds, select_args->write_fds,
1881 select_args->except_fds, select_args->timeout);
1882 select_args->errno_value = errno;
1883
1884 return NULL;
1885 }
1886
/* Like select(2), but cooperating with Guile: the calling thread's
   sleep pipe is added to the read set so pending asyncs can wake the
   select early (reported as -1/EINTR when nothing else was ready),
   and the actual select runs outside Guile mode.  */
int
scm_std_select (int nfds,
		fd_set *readfds,
		fd_set *writefds,
		fd_set *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set, to watch the wakeup fd below.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register this thread as sleeping; retry while asyncs are
     pending.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* Woken via the sleep pipe: consume the byte, hide the wakeup
	 fd from the caller, and report EINTR if nothing else was
	 ready.  */
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1944
1945 /* Convenience API for blocking while in guile mode. */
1946
1947 #if SCM_USE_PTHREAD_THREADS
1948
1949 /* It seems reasonable to not run procedures related to mutex and condition
1950 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1951 without it, and (ii) the only potential gain would be GC latency. See
1952 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1953 for a discussion of the pros and cons. */
1954
1955 int
1956 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1957 {
1958 int res = scm_i_pthread_mutex_lock (mutex);
1959 return res;
1960 }
1961
/* Dynwind unwind handler: DATA is the pthread mutex to release.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock (data);
}
1967
/* Lock the raw pthread mutex MUTEX and arrange for it to be unlocked
   when the current dynwind context is left.  Unlike
   scm_dynwind_lock_mutex, no rewind handler is installed.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1974
/* Wait on COND with MUTEX held, recording the mutex in `held_mutex'
   around the wait — NOTE(review): presumably so cleanup code can
   release it if the thread is canceled while blocked; confirm at the
   other `held_mutex' users.  Returns the pthread error code.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1987
/* Like scm_pthread_cond_wait, but give up at the absolute time WT;
   returns the pthread error code (ETIMEDOUT on expiry).  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Track the mutex we block on, as in scm_pthread_cond_wait.  */
  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
2002
2003 #endif
2004
/* Dynwind unwind handler paired with
   scm_i_dynwind_pthread_mutex_lock_block_asyncs: release the mutex,
   then re-enable asyncs.  */
static void
do_unlock_with_asyncs (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
  SCM_I_CURRENT_THREAD->block_asyncs--;
}
2011
/* Like scm_dynwind_pthread_mutex_lock, but additionally block asyncs
   while the mutex is held; both effects are undone by
   do_unlock_with_asyncs when the dynwind context is left.  */
void
scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t *mutex)
{
  SCM_I_CURRENT_THREAD->block_asyncs++;
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock_with_asyncs, mutex,
			      SCM_F_WIND_EXPLICITLY);
}
2020
2021 unsigned long
2022 scm_std_usleep (unsigned long usecs)
2023 {
2024 struct timeval tv;
2025 tv.tv_usec = usecs % 1000000;
2026 tv.tv_sec = usecs / 1000000;
2027 scm_std_select (0, NULL, NULL, NULL, &tv);
2028 return tv.tv_sec * 1000000 + tv.tv_usec;
2029 }
2030
2031 unsigned int
2032 scm_std_sleep (unsigned int secs)
2033 {
2034 struct timeval tv;
2035 tv.tv_usec = 0;
2036 tv.tv_sec = secs;
2037 scm_std_select (0, NULL, NULL, NULL, &tv);
2038 return tv.tv_sec;
2039 }
2040
2041 /*** Misc */
2042
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* `handle' is the SCM-level thread object wrapping this
     scm_i_thread.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
2051
2052 static SCM
2053 scm_c_make_list (size_t n, SCM fill)
2054 {
2055 SCM res = SCM_EOL;
2056 while (n-- > 0)
2057 res = scm_cons (fill, res);
2058 return res;
2059 }
2060
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  /* Fill the pre-allocated spine in place while walking the global
     thread list; the internal signal-delivery thread is skipped.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Truncate any unused tail (threads may have exited since the
     count was read).  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
2089
2090 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
2091 (SCM thread),
2092 "Return @code{#t} iff @var{thread} has exited.\n")
2093 #define FUNC_NAME s_scm_thread_exited_p
2094 {
2095 return scm_from_bool (scm_c_thread_exited_p (thread));
2096 }
2097 #undef FUNC_NAME
2098
/* C-level variant of `thread-exited?'.  Reuses the Scheme procedure's
   name for errors thrown by the validation macro.  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
2109
2110 SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
2111 (void),
2112 "Return the total number of processors of the machine, which\n"
2113 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2114 "thread execution unit, which can be either:\n\n"
2115 "@itemize\n"
2116 "@item an execution core in a (possibly multi-core) chip, in a\n"
2117 " (possibly multi- chip) module, in a single computer, or\n"
2118 "@item a thread execution unit inside a core in the case of\n"
2119 " @dfn{hyper-threaded} CPUs.\n"
2120 "@end itemize\n\n"
2121 "Which of the two definitions is used, is unspecified.\n")
2122 #define FUNC_NAME s_scm_total_processor_count
2123 {
2124 return scm_from_ulong (num_processors (NPROC_ALL));
2125 }
2126 #undef FUNC_NAME
2127
2128 SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
2129 (void),
2130 "Like @code{total-processor-count}, but return the number of\n"
2131 "processors available to the current process. See\n"
2132 "@code{setaffinity} and @code{getaffinity} for more\n"
2133 "information.\n")
2134 #define FUNC_NAME s_scm_current_processor_count
2135 {
2136 return scm_from_ulong (num_processors (NPROC_CURRENT));
2137 }
2138 #undef FUNC_NAME
2139
2140
2141 \f
2142
/* Condition variable used to wake sleeping threads — NOTE(review):
   initialized in scm_threads_prehistory; confirm its wait/signal
   sites elsewhere in this file.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Set to 1 once scm_init_threads has completed.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Fallback mutex used by scm_dynwind_critical_section when passed #f;
   created in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;
2152
/* Enter a critical section for the current dynwind context: lock
   MUTEX (or, when MUTEX is #f, the global recursive
   dynwind_critical_section_mutex) and block asyncs until the context
   is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
2161
2162 /*** Initialization */
2163
/* Miscellaneous global lock; initialized in scm_threads_prehistory.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Mutex attribute used to create recursive mutexes.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
2169
/* Early one-time initialization, run before any Guile thread exists:
   set up the global mutexes and condition variable, and register the
   main thread (guilify_self_1).  BASE is the stack base.  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* The critical-section mutex is created recursive — NOTE(review):
     presumably because critical sections can nest; confirm with
     SCM_CRITICAL_SECTION_START users.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}
2186
/* Smob type tags for threads, mutexes and condition variables;
   assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
2190
/* Register the smob types for threads, mutexes and condition
   variables, finish initializing the main thread (guilify_self_2),
   and create the global dynwind critical-section mutex.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The default dynamic state is set up later, in
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2211
/* Derive a fresh dynamic state from the current one and record it as
   the default state for newly created threads.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}
2218
/* Register the Scheme procedures defined in this file (the generated
   bindings in threads.x).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2224
2225 \f
2226 /* IA64-specific things. */
2227
2228 #ifdef __ia64__
2229 # ifdef __hpux
2230 # include <sys/param.h>
2231 # include <sys/pstat.h>
/* Return the base address of the IA64 register backing store by
   scanning this process's VM regions for the RSE stack.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Extract the ar.bsp register (backing store pointer) from a
   ucontext.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
2249 # endif /* hpux */
2250 # ifdef linux
2251 # include <ucontext.h>
/* On GNU/Linux, glibc exports the register backing store base
   directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Extract ar.bsp from the Linux ucontext's machine context.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
2264 # endif /* linux */
2265 # ifdef __FreeBSD__
2266 # include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  /* NOTE(review): hard-coded backing store base per the FreeBSD/ia64
     address-space layout — confirm against the FreeBSD sources.  */
  return (void *)0x8000000000000000;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  /* bspstore plus the number of dirty-register bytes yields the
     effective bsp.  */
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
		  + ctx->uc_mcontext.mc_special.ndirty);
}
2279 # endif /* __FreeBSD__ */
2280 #endif /* __ia64__ */
2281
2282
2283 /*
2284 Local Variables:
2285 c-file-style: "gnu"
2286 End:
2287 */