Merge branch 'stable-2.0'
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
3 * Free Software Foundation, Inc.
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 */
20
21
22 \f
23 #ifdef HAVE_CONFIG_H
24 # include <config.h>
25 #endif
26
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
29 #include "libguile/_scm.h"
30
31 #include <stdlib.h>
32 #if HAVE_UNISTD_H
33 #include <unistd.h>
34 #endif
35 #include <stdio.h>
36
37 #ifdef HAVE_STRING_H
38 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
39 #endif
40
41 #if HAVE_SYS_TIME_H
42 #include <sys/time.h>
43 #endif
44
45 #if HAVE_PTHREAD_NP_H
46 # include <pthread_np.h>
47 #endif
48
49 #include <sys/select.h>
50
51 #include <assert.h>
52 #include <fcntl.h>
53 #include <nproc.h>
54
55 #include "libguile/validate.h"
56 #include "libguile/root.h"
57 #include "libguile/eval.h"
58 #include "libguile/async.h"
59 #include "libguile/ports.h"
60 #include "libguile/threads.h"
61 #include "libguile/dynwind.h"
62 #include "libguile/iselect.h"
63 #include "libguile/fluids.h"
64 #include "libguile/continuations.h"
65 #include "libguile/gc.h"
66 #include "libguile/init.h"
67 #include "libguile/scmsigs.h"
68 #include "libguile/strings.h"
69 #include "libguile/vm.h"
70
71 #include <full-read.h>
72
73
74 \f
75
/* The GC "kind" for threads that allow them to mark their VM
   stacks. */
static int thread_gc_kind;

/* Custom libgc mark procedure for scm_i_thread objects at ADDR.
   Conservatively marks every word of the thread structure, then lets
   the VM mark the live portion of the thread's VM stack.  Returns the
   updated mark stack pointer, per the gc_mark.h protocol.  */
static struct GC_ms_entry *
thread_mark (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
             struct GC_ms_entry *mark_stack_limit, GC_word env)
{
  int word;
  const struct scm_i_thread *t = (struct scm_i_thread *) addr;

  if (SCM_UNPACK (t->handle) == 0)
    /* T must be on the free-list; ignore.  (See warning in
       gc_mark.h.)  */
    return mark_stack_ptr;

  /* Mark T.  We could be more precise, but it doesn't matter.  */
  for (word = 0; word * sizeof (*addr) < sizeof (*t); word++)
    mark_stack_ptr = GC_MARK_AND_PUSH ((void *) addr[word],
                                       mark_stack_ptr, mark_stack_limit,
                                       NULL);

  /* The VM stack is managed separately; delegate to the VM for a
     precise mark of its live region.  */
  if (t->vp)
    mark_stack_ptr = scm_i_vm_mark_stack (t->vp, mark_stack_ptr,
                                          mark_stack_limit);

  return mark_stack_ptr;
}
104
105
106 \f
107 static void
108 to_timespec (SCM t, scm_t_timespec *waittime)
109 {
110 if (scm_is_pair (t))
111 {
112 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
113 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
114 }
115 else
116 {
117 double time = scm_to_double (t);
118 double sec = scm_c_truncate (time);
119
120 waittime->tv_sec = (long) sec;
121 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
122 }
123 }
124
125
126 \f
127 /*** Queues */
128
129 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
130 the risk of false references leading to unbounded retained space as
131 described in "Bounding Space Usage of Conservative Garbage Collectors",
132 H.J. Boehm, 2001. */
133
134 /* Make an empty queue data structure.
135 */
136 static SCM
137 make_queue ()
138 {
139 return scm_cons (SCM_EOL, SCM_EOL);
140 }
141
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
*/
static SCM
enqueue (SCM q, SCM t)
{
  /* Q's cdr is the first cell of the list, Q's car the last cell.  */
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Empty queue: the new cell becomes the whole list.  */
    SCM_SETCDR (q, c);
  else
    /* Append after the current last cell.  */
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}
158
/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          /* When C is the last cell, move the tail pointer (car of Q)
             back to the previous cell, or clear it if the queue is now
             empty.  */
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, scm_is_eq (prev, q) ? SCM_EOL : prev);
          SCM_SETCDR (prev, SCM_CDR (c));

          /* GC-robust */
          SCM_SETCDR (c, SCM_EOL);

          SCM_CRITICAL_SECTION_END;
          return 1;
        }
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}
188
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      /* If the queue became empty, also clear the tail pointer kept in
         the car of Q.  */
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
216
217 /*** Thread smob routines */
218
219
/* Smob print procedure for threads: writes "#<thread ID (ADDR)>" to
   PORT, where ID is derived from the pthread identifier and ADDR is
   the address of the thread structure.  Always returns 1 (printed
   successfully).  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the union member whose size matches pthread_t.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts_unlocked ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts_unlocked (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts_unlocked (")>", port);
  return 1;
}
254
255 \f
256 /*** Blocking on queues. */
257
258 /* See also scm_i_queue_async_cell for how such a block is
259 interrputed.
260 */
261
262 /* Put the current thread on QUEUE and go to sleep, waiting for it to
263 be woken up by a call to 'unblock_from_queue', or to be
264 interrupted. Upon return of this function, the current thread is
265 no longer on QUEUE, even when the sleep has been interrupted.
266
267 The caller of block_self must hold MUTEX. It will be atomically
268 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
269
270 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
271 as MUTEX is needed.
272
273 When WAITTIME is not NULL, the sleep will be aborted at that time.
274
275 The return value of block_self is an errno value. It will be zero
276 when the sleep has been successfully completed by a call to
277 unblock_from_queue, EINTR when it has been interrupted by the
278 delivery of a system async, and ETIMEDOUT when the timeout has
279 expired.
280
281 The system asyncs themselves are not executed by block_self.
282 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    /* An interruption is already pending; report EINTR without
       sleeping at all.  */
    err = EINTR;
  else
    {
      /* Block asyncs while we are on QUEUE, so they cannot run before
         we have been removed from it again.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
314
315 /* Wake up the first thread on QUEUE, if any. The awoken thread is
316 returned, or #f if the queue was empty.
317 */
318 static SCM
319 unblock_from_queue (SCM queue)
320 {
321 SCM thread = dequeue (queue);
322 if (scm_is_true (thread))
323 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
324 return thread;
325 }
326
327 \f
328 /* Getting into and out of guile mode.
329 */
330
331 /* Key used to attach a cleanup handler to a given thread. Also, if
332 thread-local storage is unavailable, this key is used to retrieve the
333 current thread with `pthread_getspecific ()'. */
334 scm_i_pthread_key_t scm_i_thread_key;
335
336
337 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
338
339 /* When thread-local storage (TLS) is available, a pointer to the
340 current-thread object is kept in TLS. Note that storing the thread-object
341 itself in TLS (rather than a pointer to some malloc'd memory) is not
342 possible since thread objects may live longer than the actual thread they
343 represent. */
344 SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
345
346 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
347
348
349 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
350 static scm_i_thread *all_threads = NULL;
351 static int thread_count;
352
353 static SCM scm_i_default_dynamic_state;
354
/* Run when a fluid is collected.  Clears slot N of the fluid vector in
   every thread's dynamic state so that the slot can be safely reused
   for a new fluid.  */
void
scm_i_reset_fluid (size_t n)
{
  scm_i_thread *t;

  /* Walk the global thread list under the admin mutex so the list
     cannot change underneath us.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (t = all_threads; t; t = t->next_thread)
    if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
      {
        SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);

        /* A thread's fluid vector may be shorter than N; skip it.  */
        if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
          SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
      }
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
372
/* Perform first stage of thread initialisation, in non-guile mode.
   Fills in a stack-allocated scm_i_thread, then copies it into
   GC-managed storage and links it onto the global thread list.  */
static void
guilify_self_1 (struct GC_stack_base *base)
{
  scm_i_thread t;

  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
     before allocating anything in this thread, because allocation could
     cause GC to run, and GC could cause finalizers, which could invoke
     Scheme functions, which need the current thread to be set.  */

  t.pthread = scm_i_pthread_self ();
  t.handle = SCM_BOOL_F;
  t.result = SCM_BOOL_F;
  t.cleanup_handler = SCM_BOOL_F;
  t.mutexes = SCM_EOL;
  t.held_mutex = NULL;
  t.join_queue = SCM_EOL;
  t.dynamic_state = SCM_BOOL_F;
  t.dynstack.base = NULL;
  t.dynstack.top = NULL;
  t.dynstack.limit = NULL;
  t.active_asyncs = SCM_EOL;
  /* Asyncs stay blocked until guilify_self_2 finishes setting up the
     rest of the thread state.  */
  t.block_asyncs = 1;
  t.pending_asyncs = 1;
  t.critical_section_level = 0;
  t.base = base->mem_base;
#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
#endif
  t.continuation_root = SCM_EOL;
  t.continuation_base = t.base;
  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
  t.sleep_mutex = NULL;
  t.sleep_object = SCM_BOOL_F;
  t.sleep_fd = -1;
  t.vp = NULL;

  if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
  t.canceled = 0;
  t.exited = 0;
  t.guile_mode = 0;

  /* The switcheroo.  */
  {
    scm_i_thread *t_ptr = &t;

    /* GC is disabled while the thread structure is copied into
       GC-managed storage and published, so a collection cannot observe
       the half-initialized state.  */
    GC_disable ();
    t_ptr = GC_generic_malloc (sizeof (*t_ptr), thread_gc_kind);
    memcpy (t_ptr, &t, sizeof t);

    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
    /* Cache the current thread in TLS for faster lookup.  */
    scm_i_current_thread = t_ptr;
#endif

    scm_i_pthread_mutex_lock (&thread_admin_mutex);
    t_ptr->next_thread = all_threads;
    all_threads = t_ptr;
    thread_count++;
    scm_i_pthread_mutex_unlock (&thread_admin_mutex);

    GC_enable ();
  }
}
447
/* Perform second stage of thread initialisation, in guile mode.
   Creates the thread's Scheme-visible handle, dynamic state, dynamic
   stack, and join queue, then unblocks asyncs.  PARENT is a dynamic
   state to inherit from, or #f for the initial dynamic state.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  /* Wrap the thread structure in a smob; from here on the thread is
     visible to Scheme as T->handle.  */
  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->dynstack.base = scm_gc_malloc (16 * sizeof (scm_t_bits), "dynstack");
  t->dynstack.limit = t->dynstack.base + 16;
  t->dynstack.top = t->dynstack.base + SCM_DYNSTACK_HEADER_LEN;

  t->join_queue = make_queue ();
  /* The thread is now fully set up; allow asyncs to run.  */
  t->block_asyncs = 0;

  /* See note in finalizers.c:queue_finalizer_async().  */
  GC_invoke_finalizers ();
}
477
478 \f
479 /*** Fat mutexes */
480
481 /* We implement our own mutex type since we want them to be 'fair', we
482 want to do fancy things while waiting for them (like running
483 asyncs) and we might want to add things that are nice for
484 debugging.
485 */
486
/* A "fat" mutex: a plain pthread mutex (LOCK) guarding the Scheme-level
   bookkeeping — ownership, recursion level, behavioral flags, and the
   queue of waiting threads.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
                                owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
502
503 static SCM
504 call_cleanup (void *data)
505 {
506 SCM *proc_p = data;
507 return scm_call_0 (*proc_p);
508 }
509
/* Perform thread tear-down, in guile mode.  Runs the cleanup handler,
   marks the thread exited, wakes joiners, and releases any fat mutexes
   the thread still owns.  V is the scm_i_thread pointer.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Run the Scheme-level cleanup handler under a catch-all so that
     tear-down always continues; its result becomes the exit value.  */
  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
                                      call_cleanup, &ptr,
                                      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on this thread.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* T->mutexes is a list of weak references to mutexes this thread
     locked; wake a waiter on each one T still owns.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);

      if (scm_is_true (mutex))
        {
          fat_mutex *m  = SCM_MUTEX_DATA (mutex);

          scm_i_pthread_mutex_lock (&m->lock);

          /* Check whether T owns MUTEX.  This is usually the case, unless
             T abandoned MUTEX; in that case, T is no longer its owner (see
             `fat_mutex_lock') but MUTEX is still in `t->mutexes'.  */
          if (scm_is_eq (m->owner, t->handle))
            unblock_from_queue (m->waiting);

          scm_i_pthread_mutex_unlock (&m->lock);
        }

      t->mutexes = scm_cdr (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
561
/* Re-register this thread with libgc if necessary, then run
   do_thread_exit in guile mode.  SB is the stack base supplied by
   GC_call_with_stack_base; V is the scm_i_thread pointer.  */
static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered.  */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}
572
/* Destructor attached to scm_i_thread_key: runs when a thread
   terminates (normally or via cancellation).  Performs guile-mode
   cleanup through a trampoline, then unlinks the thread from the
   global list and releases its resources.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If we were canceled, we were unable to clear `t->guile_mode', so do
     it here.  */
  t->guile_mode = 0;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;

        /* GC-robust */
        t->next_thread = NULL;

        break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

  /* Free the VM stack now that no Scheme code can run here anymore.  */
  if (t->vp)
    {
      scm_i_vm_free_stack (t->vp);
      t->vp = NULL;
    }

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}
637
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create the thread-specific key whose destructor (on_thread_exit)
   performs per-thread tear-down.  Run exactly once, via
   scm_i_pthread_once on init_thread_key_once.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
645
646 /* Perform any initializations necessary to make the current thread
647 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
648 if necessary.
649
650 BASE is the stack base to use with GC.
651
652 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
653 which case the default dynamic state is used.
654
655 Returns zero when the thread was known to guile already; otherwise
656 return 1.
657
658 Note that it could be the case that the thread was known
659 to Guile, but not in guile mode (because we are within a
660 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
661 be sure. New threads are put into guile mode implicitly. */
662
static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
        {
          /* First thread ever to enter Guile.  Run the full
             initialization.
           */
          scm_i_init_guile (base);

#if SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later.  */
          GC_allow_register_threads ();
#endif

          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
        }
      else
        {
          /* Guile is already initialized, but this thread enters it for
             the first time.  Only initialize this thread.
           */
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc.  */
#if SCM_USE_PTHREAD_THREADS
          GC_register_my_thread (base);
#endif

          guilify_self_1 (base);
          guilify_self_2 (parent);
        }
      return 1;
    }
}
712
713 void
714 scm_init_guile ()
715 {
716 struct GC_stack_base stack_base;
717
718 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
719 scm_i_init_thread_for_guile (&stack_base,
720 scm_i_default_dynamic_state);
721 else
722 {
723 fprintf (stderr, "Failed to get stack base for current thread.\n");
724 exit (EXIT_FAILURE);
725 }
726 }
727
/* Bundle of arguments threaded through GC_call_with_stack_base's
   single data pointer into with_guile_and_parent.  */
struct with_guile_args
{
  GC_fn_type func;   /* user function to run in guile mode */
  void *data;        /* opaque argument passed to FUNC */
  SCM parent;        /* parent dynamic state for thread initialization */
};
734
735 static void *
736 with_guile_trampoline (void *data)
737 {
738 struct with_guile_args *args = data;
739
740 return scm_c_with_continuation_barrier (args->func, args->data);
741 }
742
/* Run ARGS->func (ARGS->data) in guile mode on this thread, first
   initializing the thread for Guile if it is not yet known.  BASE is
   the current stack base from GC_call_with_stack_base.  */
static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
         scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than when it was
         when this thread was first guilified.  Thus, `base' must be
         updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      /* Re-enter guile mode with GC active for the duration of the
         call, then leave it again.  */
      t->guile_mode = 1;
      res = GC_call_with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
791
792 static void *
793 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
794 {
795 struct with_guile_args args;
796
797 args.func = func;
798 args.data = data;
799 args.parent = parent;
800
801 return GC_call_with_stack_base (with_guile_and_parent, &args);
802 }
803
804 void *
805 scm_with_guile (void *(*func)(void *), void *data)
806 {
807 return scm_i_with_guile_and_parent (func, data,
808 scm_i_default_dynamic_state);
809 }
810
811 void *
812 scm_without_guile (void *(*func)(void *), void *data)
813 {
814 void *result;
815 scm_i_thread *t = SCM_I_CURRENT_THREAD;
816
817 if (t->guile_mode)
818 {
819 SCM_I_CURRENT_THREAD->guile_mode = 0;
820 result = GC_do_blocking (func, data);
821 SCM_I_CURRENT_THREAD->guile_mode = 1;
822 }
823 else
824 /* Otherwise we're not in guile mode, so nothing to do. */
825 result = func (data);
826
827 return result;
828 }
829
830 \f
831 /*** Thread creation */
832
/* Communication area between call-with-new-thread and the thread it
   creates.  Lives on the creating thread's stack; MUTEX and COND
   synchronize the handoff of THREAD back to the creator.  */
typedef struct {
  SCM parent;                  /* dynamic state the new thread inherits */
  SCM thunk;                   /* thunk to run in the new thread */
  SCM handler;                 /* optional catch handler, or unbound */
  SCM thread;                  /* set by the child to its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
841
/* Body of a thread created by call-with-new-thread, run in guile mode.
   Publishes the new thread object back to the creator, then runs the
   thunk (under a catch-all when HANDLER was given) and stores the
   result as the thread's exit value.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy what we need out of DATA before signalling: DATA lives on the
     creator's stack and may go away once the creator is woken.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Wake the creator, which waits for DATA->thread to be set.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
863
864 static void *
865 launch_thread (void *d)
866 {
867 launch_data *data = (launch_data *)d;
868 scm_i_pthread_detach (scm_i_pthread_self ());
869 scm_i_with_guile_and_parent (really_launch, d, data->parent);
870 return NULL;
871 }
872
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  GC_collect_a_little ();
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  /* DATA lives on our stack, so wait until the child has copied what
     it needs and stored its thread object in DATA.thread.  */
  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
920
/* C-level analogue of launch_data, used by scm_spawn_thread: the body
   and handler are C callbacks rather than Scheme procedures.  */
typedef struct {
  SCM parent;                    /* dynamic state the new thread inherits */
  scm_t_catch_body body;         /* C body to run in the new thread */
  void *body_data;               /* opaque argument for BODY */
  scm_t_catch_handler handler;   /* optional C catch handler, or NULL */
  void *handler_data;            /* opaque argument for HANDLER */
  SCM thread;                    /* set by the child to its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
931
/* Body of a thread created by scm_spawn_thread, run in guile mode.
   Publishes the thread object back to the creator, then runs BODY
   (under HANDLER when non-NULL) and records the result as the
   thread's exit value.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy the callbacks out of DATA before signalling: DATA lives on
     the creator's stack and may go away once the creator is woken.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Wake the creator, which waits for DATA->thread to be set.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}
956
957 static void *
958 spawn_thread (void *d)
959 {
960 spawn_data *data = (spawn_data *)d;
961 scm_i_pthread_detach (scm_i_pthread_self ());
962 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
963 return NULL;
964 }
965
/* C API: create a new Guile thread running BODY (BODY_DATA), with
   HANDLER (HANDLER_DATA) catching errors when HANDLER is non-NULL.
   Blocks until the new thread has started, then returns its thread
   object.  Signals a system error if the thread cannot be created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  /* DATA lives on our stack, so wait until the child is done with it.  */
  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  assert (SCM_I_IS_THREAD (data.thread));

  return data.thread;
}
1001
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Report scm_i_sched_yield's success/failure as a boolean.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1010
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  /* The CANCELED flag, checked under the admin mutex, makes repeated
     cancellation a no-op.  */
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Unlock before cancelling: the target's tear-down
	 (do_thread_exit) locks this same admin mutex.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1035
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f clears the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Installing a handler on an exited or cancelled thread is a no-op.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1059
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
	    (SCM thread),
	    "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* An exited or cancelled thread reports #f rather than a stale
     handler.  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1078
1079 SCM scm_join_thread (SCM thread)
1080 {
1081 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1082 }
1083
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* TIMEOUTVAL, when supplied, is what we return if the join times
     out.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      /* Woken by the exiting thread — but re-check, since
		 block_self can also return 0 spuriously.  */
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Interrupted (EINTR): run pending asyncs with the admin
	     mutex released, then try again.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited).  */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1150
1151 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1152 (SCM obj),
1153 "Return @code{#t} if @var{obj} is a thread.")
1154 #define FUNC_NAME s_scm_thread_p
1155 {
1156 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1157 }
1158 #undef FUNC_NAME
1159
1160
1161 static int
1162 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1163 {
1164 fat_mutex *m = SCM_MUTEX_DATA (mx);
1165 scm_puts_unlocked ("#<mutex ", port);
1166 scm_uintprint ((scm_t_bits)m, 16, port);
1167 scm_puts_unlocked (">", port);
1168 return 1;
1169 }
1170
/* Allocate and initialize a fat mutex smob.  RECURSIVE, UNCHECKED_UNLOCK
   and EXTERNAL_UNLOCK correspond to the `make-mutex' flag symbols.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;
  scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
     and so we can just copy it. */
  memcpy (&m->lock, &lock, sizeof (m->lock));
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Give `waiting' a valid SCM value before the smob becomes visible;
     make_queue allocates, which could trigger GC tracing of M.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1194
1195 SCM scm_make_mutex (void)
1196 {
1197 return scm_make_mutex_with_flags (SCM_EOL);
1198 }
1199
/* Flag symbols accepted by `make-mutex'.  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1203
1204 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1205 (SCM flags),
1206 "Create a new mutex. ")
1207 #define FUNC_NAME s_scm_make_mutex_with_flags
1208 {
1209 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1210
1211 SCM ptr = flags;
1212 while (! scm_is_null (ptr))
1213 {
1214 SCM flag = SCM_CAR (ptr);
1215 if (scm_is_eq (flag, unchecked_unlock_sym))
1216 unchecked_unlock = 1;
1217 else if (scm_is_eq (flag, allow_external_unlock_sym))
1218 external_unlock = 1;
1219 else if (scm_is_eq (flag, recursive_sym))
1220 recursive = 1;
1221 else
1222 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1223 ptr = SCM_CDR (ptr);
1224 }
1225 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1226 }
1227 #undef FUNC_NAME
1228
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
            (void),
            "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1237
/* Error key thrown when a lock is obtained on a mutex whose owner thread
   exited without unlocking it (see fat_mutex_lock).  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1239
/* Acquire MUTEX on behalf of OWNER (the current thread when OWNER is
   unbound), giving up at TIMEOUT if non-NULL.  On return *RET is 1 if
   the lock was obtained, 0 otherwise.  Returns #f on plain success, or
   a (key . message) pair that the caller is expected to throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
        {
          /* Mutex is free: take it.  */
          m->owner = new_owner;
          m->level++;

          if (SCM_I_IS_THREAD (new_owner))
            {
              scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

              /* FIXME: The order in which `t->admin_mutex' and
                 `m->lock' are taken differs from that in
                 `on_thread_exit', potentially leading to deadlocks. */
              scm_i_pthread_mutex_lock (&t->admin_mutex);

              /* Only keep a weak reference to MUTEX so that it's not
                 retained when not referenced elsewhere (bug #27450).
                 The weak pair itself is eventually removed when MUTEX
                 is unlocked.  Note that `t->mutexes' lists mutexes
                 currently held by T, so it should be small. */
              t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
                                     t->mutexes);

              scm_i_pthread_mutex_unlock (&t->admin_mutex);
            }
          *ret = 1;
          break;
        }
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
        {
          /* Previous owner died while holding the mutex: steal it, but
             report the abandonment to the caller.  */
          m->owner = new_owner;
          err = scm_cons (scm_abandoned_mutex_error_key,
                          scm_from_locale_string ("lock obtained on abandoned "
                                                  "mutex"));
          *ret = 1;
          break;
        }
      else if (scm_is_eq (m->owner, new_owner))
        {
          /* Relocking by the same owner: bump the level if recursive,
             otherwise it is an error.  */
          if (m->recursive)
            {
              m->level++;
              *ret = 1;
            }
          else
            {
              err = scm_cons (scm_misc_error_key,
                              scm_from_locale_string ("mutex already locked "
                                                      "by thread"));
              *ret = 0;
            }
          break;
        }
      else
        {
          /* Held by someone else: honor TIMEOUT (an absolute time),
             then wait on the mutex's queue and retry.  */
          if (timeout != NULL)
            {
              gettimeofday (&current_time, NULL);
              if (current_time.tv_sec > timeout->tv_sec ||
                  (current_time.tv_sec == timeout->tv_sec &&
                   current_time.tv_usec * 1000 > timeout->tv_nsec))
                {
                  *ret = 0;
                  break;
                }
            }
          block_self (m->waiting, mutex, &m->lock, timeout);
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1328
1329 SCM scm_lock_mutex (SCM mx)
1330 {
1331 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1332 }
1333
1334 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1335 (SCM m, SCM timeout, SCM owner),
1336 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1337 "thread blocks until the mutex becomes available. The function\n"
1338 "returns when the calling thread owns the lock on @var{m}.\n"
1339 "Locking a mutex that a thread already owns will succeed right\n"
1340 "away and will not block the thread. That is, Guile's mutexes\n"
1341 "are @emph{recursive}.")
1342 #define FUNC_NAME s_scm_lock_mutex_timed
1343 {
1344 SCM exception;
1345 int ret = 0;
1346 scm_t_timespec cwaittime, *waittime = NULL;
1347
1348 SCM_VALIDATE_MUTEX (1, m);
1349
1350 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1351 {
1352 to_timespec (timeout, &cwaittime);
1353 waittime = &cwaittime;
1354 }
1355
1356 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1357 SCM_VALIDATE_THREAD (3, owner);
1358
1359 exception = fat_mutex_lock (m, waittime, owner, &ret);
1360 if (!scm_is_false (exception))
1361 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1362 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1363 }
1364 #undef FUNC_NAME
1365
/* void-returning adapter around scm_lock_mutex, for use as a dynwind
   rewind handler.  */
static void
lock_mutex_return_void (SCM mx)
{
  (void) scm_lock_mutex (mx);
}
1371
/* void-returning adapter around scm_unlock_mutex, for use as a dynwind
   unwind handler.  */
static void
unlock_mutex_return_void (SCM mx)
{
  (void) scm_unlock_mutex (mx);
}
1377
/* Within the current dynwind context, hold MUTEX: it is locked on entry
   (and on rewind) and unlocked on unwind.  The unwind handler is
   registered before the rewind handler runs, so the lock is always
   released even if a throw happens right after locking.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
1386
1387 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1388 (SCM mutex),
1389 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1390 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1391 #define FUNC_NAME s_scm_try_mutex
1392 {
1393 SCM exception;
1394 int ret = 0;
1395 scm_t_timespec cwaittime, *waittime = NULL;
1396
1397 SCM_VALIDATE_MUTEX (1, mutex);
1398
1399 to_timespec (scm_from_int(0), &cwaittime);
1400 waittime = &cwaittime;
1401
1402 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1403 if (!scm_is_false (exception))
1404 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1405 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1406 }
1407 #undef FUNC_NAME
1408
1409 /*** Fat condition variables */
1410
/* A "fat" condition variable.  NOTE(review): the `lock' field appears
   unused in this file — waiters block with the associated mutex's lock
   (see fat_mutex_unlock); confirm before relying on it.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)     SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1418
1419 static void
1420 remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
1421 {
1422 SCM walk, prev;
1423
1424 for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
1425 walk = SCM_CDR (walk))
1426 {
1427 if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
1428 {
1429 if (scm_is_pair (prev))
1430 SCM_SETCDR (prev, SCM_CDR (walk));
1431 else
1432 t->mutexes = SCM_CDR (walk);
1433 break;
1434 }
1435 }
1436 }
1437
/* Unlock MUTEX once.  If COND is bound, atomically release MUTEX and
   wait on COND (bounded by WAITTIME when non-NULL), reacquiring MUTEX
   afterwards when RELOCK is nonzero.  Returns 1 on success/signal and 0
   on timeout; ownership errors are raised as Scheme errors.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
                  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  /* The caller is not the owner: this is only allowed for mutexes
     created with `unchecked-unlock' (when not locked at all) or
     `allow-external-unlock' (when locked by another thread).  */
  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
        {
          if (!m->unchecked_unlock)
            {
              scm_i_pthread_mutex_unlock (&m->lock);
              scm_misc_error (NULL, "mutex not locked", SCM_EOL);
            }
          owner = t->handle;
        }
      else if (!m->allow_external_unlock)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
        }
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
        {
          int brk = 0;

          if (m->level > 0)
            m->level--;
          if (m->level == 0)
            {
              /* Change the owner of MUTEX.  */
              remove_mutex_from_thread (mutex, t);
              m->owner = unblock_from_queue (m->waiting);
            }

          /* Keep asyncs from running between the wait and the relock
             decision below.  */
          t->block_asyncs++;

          err = block_self (c->waiting, cond, &m->lock, waittime);
          scm_i_pthread_mutex_unlock (&m->lock);

          if (err == 0)
            {
              /* Signalled.  */
              ret = 1;
              brk = 1;
            }
          else if (err == ETIMEDOUT)
            {
              ret = 0;
              brk = 1;
            }
          else if (err != EINTR)
            {
              errno = err;
              scm_syserror (NULL);
            }

          if (brk)
            {
              if (relock)
                scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
              t->block_asyncs--;
              break;
            }

          /* EINTR: run asyncs, then retry the wait.  */
          t->block_asyncs--;
          scm_async_tick ();

          scm_remember_upto_here_2 (cond, mutex);

          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  else
    {
      /* Plain unlock, no condition variable involved.  */
      if (m->level > 0)
        m->level--;
      if (m->level == 0)
        {
          /* Change the owner of MUTEX.  */
          remove_mutex_from_thread (mutex, t);
          m->owner = unblock_from_queue (m->waiting);
        }

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1540
1541 SCM scm_unlock_mutex (SCM mx)
1542 {
1543 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1544 }
1545
1546 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1547 (SCM mx, SCM cond, SCM timeout),
1548 "Unlocks @var{mutex} if the calling thread owns the lock on "
1549 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1550 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1551 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1552 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1553 "with a call to @code{unlock-mutex}. Only the last call to "
1554 "@code{unlock-mutex} will actually unlock the mutex. ")
1555 #define FUNC_NAME s_scm_unlock_mutex_timed
1556 {
1557 scm_t_timespec cwaittime, *waittime = NULL;
1558
1559 SCM_VALIDATE_MUTEX (1, mx);
1560 if (! (SCM_UNBNDP (cond)))
1561 {
1562 SCM_VALIDATE_CONDVAR (2, cond);
1563
1564 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1565 {
1566 to_timespec (timeout, &cwaittime);
1567 waittime = &cwaittime;
1568 }
1569 }
1570
1571 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1572 }
1573 #undef FUNC_NAME
1574
1575 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1576 (SCM obj),
1577 "Return @code{#t} if @var{obj} is a mutex.")
1578 #define FUNC_NAME s_scm_mutex_p
1579 {
1580 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1581 }
1582 #undef FUNC_NAME
1583
1584 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1585 (SCM mx),
1586 "Return the thread owning @var{mx}, or @code{#f}.")
1587 #define FUNC_NAME s_scm_mutex_owner
1588 {
1589 SCM owner;
1590 fat_mutex *m = NULL;
1591
1592 SCM_VALIDATE_MUTEX (1, mx);
1593 m = SCM_MUTEX_DATA (mx);
1594 scm_i_pthread_mutex_lock (&m->lock);
1595 owner = m->owner;
1596 scm_i_pthread_mutex_unlock (&m->lock);
1597
1598 return owner;
1599 }
1600 #undef FUNC_NAME
1601
1602 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1603 (SCM mx),
1604 "Return the lock level of mutex @var{mx}.")
1605 #define FUNC_NAME s_scm_mutex_level
1606 {
1607 SCM_VALIDATE_MUTEX (1, mx);
1608 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1609 }
1610 #undef FUNC_NAME
1611
1612 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1613 (SCM mx),
1614 "Returns @code{#t} if the mutex @var{mx} is locked.")
1615 #define FUNC_NAME s_scm_mutex_locked_p
1616 {
1617 SCM_VALIDATE_MUTEX (1, mx);
1618 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1619 }
1620 #undef FUNC_NAME
1621
1622 static int
1623 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1624 {
1625 fat_cond *c = SCM_CONDVAR_DATA (cv);
1626 scm_puts_unlocked ("#<condition-variable ", port);
1627 scm_uintprint ((scm_t_bits)c, 16, port);
1628 scm_puts_unlocked (">", port);
1629 return 1;
1630 }
1631
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Give `waiting' a valid SCM value before the smob becomes visible;
     make_queue allocates and may trigger GC tracing of C.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1647
1648 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1649 (SCM cv, SCM mx, SCM t),
1650 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1651 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1652 "is locked again when this function returns. When @var{t} is given, "
1653 "it specifies a point in time where the waiting should be aborted. It "
1654 "can be either a integer as returned by @code{current-time} or a pair "
1655 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1656 "mutex is locked and @code{#f} is returned. When the condition "
1657 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1658 "is returned. ")
1659 #define FUNC_NAME s_scm_timed_wait_condition_variable
1660 {
1661 scm_t_timespec waittime, *waitptr = NULL;
1662
1663 SCM_VALIDATE_CONDVAR (1, cv);
1664 SCM_VALIDATE_MUTEX (2, mx);
1665
1666 if (!SCM_UNBNDP (t))
1667 {
1668 to_timespec (t, &waittime);
1669 waitptr = &waittime;
1670 }
1671
1672 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1673 }
1674 #undef FUNC_NAME
1675
/* Wake at most one thread blocked on C's waiting queue.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1681
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  /* Always returns #t, regardless of whether any thread was waiting.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1692
/* Wake every thread blocked on C's waiting queue.  unblock_from_queue
   returns #f once the queue is empty, terminating the loop.  */
static void
fat_cond_broadcast (fat_cond *c)
{
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
}
1699
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  /* Always returns #t, regardless of whether any thread was waiting.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1710
1711 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1712 (SCM obj),
1713 "Return @code{#t} if @var{obj} is a condition variable.")
1714 #define FUNC_NAME s_scm_condition_variable_p
1715 {
1716 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1717 }
1718 #undef FUNC_NAME
1719
1720
1721 \f
1722 /*** Select */
1723
/* Argument bundle for do_std_select, so that select(2) can be invoked
   through scm_without_guile's single void* callback argument.  */
struct select_args
{
  /* Inputs: passed straight through to select(2).  */
  int nfds;
  fd_set *read_fds;
  fd_set *write_fds;
  fd_set *except_fds;
  struct timeval *timeout;

  /* Outputs: select's return value and the errno it left behind.  */
  int result;
  int errno_value;
};
1735
1736 static void *
1737 do_std_select (void *args)
1738 {
1739 struct select_args *select_args;
1740
1741 select_args = (struct select_args *) args;
1742
1743 select_args->result =
1744 select (select_args->nfds,
1745 select_args->read_fds, select_args->write_fds,
1746 select_args->except_fds, select_args->timeout);
1747 select_args->errno_value = errno;
1748
1749 return NULL;
1750 }
1751
1752 #if !SCM_HAVE_SYS_SELECT_H
1753 static int scm_std_select (int nfds,
1754 fd_set *readfds,
1755 fd_set *writefds,
1756 fd_set *exceptfds,
1757 struct timeval *timeout);
1758 #endif
1759
/* select(2) wrapper for guile mode: cooperates with the GC (via
   scm_without_guile) and with Guile's thread-wakeup machinery.  The
   thread's sleep pipe read end is added to the read set so another
   thread can interrupt the wait; a wakeup is reported as EINTR.  */
int
scm_std_select (int nfds,
                fd_set *readfds,
                fd_set *writefds,
                fd_set *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set to watch the wakeup fd.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register this sleep so other threads can interrupt it by writing
     to the sleep pipe; retry until registration succeeds.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  /* If the wakeup fd fired, drain the byte and hide it from the
     caller; a wakeup with no other ready fd looks like EINTR.  */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}
1817
1818 /* Convenience API for blocking while in guile mode. */
1819
1820 #if SCM_USE_PTHREAD_THREADS
1821
1822 /* It seems reasonable to not run procedures related to mutex and condition
1823 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1824 without it, and (ii) the only potential gain would be GC latency. See
1825 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1826 for a discussion of the pros and cons. */
1827
1828 int
1829 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1830 {
1831 int res = scm_i_pthread_mutex_lock (mutex);
1832 return res;
1833 }
1834
1835 static void
1836 do_unlock (void *data)
1837 {
1838 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1839 }
1840
/* Lock MUTEX now and arrange for it to be unlocked when the current
   dynwind context is left (normally or non-locally).  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1847
/* Condition wait that records MUTEX in t->held_mutex around the wait,
   so that thread shutdown machinery can release it if the thread is
   torn down while blocked.  Returns the pthread error code.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1860
/* Timed variant of scm_pthread_cond_wait; WT is the absolute wakeup
   time.  Records MUTEX in t->held_mutex for the duration of the wait.
   Returns the pthread error code (e.g. ETIMEDOUT).  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
1875
1876 #endif
1877
/* Unwind handler pairing scm_i_dynwind_pthread_mutex_lock_block_asyncs:
   release the mutex in DATA, then re-enable asyncs.  */
static void
do_unlock_with_asyncs (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
  SCM_I_CURRENT_THREAD->block_asyncs--;
}
1884
/* Like scm_dynwind_pthread_mutex_lock, but also block asyncs for the
   extent of the dynwind; asyncs are blocked before taking the lock so
   none can run while the mutex is held.  */
void
scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t *mutex)
{
  SCM_I_CURRENT_THREAD->block_asyncs++;
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock_with_asyncs, mutex,
                              SCM_F_WIND_EXPLICITLY);
}
1893
/* Sleep for USECS microseconds via scm_std_select, so the sleep is
   interruptible by Guile's wakeup machinery.  NOTE(review): the return
   value is computed from TV after select returns, which reflects the
   unslept remainder only on systems where select updates its timeout —
   elsewhere it presumably returns the original duration; confirm.  */
unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}
1903
/* Sleep for SECS seconds via scm_std_select (interruptible).  Returns
   tv_sec as left by select — the unslept remainder on systems where
   select updates its timeout argument.  */
unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}
1913
1914 /*** Misc */
1915
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The thread smob handle is kept in the per-thread data.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1924
1925 static SCM
1926 scm_c_make_list (size_t n, SCM fill)
1927 {
1928 SCM res = SCM_EOL;
1929 while (n-- > 0)
1930 res = scm_cons (fill, res);
1931 return res;
1932 }
1933
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate the result list (before taking the admin mutex), then
     fill its cars in place while walking the thread list.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is hidden from Scheme.  */
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  /* Truncate any unused pre-allocated tail.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1962
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  /* Thin Scheme-level wrapper over the C predicate below.  */
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1971
/* C-level predicate: nonzero iff THREAD has exited.  Validates that
   THREAD really is a thread smob, raising a Scheme error otherwise.  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1982
SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
            (void),
            "Return the total number of processors of the machine, which\n"
            "is guaranteed to be at least 1. A ``processor'' here is a\n"
            "thread execution unit, which can be either:\n\n"
            "@itemize\n"
            "@item an execution core in a (possibly multi-core) chip, in a\n"
            " (possibly multi- chip) module, in a single computer, or\n"
            "@item a thread execution unit inside a core in the case of\n"
            " @dfn{hyper-threaded} CPUs.\n"
            "@end itemize\n\n"
            "Which of the two definitions is used, is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  /* num_processors comes from gnulib's nproc module.  */
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME
2000
SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
            (void),
            "Like @code{total-processor-count}, but return the number of\n"
            "processors available to the current process. See\n"
            "@code{setaffinity} and @code{getaffinity} for more\n"
            "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  /* NPROC_CURRENT restricts the count to this process's affinity set.  */
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
2012
2013
2014 \f
2015
2016 static scm_i_pthread_cond_t wake_up_cond;
2017 static int threads_initialized_p = 0;
2018
2019
2020 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2021 */
2022 scm_i_pthread_mutex_t scm_i_critical_section_mutex;
2023
2024 static SCM dynwind_critical_section_mutex;
2025
/* Enter a critical section for the current dynwind context: lock MUTEX
   (or the shared recursive default when MUTEX is #f) and block asyncs
   until the context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
2034
2035 /*** Initialization */
2036
2037 scm_i_pthread_mutex_t scm_i_misc_mutex;
2038
2039 #if SCM_USE_PTHREAD_THREADS
2040 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2041 #endif
2042
/* Very early thread-system setup, run before the rest of Guile is
   initialized.  BASE is the GC stack base of the main thread.  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* A recursive mutex attribute, shared by all recursive mutexes.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex must be recursive: sections may nest.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  /* Register a custom GC kind so thread objects get marked by
     thread_mark.  */
  thread_gc_kind =
    GC_new_kind (GC_new_free_list (),
                 GC_MAKE_PROC (GC_new_proc (thread_mark), 0),
                 0, 1);

  guilify_self_1 ((struct GC_stack_base *) base);
}
2064
2065 scm_t_bits scm_tc16_thread;
2066 scm_t_bits scm_tc16_mutex;
2067 scm_t_bits scm_tc16_condvar;
2068
/* Register the thread, mutex and condition-variable smob types and
   finish initializing the calling (first) thread.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2088
/* Capture the current dynamic state as the default one inherited by
   newly created threads.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}
2095
/* Register the Scheme procedures defined in this file; threads.x is
   generated from the SCM_DEFINE forms above.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2101
2102 \f
2103 /* IA64-specific things. */
2104
#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* Return the base address of the IA64 register backing store, found by
   scanning this process's VM regions via pstat.  Aborts if the RSE
   stack region cannot be found.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Extract the ar.bsp register (backing-store pointer) from the
   ucontext CTX.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* On GNU/Linux, glibc exports the backing-store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Extract ar.bsp from the Linux ucontext OPAQUE.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
#  include <ucontext.h>
/* FreeBSD maps the backing store at a fixed address.  */
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
/* The current backing-store top is the saved store pointer plus the
   number of dirty bytes not yet flushed to memory.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
                  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */
2158
2159
2160 /*
2161 Local Variables:
2162 c-file-style: "gnu"
2163 End:
2164 */