rename scm_async_click() to scm_async_tick(); privatize SCM_ASYNC_TICK
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20 \f
21 #ifdef HAVE_CONFIG_H
22 # include <config.h>
23 #endif
24
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
27
28 #if HAVE_UNISTD_H
29 #include <unistd.h>
30 #endif
31 #include <stdio.h>
32
33 #ifdef HAVE_STRING_H
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35 #endif
36
37 #if HAVE_SYS_TIME_H
38 #include <sys/time.h>
39 #endif
40
41 #include <assert.h>
42 #include <nproc.h>
43
44 #include "libguile/validate.h"
45 #include "libguile/root.h"
46 #include "libguile/eval.h"
47 #include "libguile/async.h"
48 #include "libguile/ports.h"
49 #include "libguile/threads.h"
50 #include "libguile/dynwind.h"
51 #include "libguile/iselect.h"
52 #include "libguile/fluids.h"
53 #include "libguile/continuations.h"
54 #include "libguile/gc.h"
55 #include "libguile/init.h"
56 #include "libguile/scmsigs.h"
57 #include "libguile/strings.h"
58 #include "libguile/weaks.h"
59
60 #ifdef __MINGW32__
61 #ifndef ETIMEDOUT
62 # define ETIMEDOUT WSAETIMEDOUT
63 #endif
64 # include <fcntl.h>
65 # include <process.h>
66 # define pipe(fd) _pipe (fd, 256, O_BINARY)
67 #endif /* __MINGW32__ */
68
69 #include <full-read.h>
70
71
72 \f
73
74 /* First some libgc shims. */
75
76 /* Make sure GC_fn_type is defined; it is missing from the public
77 headers of GC 7.1 and earlier. */
78 #ifndef HAVE_GC_FN_TYPE
79 typedef void * (* GC_fn_type) (void *);
80 #endif
81
82
83 #ifndef GC_SUCCESS
84 #define GC_SUCCESS 0
85 #endif
86
87 #ifndef GC_UNIMPLEMENTED
88 #define GC_UNIMPLEMENTED 3
89 #endif
90
91 /* Likewise struct GC_stack_base is missing before 7.1. */
92 #ifndef HAVE_GC_STACK_BASE
struct GC_stack_base {
  void * mem_base; /* Base of memory stack. */
#ifdef __ia64__
  void * reg_base; /* Base of separate register stack (the ia64 RBS). */
#endif
};
99
/* Shim for libgc < 7.1, which lacks GC_register_my_thread: report that
   per-thread registration is unsupported.  STACK_BASE is unused.  */
static int
GC_register_my_thread (struct GC_stack_base *stack_base)
{
  return GC_UNIMPLEMENTED;
}
105
/* Shim for libgc < 7.1, which lacks GC_unregister_my_thread: nothing to
   undo, since the registration shim above is likewise a no-op.  Use a
   proper `(void)' prototype instead of the old-style empty parameter
   list.  */
static void
GC_unregister_my_thread (void)
{
}
110
111 #if !SCM_USE_PTHREAD_THREADS
112 /* No threads; we can just use GC_stackbottom. */
113 static void *
114 get_thread_stack_base ()
115 {
116 return GC_stackbottom;
117 }
118
119 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
120 && defined PTHREAD_ATTR_GETSTACK_WORKS
121 /* This method for GNU/Linux and perhaps some other systems.
122 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
123 available on them. */
/* Return the base of the calling thread's stack (the end the stack
   grows away from).  This method is for GNU/Linux and other systems
   providing pthread_getattr_np; it's not for MacOS X or Solaris 10,
   since pthread_getattr_np is not available on them.  */
static void *
get_thread_stack_base (void)
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* On glibc, pthread_getattr_np allocates storage within ATTR that
     must be released with pthread_attr_destroy, otherwise it leaks.  */
  pthread_attr_destroy (&attr);

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}
141
142 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
143 /* This method for MacOS X.
144 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
145 but as of 2006 there's nothing obvious at apple.com. */
/* Return the calling thread's stack base via the MacOS X specific
   pthread_get_stackaddr_np.  Declared with a `(void)' prototype rather
   than an old-style empty parameter list.  */
static void *
get_thread_stack_base (void)
{
  return pthread_get_stackaddr_np (pthread_self ());
}
151
152 #else
153 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
154 #endif
155
/* Shim for libgc < 7.1: fill in STACK_BASE for the calling thread and
   return GC_SUCCESS.  */
static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}
178
/* Shim for libgc < 7.1: call FN with ARG, passing it an approximation
   of the current stack base taken from the address of a local
   variable.  */
static void *
GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
{
  struct GC_stack_base stack_base;

  stack_base.mem_base = (void*)&stack_base;
#ifdef __ia64__
  /* FIXME: Untested. */
  {
    ucontext_t ctx;
    getcontext (&ctx);
    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
  }
#endif

  return fn (&stack_base, arg);
}
196 #endif /* HAVE_GC_STACK_BASE */
197
198
199 /* Now define with_gc_active and with_gc_inactive. */
200
201 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
202
203 /* We have a sufficiently new libgc (7.2 or newer). */
204
/* Run FUNC on DATA with this thread excluded from GC stack scanning,
   via GC_do_blocking (available in libgc 7.2 or newer).  */
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}
210
/* Run FUNC on DATA with this thread (re-)included in GC stack
   scanning, via GC_call_with_gc_active (libgc 7.2 or newer).  */
static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}
216
217 #else
218
219 /* libgc not new enough, so never actually deactivate GC.
220
221 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
222 GC_call_with_gc_active. */
223
/* Fallback for older libgc: GC cannot actually be deactivated here, so
   just call FUNC directly.  */
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}
229
/* Fallback for older libgc: the thread is always GC-active, so just
   call FUNC directly.  */
static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}
235
236 #endif /* HAVE_GC_DO_BLOCKING */
237
238
239 \f
240 static void
241 to_timespec (SCM t, scm_t_timespec *waittime)
242 {
243 if (scm_is_pair (t))
244 {
245 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
246 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
247 }
248 else
249 {
250 double time = scm_to_double (t);
251 double sec = scm_c_truncate (time);
252
253 waittime->tv_sec = (long) sec;
254 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
255 }
256 }
257
258 \f
259 /*** Queues */
260
261 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
262 the risk of false references leading to unbounded retained space as
263 described in "Bounding Space Usage of Conservative Garbage Collectors",
264 H.J. Boehm, 2001. */
265
266 /* Make an empty queue data structure.
267 */
268 static SCM
269 make_queue ()
270 {
271 return scm_cons (SCM_EOL, SCM_EOL);
272 }
273
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.  The new cell becomes the new
   tail (CAR (Q)); all pointer surgery happens inside a critical
   section.  */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell is also the head.  */
    SCM_SETCDR (q, c);
  else
    /* Link onto the current tail.  */
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}
290
291 /* Remove the element that the handle C refers to from the queue Q. C
292 must have been returned from a call to enqueue. The return value
293 is zero when the element referred to by C has already been removed.
294 Otherwise, 1 is returned.
295 */
296 static int
297 remqueue (SCM q, SCM c)
298 {
299 SCM p, prev = q;
300 SCM_CRITICAL_SECTION_START;
301 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
302 {
303 if (scm_is_eq (p, c))
304 {
305 if (scm_is_eq (c, SCM_CAR (q)))
306 SCM_SETCAR (q, SCM_CDR (c));
307 SCM_SETCDR (prev, SCM_CDR (c));
308
309 /* GC-robust */
310 SCM_SETCDR (c, SCM_EOL);
311
312 SCM_CRITICAL_SECTION_END;
313 return 1;
314 }
315 prev = p;
316 }
317 SCM_CRITICAL_SECTION_END;
318 return 0;
319 }
320
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      /* If the list just became empty, clear the tail pointer too.  */
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
348
349 /*** Thread smob routines */
350
351
352 static int
353 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
354 {
355 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
356 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
357 the struct case, hence we go via a union, and extract according to the
358 size of pthread_t. */
359 union {
360 scm_i_pthread_t p;
361 unsigned short us;
362 unsigned int ui;
363 unsigned long ul;
364 scm_t_uintmax um;
365 } u;
366 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
367 scm_i_pthread_t p = t->pthread;
368 scm_t_uintmax id;
369 u.p = p;
370 if (sizeof (p) == sizeof (unsigned short))
371 id = u.us;
372 else if (sizeof (p) == sizeof (unsigned int))
373 id = u.ui;
374 else if (sizeof (p) == sizeof (unsigned long))
375 id = u.ul;
376 else
377 id = u.um;
378
379 scm_puts ("#<thread ", port);
380 scm_uintprint (id, 10, port);
381 scm_puts (" (", port);
382 scm_uintprint ((scm_t_bits)t, 16, port);
383 scm_puts (")>", port);
384 return 1;
385 }
386
387 \f
388 /*** Blocking on queues. */
389
390 /* See also scm_i_queue_async_cell for how such a block is
391 interrputed.
392 */
393
394 /* Put the current thread on QUEUE and go to sleep, waiting for it to
395 be woken up by a call to 'unblock_from_queue', or to be
396 interrupted. Upon return of this function, the current thread is
397 no longer on QUEUE, even when the sleep has been interrupted.
398
399 The caller of block_self must hold MUTEX. It will be atomically
400 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
401
402 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
403 as MUTEX is needed.
404
405 When WAITTIME is not NULL, the sleep will be aborted at that time.
406
407 The return value of block_self is an errno value. It will be zero
408 when the sleep has been successfully completed by a call to
409 unblock_from_queue, EINTR when it has been interrupted by the
410 delivery of a system async, and ETIMEDOUT when the timeout has
411 expired.
412
413 The system asyncs themselves are not executed by block_self.
414 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* scm_i_setup_sleep returns nonzero when the sleep should be aborted
     immediately; report that as an interruption.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block asyncs for the duration: per the contract above, asyncs
         are signalled via EINTR, never executed by block_self.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
446
447 /* Wake up the first thread on QUEUE, if any. The awoken thread is
448 returned, or #f if the queue was empty.
449 */
450 static SCM
451 unblock_from_queue (SCM queue)
452 {
453 SCM thread = dequeue (queue);
454 if (scm_is_true (thread))
455 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
456 return thread;
457 }
458
459 \f
460 /* Getting into and out of guile mode.
461 */
462
/* Key used to attach a cleanup handler to a given thread.  Also, if
   thread-local storage is unavailable, this key is used to retrieve the
   current thread with `pthread_getspecific ()'. */
scm_i_pthread_key_t scm_i_thread_key;


#ifdef SCM_HAVE_THREAD_STORAGE_CLASS

/* When thread-local storage (TLS) is available, a pointer to the
   current-thread object is kept in TLS.  Note that storing the thread-object
   itself in TLS (rather than a pointer to some malloc'd memory) is not
   possible since thread objects may live longer than the actual thread they
   represent.  */
SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;

#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */


/* Protects ALL_THREADS and THREAD_COUNT below.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Singly-linked list (via `next_thread') of every thread known to
   Guile, and the number of entries on it.  */
static scm_i_thread *all_threads = NULL;
static int thread_count;

/* Dynamic state used as the parent for threads that enter Guile
   without an explicit parent (see scm_with_guile below).  */
static SCM scm_i_default_dynamic_state;
486
487 /* Perform first stage of thread initialisation, in non-guile mode.
488 */
489 static void
490 guilify_self_1 (struct GC_stack_base *base)
491 {
492 scm_i_thread t;
493
494 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
495 before allocating anything in this thread, because allocation could
496 cause GC to run, and GC could cause finalizers, which could invoke
497 Scheme functions, which need the current thread to be set. */
498
499 t.pthread = scm_i_pthread_self ();
500 t.handle = SCM_BOOL_F;
501 t.result = SCM_BOOL_F;
502 t.cleanup_handler = SCM_BOOL_F;
503 t.mutexes = SCM_EOL;
504 t.held_mutex = NULL;
505 t.join_queue = SCM_EOL;
506 t.dynamic_state = SCM_BOOL_F;
507 t.dynwinds = SCM_EOL;
508 t.active_asyncs = SCM_EOL;
509 t.block_asyncs = 1;
510 t.pending_asyncs = 1;
511 t.critical_section_level = 0;
512 t.base = base->mem_base;
513 #ifdef __ia64__
514 t.register_backing_store_base = base->reg-base;
515 #endif
516 t.continuation_root = SCM_EOL;
517 t.continuation_base = t.base;
518 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
519 t.sleep_mutex = NULL;
520 t.sleep_object = SCM_BOOL_F;
521 t.sleep_fd = -1;
522
523 if (pipe (t.sleep_pipe) != 0)
524 /* FIXME: Error conditions during the initialization phase are handled
525 gracelessly since public functions such as `scm_init_guile ()'
526 currently have type `void'. */
527 abort ();
528
529 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
530 t.current_mark_stack_ptr = NULL;
531 t.current_mark_stack_limit = NULL;
532 t.canceled = 0;
533 t.exited = 0;
534 t.guile_mode = 0;
535
536 /* The switcheroo. */
537 {
538 scm_i_thread *t_ptr = &t;
539
540 GC_disable ();
541 t_ptr = GC_malloc (sizeof (scm_i_thread));
542 memcpy (t_ptr, &t, sizeof t);
543
544 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
545
546 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
547 /* Cache the current thread in TLS for faster lookup. */
548 scm_i_current_thread = t_ptr;
549 #endif
550
551 scm_i_pthread_mutex_lock (&thread_admin_mutex);
552 t_ptr->next_thread = all_threads;
553 all_threads = t_ptr;
554 thread_count++;
555 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
556
557 GC_enable ();
558 }
559 }
560
/* Perform second stage of thread initialisation, in guile mode.
   PARENT is the dynamic state to inherit from, or #f for the initial
   dynamic state.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  /* Create the Scheme-visible thread object wrapping T.  */
  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Now that setup is complete, allow asyncs to run (guilify_self_1
     started with them blocked).  */
  t->block_asyncs = 0;
}
584
585 \f
586 /*** Fat mutexes */
587
588 /* We implement our own mutex type since we want them to be 'fair', we
589 want to do fancy things while waiting for them (like running
590 asyncs) and we might want to add things that are nice for
591 debugging.
592 */
593
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects all fields below */
  SCM owner;                    /* thread holding the mutex, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

/* Type test and data accessor for fat-mutex smobs.  */
#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
609
610 static SCM
611 call_cleanup (void *data)
612 {
613 SCM *proc_p = data;
614 return scm_call_0 (*proc_p);
615 }
616
/* Perform thread tear-down, in guile mode.  V is the exiting thread's
   scm_i_thread.  Runs the cleanup handler (if any), marks the thread
   exited, wakes joiners, and releases any fat mutexes still owned.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  This needs to be done in Guile mode.  */
  scm_i_ensure_signal_delivery_thread ();

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice; catch all
         errors so tear-down continues regardless.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      call_cleanup, &ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      /* Weak pairs may hold SCM_UNDEFINED once the mutex was GC'd.  */
      if (!SCM_UNBNDP (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Since MUTEX is in `t->mutexes', T must be its owner.  */
	  assert (scm_is_eq (m->owner, t->handle));

	  unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
671
/* Trampoline for do_thread_exit: re-register the thread with libgc if
   needed (SB is the stack base), then run the tear-down in guile
   mode.  */
static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered. */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}
682
/* pthread-key destructor run when a thread terminates; V is the
   thread's scm_i_thread.  Performs guile-mode tear-down via the
   trampoline, then unlinks the thread from ALL_THREADS.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here. */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;

	/* GC-robust */
	t->next_thread = NULL;

	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}
737
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create the thread-specific key, installing on_thread_exit as its
   destructor so tear-down runs when a thread terminates.  Invoked
   exactly once, via scm_i_pthread_once.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
745
746 /* Perform any initializations necessary to make the current thread
747 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
748 if necessary.
749
750 BASE is the stack base to use with GC.
751
752 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
753 which case the default dynamic state is used.
754
755 Returns zero when the thread was known to guile already; otherwise
756 return 1.
757
758 Note that it could be the case that the thread was known
759 to Guile, but not in guile mode (because we are within a
760 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
761 be sure. New threads are put into guile mode implicitly. */
762
static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
       */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);

#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later.  */
          GC_allow_register_threads ();
#endif

	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc.  */
#if SCM_USE_PTHREAD_THREADS
          GC_register_my_thread (base);
#endif

	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
812
813 void
814 scm_init_guile ()
815 {
816 struct GC_stack_base stack_base;
817
818 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
819 scm_i_init_thread_for_guile (&stack_base,
820 scm_i_default_dynamic_state);
821 else
822 {
823 fprintf (stderr, "Failed to get stack base for current thread.\n");
824 exit (1);
825 }
826 }
827
/* Cleanup callback (X is ignored): run full thread tear-down for the
   current thread.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  on_thread_exit (SCM_I_CURRENT_THREAD);
}
833
/* Arguments forwarded through GC_call_with_stack_base into
   with_guile_and_parent below.  */
struct with_guile_args
{
  GC_fn_type func;              /* user function to run in guile mode */
  void *data;                   /* its argument */
  SCM parent;                   /* parent dynamic state for new threads */
};
840
841 static void *
842 with_guile_trampoline (void *data)
843 {
844 struct with_guile_args *args = data;
845
846 return scm_c_with_continuation_barrier (args->func, args->data);
847 }
848
/* Run the function described by DATA (a with_guile_args) in guile
   mode, guilifying the thread first if necessary.  BASE is the current
   stack base.  */
static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
         scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than when it was
         when this thread was first guilified.  Thus, `base' must be
         updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      /* Reactivate GC for the duration of the call.  */
      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
897
898 static void *
899 scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
900 {
901 struct with_guile_args args;
902
903 args.func = func;
904 args.data = data;
905 args.parent = parent;
906
907 return GC_call_with_stack_base (with_guile_and_parent, &args);
908 }
909
/* Public API: call FUNC with DATA in guile mode, with the default
   dynamic state as the parent state.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
916
917 void *
918 scm_without_guile (void *(*func)(void *), void *data)
919 {
920 void *result;
921 scm_i_thread *t = SCM_I_CURRENT_THREAD;
922
923 if (t->guile_mode)
924 {
925 SCM_I_CURRENT_THREAD->guile_mode = 0;
926 result = with_gc_inactive (func, data);
927 SCM_I_CURRENT_THREAD->guile_mode = 1;
928 }
929 else
930 /* Otherwise we're not in guile mode, so nothing to do. */
931 result = func (data);
932
933 return result;
934 }
935
936 \f
937 /*** Thread creation */
938
/* Handoff block between call-with-new-thread and the child thread.
   MUTEX/COND guard THREAD, which the child fills in once running.  */
typedef struct {
  SCM parent;                   /* parent dynamic state */
  SCM thunk;                    /* procedure to run in the child */
  SCM handler;                  /* catch handler, possibly unbound */
  SCM thread;                   /* set by the child: its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
947
/* Body of a thread created by call-with-new-thread, run in guile mode.
   Publishes the new thread object back to the parent, then runs the
   thunk (under catch when a handler was given), storing the result as
   the thread's exit value.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the parent under the mutex; DATA lives
     on the parent's stack and is invalid after the parent wakes.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
969
970 static void *
971 launch_thread (void *d)
972 {
973 launch_data *data = (launch_data *)d;
974 scm_i_pthread_detach (scm_i_pthread_self ());
975 scm_i_with_guile_and_parent (really_launch, d, data->parent);
976 return NULL;
977 }
978
979 SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
980 (SCM thunk, SCM handler),
981 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
982 "returning a new thread object representing the thread. The procedure\n"
983 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
984 "\n"
985 "When @var{handler} is specified, then @var{thunk} is called from\n"
986 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
987 "handler. This catch is established inside the continuation barrier.\n"
988 "\n"
989 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
990 "the @emph{exit value} of the thread and the thread is terminated.")
991 #define FUNC_NAME s_scm_call_with_new_thread
992 {
993 launch_data data;
994 scm_i_pthread_t id;
995 int err;
996
997 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
998 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
999 handler, SCM_ARG2, FUNC_NAME);
1000
1001 data.parent = scm_current_dynamic_state ();
1002 data.thunk = thunk;
1003 data.handler = handler;
1004 data.thread = SCM_BOOL_F;
1005 scm_i_pthread_mutex_init (&data.mutex, NULL);
1006 scm_i_pthread_cond_init (&data.cond, NULL);
1007
1008 scm_i_scm_pthread_mutex_lock (&data.mutex);
1009 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
1010 if (err)
1011 {
1012 scm_i_pthread_mutex_unlock (&data.mutex);
1013 errno = err;
1014 scm_syserror (NULL);
1015 }
1016 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1017 scm_i_pthread_mutex_unlock (&data.mutex);
1018
1019 return data.thread;
1020 }
1021 #undef FUNC_NAME
1022
/* Handoff block between scm_spawn_thread and the child thread; the C
   counterpart of launch_data above.  MUTEX/COND guard THREAD, which
   the child fills in once running.  */
typedef struct {
  SCM parent;                   /* parent dynamic state */
  scm_t_catch_body body;        /* C function to run in the child */
  void *body_data;
  scm_t_catch_handler handler;  /* catch handler, or NULL */
  void *handler_data;
  SCM thread;                   /* set by the child: its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
1033
/* Body of a thread created by scm_spawn_thread, run in guile mode.
   Publishes the new thread object back to the parent, then runs BODY
   (under scm_internal_catch when HANDLER is non-NULL), storing the
   result as the thread's exit value.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the parent under the mutex; DATA lives
     on the parent's stack and is invalid after the parent wakes.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
1058
1059 static void *
1060 spawn_thread (void *d)
1061 {
1062 spawn_data *data = (spawn_data *)d;
1063 scm_i_pthread_detach (scm_i_pthread_self ());
1064 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1065 return NULL;
1066 }
1067
1068 SCM
1069 scm_spawn_thread (scm_t_catch_body body, void *body_data,
1070 scm_t_catch_handler handler, void *handler_data)
1071 {
1072 spawn_data data;
1073 scm_i_pthread_t id;
1074 int err;
1075
1076 data.parent = scm_current_dynamic_state ();
1077 data.body = body;
1078 data.body_data = body_data;
1079 data.handler = handler;
1080 data.handler_data = handler_data;
1081 data.thread = SCM_BOOL_F;
1082 scm_i_pthread_mutex_init (&data.mutex, NULL);
1083 scm_i_pthread_cond_init (&data.cond, NULL);
1084
1085 scm_i_scm_pthread_mutex_lock (&data.mutex);
1086 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1087 if (err)
1088 {
1089 scm_i_pthread_mutex_unlock (&data.mutex);
1090 errno = err;
1091 scm_syserror (NULL);
1092 }
1093 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1094 scm_i_pthread_mutex_unlock (&data.mutex);
1095
1096 return data.thread;
1097 }
1098
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Delegate to the platform yield primitive and return its result as
     a boolean.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
1107
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  /* Test-and-set T->canceled under the admin mutex so that only one
     caller ever issues the underlying pthread cancellation.  */
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Release the mutex before cancelling, so tear-down in the
         target can take it.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1132
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f clears the handler; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Installing a handler on a thread that is already gone (or going)
     is silently ignored.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1156
1157 SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1158 (SCM thread),
1159 "Return the cleanup handler installed for the thread @var{thread}.")
1160 #define FUNC_NAME s_scm_thread_cleanup
1161 {
1162 scm_i_thread *t;
1163 SCM ret;
1164
1165 SCM_VALIDATE_THREAD (1, thread);
1166
1167 t = SCM_I_THREAD_DATA (thread);
1168 scm_i_pthread_mutex_lock (&t->admin_mutex);
1169 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
1170 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1171
1172 return ret;
1173 }
1174 #undef FUNC_NAME
1175
1176 SCM scm_join_thread (SCM thread)
1177 {
1178 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1179 }
1180
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
            (SCM thread, SCM timeout, SCM timeoutval),
            "Suspend execution of the calling thread until the target @var{thread} "
            "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;  /* value returned if the wait times out */

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  /* Joining oneself would deadlock; refuse it up front.  */
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
        {
          /* Sleep on T's join queue until woken, interrupted, or timed
             out; T's admin mutex is released while blocked.  */
          int err = block_self (t->join_queue, thread, &t->admin_mutex,
                                timeout_ptr);
          if (err == 0)
            {
              if (t->exited)
                {
                  res = t->result;
                  break;
                }
            }
          else if (err == ETIMEDOUT)
            break;

          /* Interrupted (or spurious wakeup): run pending asyncs with
             the admin mutex released, then retry.  */
          scm_i_pthread_mutex_unlock (&t->admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

          /* Check for exit again, since we just released and
             reacquired the admin mutex, before the next block_self
             call (which would block forever if t has already
             exited).  */
          if (t->exited)
            {
              res = t->result;
              break;
            }
        }
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1247
1248 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1249 (SCM obj),
1250 "Return @code{#t} if @var{obj} is a thread.")
1251 #define FUNC_NAME s_scm_thread_p
1252 {
1253 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1254 }
1255 #undef FUNC_NAME
1256
1257
/* Smob free function for fat mutexes: destroy the underlying OS lock.
   The fat_mutex struct itself is GC-allocated, so report 0 bytes
   freed.  */
static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  return 0;
}
1265
1266 static int
1267 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1268 {
1269 fat_mutex *m = SCM_MUTEX_DATA (mx);
1270 scm_puts ("#<mutex ", port);
1271 scm_uintprint ((scm_t_bits)m, 16, port);
1272 scm_puts (">", port);
1273 return 1;
1274 }
1275
/* Allocate and initialize a new fat mutex with the given behavior
   flags (see `make-mutex' for their meaning).  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* `waiting' is given a valid SCM value before the smob is created and
     only then replaced with the real queue; make_queue allocates, and
     the smob must never be visible with an uninitialized field.
     NOTE(review): ordering rationale inferred from the parallel code in
     scm_make_condition_variable -- confirm.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1296
1297 SCM scm_make_mutex (void)
1298 {
1299 return scm_make_mutex_with_flags (SCM_EOL);
1300 }
1301
/* Symbols accepted as flags by `make-mutex'.  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1305
1306 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1307 (SCM flags),
1308 "Create a new mutex. ")
1309 #define FUNC_NAME s_scm_make_mutex_with_flags
1310 {
1311 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1312
1313 SCM ptr = flags;
1314 while (! scm_is_null (ptr))
1315 {
1316 SCM flag = SCM_CAR (ptr);
1317 if (scm_is_eq (flag, unchecked_unlock_sym))
1318 unchecked_unlock = 1;
1319 else if (scm_is_eq (flag, allow_external_unlock_sym))
1320 external_unlock = 1;
1321 else if (scm_is_eq (flag, recursive_sym))
1322 recursive = 1;
1323 else
1324 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1325 ptr = SCM_CDR (ptr);
1326 }
1327 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1328 }
1329 #undef FUNC_NAME
1330
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
            (void),
            "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1339
/* Error key signalled when a lock is obtained on a mutex whose owner
   thread has exited (see fat_mutex_lock).  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1341
/* Acquire MUTEX on behalf of OWNER (defaulting to the current thread),
   waiting at most until the absolute time TIMEOUT (NULL = wait
   forever).  On return *RET is 1 if the lock was obtained, 0 otherwise.
   Returns #f on success, or a (key . message) pair describing an error
   for the caller to throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
        {
          /* Unowned: take it.  */
          m->owner = new_owner;
          m->level++;

          if (SCM_I_IS_THREAD (new_owner))
            {
              scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
              scm_i_pthread_mutex_lock (&t->admin_mutex);

              /* Only keep a weak reference to MUTEX so that it's not
                 retained when not referenced elsewhere (bug #27450).
                 The weak pair itself is eventually removed when MUTEX
                 is unlocked.  Note that `t->mutexes' lists mutexes
                 currently held by T, so it should be small.  */
              t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

              scm_i_pthread_mutex_unlock (&t->admin_mutex);
            }
          *ret = 1;
          break;
        }
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
        {
          /* Owner exited while holding the mutex: steal it, but report
             the abandonment to the caller.  */
          m->owner = new_owner;
          err = scm_cons (scm_abandoned_mutex_error_key,
                          scm_from_locale_string ("lock obtained on abandoned "
                                                  "mutex"));
          *ret = 1;
          break;
        }
      else if (scm_is_eq (m->owner, new_owner))
        {
          /* Relock by the current owner: bump the level on a recursive
             mutex, otherwise signal an error.  */
          if (m->recursive)
            {
              m->level++;
              *ret = 1;
            }
          else
            {
              err = scm_cons (scm_misc_error_key,
                              scm_from_locale_string ("mutex already locked "
                                                      "by thread"));
              *ret = 0;
            }
          break;
        }
      else
        {
          /* Held by another thread: fail if TIMEOUT has already passed,
             otherwise block on the wait queue and retry after waking.  */
          if (timeout != NULL)
            {
              gettimeofday (&current_time, NULL);
              if (current_time.tv_sec > timeout->tv_sec ||
                  (current_time.tv_sec == timeout->tv_sec &&
                   current_time.tv_usec * 1000 > timeout->tv_nsec))
                {
                  *ret = 0;
                  break;
                }
            }
          block_self (m->waiting, mutex, &m->lock, timeout);
          /* Release the lock around SCM_TICK so asyncs can run.  */
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1425
1426 SCM scm_lock_mutex (SCM mx)
1427 {
1428 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1429 }
1430
1431 SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1432 (SCM m, SCM timeout, SCM owner),
1433 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1434 "blocks until the mutex becomes available. The function returns when "
1435 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1436 "a thread already owns will succeed right away and will not block the "
1437 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1438 #define FUNC_NAME s_scm_lock_mutex_timed
1439 {
1440 SCM exception;
1441 int ret = 0;
1442 scm_t_timespec cwaittime, *waittime = NULL;
1443
1444 SCM_VALIDATE_MUTEX (1, m);
1445
1446 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1447 {
1448 to_timespec (timeout, &cwaittime);
1449 waittime = &cwaittime;
1450 }
1451
1452 exception = fat_mutex_lock (m, waittime, owner, &ret);
1453 if (!scm_is_false (exception))
1454 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1455 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1456 }
1457 #undef FUNC_NAME
1458
/* Lock MUTEX for the current dynwind context: the mutex is unlocked
   when the context is left and re-locked when it is re-entered.  The
   unwind handler is registered before the rewind handler performs the
   initial lock.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
1467
1468 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1469 (SCM mutex),
1470 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1471 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1472 #define FUNC_NAME s_scm_try_mutex
1473 {
1474 SCM exception;
1475 int ret = 0;
1476 scm_t_timespec cwaittime, *waittime = NULL;
1477
1478 SCM_VALIDATE_MUTEX (1, mutex);
1479
1480 to_timespec (scm_from_int(0), &cwaittime);
1481 waittime = &cwaittime;
1482
1483 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1484 if (!scm_is_false (exception))
1485 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1486 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1487 }
1488 #undef FUNC_NAME
1489
1490 /*** Fat condition variables */
1491
/* A "fat" condition variable: a queue of waiting threads guarded by its
   own lock.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1499
/* Unlock MUTEX.  If COND is bound, additionally wait on it (releasing
   MUTEX first) until signalled or until the absolute time WAITTIME
   passes; when RELOCK is non-zero, MUTEX is re-acquired before
   returning.  Returns 1 on success (or signal), 0 on timeout.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
                  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  /* Unlocking a mutex we don't own is only permitted when the mutex was
     created with the corresponding flag.  */
  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
        {
          if (!m->unchecked_unlock)
            {
              scm_i_pthread_mutex_unlock (&m->lock);
              scm_misc_error (NULL, "mutex not locked", SCM_EOL);
            }
          owner = t->handle;
        }
      else if (!m->allow_external_unlock)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
        }
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
        {
          int brk = 0;

          if (m->level > 0)
            m->level--;
          if (m->level == 0)
            {
              /* Change the owner of MUTEX.  */
              t->mutexes = scm_delq_x (mutex, t->mutexes);
              m->owner = unblock_from_queue (m->waiting);
            }

          /* Block asyncs across the wait so they cannot run while the
             mutex/condvar state is in flux.  */
          t->block_asyncs++;

          err = block_self (c->waiting, cond, &m->lock, waittime);
          scm_i_pthread_mutex_unlock (&m->lock);

          if (err == 0)
            {
              ret = 1;
              brk = 1;
            }
          else if (err == ETIMEDOUT)
            {
              ret = 0;
              brk = 1;
            }
          else if (err != EINTR)
            {
              errno = err;
              scm_syserror (NULL);
            }

          if (brk)
            {
              if (relock)
                scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
              t->block_asyncs--;
              break;
            }

          /* EINTR: run pending asyncs, then loop to wait again.  */
          t->block_asyncs--;
          scm_async_tick ();

          /* Keep COND and MUTEX live across the tick.  */
          scm_remember_upto_here_2 (cond, mutex);

          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  else
    {
      /* Plain unlock, no condition variable involved.  */
      if (m->level > 0)
        m->level--;
      if (m->level == 0)
        {
          /* Change the owner of MUTEX.  */
          t->mutexes = scm_delq_x (mutex, t->mutexes);
          m->owner = unblock_from_queue (m->waiting);
        }

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1602
1603 SCM scm_unlock_mutex (SCM mx)
1604 {
1605 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1606 }
1607
1608 SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1609 (SCM mx, SCM cond, SCM timeout),
1610 "Unlocks @var{mutex} if the calling thread owns the lock on "
1611 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1612 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1613 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1614 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1615 "with a call to @code{unlock-mutex}. Only the last call to "
1616 "@code{unlock-mutex} will actually unlock the mutex. ")
1617 #define FUNC_NAME s_scm_unlock_mutex_timed
1618 {
1619 scm_t_timespec cwaittime, *waittime = NULL;
1620
1621 SCM_VALIDATE_MUTEX (1, mx);
1622 if (! (SCM_UNBNDP (cond)))
1623 {
1624 SCM_VALIDATE_CONDVAR (2, cond);
1625
1626 if (! (SCM_UNBNDP (timeout)))
1627 {
1628 to_timespec (timeout, &cwaittime);
1629 waittime = &cwaittime;
1630 }
1631 }
1632
1633 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1634 }
1635 #undef FUNC_NAME
1636
1637 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1638 (SCM obj),
1639 "Return @code{#t} if @var{obj} is a mutex.")
1640 #define FUNC_NAME s_scm_mutex_p
1641 {
1642 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1643 }
1644 #undef FUNC_NAME
1645
1646 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1647 (SCM mx),
1648 "Return the thread owning @var{mx}, or @code{#f}.")
1649 #define FUNC_NAME s_scm_mutex_owner
1650 {
1651 SCM owner;
1652 fat_mutex *m = NULL;
1653
1654 SCM_VALIDATE_MUTEX (1, mx);
1655 m = SCM_MUTEX_DATA (mx);
1656 scm_i_pthread_mutex_lock (&m->lock);
1657 owner = m->owner;
1658 scm_i_pthread_mutex_unlock (&m->lock);
1659
1660 return owner;
1661 }
1662 #undef FUNC_NAME
1663
1664 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1665 (SCM mx),
1666 "Return the lock level of mutex @var{mx}.")
1667 #define FUNC_NAME s_scm_mutex_level
1668 {
1669 SCM_VALIDATE_MUTEX (1, mx);
1670 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1671 }
1672 #undef FUNC_NAME
1673
1674 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1675 (SCM mx),
1676 "Returns @code{#t} if the mutex @var{mx} is locked.")
1677 #define FUNC_NAME s_scm_mutex_locked_p
1678 {
1679 SCM_VALIDATE_MUTEX (1, mx);
1680 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1681 }
1682 #undef FUNC_NAME
1683
1684 static int
1685 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1686 {
1687 fat_cond *c = SCM_CONDVAR_DATA (cv);
1688 scm_puts ("#<condition-variable ", port);
1689 scm_uintprint ((scm_t_bits)c, 16, port);
1690 scm_puts (">", port);
1691 return 1;
1692 }
1693
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Give `waiting' a valid SCM value before the smob is created, then
     install the real queue; make_queue allocates and must not expose an
     uninitialized field.  NOTE(review): ordering rationale inferred
     from the parallel code in make_fat_mutex -- confirm.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1709
1710 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1711 (SCM cv, SCM mx, SCM t),
1712 "Wait until @var{cond-var} has been signalled. While waiting, "
1713 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1714 "is locked again when this function returns. When @var{time} is given, "
1715 "it specifies a point in time where the waiting should be aborted. It "
1716 "can be either a integer as returned by @code{current-time} or a pair "
1717 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1718 "mutex is locked and @code{#f} is returned. When the condition "
1719 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1720 "is returned. ")
1721 #define FUNC_NAME s_scm_timed_wait_condition_variable
1722 {
1723 scm_t_timespec waittime, *waitptr = NULL;
1724
1725 SCM_VALIDATE_CONDVAR (1, cv);
1726 SCM_VALIDATE_MUTEX (2, mx);
1727
1728 if (!SCM_UNBNDP (t))
1729 {
1730 to_timespec (t, &waittime);
1731 waitptr = &waittime;
1732 }
1733
1734 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1735 }
1736 #undef FUNC_NAME
1737
/* Wake one thread waiting on C, if any.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1743
1744 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1745 (SCM cv),
1746 "Wake up one thread that is waiting for @var{cv}")
1747 #define FUNC_NAME s_scm_signal_condition_variable
1748 {
1749 SCM_VALIDATE_CONDVAR (1, cv);
1750 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1751 return SCM_BOOL_T;
1752 }
1753 #undef FUNC_NAME
1754
1755 static void
1756 fat_cond_broadcast (fat_cond *c)
1757 {
1758 while (scm_is_true (unblock_from_queue (c->waiting)))
1759 ;
1760 }
1761
1762 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1763 (SCM cv),
1764 "Wake up all threads that are waiting for @var{cv}. ")
1765 #define FUNC_NAME s_scm_broadcast_condition_variable
1766 {
1767 SCM_VALIDATE_CONDVAR (1, cv);
1768 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1769 return SCM_BOOL_T;
1770 }
1771 #undef FUNC_NAME
1772
1773 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1774 (SCM obj),
1775 "Return @code{#t} if @var{obj} is a condition variable.")
1776 #define FUNC_NAME s_scm_condition_variable_p
1777 {
1778 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1779 }
1780 #undef FUNC_NAME
1781
1782
1783 \f
1784 /*** Select */
1785
/* Argument/result bundle passed through scm_without_guile to
   do_std_select.  */
struct select_args
{
  int nfds;                   /* highest fd + 1, as for select(2) */
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  int result;                 /* return value of select() */
  int errno_value;            /* errno captured right after the call */
};
1797
1798 static void *
1799 do_std_select (void *args)
1800 {
1801 struct select_args *select_args;
1802
1803 select_args = (struct select_args *) args;
1804
1805 select_args->result =
1806 select (select_args->nfds,
1807 select_args->read_fds, select_args->write_fds,
1808 select_args->except_fds, select_args->timeout);
1809 select_args->errno_value = errno;
1810
1811 return NULL;
1812 }
1813
/* A select() variant that cooperates with Guile: the calling thread can
   be woken early through its sleep pipe, and the actual select() runs
   via scm_without_guile so the GC is not blocked.  Returns like
   select(2); a wakeup with no other ready descriptor is reported as -1
   with errno set to EINTR.  */
int
scm_std_select (int nfds,
                SELECT_TYPE *readfds,
                SELECT_TYPE *writefds,
                SELECT_TYPE *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  /* We always need a read set, to watch the wakeup pipe.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register this thread as sleeping; while the setup is refused, run
     pending asyncs and retry.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  /* Include the read end of the sleep pipe so a write to the pipe
     interrupts the select below.  */
  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* We were poked: drain one byte and hide the wakeup fd from the
         caller's view of the result.  */
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}
1871
1872 /* Convenience API for blocking while in guile mode. */
1873
1874 #if SCM_USE_PTHREAD_THREADS
1875
1876 /* It seems reasonable to not run procedures related to mutex and condition
1877 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1878 without it, and (ii) the only potential gain would be GC latency. See
1879 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1880 for a discussion of the pros and cons. */
1881
1882 int
1883 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1884 {
1885 int res = scm_i_pthread_mutex_lock (mutex);
1886 return res;
1887 }
1888
1889 static void
1890 do_unlock (void *data)
1891 {
1892 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1893 }
1894
/* Lock MUTEX and arrange for it to be unlocked when the current dynwind
   context is left.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1901
/* Wait on COND with MUTEX held, recording MUTEX in the thread's
   `held_mutex' field for the duration of the wait.  NOTE(review): the
   consumers of `held_mutex' are outside this chunk -- confirm its exact
   role there.  Returns the pthread_cond_wait result.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
1914
/* Like scm_pthread_cond_wait, but give up at the absolute time WT;
   returns the pthread_cond_timedwait result (e.g. ETIMEDOUT).  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
1929
1930 #endif
1931
1932 unsigned long
1933 scm_std_usleep (unsigned long usecs)
1934 {
1935 struct timeval tv;
1936 tv.tv_usec = usecs % 1000000;
1937 tv.tv_sec = usecs / 1000000;
1938 scm_std_select (0, NULL, NULL, NULL, &tv);
1939 return tv.tv_sec * 1000000 + tv.tv_usec;
1940 }
1941
1942 unsigned int
1943 scm_std_sleep (unsigned int secs)
1944 {
1945 struct timeval tv;
1946 tv.tv_usec = 0;
1947 tv.tv_sec = secs;
1948 scm_std_select (0, NULL, NULL, NULL, &tv);
1949 return tv.tv_sec;
1950 }
1951
1952 /*** Misc */
1953
1954 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1955 (void),
1956 "Return the thread that called this function.")
1957 #define FUNC_NAME s_scm_current_thread
1958 {
1959 return SCM_I_CURRENT_THREAD->handle;
1960 }
1961 #undef FUNC_NAME
1962
1963 static SCM
1964 scm_c_make_list (size_t n, SCM fill)
1965 {
1966 SCM res = SCM_EOL;
1967 while (n-- > 0)
1968 res = scm_cons (fill, res);
1969 return res;
1970 }
1971
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate the result list, then fill it in under the lock.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is not user-visible.  */
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  /* Truncate the pre-allocated list to the number of cells used.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
2000
2001 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
2002 (SCM thread),
2003 "Return @code{#t} iff @var{thread} has exited.\n")
2004 #define FUNC_NAME s_scm_thread_exited_p
2005 {
2006 return scm_from_bool (scm_c_thread_exited_p (thread));
2007 }
2008 #undef FUNC_NAME
2009
/* C-level predicate: non-zero iff THREAD has exited.  Validates its
   argument (reusing thread-exited?'s name in error messages).  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
2020
SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
            (void),
            "Return the total number of processors of the machine, which\n"
            "is guaranteed to be at least 1. A ``processor'' here is a\n"
            "thread execution unit, which can be either:\n\n"
            "@itemize\n"
            "@item an execution core in a (possibly multi-core) chip, in a\n"
            " (possibly multi- chip) module, in a single computer, or\n"
            "@item a thread execution unit inside a core in the case of\n"
            " @dfn{hyper-threaded} CPUs.\n"
            "@end itemize\n\n"
            "Which of the two definitions is used, is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  /* Gnulib's num_processors; NPROC_ALL counts all installed
     processors.  */
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME
2038
SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
            (void),
            "Like @code{total-processor-count}, but return the number of\n"
            "processors available to the current process. See\n"
            "@code{setaffinity} and @code{getaffinity} for more\n"
            "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  /* NPROC_CURRENT restricts the count to this process.  */
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
2050
2051
2052 \f
2053
/* Condition variable initialized in scm_threads_prehistory.
   NOTE(review): its waiters/signallers are outside this chunk --
   confirm against the rest of the file.  */
static scm_i_pthread_cond_t wake_up_cond;

/* Non-zero once scm_init_threads has completed.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Recursive mutex used by scm_dynwind_critical_section when no mutex is
   supplied; created in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;
2063
/* Enter a critical section for the current dynwind context: lock MUTEX
   (or the shared dynwind_critical_section_mutex when MUTEX is #f) and
   block asyncs; both are undone when the context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
2072
2073 /*** Initialization */
2074
/* Miscellaneous global lock, initialized in scm_threads_prehistory.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Shared recursive-mutex attribute, set up in scm_threads_prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
2080
/* Earliest thread-system setup, run before the rest of libguile is
   initialized.  BASE is the stack base of the initial thread.  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* The critical-section mutex must be recursive.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  /* Perform the first-stage setup of the initial thread.  */
  guilify_self_1 ((struct GC_stack_base *) base);
}
2097
/* Smob type tags, assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
2101
/* Register the thread, mutex, and condition-variable smob types and
   finish initializing the thread system.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
2122
/* Install a copy of the current dynamic state as the default one given
   to newly created threads.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}
2129
/* Define the Scheme procedures declared via SCM_DEFINE in this file
   (the .x file is generated by the snarfer).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2135
2136 \f
/* IA64-specific things. */

/* The IA64 register backing store holds part of each thread's state and
   must be scanned/saved separately from the normal C stack; these
   helpers locate it.  NOTE(review): platform code that cannot be
   exercised here -- verify on an actual IA64 system.  */
#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* Find the base of the register backing store by scanning the process's
   VM regions for the RSE stack.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Extract the ar.bsp register (top of the backing store) from a
   ucontext.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* On GNU/Linux, glibc exports the backing store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Extract ar.bsp from a ucontext_t.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */
2177
2178
2179 /*
2180 Local Variables:
2181 c-file-style: "gnu"
2182 End:
2183 */