Remove explicit thread/condvar/mutex finalization.
[bpt/guile.git] / libguile / threads.c
1/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
17 */
18
19
20\f
21#ifdef HAVE_CONFIG_H
22# include <config.h>
23#endif
24
25#include "libguile/bdw-gc.h"
26#include "libguile/_scm.h"
27
28#if HAVE_UNISTD_H
29#include <unistd.h>
30#endif
31#include <stdio.h>
32#include <assert.h>
33
34#ifdef HAVE_STRING_H
35#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
36#endif
37
38#if HAVE_SYS_TIME_H
39#include <sys/time.h>
40#endif
41
42#include "libguile/validate.h"
43#include "libguile/root.h"
44#include "libguile/eval.h"
45#include "libguile/async.h"
46#include "libguile/ports.h"
47#include "libguile/threads.h"
48#include "libguile/dynwind.h"
49#include "libguile/iselect.h"
50#include "libguile/fluids.h"
51#include "libguile/continuations.h"
52#include "libguile/gc.h"
53#include "libguile/init.h"
54#include "libguile/scmsigs.h"
55#include "libguile/strings.h"
56
57#ifdef __MINGW32__
58#ifndef ETIMEDOUT
59# define ETIMEDOUT WSAETIMEDOUT
60#endif
61# include <fcntl.h>
62# include <process.h>
63# define pipe(fd) _pipe (fd, 256, O_BINARY)
64#endif /* __MINGW32__ */
65
66#include <full-read.h>
67
68\f
69static void
70to_timespec (SCM t, scm_t_timespec *waittime)
71{
72 if (scm_is_pair (t))
73 {
74 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
75 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
76 }
77 else
78 {
79 double time = scm_to_double (t);
80 double sec = scm_c_truncate (time);
81
82 waittime->tv_sec = (long) sec;
83 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
84 }
85}
86
87/*** Queues */
88
89/* Make an empty queue data structure.
90 */
91static SCM
92make_queue ()
93{
94 return scm_cons (SCM_EOL, SCM_EOL);
95}
96
97/* Put T at the back of Q and return a handle that can be used with
98 remqueue to remove T from Q again.
99 */
100static SCM
101enqueue (SCM q, SCM t)
102{
103 SCM c = scm_cons (t, SCM_EOL);
104 SCM_CRITICAL_SECTION_START;
105 if (scm_is_null (SCM_CDR (q)))
106 SCM_SETCDR (q, c);
107 else
108 SCM_SETCDR (SCM_CAR (q), c);
109 SCM_SETCAR (q, c);
110 SCM_CRITICAL_SECTION_END;
111 return c;
112}
113
114/* Remove the element that the handle C refers to from the queue Q. C
115 must have been returned from a call to enqueue. The return value
116 is zero when the element referred to by C has already been removed.
117 Otherwise, 1 is returned.
118*/
119static int
120remqueue (SCM q, SCM c)
121{
122 SCM p, prev = q;
123 SCM_CRITICAL_SECTION_START;
124 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
125 {
126 if (scm_is_eq (p, c))
127 {
128 if (scm_is_eq (c, SCM_CAR (q)))
129 SCM_SETCAR (q, SCM_CDR (c));
130 SCM_SETCDR (prev, SCM_CDR (c));
131 SCM_CRITICAL_SECTION_END;
132 return 1;
133 }
134 prev = p;
135 }
136 SCM_CRITICAL_SECTION_END;
137 return 0;
138}
139
140/* Remove the front-most element from the queue Q and return it.
141 Return SCM_BOOL_F when Q is empty.
142*/
143static SCM
144dequeue (SCM q)
145{
146 SCM c;
147 SCM_CRITICAL_SECTION_START;
148 c = SCM_CDR (q);
149 if (scm_is_null (c))
150 {
151 SCM_CRITICAL_SECTION_END;
152 return SCM_BOOL_F;
153 }
154 else
155 {
156 SCM_SETCDR (q, SCM_CDR (c));
157 if (scm_is_null (SCM_CDR (q)))
158 SCM_SETCAR (q, SCM_EOL);
159 SCM_CRITICAL_SECTION_END;
160 return SCM_CAR (c);
161 }
162}
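
/* A minimal sketch (not part of the original file) of how the queue
   primitives above compose: enqueue returns a handle that remqueue can
   use later, and dequeue pops from the front.  */
#if 0
static void
queue_usage_sketch (void)
{
  SCM q, h1, h2, front;

  q = make_queue ();
  h1 = enqueue (q, scm_from_int (1));
  h2 = enqueue (q, scm_from_int (2));
  remqueue (q, h1);           /* returns 1: the handle was still queued */
  front = dequeue (q);        /* => 2, the element behind the removed one */
  dequeue (q);                /* => SCM_BOOL_F, the queue is now empty */
  (void) h2; (void) front;
}
#endif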
163
164/*** Thread smob routines */
165
166
167static int
168thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
169{
 170 /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
171 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
172 the struct case, hence we go via a union, and extract according to the
173 size of pthread_t. */
174 union {
175 scm_i_pthread_t p;
176 unsigned short us;
177 unsigned int ui;
178 unsigned long ul;
179 scm_t_uintmax um;
180 } u;
181 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
182 scm_i_pthread_t p = t->pthread;
183 scm_t_uintmax id;
184 u.p = p;
185 if (sizeof (p) == sizeof (unsigned short))
186 id = u.us;
187 else if (sizeof (p) == sizeof (unsigned int))
188 id = u.ui;
189 else if (sizeof (p) == sizeof (unsigned long))
190 id = u.ul;
191 else
192 id = u.um;
193
194 scm_puts ("#<thread ", port);
195 scm_uintprint (id, 10, port);
196 scm_puts (" (", port);
197 scm_uintprint ((scm_t_bits)t, 16, port);
198 scm_puts (")>", port);
199 return 1;
200}
201
202\f
203/*** Blocking on queues. */
204
205/* See also scm_i_queue_async_cell for how such a block is
 206 interrupted.
207*/
208
209/* Put the current thread on QUEUE and go to sleep, waiting for it to
210 be woken up by a call to 'unblock_from_queue', or to be
211 interrupted. Upon return of this function, the current thread is
212 no longer on QUEUE, even when the sleep has been interrupted.
213
214 The caller of block_self must hold MUTEX. It will be atomically
215 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
216
217 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
218 as MUTEX is needed.
219
220 When WAITTIME is not NULL, the sleep will be aborted at that time.
221
222 The return value of block_self is an errno value. It will be zero
223 when the sleep has been successfully completed by a call to
224 unblock_from_queue, EINTR when it has been interrupted by the
225 delivery of a system async, and ETIMEDOUT when the timeout has
226 expired.
227
228 The system asyncs themselves are not executed by block_self.
229*/
230static int
231block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
232 const scm_t_timespec *waittime)
233{
234 scm_i_thread *t = SCM_I_CURRENT_THREAD;
235 SCM q_handle;
236 int err;
237
238 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
239 err = EINTR;
240 else
241 {
242 t->block_asyncs++;
243 q_handle = enqueue (queue, t->handle);
244 if (waittime == NULL)
245 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
246 else
247 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
248
249 /* When we are still on QUEUE, we have been interrupted. We
250 report this only when no other error (such as a timeout) has
251 happened above.
252 */
253 if (remqueue (queue, q_handle) && err == 0)
254 err = EINTR;
255 t->block_asyncs--;
256 scm_i_reset_sleep (t);
257 }
258
259 return err;
260}
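
/* A sketch of the calling convention described above (illustrative only,
   not part of the original file): the caller holds MUTEX, calls
   block_self, and interprets the errno-style return value.  QUEUE and
   MUTEX stand for whatever the caller protects, e.g. a thread's
   join_queue and admin_mutex.  */
#if 0
static void
block_self_usage_sketch (SCM queue, SCM obj, scm_i_pthread_mutex_t *mutex)
{
  int err;

  scm_i_scm_pthread_mutex_lock (mutex);
  err = block_self (queue, obj, mutex, NULL);   /* NULL means no timeout */
  if (err == EINTR)
    {
      /* Interrupted by a system async; block_self does not run it.  */
    }
  else if (err == ETIMEDOUT)
    {
      /* Only possible when a waittime was given.  */
    }
  scm_i_pthread_mutex_unlock (mutex);
}
#endif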
261
262/* Wake up the first thread on QUEUE, if any. The awoken thread is
263 returned, or #f if the queue was empty.
264 */
265static SCM
266unblock_from_queue (SCM queue)
267{
268 SCM thread = dequeue (queue);
269 if (scm_is_true (thread))
270 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
271 return thread;
272}
273
274\f
275/* Getting into and out of guile mode.
276 */
277
278scm_i_pthread_key_t scm_i_thread_key;
279
280
281static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
282static scm_i_thread *all_threads = NULL;
283static int thread_count;
284
285static SCM scm_i_default_dynamic_state;
286
287/* Perform first stage of thread initialisation, in non-guile mode.
288 */
289static void
290guilify_self_1 (SCM_STACKITEM *base)
291{
292 scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");
293
294 t->pthread = scm_i_pthread_self ();
295 t->handle = SCM_BOOL_F;
296 t->result = SCM_BOOL_F;
297 t->cleanup_handler = SCM_BOOL_F;
298 t->mutexes = SCM_EOL;
299 t->held_mutex = NULL;
300 t->join_queue = SCM_EOL;
301 t->dynamic_state = SCM_BOOL_F;
302 t->dynwinds = SCM_EOL;
303 t->active_asyncs = SCM_EOL;
304 t->block_asyncs = 1;
305 t->pending_asyncs = 1;
306 t->last_debug_frame = NULL;
307 t->base = base;
308#ifdef __ia64__
309 /* Calculate and store off the base of this thread's register
310 backing store (RBS). Unfortunately our implementation(s) of
311 scm_ia64_register_backing_store_base are only reliable for the
312 main thread. For other threads, therefore, find out the current
313 top of the RBS, and use that as a maximum. */
314 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
315 {
316 ucontext_t ctx;
317 void *bsp;
318 getcontext (&ctx);
319 bsp = scm_ia64_ar_bsp (&ctx);
320 if (t->register_backing_store_base > bsp)
321 t->register_backing_store_base = bsp;
322 }
323#endif
324 t->continuation_root = SCM_EOL;
325 t->continuation_base = base;
326 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
327 t->sleep_mutex = NULL;
328 t->sleep_object = SCM_BOOL_F;
329 t->sleep_fd = -1;
330
331 if (pipe (t->sleep_pipe) != 0)
332 /* FIXME: Error conditions during the initialization phase are handled
333 gracelessly since public functions such as `scm_init_guile ()'
334 currently have type `void'. */
335 abort ();
336
337 scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
338 t->current_mark_stack_ptr = NULL;
339 t->current_mark_stack_limit = NULL;
340 t->canceled = 0;
341 t->exited = 0;
342 t->guile_mode = 0;
343
344 scm_i_pthread_setspecific (scm_i_thread_key, t);
345
346 scm_i_pthread_mutex_lock (&thread_admin_mutex);
347 t->next_thread = all_threads;
348 all_threads = t;
349 thread_count++;
350 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
351}
352
353/* Perform second stage of thread initialisation, in guile mode.
354 */
355static void
356guilify_self_2 (SCM parent)
357{
358 scm_i_thread *t = SCM_I_CURRENT_THREAD;
359
360 t->guile_mode = 1;
361
362 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
363
364 t->continuation_root = scm_cons (t->handle, SCM_EOL);
365 t->continuation_base = t->base;
366 t->vm = SCM_BOOL_F;
367
368 if (scm_is_true (parent))
369 t->dynamic_state = scm_make_dynamic_state (parent);
370 else
371 t->dynamic_state = scm_i_make_initial_dynamic_state ();
372
373 t->join_queue = make_queue ();
374 t->block_asyncs = 0;
375}
376
377\f
378/*** Fat mutexes */
379
380/* We implement our own mutex type since we want them to be 'fair', we
381 want to do fancy things while waiting for them (like running
382 asyncs) and we might want to add things that are nice for
383 debugging.
384*/
385
386typedef struct {
387 scm_i_pthread_mutex_t lock;
388 SCM owner;
389 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
390
391 int recursive; /* allow recursive locking? */
392 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
393 int allow_external_unlock; /* is it an error to unlock a mutex that is not
394 owned by the current thread? */
395
396 SCM waiting; /* the threads waiting for this mutex. */
397} fat_mutex;
398
399#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
400#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
401
402/* Perform thread tear-down, in guile mode.
403 */
404static void *
405do_thread_exit (void *v)
406{
407 scm_i_thread *t = (scm_i_thread *) v;
408
409 if (!scm_is_false (t->cleanup_handler))
410 {
411 SCM ptr = t->cleanup_handler;
412
413 t->cleanup_handler = SCM_BOOL_F;
414 t->result = scm_internal_catch (SCM_BOOL_T,
415 (scm_t_catch_body) scm_call_0, ptr,
416 scm_handle_by_message_noexit, NULL);
417 }
418
419 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
420
421 t->exited = 1;
422 close (t->sleep_pipe[0]);
423 close (t->sleep_pipe[1]);
424 while (scm_is_true (unblock_from_queue (t->join_queue)))
425 ;
426
427 while (!scm_is_null (t->mutexes))
428 {
429 SCM mutex = SCM_CAR (t->mutexes);
430 fat_mutex *m = SCM_MUTEX_DATA (mutex);
431 scm_i_pthread_mutex_lock (&m->lock);
432
433 unblock_from_queue (m->waiting);
434
435 scm_i_pthread_mutex_unlock (&m->lock);
436 t->mutexes = SCM_CDR (t->mutexes);
437 }
438
439 scm_i_pthread_mutex_unlock (&t->admin_mutex);
440
441 return NULL;
442}
443
444static void
445on_thread_exit (void *v)
446{
447 /* This handler is executed in non-guile mode. */
448 scm_i_thread *t = (scm_i_thread *) v, **tp;
449
450 /* If this thread was cancelled while doing a cond wait, it will
451 still have a mutex locked, so we unlock it here. */
452 if (t->held_mutex)
453 {
454 scm_i_pthread_mutex_unlock (t->held_mutex);
455 t->held_mutex = NULL;
456 }
457
458 scm_i_pthread_setspecific (scm_i_thread_key, v);
459
460 /* Ensure the signal handling thread has been launched, because we might be
461 shutting it down. */
462 scm_i_ensure_signal_delivery_thread ();
463
464 /* Unblocking the joining threads needs to happen in guile mode
465 since the queue is a SCM data structure. */
466
 467 /* Note: Since `do_thread_exit ()' allocates memory via `libgc', we
468 assume the GC is usable at this point, and notably that thread-local
469 storage (TLS) hasn't been deallocated yet. */
470 do_thread_exit (v);
471
 472 /* Removing ourselves from the list of all threads needs to happen in
473 non-guile mode since all SCM values on our stack become
474 unprotected once we are no longer in the list. */
475 scm_i_pthread_mutex_lock (&thread_admin_mutex);
476 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
477 if (*tp == t)
478 {
479 *tp = t->next_thread;
480 break;
481 }
482 thread_count--;
483
484 /* If there's only one other thread, it could be the signal delivery
485 thread, so we need to notify it to shut down by closing its read pipe.
486 If it's not the signal delivery thread, then closing the read pipe isn't
487 going to hurt. */
488 if (thread_count <= 1)
489 scm_i_close_signal_pipe ();
490
491 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
492
493 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
494}
495
496static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
497
498static void
499init_thread_key (void)
500{
501 scm_i_pthread_key_create (&scm_i_thread_key, NULL);
502}
503
504/* Perform any initializations necessary to bring the current thread
505 into guile mode, initializing Guile itself, if necessary.
506
507 BASE is the stack base to use with GC.
508
 509 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
510 which case the default dynamic state is used.
511
512 Return zero when the thread was in guile mode already; otherwise
513 return 1.
514*/
515
516static int
517scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
518{
519 scm_i_thread *t;
520
521 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
522
523 if ((t = SCM_I_CURRENT_THREAD) == NULL)
524 {
525 /* This thread has not been guilified yet.
526 */
527
528 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
529 if (scm_initialized_p == 0)
530 {
531 /* First thread ever to enter Guile. Run the full
532 initialization.
533 */
534 scm_i_init_guile (base);
535 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
536 }
537 else
538 {
539 /* Guile is already initialized, but this thread enters it for
540 the first time. Only initialize this thread.
541 */
542 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
543 guilify_self_1 (base);
544 guilify_self_2 (parent);
545 }
546 return 1;
547 }
548 else if (t->top)
549 {
550 /* This thread is already guilified but not in guile mode, just
551 resume it.
552
 553 A user call to scm_with_guile() will lead us here. This could
 554 happen from anywhere on the stack, and in particular lower on the
 555 stack than it was when this thread was first guilified. Thus,
556 `base' must be updated. */
557#if SCM_STACK_GROWS_UP
558 if (base < t->base)
559 t->base = base;
560#else
561 if (base > t->base)
562 t->base = base;
563#endif
564
565 t->top = NULL;
566 return 1;
567 }
568 else
569 {
570 /* Thread is already in guile mode. Nothing to do.
571 */
572 return 0;
573 }
574}
575
576#if SCM_USE_PTHREAD_THREADS
577
578#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
579/* This method for GNU/Linux and perhaps some other systems.
580 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
581 available on them. */
582#define HAVE_GET_THREAD_STACK_BASE
583
584static SCM_STACKITEM *
585get_thread_stack_base ()
586{
587 pthread_attr_t attr;
588 void *start, *end;
589 size_t size;
590
591 pthread_getattr_np (pthread_self (), &attr);
592 pthread_attr_getstack (&attr, &start, &size);
593 end = (char *)start + size;
594
595 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
596 for the main thread, but we can use scm_get_stack_base in that
597 case.
598 */
599
600#ifndef PTHREAD_ATTR_GETSTACK_WORKS
601 if ((void *)&attr < start || (void *)&attr >= end)
602 return (SCM_STACKITEM *) GC_stackbottom;
603 else
604#endif
605 {
606#if SCM_STACK_GROWS_UP
607 return start;
608#else
609 return end;
610#endif
611 }
612}
613
614#elif HAVE_PTHREAD_GET_STACKADDR_NP
615/* This method for MacOS X.
616 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
617 but as of 2006 there's nothing obvious at apple.com. */
618#define HAVE_GET_THREAD_STACK_BASE
619static SCM_STACKITEM *
620get_thread_stack_base ()
621{
622 return pthread_get_stackaddr_np (pthread_self ());
623}
624
625#elif defined (__MINGW32__)
626/* This method for mingw. In mingw the basic scm_get_stack_base can be used
627 in any thread. We don't like hard-coding the name of a system, but there
628 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
629 work. */
630#define HAVE_GET_THREAD_STACK_BASE
631static SCM_STACKITEM *
632get_thread_stack_base ()
633{
634 return (SCM_STACKITEM *) GC_stackbottom;
635}
636
637#endif /* pthread methods of get_thread_stack_base */
638
639#else /* !SCM_USE_PTHREAD_THREADS */
640
641#define HAVE_GET_THREAD_STACK_BASE
642
643static SCM_STACKITEM *
644get_thread_stack_base ()
645{
646 return (SCM_STACKITEM *) GC_stackbottom;
647}
648
649#endif /* !SCM_USE_PTHREAD_THREADS */
650
651#ifdef HAVE_GET_THREAD_STACK_BASE
652
653void
654scm_init_guile ()
655{
656 scm_i_init_thread_for_guile (get_thread_stack_base (),
657 scm_i_default_dynamic_state);
658}
659
660#endif
661
662void *
663scm_with_guile (void *(*func)(void *), void *data)
664{
665 return scm_i_with_guile_and_parent (func, data,
666 scm_i_default_dynamic_state);
667}
668
669SCM_UNUSED static void
670scm_leave_guile_cleanup (void *x)
671{
672 on_thread_exit (SCM_I_CURRENT_THREAD);
673}
674
675void *
676scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
677{
678 void *res;
679 int really_entered;
680 SCM_STACKITEM base_item;
681
682 really_entered = scm_i_init_thread_for_guile (&base_item, parent);
683 if (really_entered)
684 {
685 scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
686 res = scm_c_with_continuation_barrier (func, data);
687 scm_i_pthread_cleanup_pop (0);
688 }
689 else
690 res = scm_c_with_continuation_barrier (func, data);
691
692 return res;
693}
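
/* Minimal sketch (illustrative only) of entering Guile mode from a thread
   that Guile did not create: wrap the work that touches SCM values in
   scm_with_guile so that scm_i_init_thread_for_guile runs first.  The
   function names below are hypothetical.  */
#if 0
static void *
do_scheme_work (void *data)
{
  /* In here the thread is in guile mode and may allocate SCM objects.  */
  scm_c_eval_string ("(display \"hello from a foreign thread\\n\")");
  return NULL;
}

static void *
foreign_thread_entry (void *data)
{
  return scm_with_guile (do_scheme_work, data);
}
#endif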
694
695\f
696/*** Non-guile mode. */
697
698#if (defined HAVE_GC_DO_BLOCKING) && (!defined HAVE_DECL_GC_DO_BLOCKING)
699
700/* This declaration is missing from the public headers of GC 7.1. */
701extern void GC_do_blocking (void (*) (void *), void *);
702
703#endif
704
705#ifdef HAVE_GC_DO_BLOCKING
706struct without_guile_arg
707{
708 void * (*function) (void *);
709 void *data;
710 void *result;
711};
712
713static void
714without_guile_trampoline (void *closure)
715{
716 struct without_guile_arg *arg;
717
718 SCM_I_CURRENT_THREAD->guile_mode = 0;
719
720 arg = (struct without_guile_arg *) closure;
721 arg->result = arg->function (arg->data);
722
723 SCM_I_CURRENT_THREAD->guile_mode = 1;
724}
725#endif
726
727void *
728scm_without_guile (void *(*func)(void *), void *data)
729{
730 void *result;
731
732#ifdef HAVE_GC_DO_BLOCKING
733 if (SCM_I_CURRENT_THREAD->guile_mode)
734 {
735 struct without_guile_arg arg;
736
737 arg.function = func;
738 arg.data = data;
739 GC_do_blocking (without_guile_trampoline, &arg);
740 result = arg.result;
741 }
742 else
743#endif
744 result = func (data);
745
746 return result;
747}
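
/* Illustrative sketch: leaving guile mode around a blocking system call so
   that the collector (via GC_do_blocking) need not wait for this thread.
   The wrapper and its argument struct are hypothetical.  */
#if 0
struct read_args { int fd; void *buf; size_t len; ssize_t ret; };

static void *
do_blocking_read (void *data)
{
  struct read_args *a = data;
  a->ret = read (a->fd, a->buf, a->len);
  return NULL;
}

static ssize_t
read_without_guile (int fd, void *buf, size_t len)
{
  struct read_args a = { fd, buf, len, -1 };
  scm_without_guile (do_blocking_read, &a);
  return a.ret;
}
#endif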
748
749\f
750/*** Thread creation */
751
752typedef struct {
753 SCM parent;
754 SCM thunk;
755 SCM handler;
756 SCM thread;
757 scm_i_pthread_mutex_t mutex;
758 scm_i_pthread_cond_t cond;
759} launch_data;
760
761static void *
762really_launch (void *d)
763{
764 launch_data *data = (launch_data *)d;
765 SCM thunk = data->thunk, handler = data->handler;
766 scm_i_thread *t;
767
768 t = SCM_I_CURRENT_THREAD;
769
770 scm_i_scm_pthread_mutex_lock (&data->mutex);
771 data->thread = scm_current_thread ();
772 scm_i_pthread_cond_signal (&data->cond);
773 scm_i_pthread_mutex_unlock (&data->mutex);
774
775 if (SCM_UNBNDP (handler))
776 t->result = scm_call_0 (thunk);
777 else
778 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
779
780 /* Trigger a call to `on_thread_exit ()'. */
781 pthread_exit (NULL);
782
783 return 0;
784}
785
786static void *
787launch_thread (void *d)
788{
789 launch_data *data = (launch_data *)d;
790 scm_i_pthread_detach (scm_i_pthread_self ());
791 scm_i_with_guile_and_parent (really_launch, d, data->parent);
792 return NULL;
793}
794
795SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
796 (SCM thunk, SCM handler),
797 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
798 "returning a new thread object representing the thread. The procedure\n"
799 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
800 "\n"
801 "When @var{handler} is specified, then @var{thunk} is called from\n"
802 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
803 "handler. This catch is established inside the continuation barrier.\n"
804 "\n"
805 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
806 "the @emph{exit value} of the thread and the thread is terminated.")
807#define FUNC_NAME s_scm_call_with_new_thread
808{
809 launch_data data;
810 scm_i_pthread_t id;
811 int err;
812
813 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
814 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
815 handler, SCM_ARG2, FUNC_NAME);
816
817 data.parent = scm_current_dynamic_state ();
818 data.thunk = thunk;
819 data.handler = handler;
820 data.thread = SCM_BOOL_F;
821 scm_i_pthread_mutex_init (&data.mutex, NULL);
822 scm_i_pthread_cond_init (&data.cond, NULL);
823
824 scm_i_scm_pthread_mutex_lock (&data.mutex);
825 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
826 if (err)
827 {
828 scm_i_pthread_mutex_unlock (&data.mutex);
829 errno = err;
830 scm_syserror (NULL);
831 }
832 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
833 scm_i_pthread_mutex_unlock (&data.mutex);
834
835 return data.thread;
836}
837#undef FUNC_NAME
838
839typedef struct {
840 SCM parent;
841 scm_t_catch_body body;
842 void *body_data;
843 scm_t_catch_handler handler;
844 void *handler_data;
845 SCM thread;
846 scm_i_pthread_mutex_t mutex;
847 scm_i_pthread_cond_t cond;
848} spawn_data;
849
850static void *
851really_spawn (void *d)
852{
853 spawn_data *data = (spawn_data *)d;
854 scm_t_catch_body body = data->body;
855 void *body_data = data->body_data;
856 scm_t_catch_handler handler = data->handler;
857 void *handler_data = data->handler_data;
858 scm_i_thread *t = SCM_I_CURRENT_THREAD;
859
860 scm_i_scm_pthread_mutex_lock (&data->mutex);
861 data->thread = scm_current_thread ();
862 scm_i_pthread_cond_signal (&data->cond);
863 scm_i_pthread_mutex_unlock (&data->mutex);
864
865 if (handler == NULL)
866 t->result = body (body_data);
867 else
868 t->result = scm_internal_catch (SCM_BOOL_T,
869 body, body_data,
870 handler, handler_data);
871
872 return 0;
873}
874
875static void *
876spawn_thread (void *d)
877{
878 spawn_data *data = (spawn_data *)d;
879 scm_i_pthread_detach (scm_i_pthread_self ());
880 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
881 return NULL;
882}
883
884SCM
885scm_spawn_thread (scm_t_catch_body body, void *body_data,
886 scm_t_catch_handler handler, void *handler_data)
887{
888 spawn_data data;
889 scm_i_pthread_t id;
890 int err;
891
892 data.parent = scm_current_dynamic_state ();
893 data.body = body;
894 data.body_data = body_data;
895 data.handler = handler;
896 data.handler_data = handler_data;
897 data.thread = SCM_BOOL_F;
898 scm_i_pthread_mutex_init (&data.mutex, NULL);
899 scm_i_pthread_cond_init (&data.cond, NULL);
900
901 scm_i_scm_pthread_mutex_lock (&data.mutex);
902 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
903 if (err)
904 {
905 scm_i_pthread_mutex_unlock (&data.mutex);
906 errno = err;
907 scm_syserror (NULL);
908 }
909 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
910 scm_i_pthread_mutex_unlock (&data.mutex);
911
912 return data.thread;
913}
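
/* Illustrative sketch of the C-level entry point above: spawn a detached
   thread running BODY with a catch-all HANDLER, mirroring what
   call-with-new-thread does at the Scheme level.  The body and handler
   shown here are hypothetical.  */
#if 0
static SCM
example_body (void *data)
{
  return scm_from_int (42);
}

static SCM
example_handler (void *data, SCM key, SCM args)
{
  return SCM_BOOL_F;
}

static SCM
spawn_example_thread (void)
{
  return scm_spawn_thread (example_body, NULL, example_handler, NULL);
}
#endif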
914
915SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
916 (),
917"Move the calling thread to the end of the scheduling queue.")
918#define FUNC_NAME s_scm_yield
919{
920 return scm_from_bool (scm_i_sched_yield ());
921}
922#undef FUNC_NAME
923
924SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
925 (SCM thread),
926"Asynchronously force the target @var{thread} to terminate. @var{thread} "
927"cannot be the current thread, and if @var{thread} has already terminated or "
928"been signaled to terminate, this function is a no-op.")
929#define FUNC_NAME s_scm_cancel_thread
930{
931 scm_i_thread *t = NULL;
932
933 SCM_VALIDATE_THREAD (1, thread);
934 t = SCM_I_THREAD_DATA (thread);
935 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
936 if (!t->canceled)
937 {
938 t->canceled = 1;
939 scm_i_pthread_mutex_unlock (&t->admin_mutex);
940 scm_i_pthread_cancel (t->pthread);
941 }
942 else
943 scm_i_pthread_mutex_unlock (&t->admin_mutex);
944
945 return SCM_UNSPECIFIED;
946}
947#undef FUNC_NAME
948
949SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
950 (SCM thread, SCM proc),
951"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
952"This handler will be called when the thread exits.")
953#define FUNC_NAME s_scm_set_thread_cleanup_x
954{
955 scm_i_thread *t;
956
957 SCM_VALIDATE_THREAD (1, thread);
958 if (!scm_is_false (proc))
959 SCM_VALIDATE_THUNK (2, proc);
960
961 t = SCM_I_THREAD_DATA (thread);
962 scm_i_pthread_mutex_lock (&t->admin_mutex);
963
964 if (!(t->exited || t->canceled))
965 t->cleanup_handler = proc;
966
967 scm_i_pthread_mutex_unlock (&t->admin_mutex);
968
969 return SCM_UNSPECIFIED;
970}
971#undef FUNC_NAME
972
973SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
974 (SCM thread),
975"Return the cleanup handler installed for the thread @var{thread}.")
976#define FUNC_NAME s_scm_thread_cleanup
977{
978 scm_i_thread *t;
979 SCM ret;
980
981 SCM_VALIDATE_THREAD (1, thread);
982
983 t = SCM_I_THREAD_DATA (thread);
984 scm_i_pthread_mutex_lock (&t->admin_mutex);
985 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
986 scm_i_pthread_mutex_unlock (&t->admin_mutex);
987
988 return ret;
989}
990#undef FUNC_NAME
991
992SCM scm_join_thread (SCM thread)
993{
994 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
995}
996
997SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
998 (SCM thread, SCM timeout, SCM timeoutval),
999"Suspend execution of the calling thread until the target @var{thread} "
1000"terminates, unless the target @var{thread} has already terminated. ")
1001#define FUNC_NAME s_scm_join_thread_timed
1002{
1003 scm_i_thread *t;
1004 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1005 SCM res = SCM_BOOL_F;
1006
1007 if (! (SCM_UNBNDP (timeoutval)))
1008 res = timeoutval;
1009
1010 SCM_VALIDATE_THREAD (1, thread);
1011 if (scm_is_eq (scm_current_thread (), thread))
1012 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
1013
1014 t = SCM_I_THREAD_DATA (thread);
1015 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1016
1017 if (! SCM_UNBNDP (timeout))
1018 {
1019 to_timespec (timeout, &ctimeout);
1020 timeout_ptr = &ctimeout;
1021 }
1022
1023 if (t->exited)
1024 res = t->result;
1025 else
1026 {
1027 while (1)
1028 {
1029 int err = block_self (t->join_queue, thread, &t->admin_mutex,
1030 timeout_ptr);
1031 if (err == 0)
1032 {
1033 if (t->exited)
1034 {
1035 res = t->result;
1036 break;
1037 }
1038 }
1039 else if (err == ETIMEDOUT)
1040 break;
1041
1042 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1043 SCM_TICK;
1044 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1045
1046 /* Check for exit again, since we just released and
1047 reacquired the admin mutex, before the next block_self
1048 call (which would block forever if t has already
1049 exited). */
1050 if (t->exited)
1051 {
1052 res = t->result;
1053 break;
1054 }
1055 }
1056 }
1057
1058 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1059
1060 return res;
1061}
1062#undef FUNC_NAME
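
/* Illustrative sketch: joining a thread from C with an absolute timeout
   one second from now.  The timeout uses the formats accepted by
   to_timespec; the 'timed-out symbol is a hypothetical sentinel returned
   when the thread is still running at that point.  */
#if 0
static SCM
join_with_timeout_sketch (SCM thread)
{
  SCM timeout = scm_sum (scm_current_time (), scm_from_int (1));
  return scm_join_thread_timed (thread, timeout,
                                scm_from_locale_symbol ("timed-out"));
}
#endif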
1063
1064SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1065 (SCM obj),
1066 "Return @code{#t} if @var{obj} is a thread.")
1067#define FUNC_NAME s_scm_thread_p
1068{
1069 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1070}
1071#undef FUNC_NAME
1072
1073
1074static size_t
1075fat_mutex_free (SCM mx)
1076{
1077 fat_mutex *m = SCM_MUTEX_DATA (mx);
1078 scm_i_pthread_mutex_destroy (&m->lock);
1079 return 0;
1080}
1081
1082static int
1083fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1084{
1085 fat_mutex *m = SCM_MUTEX_DATA (mx);
1086 scm_puts ("#<mutex ", port);
1087 scm_uintprint ((scm_t_bits)m, 16, port);
1088 scm_puts (">", port);
1089 return 1;
1090}
1091
1092static SCM
1093make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
1094{
1095 fat_mutex *m;
1096 SCM mx;
1097
1098 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1099 scm_i_pthread_mutex_init (&m->lock, NULL);
1100 m->owner = SCM_BOOL_F;
1101 m->level = 0;
1102
1103 m->recursive = recursive;
1104 m->unchecked_unlock = unchecked_unlock;
1105 m->allow_external_unlock = external_unlock;
1106
1107 m->waiting = SCM_EOL;
1108 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1109 m->waiting = make_queue ();
1110 return mx;
1111}
1112
1113SCM scm_make_mutex (void)
1114{
1115 return scm_make_mutex_with_flags (SCM_EOL);
1116}
1117
1118SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1119SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1120SCM_SYMBOL (recursive_sym, "recursive");
1121
1122SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1123 (SCM flags),
1124 "Create a new mutex. ")
1125#define FUNC_NAME s_scm_make_mutex_with_flags
1126{
1127 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1128
1129 SCM ptr = flags;
1130 while (! scm_is_null (ptr))
1131 {
1132 SCM flag = SCM_CAR (ptr);
1133 if (scm_is_eq (flag, unchecked_unlock_sym))
1134 unchecked_unlock = 1;
1135 else if (scm_is_eq (flag, allow_external_unlock_sym))
1136 external_unlock = 1;
1137 else if (scm_is_eq (flag, recursive_sym))
1138 recursive = 1;
1139 else
1140 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1141 ptr = SCM_CDR (ptr);
1142 }
1143 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1144}
1145#undef FUNC_NAME
1146
1147SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
1148 (void),
1149 "Create a new recursive mutex. ")
1150#define FUNC_NAME s_scm_make_recursive_mutex
1151{
1152 return make_fat_mutex (1, 0, 0);
1153}
1154#undef FUNC_NAME
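
/* Illustrative sketch: the flag symbols above can also be passed from C.
   Here a mutex is created that tolerates being unlocked by a thread other
   than its owner, which some hand-rolled waiting patterns need.  */
#if 0
static SCM
make_externally_unlockable_mutex (void)
{
  return scm_make_mutex_with_flags
    (scm_list_1 (scm_from_locale_symbol ("allow-external-unlock")));
}
#endif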
1155
1156SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1157
1158static SCM
1159fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
1160{
1161 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1162
1163 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
1164 SCM err = SCM_BOOL_F;
1165
1166 struct timeval current_time;
1167
1168 scm_i_scm_pthread_mutex_lock (&m->lock);
1169
1170 while (1)
1171 {
1172 if (m->level == 0)
1173 {
1174 m->owner = new_owner;
1175 m->level++;
1176
1177 if (SCM_I_IS_THREAD (new_owner))
1178 {
1179 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
1180 scm_i_pthread_mutex_lock (&t->admin_mutex);
1181 t->mutexes = scm_cons (mutex, t->mutexes);
1182 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1183 }
1184 *ret = 1;
1185 break;
1186 }
1187 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1188 {
1189 m->owner = new_owner;
1190 err = scm_cons (scm_abandoned_mutex_error_key,
1191 scm_from_locale_string ("lock obtained on abandoned "
1192 "mutex"));
1193 *ret = 1;
1194 break;
1195 }
1196 else if (scm_is_eq (m->owner, new_owner))
1197 {
1198 if (m->recursive)
1199 {
1200 m->level++;
1201 *ret = 1;
1202 }
1203 else
1204 {
1205 err = scm_cons (scm_misc_error_key,
1206 scm_from_locale_string ("mutex already locked "
1207 "by thread"));
1208 *ret = 0;
1209 }
1210 break;
1211 }
1212 else
1213 {
1214 if (timeout != NULL)
1215 {
1216 gettimeofday (&current_time, NULL);
1217 if (current_time.tv_sec > timeout->tv_sec ||
1218 (current_time.tv_sec == timeout->tv_sec &&
1219 current_time.tv_usec * 1000 > timeout->tv_nsec))
1220 {
1221 *ret = 0;
1222 break;
1223 }
1224 }
1225 block_self (m->waiting, mutex, &m->lock, timeout);
1226 scm_i_pthread_mutex_unlock (&m->lock);
1227 SCM_TICK;
1228 scm_i_scm_pthread_mutex_lock (&m->lock);
1229 }
1230 }
1231 scm_i_pthread_mutex_unlock (&m->lock);
1232 return err;
1233}
1234
1235SCM scm_lock_mutex (SCM mx)
1236{
1237 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1238}
1239
1240SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1241 (SCM m, SCM timeout, SCM owner),
1242"Lock @var{mutex}. If the mutex is already locked, the calling thread "
1243"blocks until the mutex becomes available. The function returns when "
1244"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1245"a thread already owns will succeed right away and will not block the "
1246"thread. That is, Guile's mutexes are @emph{recursive}. ")
1247#define FUNC_NAME s_scm_lock_mutex_timed
1248{
1249 SCM exception;
1250 int ret = 0;
1251 scm_t_timespec cwaittime, *waittime = NULL;
1252
1253 SCM_VALIDATE_MUTEX (1, m);
1254
1255 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1256 {
1257 to_timespec (timeout, &cwaittime);
1258 waittime = &cwaittime;
1259 }
1260
1261 exception = fat_mutex_lock (m, waittime, owner, &ret);
1262 if (!scm_is_false (exception))
1263 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1264 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1265}
1266#undef FUNC_NAME
1267
1268void
1269scm_dynwind_lock_mutex (SCM mutex)
1270{
1271 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
1272 SCM_F_WIND_EXPLICITLY);
1273 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
1274 SCM_F_WIND_EXPLICITLY);
1275}
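
/* Illustrative sketch of the dynwind variant above: the mutex is unlocked
   again when the dynwind context is left, whether normally or through a
   non-local exit.  */
#if 0
static void
with_mutex_sketch (SCM mutex)
{
  scm_dynwind_begin (0);
  scm_dynwind_lock_mutex (mutex);
  /* ... code that may throw or leave via a continuation ... */
  scm_dynwind_end ();
}
#endif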
1276
1277SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
1278 (SCM mutex),
1279"Try to lock @var{mutex}. If the mutex is already locked by someone "
1280"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1281#define FUNC_NAME s_scm_try_mutex
1282{
1283 SCM exception;
1284 int ret = 0;
1285 scm_t_timespec cwaittime, *waittime = NULL;
1286
1287 SCM_VALIDATE_MUTEX (1, mutex);
1288
1289 to_timespec (scm_from_int(0), &cwaittime);
1290 waittime = &cwaittime;
1291
1292 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
1293 if (!scm_is_false (exception))
1294 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1295 return ret ? SCM_BOOL_T : SCM_BOOL_F;
1296}
1297#undef FUNC_NAME
1298
1299/*** Fat condition variables */
1300
1301typedef struct {
1302 scm_i_pthread_mutex_t lock;
1303 SCM waiting; /* the threads waiting for this condition. */
1304} fat_cond;
1305
1306#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1307#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1308
1309static int
1310fat_mutex_unlock (SCM mutex, SCM cond,
1311 const scm_t_timespec *waittime, int relock)
1312{
1313 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1314 fat_cond *c = NULL;
1315 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1316 int err = 0, ret = 0;
1317
1318 scm_i_scm_pthread_mutex_lock (&m->lock);
1319
1320 SCM owner = m->owner;
1321
1322 if (!scm_is_eq (owner, scm_current_thread ()))
1323 {
1324 if (m->level == 0)
1325 {
1326 if (!m->unchecked_unlock)
1327 {
1328 scm_i_pthread_mutex_unlock (&m->lock);
1329 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1330 }
1331 owner = scm_current_thread ();
1332 }
1333 else if (!m->allow_external_unlock)
1334 {
1335 scm_i_pthread_mutex_unlock (&m->lock);
1336 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1337 }
1338 }
1339
1340 if (! (SCM_UNBNDP (cond)))
1341 {
1342 c = SCM_CONDVAR_DATA (cond);
1343 while (1)
1344 {
1345 int brk = 0;
1346
1347 if (m->level > 0)
1348 m->level--;
1349 if (m->level == 0)
1350 m->owner = unblock_from_queue (m->waiting);
1351
1352 t->block_asyncs++;
1353
1354 err = block_self (c->waiting, cond, &m->lock, waittime);
1355 scm_i_pthread_mutex_unlock (&m->lock);
1356
1357 if (err == 0)
1358 {
1359 ret = 1;
1360 brk = 1;
1361 }
1362 else if (err == ETIMEDOUT)
1363 {
1364 ret = 0;
1365 brk = 1;
1366 }
1367 else if (err != EINTR)
1368 {
1369 errno = err;
1370 scm_syserror (NULL);
1371 }
1372
1373 if (brk)
1374 {
1375 if (relock)
1376 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
1377 t->block_asyncs--;
1378 break;
1379 }
1380
1381 t->block_asyncs--;
1382 scm_async_click ();
1383
1384 scm_remember_upto_here_2 (cond, mutex);
1385
1386 scm_i_scm_pthread_mutex_lock (&m->lock);
1387 }
1388 }
1389 else
1390 {
1391 if (m->level > 0)
1392 m->level--;
1393 if (m->level == 0)
1394 m->owner = unblock_from_queue (m->waiting);
1395
1396 scm_i_pthread_mutex_unlock (&m->lock);
1397 ret = 1;
1398 }
1399
1400 return ret;
1401}
1402
1403SCM scm_unlock_mutex (SCM mx)
1404{
1405 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
1406}
1407
1408SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1409 (SCM mx, SCM cond, SCM timeout),
1410"Unlocks @var{mutex} if the calling thread owns the lock on "
1411"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1412"thread results in undefined behaviour. Once a mutex has been unlocked, "
1413"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1414"lock. Every call to @code{lock-mutex} by this thread must be matched "
1415"with a call to @code{unlock-mutex}. Only the last call to "
1416"@code{unlock-mutex} will actually unlock the mutex. ")
1417#define FUNC_NAME s_scm_unlock_mutex_timed
1418{
1419 scm_t_timespec cwaittime, *waittime = NULL;
1420
1421 SCM_VALIDATE_MUTEX (1, mx);
1422 if (! (SCM_UNBNDP (cond)))
1423 {
1424 SCM_VALIDATE_CONDVAR (2, cond);
1425
1426 if (! (SCM_UNBNDP (timeout)))
1427 {
1428 to_timespec (timeout, &cwaittime);
1429 waittime = &cwaittime;
1430 }
1431 }
1432
1433 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
1434}
1435#undef FUNC_NAME
1436
1437SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1438 (SCM obj),
1439 "Return @code{#t} if @var{obj} is a mutex.")
1440#define FUNC_NAME s_scm_mutex_p
1441{
1442 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1443}
1444#undef FUNC_NAME
1445
1446SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1447 (SCM mx),
1448 "Return the thread owning @var{mx}, or @code{#f}.")
1449#define FUNC_NAME s_scm_mutex_owner
1450{
1451 SCM owner;
1452 fat_mutex *m = NULL;
1453
1454 SCM_VALIDATE_MUTEX (1, mx);
1455 m = SCM_MUTEX_DATA (mx);
1456 scm_i_pthread_mutex_lock (&m->lock);
1457 owner = m->owner;
1458 scm_i_pthread_mutex_unlock (&m->lock);
1459
1460 return owner;
1461}
1462#undef FUNC_NAME
1463
1464SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1465 (SCM mx),
1466 "Return the lock level of mutex @var{mx}.")
1467#define FUNC_NAME s_scm_mutex_level
1468{
1469 SCM_VALIDATE_MUTEX (1, mx);
1470 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1471}
1472#undef FUNC_NAME
1473
1474SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1475 (SCM mx),
1476 "Returns @code{#t} if the mutex @var{mx} is locked.")
1477#define FUNC_NAME s_scm_mutex_locked_p
1478{
1479 SCM_VALIDATE_MUTEX (1, mx);
1480 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1481}
1482#undef FUNC_NAME
1483
1484static int
1485fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1486{
1487 fat_cond *c = SCM_CONDVAR_DATA (cv);
1488 scm_puts ("#<condition-variable ", port);
1489 scm_uintprint ((scm_t_bits)c, 16, port);
1490 scm_puts (">", port);
1491 return 1;
1492}
1493
1494SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1495 (void),
1496 "Make a new condition variable.")
1497#define FUNC_NAME s_scm_make_condition_variable
1498{
1499 fat_cond *c;
1500 SCM cv;
1501
1502 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
1503 c->waiting = SCM_EOL;
1504 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1505 c->waiting = make_queue ();
1506 return cv;
1507}
1508#undef FUNC_NAME
1509
1510SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1511 (SCM cv, SCM mx, SCM t),
1512"Wait until @var{cond-var} has been signalled. While waiting, "
1513"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1514"is locked again when this function returns. When @var{time} is given, "
1515"it specifies a point in time where the waiting should be aborted. It "
1516"can be either a integer as returned by @code{current-time} or a pair "
1517"as returned by @code{gettimeofday}. When the waiting is aborted the "
1518"mutex is locked and @code{#f} is returned. When the condition "
1519"variable is in fact signalled, the mutex is also locked and @code{#t} "
1520"is returned. ")
1521#define FUNC_NAME s_scm_timed_wait_condition_variable
1522{
1523 scm_t_timespec waittime, *waitptr = NULL;
1524
1525 SCM_VALIDATE_CONDVAR (1, cv);
1526 SCM_VALIDATE_MUTEX (2, mx);
1527
1528 if (!SCM_UNBNDP (t))
1529 {
1530 to_timespec (t, &waittime);
1531 waitptr = &waittime;
1532 }
1533
1534 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
1535}
1536#undef FUNC_NAME
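
/* Illustrative sketch of the usual loop built on the primitive above: the
   predicate (hypothetical here) is re-checked after every wakeup, since
   another thread may have consumed the state between the signal and the
   re-acquisition of the mutex.  */
#if 0
static void
wait_until_ready_sketch (SCM mx, SCM cv, int (*ready) (void))
{
  scm_lock_mutex (mx);
  while (!ready ())
    scm_timed_wait_condition_variable (cv, mx, SCM_UNDEFINED);
  /* ... use the protected state ... */
  scm_unlock_mutex (mx);
}
#endif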
1537
1538static void
1539fat_cond_signal (fat_cond *c)
1540{
1541 unblock_from_queue (c->waiting);
1542}
1543
1544SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1545 (SCM cv),
1546 "Wake up one thread that is waiting for @var{cv}")
1547#define FUNC_NAME s_scm_signal_condition_variable
1548{
1549 SCM_VALIDATE_CONDVAR (1, cv);
1550 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1551 return SCM_BOOL_T;
1552}
1553#undef FUNC_NAME
1554
1555static void
1556fat_cond_broadcast (fat_cond *c)
1557{
1558 while (scm_is_true (unblock_from_queue (c->waiting)))
1559 ;
1560}
1561
1562SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1563 (SCM cv),
1564 "Wake up all threads that are waiting for @var{cv}. ")
1565#define FUNC_NAME s_scm_broadcast_condition_variable
1566{
1567 SCM_VALIDATE_CONDVAR (1, cv);
1568 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1569 return SCM_BOOL_T;
1570}
1571#undef FUNC_NAME
1572
1573SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1574 (SCM obj),
1575 "Return @code{#t} if @var{obj} is a condition variable.")
1576#define FUNC_NAME s_scm_condition_variable_p
1577{
1578 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1579}
1580#undef FUNC_NAME
1581
1582
1583\f
1584/*** Select */
1585
1586struct select_args
1587{
1588 int nfds;
1589 SELECT_TYPE *read_fds;
1590 SELECT_TYPE *write_fds;
1591 SELECT_TYPE *except_fds;
1592 struct timeval *timeout;
1593
1594 int result;
1595 int errno_value;
1596};
1597
1598static void *
1599do_std_select (void *args)
1600{
1601 struct select_args *select_args;
1602
1603 select_args = (struct select_args *) args;
1604
1605 select_args->result =
1606 select (select_args->nfds,
1607 select_args->read_fds, select_args->write_fds,
1608 select_args->except_fds, select_args->timeout);
1609 select_args->errno_value = errno;
1610
1611 return NULL;
1612}
1613
1614int
1615scm_std_select (int nfds,
1616 SELECT_TYPE *readfds,
1617 SELECT_TYPE *writefds,
1618 SELECT_TYPE *exceptfds,
1619 struct timeval *timeout)
1620{
1621 fd_set my_readfds;
1622 int res, eno, wakeup_fd;
1623 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1624 struct select_args args;
1625
1626 if (readfds == NULL)
1627 {
1628 FD_ZERO (&my_readfds);
1629 readfds = &my_readfds;
1630 }
1631
1632 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1633 SCM_TICK;
1634
1635 wakeup_fd = t->sleep_pipe[0];
1636 FD_SET (wakeup_fd, readfds);
1637 if (wakeup_fd >= nfds)
1638 nfds = wakeup_fd+1;
1639
1640 args.nfds = nfds;
1641 args.read_fds = readfds;
1642 args.write_fds = writefds;
1643 args.except_fds = exceptfds;
1644 args.timeout = timeout;
1645
1646 /* Explicitly cooperate with the GC. */
1647 scm_without_guile (do_std_select, &args);
1648
1649 res = args.result;
1650 eno = args.errno_value;
1651
1652 t->sleep_fd = -1;
1653 scm_i_reset_sleep (t);
1654
1655 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1656 {
1657 char dummy;
1658 full_read (wakeup_fd, &dummy, 1);
1659
1660 FD_CLR (wakeup_fd, readfds);
1661 res -= 1;
1662 if (res == 0)
1663 {
1664 eno = EINTR;
1665 res = -1;
1666 }
1667 }
1668 errno = eno;
1669 return res;
1670}
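
/* Illustrative sketch: waiting for input on FD in guile mode.  Unlike a
   raw select(), this call can be woken early through the thread's sleep
   pipe, in which case it returns -1 with errno set to EINTR, so callers
   should be prepared to retry.  */
#if 0
static int
wait_for_input_sketch (int fd, long seconds)
{
  SELECT_TYPE readfds;
  struct timeval tv;

  FD_ZERO (&readfds);
  FD_SET (fd, &readfds);
  tv.tv_sec = seconds;
  tv.tv_usec = 0;

  return scm_std_select (fd + 1, &readfds, NULL, NULL, &tv);
}
#endif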
1671
1672/* Convenience API for blocking while in guile mode. */
1673
1674#if SCM_USE_PTHREAD_THREADS
1675
1676/* It seems reasonable to not run procedures related to mutex and condition
 1677 variables within `GC_do_blocking ()', since (i) the GC can operate even
1678 without it, and (ii) the only potential gain would be GC latency. See
1679 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1680 for a discussion of the pros and cons. */
1681
1682int
1683scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1684{
1685 int res = scm_i_pthread_mutex_lock (mutex);
1686 return res;
1687}
1688
1689static void
1690do_unlock (void *data)
1691{
1692 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1693}
1694
1695void
1696scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1697{
1698 scm_i_scm_pthread_mutex_lock (mutex);
1699 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
1700}
1701
1702int
1703scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1704{
1705 int res;
1706 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1707
1708 t->held_mutex = mutex;
1709 res = scm_i_pthread_cond_wait (cond, mutex);
1710 t->held_mutex = NULL;
1711
1712 return res;
1713}
1714
1715int
1716scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1717 scm_i_pthread_mutex_t *mutex,
1718 const scm_t_timespec *wt)
1719{
1720 int res;
1721 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1722
1723 t->held_mutex = mutex;
1724 res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1725 t->held_mutex = NULL;
1726
1727 return res;
1728}
1729
1730#endif
1731
1732unsigned long
1733scm_std_usleep (unsigned long usecs)
1734{
1735 struct timeval tv;
1736 tv.tv_usec = usecs % 1000000;
1737 tv.tv_sec = usecs / 1000000;
1738 scm_std_select (0, NULL, NULL, NULL, &tv);
1739 return tv.tv_sec * 1000000 + tv.tv_usec;
1740}
1741
1742unsigned int
1743scm_std_sleep (unsigned int secs)
1744{
1745 struct timeval tv;
1746 tv.tv_usec = 0;
1747 tv.tv_sec = secs;
1748 scm_std_select (0, NULL, NULL, NULL, &tv);
1749 return tv.tv_sec;
1750}
1751
1752/*** Misc */
1753
1754SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1755 (void),
1756 "Return the thread that called this function.")
1757#define FUNC_NAME s_scm_current_thread
1758{
1759 return SCM_I_CURRENT_THREAD->handle;
1760}
1761#undef FUNC_NAME
1762
1763static SCM
1764scm_c_make_list (size_t n, SCM fill)
1765{
1766 SCM res = SCM_EOL;
1767 while (n-- > 0)
1768 res = scm_cons (fill, res);
1769 return res;
1770}
1771
1772SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
1773 (void),
1774 "Return a list of all threads.")
1775#define FUNC_NAME s_scm_all_threads
1776{
 1777 /* We cannot allocate while holding the thread_admin_mutex because
1778 of the way GC is done.
1779 */
1780 int n = thread_count;
1781 scm_i_thread *t;
1782 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
1783
1784 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1785 l = &list;
1786 for (t = all_threads; t && n > 0; t = t->next_thread)
1787 {
1788 if (t != scm_i_signal_delivery_thread)
1789 {
1790 SCM_SETCAR (*l, t->handle);
1791 l = SCM_CDRLOC (*l);
1792 }
1793 n--;
1794 }
1795 *l = SCM_EOL;
1796 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1797 return list;
1798}
1799#undef FUNC_NAME
1800
1801SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1802 (SCM thread),
1803 "Return @code{#t} iff @var{thread} has exited.\n")
1804#define FUNC_NAME s_scm_thread_exited_p
1805{
1806 return scm_from_bool (scm_c_thread_exited_p (thread));
1807}
1808#undef FUNC_NAME
1809
1810int
1811scm_c_thread_exited_p (SCM thread)
1812#define FUNC_NAME s_scm_thread_exited_p
1813{
1814 scm_i_thread *t;
1815 SCM_VALIDATE_THREAD (1, thread);
1816 t = SCM_I_THREAD_DATA (thread);
1817 return t->exited;
1818}
1819#undef FUNC_NAME
1820
1821static scm_i_pthread_cond_t wake_up_cond;
1822static int threads_initialized_p = 0;
1823
1824
1825/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
1826 */
1827scm_i_pthread_mutex_t scm_i_critical_section_mutex;
1828int scm_i_critical_section_level = 0;
1829
1830static SCM dynwind_critical_section_mutex;
1831
1832void
1833scm_dynwind_critical_section (SCM mutex)
1834{
1835 if (scm_is_false (mutex))
1836 mutex = dynwind_critical_section_mutex;
1837 scm_dynwind_lock_mutex (mutex);
1838 scm_dynwind_block_asyncs ();
1839}
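
/* Illustrative sketch: a dynwind critical section over Scheme data.
   Passing SCM_BOOL_F selects the built-in dynwind_critical_section_mutex;
   asyncs stay blocked until the enclosing dynwind context is left.  */
#if 0
static void
critical_section_sketch (void)
{
  scm_dynwind_begin (0);
  scm_dynwind_critical_section (SCM_BOOL_F);
  /* ... mutate shared state that asyncs must not observe half-done ... */
  scm_dynwind_end ();
}
#endif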
1840
1841/*** Initialization */
1842
1843scm_i_pthread_mutex_t scm_i_misc_mutex;
1844
1845#if SCM_USE_PTHREAD_THREADS
1846pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
1847#endif
1848
1849void
1850scm_threads_prehistory (SCM_STACKITEM *base)
1851{
1852#if SCM_USE_PTHREAD_THREADS
1853 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
1854 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
1855 PTHREAD_MUTEX_RECURSIVE);
1856#endif
1857
1858 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
1859 scm_i_pthread_mutexattr_recursive);
1860 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
1861 scm_i_pthread_cond_init (&wake_up_cond, NULL);
1862
1863 guilify_self_1 (base);
1864}
1865
1866scm_t_bits scm_tc16_thread;
1867scm_t_bits scm_tc16_mutex;
1868scm_t_bits scm_tc16_condvar;
1869
1870void
1871scm_init_threads ()
1872{
1873 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
1874 scm_set_smob_print (scm_tc16_thread, thread_print);
1875
1876 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
1877 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
1878 scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
1879
1880 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
1881 sizeof (fat_cond));
1882 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
1883
1884 scm_i_default_dynamic_state = SCM_BOOL_F;
1885 guilify_self_2 (SCM_BOOL_F);
1886 threads_initialized_p = 1;
1887
1888 dynwind_critical_section_mutex =
1889 scm_permanent_object (scm_make_recursive_mutex ());
1890}
1891
1892void
1893scm_init_threads_default_dynamic_state ()
1894{
1895 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
1896 scm_i_default_dynamic_state = scm_permanent_object (state);
1897}
1898
1899void
1900scm_init_thread_procs ()
1901{
1902#include "libguile/threads.x"
1903}
1904
1905\f
1906/* IA64-specific things. */
1907
1908#ifdef __ia64__
1909# ifdef __hpux
1910# include <sys/param.h>
1911# include <sys/pstat.h>
1912void *
1913scm_ia64_register_backing_store_base (void)
1914{
1915 struct pst_vm_status vm_status;
1916 int i = 0;
1917 while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
1918 if (vm_status.pst_type == PS_RSESTACK)
1919 return (void *) vm_status.pst_vaddr;
1920 abort ();
1921}
1922void *
1923scm_ia64_ar_bsp (const void *ctx)
1924{
1925 uint64_t bsp;
1926 __uc_get_ar_bsp (ctx, &bsp);
1927 return (void *) bsp;
1928}
1929# endif /* hpux */
1930# ifdef linux
1931# include <ucontext.h>
1932void *
1933scm_ia64_register_backing_store_base (void)
1934{
1935 extern void *__libc_ia64_register_backing_store_base;
1936 return __libc_ia64_register_backing_store_base;
1937}
1938void *
1939scm_ia64_ar_bsp (const void *opaque)
1940{
1941 const ucontext_t *ctx = opaque;
1942 return (void *) ctx->uc_mcontext.sc_ar_bsp;
1943}
1944# endif /* linux */
1945#endif /* __ia64__ */
1946
1947
1948/*
1949 Local Variables:
1950 c-file-style: "gnu"
1951 End:
1952*/