/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */


\f
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "libguile/bdw-gc.h"
#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>

#ifdef HAVE_STRING_H
#include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
#endif

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include <assert.h>

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"
#include "libguile/weaks.h"

#ifdef __MINGW32__
#ifndef ETIMEDOUT
# define ETIMEDOUT WSAETIMEDOUT
#endif
# include <fcntl.h>
# include <process.h>
# define pipe(fd) _pipe (fd, 256, O_BINARY)
#endif /* __MINGW32__ */

#include <full-read.h>


\f

/* First some libgc shims. */

/* Make sure GC_fn_type is defined; it is missing from the public
   headers of GC 7.1 and earlier.  */
#ifndef HAVE_GC_FN_TYPE
typedef void * (* GC_fn_type) (void *);
#endif


#ifndef GC_SUCCESS
#define GC_SUCCESS 0
#endif

#ifndef GC_UNIMPLEMENTED
#define GC_UNIMPLEMENTED 3
#endif

/* Likewise struct GC_stack_base is missing before 7.1.  */
#ifndef HAVE_GC_STACK_BASE
struct GC_stack_base {
  void * mem_base; /* Base of memory stack. */
#ifdef __ia64__
  void * reg_base; /* Base of separate register stack. */
#endif
};

static int
GC_register_my_thread (struct GC_stack_base *stack_base SCM_UNUSED)
{
  return GC_UNIMPLEMENTED;
}

static void
GC_unregister_my_thread ()
{
}

#if !SCM_USE_PTHREAD_THREADS
/* No threads; we can just use GC_stackbottom. */
static void *
get_thread_stack_base ()
{
  return GC_stackbottom;
}

#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
  && defined PTHREAD_ATTR_GETSTACK_WORKS
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them.  */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com.  */
static void *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#else
#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl.  Please upgrade to libgc >= 7.1.
#endif

static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum. */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}

static void *
GC_call_with_stack_base (void * (*fn) (struct GC_stack_base*, void*), void *arg)
{
  struct GC_stack_base stack_base;

  stack_base.mem_base = (void*)&stack_base;
#ifdef __ia64__
  /* FIXME: Untested.  */
  {
    ucontext_t ctx;
    getcontext (&ctx);
    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
  }
#endif

  return fn (&stack_base, arg);
}
#endif /* HAVE_GC_STACK_BASE */


/* Now define with_gc_active and with_gc_inactive.  */

#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer).  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active.  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */


\f
static void
to_timespec (SCM t, scm_t_timespec *waittime)
{
  if (scm_is_pair (t))
    {
      waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
      waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
    }
  else
    {
      double time = scm_to_double (t);
      double sec = scm_c_truncate (time);

      waittime->tv_sec = (long) sec;
      waittime->tv_nsec = (long) ((time - sec) * 1000000000);
    }
}

\f
/*** Queues */

/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
   the risk of false references leading to unbounded retained space as
   described in "Bounding Space Usage of Conservative Garbage Collectors",
   H.J. Boehm, 2001.  */

/* Make an empty queue data structure.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}

/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, SCM_CDR (c));
          SCM_SETCDR (prev, SCM_CDR (c));

          /* GC-robust */
          SCM_SETCDR (c, SCM_EOL);

          SCM_CRITICAL_SECTION_END;
          return 1;
        }
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}

/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}

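/* A usage sketch for the queue primitives above (illustration only; the
   handle and value names are made up):

     SCM q = make_queue ();
     SCM h = enqueue (q, t->handle);    h can later be passed to remqueue
     ...
     remqueue (q, h);                   returns 0 if already removed
     SCM next = dequeue (q);            SCM_BOOL_F when the queue is empty
*/
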
/*** Thread smob routines */


static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int   ui;
    unsigned long  ul;
    scm_t_uintmax  um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}

\f
/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}

/* Wake up the first thread on QUEUE, if any.  The awoken thread is
   returned, or #f if the queue was empty.
 */
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}

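/* The typical waiter/waker pattern built on block_self and
   unblock_from_queue (a sketch; fat_mutex_lock below does exactly this):

     scm_i_scm_pthread_mutex_lock (&m->lock);
     err = block_self (m->waiting, mutex, &m->lock, waittime);
     ... err == 0 means woken via unblock_from_queue,
         EINTR means a system async arrived,
         ETIMEDOUT means WAITTIME expired ...
     scm_i_pthread_mutex_unlock (&m->lock);

   while the waker simply calls unblock_from_queue (m->waiting) with the
   same queue.
*/
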
\f
/* Getting into and out of guile mode.
 */

/* Key used to attach a cleanup handler to a given thread.  Also, if
   thread-local storage is unavailable, this key is used to retrieve the
   current thread with `pthread_getspecific ()'.  */
scm_i_pthread_key_t scm_i_thread_key;


#ifdef SCM_HAVE_THREAD_STORAGE_CLASS

/* When thread-local storage (TLS) is available, a pointer to the
   current-thread object is kept in TLS.  Note that storing the thread-object
   itself in TLS (rather than a pointer to some malloc'd memory) is not
   possible since thread objects may live longer than the actual thread they
   represent.  */
SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;

#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */


static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static int thread_count;

static SCM scm_i_default_dynamic_state;

/* Perform first stage of thread initialisation, in non-guile mode.
 */
static void
guilify_self_1 (struct GC_stack_base *base)
{
  scm_i_thread t;

  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
     before allocating anything in this thread, because allocation could
     cause GC to run, and GC could cause finalizers, which could invoke
     Scheme functions, which need the current thread to be set.  */

  t.pthread = scm_i_pthread_self ();
  t.handle = SCM_BOOL_F;
  t.result = SCM_BOOL_F;
  t.cleanup_handler = SCM_BOOL_F;
  t.mutexes = SCM_EOL;
  t.held_mutex = NULL;
  t.join_queue = SCM_EOL;
  t.dynamic_state = SCM_BOOL_F;
  t.dynwinds = SCM_EOL;
  t.active_asyncs = SCM_EOL;
  t.block_asyncs = 1;
  t.pending_asyncs = 1;
  t.critical_section_level = 0;
  t.base = base->mem_base;
#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
#endif
  t.continuation_root = SCM_EOL;
  t.continuation_base = t.base;
  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
  t.sleep_mutex = NULL;
  t.sleep_object = SCM_BOOL_F;
  t.sleep_fd = -1;

  if (pipe (t.sleep_pipe) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
  t.current_mark_stack_ptr = NULL;
  t.current_mark_stack_limit = NULL;
  t.canceled = 0;
  t.exited = 0;
  t.guile_mode = 0;

  /* The switcheroo.  */
  {
    scm_i_thread *t_ptr = &t;

    GC_disable ();
    t_ptr = GC_malloc (sizeof (scm_i_thread));
    memcpy (t_ptr, &t, sizeof t);

    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
    /* Cache the current thread in TLS for faster lookup.  */
    scm_i_current_thread = t_ptr;
#endif

    scm_i_pthread_mutex_lock (&thread_admin_mutex);
    t_ptr->next_thread = all_threads;
    all_threads = t_ptr;
    thread_count++;
    scm_i_pthread_mutex_unlock (&thread_admin_mutex);

    GC_enable ();
  }
}

/* Perform second stage of thread initialisation, in guile mode.
 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;
}

\f
/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
                                owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))

/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
                                      (scm_t_catch_body) scm_call_0, ptr,
                                      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      if (!SCM_UNBNDP (mutex))
        {
          fat_mutex *m  = SCM_MUTEX_DATA (mutex);

          scm_i_pthread_mutex_lock (&m->lock);

          /* Since MUTEX is in `t->mutexes', T must be its owner.  */
          assert (scm_is_eq (m->owner, t->handle));

          unblock_from_queue (m->waiting);

          scm_i_pthread_mutex_unlock (&m->lock);
        }

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}

static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered.  */
  GC_register_my_thread (sb);

  return scm_with_guile (do_thread_exit, v);
}

static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here. */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  */
  scm_i_ensure_signal_delivery_thread ();

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;

        /* GC-robust */
        t->next_thread = NULL;

        break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

  GC_unregister_my_thread ();
}

static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}

/* Perform any initializations necessary to make the current thread
   known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
   if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Returns zero when the thread was known to guile already; otherwise
   return 1.

   Note that it could be the case that the thread was known
   to Guile, but not in guile mode (because we are within a
   scm_without_guile call).  Check SCM_I_CURRENT_THREAD->guile_mode to
   be sure.  New threads are put into guile mode implicitly.  */

static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
      */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
        {
          /* First thread ever to enter Guile.  Run the full
             initialization.
          */
          scm_i_init_guile (base);

          /* Allow other threads to come in later.  */
          GC_allow_register_threads ();

          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
        }
      else
        {
          /* Guile is already initialized, but this thread enters it for
             the first time.  Only initialize this thread.
          */
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc.  */
          GC_register_my_thread (base);

          guilify_self_1 (base);
          guilify_self_2 (parent);
        }
      return 1;
    }
}

void
scm_init_guile ()
{
  struct GC_stack_base stack_base;

  if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
    scm_i_init_thread_for_guile (&stack_base,
                                 scm_i_default_dynamic_state);
  else
    {
      fprintf (stderr, "Failed to get stack base for current thread.\n");
      exit (1);
    }
}

SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  on_thread_exit (SCM_I_CURRENT_THREAD);
}

struct with_guile_args
{
  GC_fn_type func;
  void *data;
  SCM parent;
};

static void *
with_guile_trampoline (void *data)
{
  struct with_guile_args *args = data;

  return scm_c_with_continuation_barrier (args->func, args->data);
}

static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
         scm_with_guile, or because we are within a scm_without_guile.

         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than when it was
         when this thread was first guilified.  Thus, `base' must be
         updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
        t->base = SCM_STACK_PTR (base->mem_base);
#endif

      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}

static void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  struct with_guile_args args;

  args.func = func;
  args.data = data;
  args.parent = parent;

  return GC_call_with_stack_base (with_guile_and_parent, &args);
}

void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
                                      scm_i_default_dynamic_state);
}

void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *result;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  if (t->guile_mode)
    {
      SCM_I_CURRENT_THREAD->guile_mode = 0;
      result = with_gc_inactive (func, data);
      SCM_I_CURRENT_THREAD->guile_mode = 1;
    }
  else
    /* Otherwise we're not in guile mode, so nothing to do.  */
    result = func (data);

  return result;
}

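/* A sketch of how the two entry points above are meant to be used from C
   (the callback name is made up for illustration):

     static void *do_scheme_work (void *data)
     {
       scm_call_0 (scm_c_public_ref ("my module", "work"));
       return NULL;
     }

     scm_with_guile (do_scheme_work, NULL);      enter guile mode

   Conversely, code that is already in guile mode can wrap a long-running
   blocking operation in scm_without_guile so that it does not hold up
   the garbage collector, as scm_std_select does further below.
*/
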
\f
/*** Thread creation */

typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;

static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}

static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}

SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
            (SCM thunk, SCM handler),
            "Call @code{thunk} in a new thread and with a new dynamic state,\n"
            "returning a new thread object representing the thread.  The procedure\n"
            "@var{thunk} is called via @code{with-continuation-barrier}.\n"
            "\n"
            "When @var{handler} is specified, then @var{thunk} is called from\n"
            "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
            "handler.  This catch is established inside the continuation barrier.\n"
            "\n"
            "Once @var{thunk} or @var{handler} returns, the return value is made\n"
            "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
              handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME

typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;

static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}

static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}

SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}

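/* Usage sketch for scm_spawn_thread; the worker name is hypothetical,
   the handler is the same one used for cleanup handlers above:

     static SCM worker (void *data)            the catch body, run in the new thread
     {
       return scm_from_int (42);
     }

     SCM t = scm_spawn_thread (worker, NULL,
                               scm_handle_by_message_noexit, NULL);
     scm_join_thread (t);                      returns the body's result
*/
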
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
            (),
"Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME

SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
            (SCM thread),
"Asynchronously force the target @var{thread} to terminate. @var{thread} "
"cannot be the current thread, and if @var{thread} has already terminated or "
"been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
            (SCM thread, SCM proc),
"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
"This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
            (SCM thread),
"Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME

SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
            (SCM thread, SCM timeout, SCM timeoutval),
"Suspend execution of the calling thread until the target @var{thread} "
"terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
        {
          int err = block_self (t->join_queue, thread, &t->admin_mutex,
                                timeout_ptr);
          if (err == 0)
            {
              if (t->exited)
                {
                  res = t->result;
                  break;
                }
            }
          else if (err == ETIMEDOUT)
            break;

          scm_i_pthread_mutex_unlock (&t->admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

          /* Check for exit again, since we just released and
             reacquired the admin mutex, before the next block_self
             call (which would block forever if t has already
             exited).  */
          if (t->exited)
            {
              res = t->result;
              break;
            }
        }
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a thread.")
#define FUNC_NAME s_scm_thread_p
{
  return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME


static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  return 0;
}

static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}

static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}

SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}

SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");

SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
            (SCM flags),
            "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex_with_flags
{
  int unchecked_unlock = 0, external_unlock = 0, recursive = 0;

  SCM ptr = flags;
  while (! scm_is_null (ptr))
    {
      SCM flag = SCM_CAR (ptr);
      if (scm_is_eq (flag, unchecked_unlock_sym))
        unchecked_unlock = 1;
      else if (scm_is_eq (flag, allow_external_unlock_sym))
        external_unlock = 1;
      else if (scm_is_eq (flag, recursive_sym))
        recursive = 1;
      else
        SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
      ptr = SCM_CDR (ptr);
    }
  return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
}
#undef FUNC_NAME

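/* The flags accepted by make-mutex are the three symbols registered
   above.  From Scheme, for example (sketch):

     (make-mutex)                           standard mutex
     (make-mutex 'recursive)                like make-recursive-mutex
     (make-mutex 'unchecked-unlock
                 'allow-external-unlock)    relaxed unlocking checks
*/
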
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
            (void),
            "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME

SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");

static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
        {
          m->owner = new_owner;
          m->level++;

          if (SCM_I_IS_THREAD (new_owner))
            {
              scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
              scm_i_pthread_mutex_lock (&t->admin_mutex);

              /* Only keep a weak reference to MUTEX so that it's not
                 retained when not referenced elsewhere (bug #27450).
                 The weak pair itself is eventually removed when MUTEX
                 is unlocked.  Note that `t->mutexes' lists mutexes
                 currently held by T, so it should be small.  */
              t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

              scm_i_pthread_mutex_unlock (&t->admin_mutex);
            }
          *ret = 1;
          break;
        }
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
        {
          m->owner = new_owner;
          err = scm_cons (scm_abandoned_mutex_error_key,
                          scm_from_locale_string ("lock obtained on abandoned "
                                                  "mutex"));
          *ret = 1;
          break;
        }
      else if (scm_is_eq (m->owner, new_owner))
        {
          if (m->recursive)
            {
              m->level++;
              *ret = 1;
            }
          else
            {
              err = scm_cons (scm_misc_error_key,
                              scm_from_locale_string ("mutex already locked "
                                                      "by thread"));
              *ret = 0;
            }
          break;
        }
      else
        {
          if (timeout != NULL)
            {
              gettimeofday (&current_time, NULL);
              if (current_time.tv_sec > timeout->tv_sec ||
                  (current_time.tv_sec == timeout->tv_sec &&
                   current_time.tv_usec * 1000 > timeout->tv_nsec))
                {
                  *ret = 0;
                  break;
                }
            }
          block_self (m->waiting, mutex, &m->lock, timeout);
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}

SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
            (SCM m, SCM timeout, SCM owner),
"Lock @var{mutex}. If the mutex is already locked, the calling thread "
"blocks until the mutex becomes available. The function returns when "
"the calling thread owns the lock on @var{mutex}.  Locking a mutex that "
"a thread already owns will succeed right away and will not block the "
"thread.  That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}

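/* scm_dynwind_lock_mutex is meant to be called inside a dynwind context,
   so that the mutex is released whenever the context is left, including
   on a non-local exit.  A sketch:

     scm_dynwind_begin (0);
     scm_dynwind_lock_mutex (mx);
     ... code that may throw; mx is unlocked on unwind ...
     scm_dynwind_end ();
*/
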
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
            (SCM mutex),
"Try to lock @var{mutex}. If the mutex is already locked by someone "
"else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

/*** Fat condition variables */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))

static int
fat_mutex_unlock (SCM mutex, SCM cond,
                  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
        {
          if (!m->unchecked_unlock)
            {
              scm_i_pthread_mutex_unlock (&m->lock);
              scm_misc_error (NULL, "mutex not locked", SCM_EOL);
            }
          owner = t->handle;
        }
      else if (!m->allow_external_unlock)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
        }
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
        {
          int brk = 0;

          if (m->level > 0)
            m->level--;
          if (m->level == 0)
            {
              /* Change the owner of MUTEX.  */
              t->mutexes = scm_delq_x (mutex, t->mutexes);
              m->owner = unblock_from_queue (m->waiting);
            }

          t->block_asyncs++;

          err = block_self (c->waiting, cond, &m->lock, waittime);
          scm_i_pthread_mutex_unlock (&m->lock);

          if (err == 0)
            {
              ret = 1;
              brk = 1;
            }
          else if (err == ETIMEDOUT)
            {
              ret = 0;
              brk = 1;
            }
          else if (err != EINTR)
            {
              errno = err;
              scm_syserror (NULL);
            }

          if (brk)
            {
              if (relock)
                scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
              t->block_asyncs--;
              break;
            }

          t->block_asyncs--;
          scm_async_click ();

          scm_remember_upto_here_2 (cond, mutex);

          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  else
    {
      if (m->level > 0)
        m->level--;
      if (m->level == 0)
        {
          /* Change the owner of MUTEX.  */
          t->mutexes = scm_delq_x (mutex, t->mutexes);
          m->owner = unblock_from_queue (m->waiting);
        }

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}

SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
            (SCM mx, SCM cond, SCM timeout),
"Unlocks @var{mutex} if the calling thread owns the lock on "
"@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
"thread results in undefined behaviour. Once a mutex has been unlocked, "
"one thread blocked on @var{mutex} is awakened and grabs the mutex "
"lock.  Every call to @code{lock-mutex} by this thread must be matched "
"with a call to @code{unlock-mutex}.  Only the last call to "
"@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! (SCM_UNBNDP (timeout)))
        {
          to_timespec (timeout, &cwaittime);
          waittime = &cwaittime;
        }
    }

  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a mutex.")
#define FUNC_NAME s_scm_mutex_p
{
  return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
            (SCM mx),
            "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM owner;
  fat_mutex *m = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_lock (&m->lock);
  owner = m->owner;
  scm_i_pthread_mutex_unlock (&m->lock);

  return owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
            (SCM mx),
            "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
            (SCM mx),
            "Returns @code{#t} if the mutex @var{mx} is locked.")
#define FUNC_NAME s_scm_mutex_locked_p
{
  SCM_VALIDATE_MUTEX (1, mx);
  return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}

SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME

SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
            (SCM cv, SCM mx, SCM t),
"Wait until @var{cond-var} has been signalled.  While waiting, "
"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
"is locked again when this function returns.  When @var{time} is given, "
"it specifies a point in time where the waiting should be aborted.  It "
"can be either an integer as returned by @code{current-time} or a pair "
"as returned by @code{gettimeofday}.  When the waiting is aborted the "
"mutex is locked and @code{#f} is returned.  When the condition "
"variable is in fact signalled, the mutex is also locked and @code{#t} "
"is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

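/* The usual Scheme-level pattern for the condition-variable API above
   (a sketch; `ready?' stands in for the caller's real predicate):

     (define m (make-mutex))
     (define c (make-condition-variable))

     (lock-mutex m)
     (while (not (ready?))
       (wait-condition-variable c m))   unlocks m while waiting, relocks on return
     (unlock-mutex m)
*/
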
9de87eea
MV
1722static void
1723fat_cond_signal (fat_cond *c)
1724{
9de87eea 1725 unblock_from_queue (c->waiting);
9de87eea
MV
1726}
1727
d823b11b
MV
1728SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1729 (SCM cv),
1730 "Wake up one thread that is waiting for @var{cv}")
1731#define FUNC_NAME s_scm_signal_condition_variable
5f05c406 1732{
d823b11b 1733 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1734 fat_cond_signal (SCM_CONDVAR_DATA (cv));
d823b11b 1735 return SCM_BOOL_T;
5f05c406 1736}
d823b11b 1737#undef FUNC_NAME
5f05c406 1738
9de87eea
MV
1739static void
1740fat_cond_broadcast (fat_cond *c)
1741{
9de87eea
MV
1742 while (scm_is_true (unblock_from_queue (c->waiting)))
1743 ;
9de87eea
MV
1744}
1745
d823b11b
MV
1746SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1747 (SCM cv),
1748 "Wake up all threads that are waiting for @var{cv}. ")
1749#define FUNC_NAME s_scm_broadcast_condition_variable
5f05c406 1750{
d823b11b 1751 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1752 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
d823b11b 1753 return SCM_BOOL_T;
5f05c406 1754}
d823b11b 1755#undef FUNC_NAME
5f05c406 1756
6180e336
NJ
1757SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1758 (SCM obj),
1759 "Return @code{#t} if @var{obj} is a condition variable.")
1760#define FUNC_NAME s_scm_condition_variable_p
1761{
1762 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1763}
1764#undef FUNC_NAME
1765
\f
/*** Select */

struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  int result;
  int errno_value;
};

static void *
do_std_select (void *args)
{
  struct select_args *select_args;

  select_args = (struct select_args *) args;

  select_args->result =
    select (select_args->nfds,
            select_args->read_fds, select_args->write_fds,
            select_args->except_fds, select_args->timeout);
  select_args->errno_value = errno;

  return NULL;
}

int
scm_std_select (int nfds,
                SELECT_TYPE *readfds,
                SELECT_TYPE *writefds,
                SELECT_TYPE *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd + 1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}

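/* Illustrative sketch (not part of the original source): waiting for a
   single file descriptor to become readable via scm_std_select, with a
   one-second timeout.  Unlike a raw select(), the call can be woken up
   by Guile asyncs, in which case it fails with EINTR; callers are
   expected to handle that case (here by simply retrying).  The helper
   name is hypothetical.  */
static int
example_wait_readable (int fd)
{
  while (1)
    {
      fd_set readfds;
      struct timeval tv;
      int res;

      FD_ZERO (&readfds);
      FD_SET (fd, &readfds);
      tv.tv_sec = 1;
      tv.tv_usec = 0;

      res = scm_std_select (fd + 1, &readfds, NULL, NULL, &tv);
      if (res > 0 && FD_ISSET (fd, &readfds))
        return 1;               /* FD is readable.  */
      if (res == 0)
        return 0;               /* Timed out.  */
      if (res < 0 && errno != EINTR)
        return -1;              /* Real error.  */
      /* EINTR: interrupted by an async wakeup; retry.  */
    }
}
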
/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

/* It seems reasonable not to run procedures related to mutexes and
   condition variables within `GC_do_blocking ()', since (i) the GC can
   operate even without it, and (ii) the only potential gain would be
   GC latency.  See
   http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
   for a discussion of the pros and cons.  */

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  int res = scm_i_pthread_mutex_lock (mutex);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *) data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

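/* Illustrative sketch (not part of the original source): using
   scm_dynwind_pthread_mutex_lock inside a dynwind context so that the
   mutex is released even if BODY throws a Scheme exception or
   otherwise unwinds the stack.  The helper itself is hypothetical.  */
static void
example_with_mutex (scm_i_pthread_mutex_t *mutex,
                    void (*body) (void *), void *data)
{
  scm_dynwind_begin (0);
  scm_dynwind_pthread_mutex_lock (mutex);
  body (data);        /* Runs with MUTEX held.  */
  scm_dynwind_end (); /* The unwind handler unlocks MUTEX.  */
}
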
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}

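/* Illustrative sketch (not part of the original source): the usual
   predicate loop around scm_pthread_cond_wait.  The mutex, condition
   variable, flag, and `example_*' helpers are hypothetical; the point
   is that the wrapper is used exactly like pthread_cond_wait,
   re-checking the predicate after every wakeup.  */
static scm_i_pthread_mutex_t example_flag_mutex;
static scm_i_pthread_cond_t example_flag_cond;
static int example_flag = 0;

static void
example_init_flag (void)
{
  scm_i_pthread_mutex_init (&example_flag_mutex, NULL);
  scm_i_pthread_cond_init (&example_flag_cond, NULL);
}

static void
example_wait_for_flag (void)
{
  scm_pthread_mutex_lock (&example_flag_mutex);
  while (!example_flag)
    scm_pthread_cond_wait (&example_flag_cond, &example_flag_mutex);
  scm_i_pthread_mutex_unlock (&example_flag_mutex);
}
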
#endif

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We cannot allocate while holding the thread_admin_mutex because of
     the way GC is done, so the result list is allocated up front and
     then filled in under the lock.  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

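/* Illustrative sketch (not part of the original source): walking the
   list returned by scm_all_threads from C and counting how many of the
   listed threads have already exited, using only the public accessors
   defined in this file.  The helper name is hypothetical.  */
static int
example_count_exited_threads (void)
{
  int count = 0;
  SCM threads = scm_all_threads ();

  for (; scm_is_pair (threads); threads = SCM_CDR (threads))
    if (scm_c_thread_exited_p (SCM_CAR (threads)))
      count++;
  return count;
}
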
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.  */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

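/* Illustrative sketch (not part of the original source): entering a
   critical section for the duration of a dynwind context.  Passing
   SCM_BOOL_F selects the shared recursive mutex created in
   scm_init_threads below; asyncs stay blocked until scm_dynwind_end.
   The helper itself is hypothetical.  */
static void
example_critical_update (void (*update) (void *), void *data)
{
  scm_dynwind_begin (0);
  scm_dynwind_critical_section (SCM_BOOL_F);
  update (data);      /* Runs with the critical-section mutex held.  */
  scm_dynwind_end (); /* Releases the mutex and unblocks asyncs.  */
}
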
/*** Initialization */

scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

\f
/* IA64-specific things.  */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */


/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/