[bpt/guile.git] / libguile / threads.c
/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */


#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "libguile/bdw-gc.h"
#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>

#ifdef HAVE_STRING_H
#include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
#endif

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include <assert.h>

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"
#include "libguile/weaks.h"

#ifdef __MINGW32__
#ifndef ETIMEDOUT
# define ETIMEDOUT WSAETIMEDOUT
#endif
# include <fcntl.h>
# include <process.h>
# define pipe(fd) _pipe (fd, 256, O_BINARY)
#endif /* __MINGW32__ */

#include <full-read.h>

70
71\f
72
73/* First some libgc shims. */
74
75/* Make sure GC_fn_type is defined; it is missing from the public
76 headers of GC 7.1 and earlier. */
77#ifndef HAVE_GC_FN_TYPE
78typedef void * (* GC_fn_type) (void *);
79#endif
80
81
82#ifndef GC_SUCCESS
83#define GC_SUCCESS 0
84#endif
85
86#ifndef GC_UNIMPLEMENTED
87#define GC_UNIMPLEMENTED 3
88#endif
89
90/* Likewise struct GC_stack_base is missing before 7.1. */
91#ifndef HAVE_GC_STACK_BASE
92struct GC_stack_base {
93 void * mem_base; /* Base of memory stack. */
94#ifdef __ia64__
95 void * reg_base; /* Base of separate register stack. */
96#endif
97};
98
99static int
987b8160 100GC_register_my_thread (struct GC_stack_base *stack_base)
101{
102 return GC_UNIMPLEMENTED;
103}
104
105static void
106GC_unregister_my_thread ()
107{
108}
109
110#if !SCM_USE_PTHREAD_THREADS
111/* No threads; we can just use GC_stackbottom. */
5f0d2951 112static void *
113get_thread_stack_base ()
114{
115 return GC_stackbottom;
116}
117
118#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
119 && defined PTHREAD_ATTR_GETSTACK_WORKS
120/* This method for GNU/Linux and perhaps some other systems.
121 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
122 available on them. */
123static void *
124get_thread_stack_base ()
125{
126 pthread_attr_t attr;
127 void *start, *end;
128 size_t size;
129
130 pthread_getattr_np (pthread_self (), &attr);
131 pthread_attr_getstack (&attr, &start, &size);
132 end = (char *)start + size;
133
134#if SCM_STACK_GROWS_UP
135 return start;
136#else
137 return end;
138#endif
139}
140
141#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
142/* This method for MacOS X.
143 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
144 but as of 2006 there's nothing obvious at apple.com. */
145static void *
146get_thread_stack_base ()
147{
148 return pthread_get_stackaddr_np (pthread_self ());
149}
150
151#else
152#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
153#endif
154
155static int
987b8160 156GC_get_stack_base (struct GC_stack_base *stack_base)
5f0d2951 157{
158 stack_base->mem_base = get_thread_stack_base ();
159#ifdef __ia64__
160 /* Calculate and store off the base of this thread's register
161 backing store (RBS). Unfortunately our implementation(s) of
162 scm_ia64_register_backing_store_base are only reliable for the
163 main thread. For other threads, therefore, find out the current
164 top of the RBS, and use that as a maximum. */
165 stack_base->reg_base = scm_ia64_register_backing_store_base ();
166 {
167 ucontext_t ctx;
168 void *bsp;
169 getcontext (&ctx);
170 bsp = scm_ia64_ar_bsp (&ctx);
171 if (stack_base->reg_base > bsp)
172 stack_base->reg_base = bsp;
173 }
174#endif
175 return GC_SUCCESS;
5f0d2951 176}
177
178static void *
179GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
180{
181 struct GC_stack_base stack_base;
182
183 stack_base.mem_base = (void*)&stack_base;
184#ifdef __ia64__
185 /* FIXME: Untested. */
186 {
187 ucontext_t ctx;
188 getcontext (&ctx);
189 stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
190 }
191#endif
192
193 return fn (&stack_base, arg);
194}
195#endif /* HAVE_GC_STACK_BASE */
196
5f0d2951 197
198/* Now define with_gc_active and with_gc_inactive. */
199
200#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
201
202/* We have a sufficiently new libgc (7.2 or newer). */
203
204static void*
205with_gc_inactive (GC_fn_type func, void *data)
206{
207 return GC_do_blocking (func, data);
208}
209
210static void*
211with_gc_active (GC_fn_type func, void *data)
212{
213 return GC_call_with_gc_active (func, data);
214}
215
216#else
217
218/* libgc not new enough, so never actually deactivate GC.
219
220 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
221 GC_call_with_gc_active. */
222
223static void*
224with_gc_inactive (GC_fn_type func, void *data)
225{
226 return func (data);
227}
228
229static void*
230with_gc_active (GC_fn_type func, void *data)
231{
232 return func (data);
233}
234
235#endif /* HAVE_GC_DO_BLOCKING */
236
237
634aa8de 238\f
239static void
240to_timespec (SCM t, scm_t_timespec *waittime)
241{
242 if (scm_is_pair (t))
243 {
244 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
245 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
246 }
247 else
248 {
249 double time = scm_to_double (t);
250 double sec = scm_c_truncate (time);
251
252 waittime->tv_sec = (long) sec;
2a1d0688 253 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
254 }
255}
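/* Illustrative note (not part of the upstream file): the timeout objects
   converted above come straight from Scheme, e.g. for a thread THR:

       (join-thread thr (+ (current-time) 5))   ; integer seconds
       (join-thread thr (cons secs usecs))      ; pair, as from gettimeofday

   Either shape ends up as a scm_t_timespec via to_timespec.  */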
256
c058db8a 257\f
d823b11b 258/*** Queues */
7bfd3b9e 259
260/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
261 the risk of false references leading to unbounded retained space as
262 described in "Bounding Space Usage of Conservative Garbage Collectors",
263 H.J. Boehm, 2001. */
264
265/* Make an empty queue data structure.
266 */
267static SCM
268make_queue ()
269{
270 return scm_cons (SCM_EOL, SCM_EOL);
271}
7bfd3b9e 272
273/* Put T at the back of Q and return a handle that can be used with
274 remqueue to remove T from Q again.
275 */
276static SCM
277enqueue (SCM q, SCM t)
278{
279 SCM c = scm_cons (t, SCM_EOL);
d2a51087 280 SCM_CRITICAL_SECTION_START;
d2e53ed6 281 if (scm_is_null (SCM_CDR (q)))
282 SCM_SETCDR (q, c);
283 else
284 SCM_SETCDR (SCM_CAR (q), c);
285 SCM_SETCAR (q, c);
d2a51087 286 SCM_CRITICAL_SECTION_END;
287 return c;
288}
7bfd3b9e 289
290/* Remove the element that the handle C refers to from the queue Q. C
291 must have been returned from a call to enqueue. The return value
292 is zero when the element referred to by C has already been removed.
293 Otherwise, 1 is returned.
294*/
295static int
296remqueue (SCM q, SCM c)
297{
298 SCM p, prev = q;
d2a51087 299 SCM_CRITICAL_SECTION_START;
d2e53ed6 300 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
d823b11b 301 {
bc36d050 302 if (scm_is_eq (p, c))
d823b11b 303 {
bc36d050 304 if (scm_is_eq (c, SCM_CAR (q)))
305 SCM_SETCAR (q, SCM_CDR (c));
306 SCM_SETCDR (prev, SCM_CDR (c));
307
308 /* GC-robust */
309 SCM_SETCDR (c, SCM_EOL);
310
d2a51087 311 SCM_CRITICAL_SECTION_END;
9de87eea 312 return 1;
313 }
314 prev = p;
315 }
d2a51087 316 SCM_CRITICAL_SECTION_END;
9de87eea 317 return 0;
318}
319
320/* Remove the front-most element from the queue Q and return it.
321 Return SCM_BOOL_F when Q is empty.
322*/
323static SCM
324dequeue (SCM q)
325{
326 SCM c;
327 SCM_CRITICAL_SECTION_START;
328 c = SCM_CDR (q);
d2e53ed6 329 if (scm_is_null (c))
330 {
331 SCM_CRITICAL_SECTION_END;
332 return SCM_BOOL_F;
333 }
334 else
335 {
336 SCM_SETCDR (q, SCM_CDR (c));
d2e53ed6 337 if (scm_is_null (SCM_CDR (q)))
d823b11b 338 SCM_SETCAR (q, SCM_EOL);
d2a51087 339 SCM_CRITICAL_SECTION_END;
340
341 /* GC-robust */
342 SCM_SETCDR (c, SCM_EOL);
343
344 return SCM_CAR (c);
345 }
346}
7bfd3b9e 347
9de87eea 348/*** Thread smob routines */
76da80e7 349
350
351static int
352thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
353{
  /* On a GNU system pthread_t is an unsigned long, but on MinGW it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
358 union {
359 scm_i_pthread_t p;
360 unsigned short us;
361 unsigned int ui;
362 unsigned long ul;
363 scm_t_uintmax um;
364 } u;
9de87eea 365 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
366 scm_i_pthread_t p = t->pthread;
367 scm_t_uintmax id;
368 u.p = p;
369 if (sizeof (p) == sizeof (unsigned short))
370 id = u.us;
371 else if (sizeof (p) == sizeof (unsigned int))
372 id = u.ui;
373 else if (sizeof (p) == sizeof (unsigned long))
374 id = u.ul;
375 else
376 id = u.um;
377
d823b11b 378 scm_puts ("#<thread ", port);
23d72566 379 scm_uintprint (id, 10, port);
1b92fb6b 380 scm_puts (" (", port);
0345e278 381 scm_uintprint ((scm_t_bits)t, 16, port);
1b92fb6b 382 scm_puts (")>", port);
383 return 1;
384}
385
706846f6 386\f
9de87eea 387/*** Blocking on queues. */
f7eca35d 388
/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/
d823b11b 392
393/* Put the current thread on QUEUE and go to sleep, waiting for it to
394 be woken up by a call to 'unblock_from_queue', or to be
395 interrupted. Upon return of this function, the current thread is
396 no longer on QUEUE, even when the sleep has been interrupted.
397
d2a51087 398 The caller of block_self must hold MUTEX. It will be atomically
399 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
400
401 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
402 as MUTEX is needed.
403
404 When WAITTIME is not NULL, the sleep will be aborted at that time.
405
406 The return value of block_self is an errno value. It will be zero
407 when the sleep has been successfully completed by a call to
408 unblock_from_queue, EINTR when it has been interrupted by the
409 delivery of a system async, and ETIMEDOUT when the timeout has
410 expired.
411
412 The system asyncs themselves are not executed by block_self.
413*/
414static int
415block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
416 const scm_t_timespec *waittime)
76da80e7 417{
418 scm_i_thread *t = SCM_I_CURRENT_THREAD;
419 SCM q_handle;
420 int err;
421
422 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
423 err = EINTR;
424 else
425 {
426 t->block_asyncs++;
427 q_handle = enqueue (queue, t->handle);
428 if (waittime == NULL)
429 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
430 else
431 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
432
433 /* When we are still on QUEUE, we have been interrupted. We
434 report this only when no other error (such as a timeout) has
435 happened above.
436 */
437 if (remqueue (queue, q_handle) && err == 0)
438 err = EINTR;
439 t->block_asyncs--;
440 scm_i_reset_sleep (t);
441 }
442
443 return err;
76da80e7 444}
9de87eea 445
446/* Wake up the first thread on QUEUE, if any. The awoken thread is
447 returned, or #f if the queue was empty.
448 */
449static SCM
450unblock_from_queue (SCM queue)
451{
452 SCM thread = dequeue (queue);
453 if (scm_is_true (thread))
454 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
455 return thread;
456}
457
45f15cac 458\f
459/* Getting into and out of guile mode.
460 */
461
462/* Key used to attach a cleanup handler to a given thread. Also, if
463 thread-local storage is unavailable, this key is used to retrieve the
464 current thread with `pthread_getspecific ()'. */
465scm_i_pthread_key_t scm_i_thread_key;
466
467
468#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
469
470/* When thread-local storage (TLS) is available, a pointer to the
471 current-thread object is kept in TLS. Note that storing the thread-object
472 itself in TLS (rather than a pointer to some malloc'd memory) is not
473 possible since thread objects may live longer than the actual thread they
474 represent. */
475SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
476
f60a7648 477#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
705edb95 478
d823b11b 479
480static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
481static scm_i_thread *all_threads = NULL;
482static int thread_count;
483
484static SCM scm_i_default_dynamic_state;
485
486/* Perform first stage of thread initialisation, in non-guile mode.
d823b11b 487 */
9de87eea 488static void
12c1d861 489guilify_self_1 (struct GC_stack_base *base)
d823b11b 490{
491 scm_i_thread t;
492
493 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
494 before allocating anything in this thread, because allocation could
495 cause GC to run, and GC could cause finalizers, which could invoke
496 Scheme functions, which need the current thread to be set. */
497
498 t.pthread = scm_i_pthread_self ();
499 t.handle = SCM_BOOL_F;
500 t.result = SCM_BOOL_F;
501 t.cleanup_handler = SCM_BOOL_F;
502 t.mutexes = SCM_EOL;
503 t.held_mutex = NULL;
504 t.join_queue = SCM_EOL;
505 t.dynamic_state = SCM_BOOL_F;
506 t.dynwinds = SCM_EOL;
507 t.active_asyncs = SCM_EOL;
508 t.block_asyncs = 1;
509 t.pending_asyncs = 1;
510 t.critical_section_level = 0;
511 t.base = base->mem_base;
346e4402 512#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
346e4402 514#endif
515 t.continuation_root = SCM_EOL;
516 t.continuation_base = t.base;
517 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
518 t.sleep_mutex = NULL;
519 t.sleep_object = SCM_BOOL_F;
520 t.sleep_fd = -1;
521
522 if (pipe (t.sleep_pipe) != 0)
523 /* FIXME: Error conditions during the initialization phase are handled
524 gracelessly since public functions such as `scm_init_guile ()'
525 currently have type `void'. */
526 abort ();
527
528 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
529 t.current_mark_stack_ptr = NULL;
530 t.current_mark_stack_limit = NULL;
531 t.canceled = 0;
532 t.exited = 0;
533 t.guile_mode = 0;
9de87eea 534
535 /* The switcheroo. */
536 {
537 scm_i_thread *t_ptr = &t;
538
539 GC_disable ();
540 t_ptr = GC_malloc (sizeof (scm_i_thread));
541 memcpy (t_ptr, &t, sizeof t);
542
543 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
544
545#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
546 /* Cache the current thread in TLS for faster lookup. */
547 scm_i_current_thread = t_ptr;
f60a7648 548#endif
9de87eea 549
550 scm_i_pthread_mutex_lock (&thread_admin_mutex);
551 t_ptr->next_thread = all_threads;
552 all_threads = t_ptr;
553 thread_count++;
554 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
555
556 GC_enable ();
557 }
558}
559
9de87eea 560/* Perform second stage of thread initialisation, in guile mode.
d823b11b 561 */
9de87eea
MV
562static void
563guilify_self_2 (SCM parent)
d823b11b 564{
9de87eea
MV
565 scm_i_thread *t = SCM_I_CURRENT_THREAD;
566
72e6b608
LC
567 t->guile_mode = 1;
568
9de87eea 569 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
c812243b 570
9de87eea
MV
571 t->continuation_root = scm_cons (t->handle, SCM_EOL);
572 t->continuation_base = t->base;
2bbe1533 573 t->vm = SCM_BOOL_F;
9de87eea
MV
574
575 if (scm_is_true (parent))
576 t->dynamic_state = scm_make_dynamic_state (parent);
577 else
578 t->dynamic_state = scm_i_make_initial_dynamic_state ();
579
580 t->join_queue = make_queue ();
581 t->block_asyncs = 0;
d823b11b
MV
582}
583
6180e336
NJ
584\f
585/*** Fat mutexes */
586
587/* We implement our own mutex type since we want them to be 'fair', we
588 want to do fancy things while waiting for them (like running
589 asyncs) and we might want to add things that are nice for
590 debugging.
591*/
592
593typedef struct {
594 scm_i_pthread_mutex_t lock;
595 SCM owner;
adc085f1 596 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
6180e336 597
adc085f1 598 int recursive; /* allow recursive locking? */
6180e336
NJ
599 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
600 int allow_external_unlock; /* is it an error to unlock a mutex that is not
601 owned by the current thread? */
602
603 SCM waiting; /* the threads waiting for this mutex. */
604} fat_mutex;
605
606#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
607#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
608
9de87eea 609/* Perform thread tear-down, in guile mode.
d823b11b 610 */
9de87eea
MV
611static void *
612do_thread_exit (void *v)
613{
2e77f720
LC
614 scm_i_thread *t = (scm_i_thread *) v;
615
616 if (!scm_is_false (t->cleanup_handler))
617 {
618 SCM ptr = t->cleanup_handler;
619
620 t->cleanup_handler = SCM_BOOL_F;
621 t->result = scm_internal_catch (SCM_BOOL_T,
622 (scm_t_catch_body) scm_call_0, ptr,
623 scm_handle_by_message_noexit, NULL);
624 }
9de87eea 625
86a597f8 626 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
9de87eea
MV
627
628 t->exited = 1;
0c97d7dd
MV
629 close (t->sleep_pipe[0]);
630 close (t->sleep_pipe[1]);
9de87eea
MV
631 while (scm_is_true (unblock_from_queue (t->join_queue)))
632 ;
9de87eea 633
74926120 634 while (!scm_is_null (t->mutexes))
6180e336 635 {
a0faf7dd 636 SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);
74926120 637
a0faf7dd
LC
638 if (!SCM_UNBNDP (mutex))
639 {
640 fat_mutex *m = SCM_MUTEX_DATA (mutex);
641
642 scm_i_pthread_mutex_lock (&m->lock);
f57fdf07
LC
643
644 /* Since MUTEX is in `t->mutexes', T must be its owner. */
645 assert (scm_is_eq (m->owner, t->handle));
646
a0faf7dd 647 unblock_from_queue (m->waiting);
f57fdf07 648
a0faf7dd
LC
649 scm_i_pthread_mutex_unlock (&m->lock);
650 }
6180e336 651
a0faf7dd 652 t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
6180e336
NJ
653 }
654
86a597f8 655 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 656
9de87eea
MV
657 return NULL;
658}
659
f60a7648
AW
660static void *
661do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
662{
7f22442b
AW
663 /* Won't hurt if we are already registered. */
664 GC_register_my_thread (sb);
f60a7648 665
7f22442b 666 return scm_with_guile (do_thread_exit, v);
f60a7648
AW
667}
668
d823b11b 669static void
9de87eea 670on_thread_exit (void *v)
d823b11b 671{
29776e85 672 /* This handler is executed in non-guile mode. */
2e77f720 673 scm_i_thread *t = (scm_i_thread *) v, **tp;
0c97d7dd 674
d2a51087
NJ
675 /* If this thread was cancelled while doing a cond wait, it will
676 still have a mutex locked, so we unlock it here. */
677 if (t->held_mutex)
678 {
679 scm_i_pthread_mutex_unlock (t->held_mutex);
680 t->held_mutex = NULL;
681 }
682
f60a7648
AW
683 /* Reinstate the current thread for purposes of scm_with_guile
684 guile-mode cleanup handlers. Only really needed in the non-TLS
685 case but it doesn't hurt to be consistent. */
686 scm_i_pthread_setspecific (scm_i_thread_key, t);
0c97d7dd 687
2e77f720
LC
688 /* Ensure the signal handling thread has been launched, because we might be
689 shutting it down. */
690 scm_i_ensure_signal_delivery_thread ();
691
7f22442b
AW
692 /* Scheme-level thread finalizers and other cleanup needs to happen in
693 guile mode. */
694 GC_call_with_stack_base (do_thread_exit_trampoline, t);
0c97d7dd
MV
695
696 /* Removing ourself from the list of all threads needs to happen in
697 non-guile mode since all SCM values on our stack become
29776e85 698 unprotected once we are no longer in the list. */
0c97d7dd
MV
699 scm_i_pthread_mutex_lock (&thread_admin_mutex);
700 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
701 if (*tp == t)
702 {
703 *tp = t->next_thread;
c058db8a
LC
704
705 /* GC-robust */
706 t->next_thread = NULL;
707
0c97d7dd
MV
708 break;
709 }
710 thread_count--;
2e77f720
LC
711
712 /* If there's only one other thread, it could be the signal delivery
713 thread, so we need to notify it to shut down by closing its read pipe.
714 If it's not the signal delivery thread, then closing the read pipe isn't
715 going to hurt. */
716 if (thread_count <= 1)
717 scm_i_close_signal_pipe ();
718
0c97d7dd
MV
719 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
720
f60a7648 721 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
7f22442b 722
653ccd78 723#if !SCM_USE_NULL_THREADS
7f22442b 724 GC_unregister_my_thread ();
653ccd78 725#endif
d823b11b
MV
726}
727
9de87eea 728static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
d823b11b 729
9de87eea
MV
730static void
731init_thread_key (void)
732{
f60a7648 733 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
9de87eea 734}
d823b11b 735
736/* Perform any initializations necessary to make the current thread
737 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
738 if necessary.
a54a94b3 739
740 BASE is the stack base to use with GC.
741
   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.
744
   Returns zero when the thread was known to Guile already; otherwise
   returns 1.
cde24ce1
AW
747
748 Note that it could be the case that the thread was known
749 to Guile, but not in guile mode (because we are within a
750 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
751 be sure. New threads are put into guile mode implicitly. */
752
753static int
12c1d861 754scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
d823b11b 755{
9de87eea
MV
756 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
757
cde24ce1
AW
758 if (SCM_I_CURRENT_THREAD)
759 {
760 /* Thread is already known to Guile.
761 */
762 return 0;
763 }
764 else
9de87eea
MV
765 {
766 /* This thread has not been guilified yet.
767 */
768
769 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
770 if (scm_initialized_p == 0)
771 {
772 /* First thread ever to enter Guile. Run the full
773 initialization.
774 */
775 scm_i_init_guile (base);
12c1d861 776
4000d064 777#ifdef HAVE_GC_ALLOW_REGISTER_THREADS
12c1d861
AW
778 /* Allow other threads to come in later. */
779 GC_allow_register_threads ();
4000d064 780#endif
12c1d861 781
9de87eea
MV
782 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
783 }
784 else
785 {
786 /* Guile is already initialized, but this thread enters it for
787 the first time. Only initialize this thread.
788 */
789 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
12c1d861
AW
790
791 /* Register this thread with libgc. */
792 GC_register_my_thread (base);
793
9de87eea
MV
794 guilify_self_1 (base);
795 guilify_self_2 (parent);
796 }
797 return 1;
798 }
d823b11b
MV
799}
800
12c1d861
AW
801void
802scm_init_guile ()
d823b11b 803{
12c1d861
AW
804 struct GC_stack_base stack_base;
805
806 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
807 scm_i_init_thread_for_guile (&stack_base,
808 scm_i_default_dynamic_state);
9de87eea
MV
809 else
810 {
12c1d861
AW
811 fprintf (stderr, "Failed to get stack base for current thread.\n");
812 exit (1);
9de87eea 813 }
814}
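/* A minimal embedding sketch (not from this file), showing the intended
   use of scm_init_guile () when the stack base of the current thread can
   be discovered automatically:

       #include <libguile.h>

       int
       main (int argc, char **argv)
       {
         scm_init_guile ();
         scm_c_eval_string ("(display \"hello from Guile\\n\")");
         return 0;
       }

   scm_with_guile () is the more portable alternative when this automatic
   discovery is not available.  */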
815
70eca635 816SCM_UNUSED static void
2e77f720
LC
817scm_leave_guile_cleanup (void *x)
818{
47b6e9bd 819 on_thread_exit (SCM_I_CURRENT_THREAD);
2e77f720
LC
820}
821
12c1d861 822struct with_guile_args
cde24ce1
AW
823{
824 GC_fn_type func;
825 void *data;
12c1d861 826 SCM parent;
cde24ce1
AW
827};
828
829static void *
830with_guile_trampoline (void *data)
831{
12c1d861 832 struct with_guile_args *args = data;
cde24ce1
AW
833
834 return scm_c_with_continuation_barrier (args->func, args->data);
835}
836
12c1d861
AW
837static void *
838with_guile_and_parent (struct GC_stack_base *base, void *data)
9de87eea
MV
839{
840 void *res;
cde24ce1
AW
841 int new_thread;
842 scm_i_thread *t;
12c1d861 843 struct with_guile_args *args = data;
2e77f720 844
12c1d861 845 new_thread = scm_i_init_thread_for_guile (base, args->parent);
cde24ce1
AW
846 t = SCM_I_CURRENT_THREAD;
847 if (new_thread)
2e77f720 848 {
cde24ce1
AW
849 /* We are in Guile mode. */
850 assert (t->guile_mode);
851
12c1d861 852 res = scm_c_with_continuation_barrier (args->func, args->data);
cde24ce1
AW
853
854 /* Leave Guile mode. */
855 t->guile_mode = 0;
856 }
857 else if (t->guile_mode)
858 {
859 /* Already in Guile mode. */
12c1d861 860 res = scm_c_with_continuation_barrier (args->func, args->data);
2e77f720 861 }
74926120 862 else
cde24ce1 863 {
cde24ce1
AW
864 /* We are not in Guile mode, either because we are not within a
865 scm_with_guile, or because we are within a scm_without_guile.
72e6b608 866
cde24ce1
AW
         This call to scm_with_guile() could happen from anywhere on the
         stack, and in particular lower on the stack than it was when this
         thread was first guilified.  Thus, `base' must be updated. */
871#if SCM_STACK_GROWS_UP
12c1d861
AW
872 if (SCM_STACK_PTR (base->mem_base) < t->base)
873 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 874#else
12c1d861
AW
875 if (SCM_STACK_PTR (base->mem_base) > t->base)
876 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 877#endif
72e6b608 878
cde24ce1 879 t->guile_mode = 1;
12c1d861 880 res = with_gc_active (with_guile_trampoline, args);
cde24ce1
AW
881 t->guile_mode = 0;
882 }
883 return res;
72e6b608 884}
3d1af79f 885
12c1d861
AW
886static void *
887scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
888{
889 struct with_guile_args args;
890
891 args.func = func;
892 args.data = data;
893 args.parent = parent;
894
895 return GC_call_with_stack_base (with_guile_and_parent, &args);
896}
897
898void *
899scm_with_guile (void *(*func)(void *), void *data)
900{
901 return scm_i_with_guile_and_parent (func, data,
902 scm_i_default_dynamic_state);
903}
904
9de87eea
MV
905void *
906scm_without_guile (void *(*func)(void *), void *data)
d823b11b 907{
72e6b608 908 void *result;
cde24ce1 909 scm_i_thread *t = SCM_I_CURRENT_THREAD;
72e6b608 910
cde24ce1 911 if (t->guile_mode)
72e6b608 912 {
cde24ce1
AW
913 SCM_I_CURRENT_THREAD->guile_mode = 0;
914 result = with_gc_inactive (func, data);
915 SCM_I_CURRENT_THREAD->guile_mode = 1;
72e6b608
LC
916 }
917 else
cde24ce1 918 /* Otherwise we're not in guile mode, so nothing to do. */
72e6b608
LC
919 result = func (data);
920
921 return result;
922}
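/* Hedged usage sketch (not part of the original source): a blocking C
   call can be wrapped with scm_without_guile so that the collector is
   not held up by this thread while it sleeps.  The helper below is
   hypothetical:

       static void *
       do_blocking_io (void *data)
       {
         sleep (10);            // or read (), select (), ...
         return NULL;
       }

       scm_without_guile (do_blocking_io, NULL);
*/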
923
72e6b608 924\f
9de87eea
MV
925/*** Thread creation */
926
927typedef struct {
928 SCM parent;
929 SCM thunk;
930 SCM handler;
76da80e7 931 SCM thread;
9de87eea
MV
932 scm_i_pthread_mutex_t mutex;
933 scm_i_pthread_cond_t cond;
934} launch_data;
d823b11b 935
9de87eea
MV
936static void *
937really_launch (void *d)
938{
939 launch_data *data = (launch_data *)d;
940 SCM thunk = data->thunk, handler = data->handler;
941 scm_i_thread *t;
d823b11b 942
9de87eea 943 t = SCM_I_CURRENT_THREAD;
a54a94b3 944
9de87eea
MV
945 scm_i_scm_pthread_mutex_lock (&data->mutex);
946 data->thread = scm_current_thread ();
947 scm_i_pthread_cond_signal (&data->cond);
948 scm_i_pthread_mutex_unlock (&data->mutex);
949
950 if (SCM_UNBNDP (handler))
951 t->result = scm_call_0 (thunk);
952 else
953 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
954
955 return 0;
d823b11b
MV
956}
957
9de87eea
MV
958static void *
959launch_thread (void *d)
960{
961 launch_data *data = (launch_data *)d;
962 scm_i_pthread_detach (scm_i_pthread_self ());
963 scm_i_with_guile_and_parent (really_launch, d, data->parent);
964 return NULL;
965}
966
967SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
d823b11b 968 (SCM thunk, SCM handler),
9de87eea
MV
969 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
970 "returning a new thread object representing the thread. The procedure\n"
971 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
972 "\n"
973 "When @var{handler} is specified, then @var{thunk} is called from\n"
974 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
975 "handler. This catch is established inside the continuation barrier.\n"
976 "\n"
977 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
978 "the @emph{exit value} of the thread and the thread is terminated.")
d823b11b
MV
979#define FUNC_NAME s_scm_call_with_new_thread
980{
9de87eea
MV
981 launch_data data;
982 scm_i_pthread_t id;
983 int err;
d823b11b 984
9de87eea
MV
985 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
986 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
987 handler, SCM_ARG2, FUNC_NAME);
988
989 data.parent = scm_current_dynamic_state ();
990 data.thunk = thunk;
991 data.handler = handler;
992 data.thread = SCM_BOOL_F;
993 scm_i_pthread_mutex_init (&data.mutex, NULL);
994 scm_i_pthread_cond_init (&data.cond, NULL);
995
996 scm_i_scm_pthread_mutex_lock (&data.mutex);
997 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
998 if (err)
999 {
1000 scm_i_pthread_mutex_unlock (&data.mutex);
1001 errno = err;
1002 scm_syserror (NULL);
1003 }
1004 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1005 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1006
9de87eea 1007 return data.thread;
1008}
1009#undef FUNC_NAME
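/* Illustrative Scheme-level sketch (not in the upstream file):

       (define t (call-with-new-thread
                   (lambda () (* 6 7))
                   (lambda (key . args) 'oops)))
       (join-thread t)          ; => 42, or 'oops if the thunk threw
*/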
1010
9de87eea
MV
1011typedef struct {
1012 SCM parent;
1013 scm_t_catch_body body;
1014 void *body_data;
1015 scm_t_catch_handler handler;
1016 void *handler_data;
1017 SCM thread;
1018 scm_i_pthread_mutex_t mutex;
1019 scm_i_pthread_cond_t cond;
1020} spawn_data;
1021
1022static void *
1023really_spawn (void *d)
1024{
1025 spawn_data *data = (spawn_data *)d;
1026 scm_t_catch_body body = data->body;
1027 void *body_data = data->body_data;
1028 scm_t_catch_handler handler = data->handler;
1029 void *handler_data = data->handler_data;
1030 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1031
1032 scm_i_scm_pthread_mutex_lock (&data->mutex);
1033 data->thread = scm_current_thread ();
1034 scm_i_pthread_cond_signal (&data->cond);
1035 scm_i_pthread_mutex_unlock (&data->mutex);
1036
1037 if (handler == NULL)
1038 t->result = body (body_data);
1039 else
1040 t->result = scm_internal_catch (SCM_BOOL_T,
1041 body, body_data,
1042 handler, handler_data);
1043
1044 return 0;
1045}
1046
1047static void *
1048spawn_thread (void *d)
1049{
1050 spawn_data *data = (spawn_data *)d;
1051 scm_i_pthread_detach (scm_i_pthread_self ());
1052 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1053 return NULL;
1054}
1055
1056SCM
1057scm_spawn_thread (scm_t_catch_body body, void *body_data,
1058 scm_t_catch_handler handler, void *handler_data)
1059{
1060 spawn_data data;
1061 scm_i_pthread_t id;
1062 int err;
1063
1064 data.parent = scm_current_dynamic_state ();
1065 data.body = body;
1066 data.body_data = body_data;
1067 data.handler = handler;
1068 data.handler_data = handler_data;
1069 data.thread = SCM_BOOL_F;
1070 scm_i_pthread_mutex_init (&data.mutex, NULL);
1071 scm_i_pthread_cond_init (&data.cond, NULL);
1072
1073 scm_i_scm_pthread_mutex_lock (&data.mutex);
1074 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1075 if (err)
1076 {
1077 scm_i_pthread_mutex_unlock (&data.mutex);
1078 errno = err;
1079 scm_syserror (NULL);
1080 }
1081 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1082 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1083
1084 return data.thread;
1085}
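/* Illustrative C-level sketch (not part of the original source); the
   body and handler below are hypothetical helpers:

       static SCM
       body (void *data)
       {
         return scm_from_int (42);
       }

       static SCM
       handler (void *data, SCM key, SCM args)
       {
         return SCM_BOOL_F;
       }

       SCM t = scm_spawn_thread (body, NULL, handler, NULL);
*/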
1086
29717c89
MD
1087SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1088 (),
1089"Move the calling thread to the end of the scheduling queue.")
1090#define FUNC_NAME s_scm_yield
1091{
9de87eea 1092 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1093}
1094#undef FUNC_NAME
1095
2e77f720
LC
1096SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1097 (SCM thread),
1098"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1099"cannot be the current thread, and if @var{thread} has already terminated or "
1100"been signaled to terminate, this function is a no-op.")
1101#define FUNC_NAME s_scm_cancel_thread
1102{
1103 scm_i_thread *t = NULL;
1104
1105 SCM_VALIDATE_THREAD (1, thread);
1106 t = SCM_I_THREAD_DATA (thread);
86a597f8 1107 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1108 if (!t->canceled)
1109 {
1110 t->canceled = 1;
86a597f8 1111 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1112 scm_i_pthread_cancel (t->pthread);
1113 }
1114 else
86a597f8 1115 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1116
1117 return SCM_UNSPECIFIED;
1118}
1119#undef FUNC_NAME
1120
1121SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1122 (SCM thread, SCM proc),
1123"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1124"This handler will be called when the thread exits.")
1125#define FUNC_NAME s_scm_set_thread_cleanup_x
1126{
1127 scm_i_thread *t;
1128
1129 SCM_VALIDATE_THREAD (1, thread);
1130 if (!scm_is_false (proc))
1131 SCM_VALIDATE_THUNK (2, proc);
1132
2e77f720 1133 t = SCM_I_THREAD_DATA (thread);
1134 scm_i_pthread_mutex_lock (&t->admin_mutex);
1135
1136 if (!(t->exited || t->canceled))
1137 t->cleanup_handler = proc;
1138
86a597f8 1139 scm_i_pthread_mutex_unlock (&t->admin_mutex);
1140
1141 return SCM_UNSPECIFIED;
1142}
1143#undef FUNC_NAME
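/* Illustrative Scheme-level sketch (not part of the original source);
   `do-work' is a hypothetical procedure:

       (define t (call-with-new-thread (lambda () (do-work))))
       (set-thread-cleanup! t (lambda () (display "worker finished\n")))
       (cancel-thread t)        ; the cleanup handler still runs
*/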
1144
1145SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1146 (SCM thread),
1147"Return the cleanup handler installed for the thread @var{thread}.")
1148#define FUNC_NAME s_scm_thread_cleanup
1149{
1150 scm_i_thread *t;
1151 SCM ret;
1152
1153 SCM_VALIDATE_THREAD (1, thread);
1154
2e77f720 1155 t = SCM_I_THREAD_DATA (thread);
86a597f8 1156 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1157 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1158 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1159
1160 return ret;
1161}
1162#undef FUNC_NAME
1163
6180e336
NJ
1164SCM scm_join_thread (SCM thread)
1165{
1166 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1167}
1168
1169SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1170 (SCM thread, SCM timeout, SCM timeoutval),
d823b11b
MV
1171"Suspend execution of the calling thread until the target @var{thread} "
1172"terminates, unless the target @var{thread} has already terminated. ")
6180e336 1173#define FUNC_NAME s_scm_join_thread_timed
5f05c406 1174{
9de87eea 1175 scm_i_thread *t;
6180e336
NJ
1176 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1177 SCM res = SCM_BOOL_F;
1178
1179 if (! (SCM_UNBNDP (timeoutval)))
1180 res = timeoutval;
d823b11b
MV
1181
1182 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1183 if (scm_is_eq (scm_current_thread (), thread))
2e77f720 1184 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
d823b11b 1185
9de87eea 1186 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1187 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1188
6180e336
NJ
1189 if (! SCM_UNBNDP (timeout))
1190 {
1191 to_timespec (timeout, &ctimeout);
1192 timeout_ptr = &ctimeout;
1193 }
1194
1195 if (t->exited)
1196 res = t->result;
1197 else
d823b11b 1198 {
9de87eea
MV
1199 while (1)
1200 {
74926120 1201 int err = block_self (t->join_queue, thread, &t->admin_mutex,
6180e336
NJ
1202 timeout_ptr);
1203 if (err == 0)
1204 {
1205 if (t->exited)
1206 {
1207 res = t->result;
1208 break;
1209 }
1210 }
1211 else if (err == ETIMEDOUT)
9de87eea 1212 break;
6180e336 1213
86a597f8 1214 scm_i_pthread_mutex_unlock (&t->admin_mutex);
9de87eea 1215 SCM_TICK;
86a597f8 1216 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
21346c4f
NJ
1217
1218 /* Check for exit again, since we just released and
1219 reacquired the admin mutex, before the next block_self
1220 call (which would block forever if t has already
1221 exited). */
1222 if (t->exited)
1223 {
1224 res = t->result;
1225 break;
1226 }
9de87eea 1227 }
d823b11b 1228 }
9de87eea 1229
86a597f8 1230 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 1231
d823b11b 1232 return res;
1233}
1234#undef FUNC_NAME
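/* Illustrative Scheme-level sketch (not in the upstream file): the
   optional arguments give an absolute timeout and a value to return if
   that timeout expires:

       (join-thread worker (+ (current-time) 5) 'timed-out)
*/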
1235
6180e336
NJ
1236SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1237 (SCM obj),
1238 "Return @code{#t} if @var{obj} is a thread.")
1239#define FUNC_NAME s_scm_thread_p
1240{
1241 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1242}
1243#undef FUNC_NAME
5f05c406 1244
4079f87e 1245
9de87eea
MV
1246static size_t
1247fat_mutex_free (SCM mx)
76da80e7 1248{
9de87eea
MV
1249 fat_mutex *m = SCM_MUTEX_DATA (mx);
1250 scm_i_pthread_mutex_destroy (&m->lock);
76da80e7
MV
1251 return 0;
1252}
1253
1254static int
9de87eea 1255fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1256{
9de87eea
MV
1257 fat_mutex *m = SCM_MUTEX_DATA (mx);
1258 scm_puts ("#<mutex ", port);
1259 scm_uintprint ((scm_t_bits)m, 16, port);
1260 scm_puts (">", port);
1261 return 1;
76da80e7
MV
1262}
1263
76da80e7 1264static SCM
6180e336 1265make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1266{
9de87eea
MV
1267 fat_mutex *m;
1268 SCM mx;
1269
1270 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1271 scm_i_pthread_mutex_init (&m->lock, NULL);
1272 m->owner = SCM_BOOL_F;
adc085f1 1273 m->level = 0;
6180e336 1274
adc085f1 1275 m->recursive = recursive;
6180e336
NJ
1276 m->unchecked_unlock = unchecked_unlock;
1277 m->allow_external_unlock = external_unlock;
1278
9de87eea
MV
1279 m->waiting = SCM_EOL;
1280 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1281 m->waiting = make_queue ();
1282 return mx;
76da80e7
MV
1283}
1284
6180e336
NJ
1285SCM scm_make_mutex (void)
1286{
1287 return scm_make_mutex_with_flags (SCM_EOL);
1288}
1289
2a1d0688
NJ
1290SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1291SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1292SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1293
1294SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1295 (SCM flags),
9de87eea 1296 "Create a new mutex. ")
6180e336 1297#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1298{
6180e336
NJ
1299 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1300
1301 SCM ptr = flags;
1302 while (! scm_is_null (ptr))
1303 {
1304 SCM flag = SCM_CAR (ptr);
1305 if (scm_is_eq (flag, unchecked_unlock_sym))
1306 unchecked_unlock = 1;
1307 else if (scm_is_eq (flag, allow_external_unlock_sym))
1308 external_unlock = 1;
1309 else if (scm_is_eq (flag, recursive_sym))
1310 recursive = 1;
74926120 1311 else
2a1d0688 1312 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1313 ptr = SCM_CDR (ptr);
1314 }
1315 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1316}
1317#undef FUNC_NAME
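/* Illustrative sketch (not part of the original source) of the flags
   accepted above:

       (make-mutex)                          ; plain, non-recursive mutex
       (make-mutex 'recursive)               ; may be re-locked by its owner
       (make-mutex 'unchecked-unlock
                   'allow-external-unlock)   ; relaxed unlock checking
*/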
1318
9de87eea 1319SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
9bc4701c 1320 (void),
9de87eea
MV
1321 "Create a new recursive mutex. ")
1322#define FUNC_NAME s_scm_make_recursive_mutex
9bc4701c 1323{
6180e336 1324 return make_fat_mutex (1, 0, 0);
9bc4701c
MD
1325}
1326#undef FUNC_NAME
1327
6180e336
NJ
1328SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1329
1330static SCM
adc085f1 1331fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
9de87eea
MV
1332{
1333 fat_mutex *m = SCM_MUTEX_DATA (mutex);
6180e336 1334
adc085f1 1335 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
6180e336
NJ
1336 SCM err = SCM_BOOL_F;
1337
1338 struct timeval current_time;
9de87eea
MV
1339
1340 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1341
1342 while (1)
9de87eea 1343 {
adc085f1 1344 if (m->level == 0)
6180e336 1345 {
adc085f1 1346 m->owner = new_owner;
6180e336 1347 m->level++;
74926120 1348
adc085f1 1349 if (SCM_I_IS_THREAD (new_owner))
6180e336 1350 {
adc085f1 1351 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
6180e336 1352 scm_i_pthread_mutex_lock (&t->admin_mutex);
a0faf7dd
LC
1353
1354 /* Only keep a weak reference to MUTEX so that it's not
f57fdf07
LC
1355 retained when not referenced elsewhere (bug #27450).
1356 The weak pair itself is eventually removed when MUTEX
1357 is unlocked. Note that `t->mutexes' lists mutexes
1358 currently held by T, so it should be small. */
a0faf7dd
LC
1359 t->mutexes = scm_weak_car_pair (mutex, t->mutexes);
1360
6180e336 1361 scm_i_pthread_mutex_unlock (&t->admin_mutex);
6180e336 1362 }
adc085f1
JG
1363 *ret = 1;
1364 break;
1365 }
1366 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1367 {
1368 m->owner = new_owner;
1369 err = scm_cons (scm_abandoned_mutex_error_key,
1370 scm_from_locale_string ("lock obtained on abandoned "
1371 "mutex"));
1372 *ret = 1;
1373 break;
1374 }
1375 else if (scm_is_eq (m->owner, new_owner))
1376 {
1377 if (m->recursive)
1378 {
1379 m->level++;
74926120 1380 *ret = 1;
adc085f1
JG
1381 }
1382 else
6180e336 1383 {
adc085f1
JG
1384 err = scm_cons (scm_misc_error_key,
1385 scm_from_locale_string ("mutex already locked "
1386 "by thread"));
1387 *ret = 0;
1388 }
74926120 1389 break;
adc085f1 1390 }
9de87eea 1391 else
9de87eea 1392 {
74926120 1393 if (timeout != NULL)
adc085f1
JG
1394 {
1395 gettimeofday (&current_time, NULL);
1396 if (current_time.tv_sec > timeout->tv_sec ||
1397 (current_time.tv_sec == timeout->tv_sec &&
1398 current_time.tv_usec * 1000 > timeout->tv_nsec))
6180e336 1399 {
adc085f1
JG
1400 *ret = 0;
1401 break;
6180e336 1402 }
6180e336 1403 }
37a52039 1404 block_self (m->waiting, mutex, &m->lock, timeout);
9de87eea
MV
1405 scm_i_pthread_mutex_unlock (&m->lock);
1406 SCM_TICK;
1407 scm_i_scm_pthread_mutex_lock (&m->lock);
1408 }
1409 }
1410 scm_i_pthread_mutex_unlock (&m->lock);
6180e336 1411 return err;
9de87eea
MV
1412}
1413
6180e336
NJ
1414SCM scm_lock_mutex (SCM mx)
1415{
adc085f1 1416 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
6180e336
NJ
1417}
1418
adc085f1
JG
1419SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1420 (SCM m, SCM timeout, SCM owner),
9bc4701c
MD
1421"Lock @var{mutex}. If the mutex is already locked, the calling thread "
1422"blocks until the mutex becomes available. The function returns when "
1423"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1424"a thread already owns will succeed right away and will not block the "
1425"thread. That is, Guile's mutexes are @emph{recursive}. ")
6180e336 1426#define FUNC_NAME s_scm_lock_mutex_timed
9bc4701c 1427{
6180e336
NJ
1428 SCM exception;
1429 int ret = 0;
1430 scm_t_timespec cwaittime, *waittime = NULL;
76da80e7 1431
6180e336
NJ
1432 SCM_VALIDATE_MUTEX (1, m);
1433
1434 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1435 {
1436 to_timespec (timeout, &cwaittime);
1437 waittime = &cwaittime;
1438 }
1439
adc085f1 1440 exception = fat_mutex_lock (m, waittime, owner, &ret);
6180e336
NJ
1441 if (!scm_is_false (exception))
1442 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1443 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c 1444}
76da80e7 1445#undef FUNC_NAME
9bc4701c 1446
a4d106c7 1447void
661ae7ab 1448scm_dynwind_lock_mutex (SCM mutex)
a4d106c7 1449{
661ae7ab
MV
1450 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
1451 SCM_F_WIND_EXPLICITLY);
1452 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
1453 SCM_F_WIND_EXPLICITLY);
1454}
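/* Hedged usage sketch (not from this file): the dynwind form guarantees
   that the mutex is released however the dynamic extent is left,
   including via a non-local exit:

       scm_dynwind_begin (0);
       scm_dynwind_lock_mutex (mutex);
       ... code that may throw ...
       scm_dynwind_end ();
*/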
1455
9bc4701c 1456SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
ba1b7223 1457 (SCM mutex),
9bc4701c
MD
1458"Try to lock @var{mutex}. If the mutex is already locked by someone "
1459"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1460#define FUNC_NAME s_scm_try_mutex
1461{
6180e336
NJ
1462 SCM exception;
1463 int ret = 0;
1464 scm_t_timespec cwaittime, *waittime = NULL;
9de87eea 1465
ba1b7223 1466 SCM_VALIDATE_MUTEX (1, mutex);
6180e336
NJ
1467
1468 to_timespec (scm_from_int(0), &cwaittime);
1469 waittime = &cwaittime;
74926120 1470
adc085f1 1471 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
6180e336
NJ
1472 if (!scm_is_false (exception))
1473 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1474 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9de87eea
MV
1475}
1476#undef FUNC_NAME
76da80e7 1477
6180e336
NJ
1478/*** Fat condition variables */
1479
1480typedef struct {
1481 scm_i_pthread_mutex_t lock;
1482 SCM waiting; /* the threads waiting for this condition. */
1483} fat_cond;
1484
1485#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1486#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1487
1488static int
1489fat_mutex_unlock (SCM mutex, SCM cond,
1490 const scm_t_timespec *waittime, int relock)
9de87eea 1491{
7f991c7d 1492 SCM owner;
6180e336
NJ
1493 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1494 fat_cond *c = NULL;
1495 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1496 int err = 0, ret = 0;
9de87eea
MV
1497
1498 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1 1499
7f991c7d 1500 owner = m->owner;
adc085f1 1501
d31ae2c3 1502 if (!scm_is_eq (owner, t->handle))
9bc4701c 1503 {
adc085f1 1504 if (m->level == 0)
6180e336
NJ
1505 {
1506 if (!m->unchecked_unlock)
2a1d0688
NJ
1507 {
1508 scm_i_pthread_mutex_unlock (&m->lock);
1509 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1510 }
d31ae2c3 1511 owner = t->handle;
6180e336
NJ
1512 }
1513 else if (!m->allow_external_unlock)
2a1d0688
NJ
1514 {
1515 scm_i_pthread_mutex_unlock (&m->lock);
1516 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1517 }
6180e336
NJ
1518 }
1519
1520 if (! (SCM_UNBNDP (cond)))
1521 {
6180e336
NJ
1522 c = SCM_CONDVAR_DATA (cond);
1523 while (1)
1524 {
1525 int brk = 0;
1526
6180e336
NJ
1527 if (m->level > 0)
1528 m->level--;
adc085f1 1529 if (m->level == 0)
f57fdf07
LC
1530 {
1531 /* Change the owner of MUTEX. */
1532 t->mutexes = scm_delq_x (mutex, t->mutexes);
1533 m->owner = unblock_from_queue (m->waiting);
1534 }
adc085f1 1535
6180e336 1536 t->block_asyncs++;
74926120 1537
d2a51087
NJ
1538 err = block_self (c->waiting, cond, &m->lock, waittime);
1539 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
1540
1541 if (err == 0)
1542 {
1543 ret = 1;
1544 brk = 1;
1545 }
1546 else if (err == ETIMEDOUT)
1547 {
1548 ret = 0;
1549 brk = 1;
1550 }
1551 else if (err != EINTR)
74926120 1552 {
6180e336 1553 errno = err;
6180e336 1554 scm_syserror (NULL);
74926120 1555 }
6180e336
NJ
1556
1557 if (brk)
1558 {
1559 if (relock)
adc085f1 1560 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
4201062d 1561 t->block_asyncs--;
6180e336
NJ
1562 break;
1563 }
74926120 1564
6180e336
NJ
1565 t->block_asyncs--;
1566 scm_async_click ();
74926120 1567
6180e336
NJ
1568 scm_remember_upto_here_2 (cond, mutex);
1569
1570 scm_i_scm_pthread_mutex_lock (&m->lock);
1571 }
9bc4701c 1572 }
9de87eea 1573 else
6180e336
NJ
1574 {
1575 if (m->level > 0)
1576 m->level--;
74926120 1577 if (m->level == 0)
f57fdf07
LC
1578 {
1579 /* Change the owner of MUTEX. */
1580 t->mutexes = scm_delq_x (mutex, t->mutexes);
1581 m->owner = unblock_from_queue (m->waiting);
1582 }
74926120 1583
6180e336
NJ
1584 scm_i_pthread_mutex_unlock (&m->lock);
1585 ret = 1;
1586 }
9de87eea 1587
6180e336 1588 return ret;
9bc4701c 1589}
9bc4701c 1590
6180e336
NJ
1591SCM scm_unlock_mutex (SCM mx)
1592{
1593 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
9bc4701c 1594}
9bc4701c 1595
6180e336
NJ
1596SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1597 (SCM mx, SCM cond, SCM timeout),
9bc4701c
MD
1598"Unlocks @var{mutex} if the calling thread owns the lock on "
1599"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1600"thread results in undefined behaviour. Once a mutex has been unlocked, "
1601"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1602"lock. Every call to @code{lock-mutex} by this thread must be matched "
1603"with a call to @code{unlock-mutex}. Only the last call to "
1604"@code{unlock-mutex} will actually unlock the mutex. ")
6180e336 1605#define FUNC_NAME s_scm_unlock_mutex_timed
9bc4701c 1606{
6180e336
NJ
1607 scm_t_timespec cwaittime, *waittime = NULL;
1608
9bc4701c 1609 SCM_VALIDATE_MUTEX (1, mx);
6180e336
NJ
1610 if (! (SCM_UNBNDP (cond)))
1611 {
1612 SCM_VALIDATE_CONDVAR (2, cond);
1613
1614 if (! (SCM_UNBNDP (timeout)))
1615 {
1616 to_timespec (timeout, &cwaittime);
1617 waittime = &cwaittime;
1618 }
1619 }
1620
1621 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c
MD
1622}
1623#undef FUNC_NAME
1624
6180e336
NJ
1625SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1626 (SCM obj),
1627 "Return @code{#t} if @var{obj} is a mutex.")
1628#define FUNC_NAME s_scm_mutex_p
1629{
1630 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1631}
74926120 1632#undef FUNC_NAME
9de87eea
MV
1633
1634SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1635 (SCM mx),
1636 "Return the thread owning @var{mx}, or @code{#f}.")
1637#define FUNC_NAME s_scm_mutex_owner
1638{
adc085f1
JG
1639 SCM owner;
1640 fat_mutex *m = NULL;
1641
9de87eea 1642 SCM_VALIDATE_MUTEX (1, mx);
adc085f1
JG
1643 m = SCM_MUTEX_DATA (mx);
1644 scm_i_pthread_mutex_lock (&m->lock);
1645 owner = m->owner;
1646 scm_i_pthread_mutex_unlock (&m->lock);
1647
1648 return owner;
9de87eea
MV
1649}
1650#undef FUNC_NAME
1651
1652SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1653 (SCM mx),
adc085f1 1654 "Return the lock level of mutex @var{mx}.")
9de87eea
MV
1655#define FUNC_NAME s_scm_mutex_level
1656{
1657 SCM_VALIDATE_MUTEX (1, mx);
1658 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1659}
1660#undef FUNC_NAME
1661
adc085f1
JG
1662SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1663 (SCM mx),
1664 "Returns @code{#t} if the mutex @var{mx} is locked.")
1665#define FUNC_NAME s_scm_mutex_locked_p
1666{
1667 SCM_VALIDATE_MUTEX (1, mx);
1668 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1669}
1670#undef FUNC_NAME
9de87eea 1671
9de87eea
MV
1672static int
1673fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1674{
1675 fat_cond *c = SCM_CONDVAR_DATA (cv);
1676 scm_puts ("#<condition-variable ", port);
1677 scm_uintprint ((scm_t_bits)c, 16, port);
1678 scm_puts (">", port);
1679 return 1;
1680}
9bc4701c 1681
d823b11b
MV
1682SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1683 (void),
1684 "Make a new condition variable.")
1685#define FUNC_NAME s_scm_make_condition_variable
5f05c406 1686{
9de87eea
MV
1687 fat_cond *c;
1688 SCM cv;
1689
1690 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
9de87eea
MV
1691 c->waiting = SCM_EOL;
1692 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1693 c->waiting = make_queue ();
d823b11b 1694 return cv;
5f05c406 1695}
d823b11b 1696#undef FUNC_NAME
5f05c406 1697
d823b11b
MV
1698SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1699 (SCM cv, SCM mx, SCM t),
1700"Wait until @var{cond-var} has been signalled. While waiting, "
1701"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1702"is locked again when this function returns. When @var{time} is given, "
1703"it specifies a point in time where the waiting should be aborted. It "
1704"can be either a integer as returned by @code{current-time} or a pair "
1705"as returned by @code{gettimeofday}. When the waiting is aborted the "
1706"mutex is locked and @code{#f} is returned. When the condition "
1707"variable is in fact signalled, the mutex is also locked and @code{#t} "
1708"is returned. ")
1709#define FUNC_NAME s_scm_timed_wait_condition_variable
5f05c406 1710{
9de87eea 1711 scm_t_timespec waittime, *waitptr = NULL;
d823b11b
MV
1712
1713 SCM_VALIDATE_CONDVAR (1, cv);
1714 SCM_VALIDATE_MUTEX (2, mx);
74926120 1715
d823b11b
MV
1716 if (!SCM_UNBNDP (t))
1717 {
6180e336 1718 to_timespec (t, &waittime);
9de87eea 1719 waitptr = &waittime;
d823b11b
MV
1720 }
1721
2a1d0688 1722 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
5f05c406 1723}
d823b11b 1724#undef FUNC_NAME
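/* Illustrative Scheme-level sketch (not part of the original source):
   the usual pattern re-checks its predicate in a loop, since a wait can
   return on a timeout or other wakeup.  `ready?' is a hypothetical
   predicate protected by the mutex M:

       (lock-mutex m)
       (let loop ()
         (if (not (ready?))
             (begin
               (wait-condition-variable cv m)
               (loop))))
       (unlock-mutex m)
*/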

static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}

SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

static void
fat_cond_broadcast (fat_cond *c)
{
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
}

SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
	    (SCM obj),
	    "Return @code{#t} if @var{obj} is a condition variable.")
#define FUNC_NAME s_scm_condition_variable_p
{
  return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME


\f
/*** Select */

struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  int result;
  int errno_value;
};

static void *
do_std_select (void *args)
{
  struct select_args *select_args;

  select_args = (struct select_args *) args;

  select_args->result =
    select (select_args->nfds,
	    select_args->read_fds, select_args->write_fds,
	    select_args->except_fds, select_args->timeout);
  select_args->errno_value = errno;

  return NULL;
}

int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
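/* Usage sketch (illustrative only): scm_std_select is a drop-in for
   select(2) for code running in guile mode.  The wakeup-pipe handling
   above makes the call interruptible by other threads, in which case it
   returns -1 with errno set to EINTR.  `sock' and `handle_interruption'
   are hypothetical:

     fd_set fds;
     struct timeval tv = { 1, 0 };
     FD_ZERO (&fds);
     FD_SET (sock, &fds);
     if (scm_std_select (sock + 1, &fds, NULL, NULL, &tv) < 0
         && errno == EINTR)
       handle_interruption ();
*/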

/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

/* It seems reasonable not to run procedures related to mutexes and
   condition variables within `GC_do_blocking ()', since (i) the GC can
   operate even without it, and (ii) the only potential gain would be GC
   latency.  See
   http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
   for a discussion of the pros and cons.  */

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  int res = scm_i_pthread_mutex_lock (mutex);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
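/* Usage sketch (illustrative only): this helper is meant to be called
   between scm_dynwind_begin and scm_dynwind_end, so the low-level mutex
   is released again even if the code in between exits non-locally.
   `m' is a hypothetical, already-initialized scm_i_pthread_mutex_t and
   `do_work_under_lock' is likewise hypothetical:

     scm_dynwind_begin (0);
     scm_dynwind_pthread_mutex_lock (&m);
     do_work_under_lock ();
     scm_dynwind_end ();
*/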

int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}

#endif

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}
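/* Note (illustrative only): both helpers are thin wrappers around
   scm_std_select with an empty descriptor set, so a sleeping thread can
   still be woken early through its sleep pipe.  A minimal sketch:

     unsigned int left = scm_std_sleep (5);

   The value returned is computed from the timeout as select leaves it
   behind, so whether it reflects unslept time is platform-dependent.  */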

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We cannot allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
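/* Usage sketch (illustrative only): passing SCM_BOOL_F selects the
   module-private recursive mutex declared above, which is the common
   case; asyncs are blocked for the dynamic extent as well.  Like the
   other dynwind helpers it must run inside a dynwind context.
   `touch_shared_state' is hypothetical:

     scm_dynwind_begin (0);
     scm_dynwind_critical_section (SCM_BOOL_F);
     touch_shared_state ();
     scm_dynwind_end ();
*/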

/*** Initialization */

scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

\f
/* IA64-specific things. */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */


/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/