Revert "with-continuation-barrier carps, calls exit(3) _after_ unwinding"
[bpt/guile.git] / libguile / threads.c
cde24ce1 1/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
74926120 2 *
73be1d9e 3 * This library is free software; you can redistribute it and/or
53befeb7
NJ
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7bfd3b9e 7 *
53befeb7
NJ
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
73be1d9e
MV
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
7bfd3b9e 12 *
73be1d9e
MV
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
53befeb7
NJ
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
73be1d9e 17 */
1bbd0b84 18
1bbd0b84 19
7bfd3b9e 20\f
dbb605f5
LC
21#ifdef HAVE_CONFIG_H
22# include <config.h>
23#endif
7bfd3b9e 24
1c44468d 25#include "libguile/bdw-gc.h"
1810dc4e
RB
26#include "libguile/_scm.h"
27
fcc5d734 28#if HAVE_UNISTD_H
d823b11b 29#include <unistd.h>
fcc5d734 30#endif
d823b11b 31#include <stdio.h>
8ab3d8a0
KR
32
33#ifdef HAVE_STRING_H
34#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35#endif
36
fcc5d734 37#if HAVE_SYS_TIME_H
d823b11b 38#include <sys/time.h>
fcc5d734 39#endif
5f05c406 40
f57fdf07
LC
41#include <assert.h>
42
d823b11b
MV
43#include "libguile/validate.h"
44#include "libguile/root.h"
45#include "libguile/eval.h"
46#include "libguile/async.h"
47#include "libguile/ports.h"
48#include "libguile/threads.h"
a0599745 49#include "libguile/dynwind.h"
d823b11b 50#include "libguile/iselect.h"
9de87eea
MV
51#include "libguile/fluids.h"
52#include "libguile/continuations.h"
2b829bbb 53#include "libguile/gc.h"
9de87eea 54#include "libguile/init.h"
2e77f720 55#include "libguile/scmsigs.h"
6180e336 56#include "libguile/strings.h"
a0faf7dd 57#include "libguile/weaks.h"
7bfd3b9e 58
ecc9f40f
MV
59#ifdef __MINGW32__
60#ifndef ETIMEDOUT
61# define ETIMEDOUT WSAETIMEDOUT
62#endif
63# include <fcntl.h>
64# include <process.h>
65# define pipe(fd) _pipe (fd, 256, O_BINARY)
66#endif /* __MINGW32__ */
67
634aa8de
LC
68#include <full-read.h>
69
cde24ce1
AW
70
71\f
72
73/* First some libgc shims. */
74
75/* Make sure GC_fn_type is defined; it is missing from the public
76 headers of GC 7.1 and earlier. */
77#ifndef HAVE_GC_FN_TYPE
78typedef void * (* GC_fn_type) (void *);
79#endif
80
81
5f0d2951
AW
82#ifndef GC_SUCCESS
83#define GC_SUCCESS 0
84#endif
85
86#ifndef GC_UNIMPLEMENTED
87#define GC_UNIMPLEMENTED 3
88#endif
89
90/* Likewise struct GC_stack_base is missing before 7.1. */
91#ifndef HAVE_GC_STACK_BASE
92struct GC_stack_base;
93
94static int
95GC_register_my_thread (struct GC_stack_base *sb)
96{
97 return GC_UNIMPLEMENTED;
98}
99
100static void
101GC_unregister_my_thread ()
102{
103}
104
105static void *
106GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
107{
108 return fn (NULL, arg);
109}
110#endif
111
112
cde24ce1
AW
113/* Now define with_gc_active and with_gc_inactive. */
114
115#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
116
117/* We have a sufficiently new libgc (7.2 or newer). */
118
119static void*
120with_gc_inactive (GC_fn_type func, void *data)
121{
122 return GC_do_blocking (func, data);
123}
124
125static void*
126with_gc_active (GC_fn_type func, void *data)
127{
128 return GC_call_with_gc_active (func, data);
129}
130
131#else
132
133/* libgc not new enough, so never actually deactivate GC.
134
135 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
136 GC_call_with_gc_active. */
137
138static void*
139with_gc_inactive (GC_fn_type func, void *data)
140{
141 return func (data);
142}
143
144static void*
145with_gc_active (GC_fn_type func, void *data)
146{
147 return func (data);
148}
149
150#endif /* HAVE_GC_DO_BLOCKING */
151
152
634aa8de 153\f
6180e336
NJ
154static void
155to_timespec (SCM t, scm_t_timespec *waittime)
156{
157 if (scm_is_pair (t))
158 {
159 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
160 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
161 }
162 else
163 {
164 double time = scm_to_double (t);
165 double sec = scm_c_truncate (time);
166
167 waittime->tv_sec = (long) sec;
2a1d0688 168 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
6180e336
NJ
169 }
170}
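/* Illustrative sketch (not part of threads.c): the two timeout formats
   accepted by to_timespec, as used by `join-thread', `lock-mutex' and
   `wait-condition-variable' below.  The helper name is hypothetical. */
#if 0
static void
timeout_example (void)
{
  scm_t_timespec ts;

  /* A (seconds . microseconds) pair, as returned by `gettimeofday'. */
  to_timespec (scm_cons (scm_from_ulong (5), scm_from_ulong (250000)), &ts);
  /* => ts.tv_sec == 5, ts.tv_nsec == 250000000 */

  /* A real number of seconds. */
  to_timespec (scm_from_double (1.5), &ts);
  /* => ts.tv_sec == 1, ts.tv_nsec == 500000000 */
}
#endif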
171
c058db8a 172\f
d823b11b 173/*** Queues */
7bfd3b9e 174
c058db8a
LC
175/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
176 the risk of false references leading to unbounded retained space as
177 described in "Bounding Space Usage of Conservative Garbage Collectors",
178 H.J. Boehm, 2001. */
179
9de87eea
MV
180/* Make an empty queue data structure.
181 */
d823b11b
MV
182static SCM
183make_queue ()
184{
185 return scm_cons (SCM_EOL, SCM_EOL);
186}
7bfd3b9e 187
9de87eea
MV
188/* Put T at the back of Q and return a handle that can be used with
189 remqueue to remove T from Q again.
190 */
d823b11b
MV
191static SCM
192enqueue (SCM q, SCM t)
193{
194 SCM c = scm_cons (t, SCM_EOL);
d2a51087 195 SCM_CRITICAL_SECTION_START;
d2e53ed6 196 if (scm_is_null (SCM_CDR (q)))
d823b11b
MV
197 SCM_SETCDR (q, c);
198 else
199 SCM_SETCDR (SCM_CAR (q), c);
200 SCM_SETCAR (q, c);
d2a51087 201 SCM_CRITICAL_SECTION_END;
d823b11b
MV
202 return c;
203}
7bfd3b9e 204
9de87eea
MV
205/* Remove the element that the handle C refers to from the queue Q. C
206 must have been returned from a call to enqueue. The return value
207 is zero when the element referred to by C has already been removed.
208 Otherwise, 1 is returned.
209*/
210static int
d823b11b
MV
211remqueue (SCM q, SCM c)
212{
213 SCM p, prev = q;
d2a51087 214 SCM_CRITICAL_SECTION_START;
d2e53ed6 215 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
d823b11b 216 {
bc36d050 217 if (scm_is_eq (p, c))
d823b11b 218 {
bc36d050 219 if (scm_is_eq (c, SCM_CAR (q)))
d823b11b
MV
220 SCM_SETCAR (q, SCM_CDR (c));
221 SCM_SETCDR (prev, SCM_CDR (c));
c058db8a
LC
222
223 /* GC-robust */
224 SCM_SETCDR (c, SCM_EOL);
225
d2a51087 226 SCM_CRITICAL_SECTION_END;
9de87eea 227 return 1;
d823b11b
MV
228 }
229 prev = p;
230 }
d2a51087 231 SCM_CRITICAL_SECTION_END;
9de87eea 232 return 0;
d823b11b
MV
233}
234
9de87eea
MV
235/* Remove the front-most element from the queue Q and return it.
236 Return SCM_BOOL_F when Q is empty.
237*/
d823b11b
MV
238static SCM
239dequeue (SCM q)
240{
d2a51087
NJ
241 SCM c;
242 SCM_CRITICAL_SECTION_START;
243 c = SCM_CDR (q);
d2e53ed6 244 if (scm_is_null (c))
d2a51087
NJ
245 {
246 SCM_CRITICAL_SECTION_END;
247 return SCM_BOOL_F;
248 }
d823b11b
MV
249 else
250 {
251 SCM_SETCDR (q, SCM_CDR (c));
d2e53ed6 252 if (scm_is_null (SCM_CDR (q)))
d823b11b 253 SCM_SETCAR (q, SCM_EOL);
d2a51087 254 SCM_CRITICAL_SECTION_END;
c058db8a
LC
255
256 /* GC-robust */
257 SCM_SETCDR (c, SCM_EOL);
258
d823b11b
MV
259 return SCM_CAR (c);
260 }
261}
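/* Illustrative sketch (not part of threads.c): how the queue primitives
   above are typically used together.  The function name is hypothetical;
   the real callers are block_self and unblock_from_queue further down. */
#if 0
static void
queue_example (void)
{
  SCM q = make_queue ();
  SCM handle = enqueue (q, scm_from_int (1));   /* add an element */
  enqueue (q, scm_from_int (2));

  if (remqueue (q, handle))                     /* remove it via its handle */
    /* the element was still on the queue */ ;

  SCM front = dequeue (q);                      /* pops 2; SCM_BOOL_F if empty */
  scm_remember_upto_here_1 (front);
}
#endif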
7bfd3b9e 262
9de87eea 263/*** Thread smob routines */
76da80e7 264
d823b11b
MV
265
266static int
267thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
268{
23d72566
KR
269 /* On a GNU system pthread_t is an unsigned long, but on MinGW it's a
270 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
271 the struct case, hence we go via a union, and extract according to the
272 size of pthread_t. */
273 union {
274 scm_i_pthread_t p;
275 unsigned short us;
276 unsigned int ui;
277 unsigned long ul;
278 scm_t_uintmax um;
279 } u;
9de87eea 280 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
23d72566
KR
281 scm_i_pthread_t p = t->pthread;
282 scm_t_uintmax id;
283 u.p = p;
284 if (sizeof (p) == sizeof (unsigned short))
285 id = u.us;
286 else if (sizeof (p) == sizeof (unsigned int))
287 id = u.ui;
288 else if (sizeof (p) == sizeof (unsigned long))
289 id = u.ul;
290 else
291 id = u.um;
292
d823b11b 293 scm_puts ("#<thread ", port);
23d72566 294 scm_uintprint (id, 10, port);
1b92fb6b 295 scm_puts (" (", port);
0345e278 296 scm_uintprint ((scm_t_bits)t, 16, port);
1b92fb6b 297 scm_puts (")>", port);
d823b11b
MV
298 return 1;
299}
300
706846f6 301\f
9de87eea 302/*** Blocking on queues. */
f7eca35d 303
9de87eea
MV
304/* See also scm_i_queue_async_cell for how such a block is
305 interrupted.
306*/
d823b11b 307
9de87eea
MV
308/* Put the current thread on QUEUE and go to sleep, waiting for it to
309 be woken up by a call to 'unblock_from_queue', or to be
310 interrupted. Upon return of this function, the current thread is
311 no longer on QUEUE, even when the sleep has been interrupted.
312
d2a51087 313 The caller of block_self must hold MUTEX. It will be atomically
9de87eea
MV
314 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
315
316 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
317 as MUTEX is needed.
318
319 When WAITTIME is not NULL, the sleep will be aborted at that time.
320
321 The return value of block_self is an errno value. It will be zero
322 when the sleep has been successfully completed by a call to
323 unblock_from_queue, EINTR when it has been interrupted by the
324 delivery of a system async, and ETIMEDOUT when the timeout has
325 expired.
326
327 The system asyncs themselves are not executed by block_self.
328*/
329static int
330block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
331 const scm_t_timespec *waittime)
76da80e7 332{
9de87eea
MV
333 scm_i_thread *t = SCM_I_CURRENT_THREAD;
334 SCM q_handle;
335 int err;
336
337 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
338 err = EINTR;
339 else
340 {
341 t->block_asyncs++;
342 q_handle = enqueue (queue, t->handle);
343 if (waittime == NULL)
344 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
345 else
346 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
347
348 /* When we are still on QUEUE, we have been interrupted. We
349 report this only when no other error (such as a timeout) has
350 happened above.
351 */
352 if (remqueue (queue, q_handle) && err == 0)
353 err = EINTR;
354 t->block_asyncs--;
355 scm_i_reset_sleep (t);
356 }
357
358 return err;
76da80e7 359}
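/* Illustrative sketch (not part of threads.c): the calling convention of
   block_self.  The caller holds MUTEX, loops until the wake-up condition
   holds, and lets pending asyncs run between attempts.  This mirrors the
   real uses in scm_join_thread_timed and fat_mutex_lock below; the `done'
   condition and function name are hypothetical. */
#if 0
static void
wait_example (SCM queue, SCM obj, scm_i_pthread_mutex_t *mutex,
              const scm_t_timespec *waittime, int *done)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  while (!*done)
    {
      int err = block_self (queue, obj, mutex, waittime);
      if (err == ETIMEDOUT)
        break;                  /* gave up waiting */
      if (err == EINTR)
        {
          /* Interrupted by a system async: release the mutex, let the
             async run via SCM_TICK, then retry. */
          scm_i_pthread_mutex_unlock (mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (mutex);
        }
      /* err == 0: woken by unblock_from_queue; re-check *done. */
    }
  scm_i_pthread_mutex_unlock (mutex);
}
#endif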
9de87eea 360
d2a51087
NJ
361/* Wake up the first thread on QUEUE, if any. The awoken thread is
362 returned, or #f if the queue was empty.
9de87eea
MV
363 */
364static SCM
365unblock_from_queue (SCM queue)
366{
367 SCM thread = dequeue (queue);
368 if (scm_is_true (thread))
369 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
370 return thread;
371}
372
45f15cac 373\f
9de87eea
MV
374/* Getting into and out of guile mode.
375 */
376
f60a7648
AW
377/* Key used to attach a cleanup handler to a given thread. Also, if
378 thread-local storage is unavailable, this key is used to retrieve the
379 current thread with `pthread_getspecific ()'. */
380scm_i_pthread_key_t scm_i_thread_key;
381
382
705edb95
LC
383#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
384
385/* When thread-local storage (TLS) is available, a pointer to the
386 current-thread object is kept in TLS. Note that storing the thread-object
387 itself in TLS (rather than a pointer to some malloc'd memory) is not
388 possible since thread objects may live longer than the actual thread they
389 represent. */
390SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
391
f60a7648 392#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
705edb95 393
d823b11b 394
9de87eea
MV
395static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
396static scm_i_thread *all_threads = NULL;
397static int thread_count;
398
399static SCM scm_i_default_dynamic_state;
400
401/* Perform first stage of thread initialisation, in non-guile mode.
d823b11b 402 */
9de87eea
MV
403static void
404guilify_self_1 (SCM_STACKITEM *base)
d823b11b 405{
c812243b 406 scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");
9de87eea
MV
407
408 t->pthread = scm_i_pthread_self ();
409 t->handle = SCM_BOOL_F;
410 t->result = SCM_BOOL_F;
2e77f720 411 t->cleanup_handler = SCM_BOOL_F;
6180e336 412 t->mutexes = SCM_EOL;
d2a51087 413 t->held_mutex = NULL;
9de87eea
MV
414 t->join_queue = SCM_EOL;
415 t->dynamic_state = SCM_BOOL_F;
416 t->dynwinds = SCM_EOL;
417 t->active_asyncs = SCM_EOL;
9de87eea
MV
418 t->block_asyncs = 1;
419 t->pending_asyncs = 1;
87f30eda 420 t->critical_section_level = 0;
9de87eea 421 t->base = base;
346e4402
NJ
422#ifdef __ia64__
423 /* Calculate and store off the base of this thread's register
424 backing store (RBS). Unfortunately our implementation(s) of
425 scm_ia64_register_backing_store_base are only reliable for the
426 main thread. For other threads, therefore, find out the current
427 top of the RBS, and use that as a maximum. */
428 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
429 {
430 ucontext_t ctx;
431 void *bsp;
432 getcontext (&ctx);
433 bsp = scm_ia64_ar_bsp (&ctx);
434 if (t->register_backing_store_base > bsp)
435 t->register_backing_store_base = bsp;
436 }
437#endif
0c97d7dd 438 t->continuation_root = SCM_EOL;
9de87eea
MV
439 t->continuation_base = base;
440 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
441 t->sleep_mutex = NULL;
442 t->sleep_object = SCM_BOOL_F;
443 t->sleep_fd = -1;
634aa8de
LC
444
445 if (pipe (t->sleep_pipe) != 0)
446 /* FIXME: Error conditions during the initialization phase are handled
447 gracelessly since public functions such as `scm_init_guile ()'
448 currently have type `void'. */
449 abort ();
450
86a597f8 451 scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
378f2625
LC
452 t->current_mark_stack_ptr = NULL;
453 t->current_mark_stack_limit = NULL;
2e77f720 454 t->canceled = 0;
9de87eea 455 t->exited = 0;
72e6b608 456 t->guile_mode = 0;
9de87eea 457
f60a7648
AW
458 scm_i_pthread_setspecific (scm_i_thread_key, t);
459
460#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
461 /* Cache the current thread in TLS for faster lookup. */
462 scm_i_current_thread = t;
463#endif
9de87eea 464
9de87eea
MV
465 scm_i_pthread_mutex_lock (&thread_admin_mutex);
466 t->next_thread = all_threads;
467 all_threads = t;
468 thread_count++;
469 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
d823b11b
MV
470}
471
9de87eea 472/* Perform second stage of thread initialisation, in guile mode.
d823b11b 473 */
9de87eea
MV
474static void
475guilify_self_2 (SCM parent)
d823b11b 476{
9de87eea
MV
477 scm_i_thread *t = SCM_I_CURRENT_THREAD;
478
72e6b608
LC
479 t->guile_mode = 1;
480
9de87eea 481 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
c812243b 482
9de87eea
MV
483 t->continuation_root = scm_cons (t->handle, SCM_EOL);
484 t->continuation_base = t->base;
2bbe1533 485 t->vm = SCM_BOOL_F;
9de87eea
MV
486
487 if (scm_is_true (parent))
488 t->dynamic_state = scm_make_dynamic_state (parent);
489 else
490 t->dynamic_state = scm_i_make_initial_dynamic_state ();
491
492 t->join_queue = make_queue ();
493 t->block_asyncs = 0;
d823b11b
MV
494}
495
6180e336
NJ
496\f
497/*** Fat mutexes */
498
499/* We implement our own mutex type since we want them to be 'fair', we
500 want to do fancy things while waiting for them (like running
501 asyncs) and we might want to add things that are nice for
502 debugging.
503*/
504
505typedef struct {
506 scm_i_pthread_mutex_t lock;
507 SCM owner;
adc085f1 508 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
6180e336 509
adc085f1 510 int recursive; /* allow recursive locking? */
6180e336
NJ
511 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
512 int allow_external_unlock; /* is it an error to unlock a mutex that is not
513 owned by the current thread? */
514
515 SCM waiting; /* the threads waiting for this mutex. */
516} fat_mutex;
517
518#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
519#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
520
9de87eea 521/* Perform thread tear-down, in guile mode.
d823b11b 522 */
9de87eea
MV
523static void *
524do_thread_exit (void *v)
525{
2e77f720
LC
526 scm_i_thread *t = (scm_i_thread *) v;
527
528 if (!scm_is_false (t->cleanup_handler))
529 {
530 SCM ptr = t->cleanup_handler;
531
532 t->cleanup_handler = SCM_BOOL_F;
533 t->result = scm_internal_catch (SCM_BOOL_T,
534 (scm_t_catch_body) scm_call_0, ptr,
535 scm_handle_by_message_noexit, NULL);
536 }
9de87eea 537
86a597f8 538 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
9de87eea
MV
539
540 t->exited = 1;
0c97d7dd
MV
541 close (t->sleep_pipe[0]);
542 close (t->sleep_pipe[1]);
9de87eea
MV
543 while (scm_is_true (unblock_from_queue (t->join_queue)))
544 ;
9de87eea 545
74926120 546 while (!scm_is_null (t->mutexes))
6180e336 547 {
a0faf7dd 548 SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);
74926120 549
a0faf7dd
LC
550 if (!SCM_UNBNDP (mutex))
551 {
552 fat_mutex *m = SCM_MUTEX_DATA (mutex);
553
554 scm_i_pthread_mutex_lock (&m->lock);
f57fdf07
LC
555
556 /* Since MUTEX is in `t->mutexes', T must be its owner. */
557 assert (scm_is_eq (m->owner, t->handle));
558
a0faf7dd 559 unblock_from_queue (m->waiting);
f57fdf07 560
a0faf7dd
LC
561 scm_i_pthread_mutex_unlock (&m->lock);
562 }
6180e336 563
a0faf7dd 564 t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
6180e336
NJ
565 }
566
86a597f8 567 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 568
9de87eea
MV
569 return NULL;
570}
571
f60a7648
AW
572static void *
573do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
574{
575 void *ret;
576 int registered;
577
578 registered = GC_register_my_thread (sb);
579
580 ret = scm_with_guile (do_thread_exit, v);
581
582 if (registered == GC_SUCCESS)
583 GC_unregister_my_thread ();
584
585 return ret;
586}
587
d823b11b 588static void
9de87eea 589on_thread_exit (void *v)
d823b11b 590{
29776e85 591 /* This handler is executed in non-guile mode. */
2e77f720 592 scm_i_thread *t = (scm_i_thread *) v, **tp;
0c97d7dd 593
d2a51087
NJ
594 /* If this thread was cancelled while doing a cond wait, it will
595 still have a mutex locked, so we unlock it here. */
596 if (t->held_mutex)
597 {
598 scm_i_pthread_mutex_unlock (t->held_mutex);
599 t->held_mutex = NULL;
600 }
601
f60a7648
AW
602 /* Reinstate the current thread for purposes of scm_with_guile
603 guile-mode cleanup handlers. Only really needed in the non-TLS
604 case but it doesn't hurt to be consistent. */
605 scm_i_pthread_setspecific (scm_i_thread_key, t);
0c97d7dd 606
2e77f720
LC
607 /* Ensure the signal handling thread has been launched, because we might be
608 shutting it down. */
609 scm_i_ensure_signal_delivery_thread ();
610
0c97d7dd 611 /* Unblocking the joining threads needs to happen in guile mode
f60a7648
AW
612 since the queue is a SCM data structure. Trampoline through
613 GC_call_with_stack_base so that the GC works even if it already
614 cleaned up for this thread. */
615 GC_call_with_stack_base (do_thread_exit_trampoline, v);
0c97d7dd
MV
616
617 /* Removing ourself from the list of all threads needs to happen in
618 non-guile mode since all SCM values on our stack become
29776e85 619 unprotected once we are no longer in the list. */
0c97d7dd
MV
620 scm_i_pthread_mutex_lock (&thread_admin_mutex);
621 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
622 if (*tp == t)
623 {
624 *tp = t->next_thread;
c058db8a
LC
625
626 /* GC-robust */
627 t->next_thread = NULL;
628
0c97d7dd
MV
629 break;
630 }
631 thread_count--;
2e77f720
LC
632
633 /* If there's only one other thread, it could be the signal delivery
634 thread, so we need to notify it to shut down by closing its read pipe.
635 If it's not the signal delivery thread, then closing the read pipe isn't
636 going to hurt. */
637 if (thread_count <= 1)
638 scm_i_close_signal_pipe ();
639
0c97d7dd
MV
640 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
641
f60a7648 642 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
d823b11b
MV
643}
644
9de87eea 645static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
d823b11b 646
9de87eea
MV
647static void
648init_thread_key (void)
649{
f60a7648 650 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
9de87eea 651}
d823b11b 652
cde24ce1
AW
653/* Perform any initializations necessary to make the current thread
654 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
655 if necessary.
a54a94b3 656
9de87eea
MV
657 BASE is the stack base to use with GC.
658
659 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
660 which case the default dynamic state is used.
661
cde24ce1 662 Returns zero when the thread was already known to Guile; otherwise
9de87eea 663 returns 1.
cde24ce1
AW
664
665 Note that it could be the case that the thread was known
666 to Guile, but not in guile mode (because we are within a
667 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
668 be sure. New threads are put into guile mode implicitly. */
9de87eea
MV
669
670static int
671scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
d823b11b 672{
9de87eea
MV
673 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
674
cde24ce1
AW
675 if (SCM_I_CURRENT_THREAD)
676 {
677 /* Thread is already known to Guile.
678 */
679 return 0;
680 }
681 else
9de87eea
MV
682 {
683 /* This thread has not been guilified yet.
684 */
685
686 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
687 if (scm_initialized_p == 0)
688 {
689 /* First thread ever to enter Guile. Run the full
690 initialization.
691 */
692 scm_i_init_guile (base);
693 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
694 }
695 else
696 {
697 /* Guile is already initialized, but this thread enters it for
698 the first time. Only initialize this thread.
699 */
700 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
701 guilify_self_1 (base);
702 guilify_self_2 (parent);
703 }
704 return 1;
705 }
d823b11b
MV
706}
707
9de87eea 708#if SCM_USE_PTHREAD_THREADS
9de87eea 709
534bbcc1 710#if defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP
23d72566
KR
711/* This method for GNU/Linux and perhaps some other systems.
712 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
713 available on them. */
9de87eea
MV
714#define HAVE_GET_THREAD_STACK_BASE
715
716static SCM_STACKITEM *
717get_thread_stack_base ()
d823b11b 718{
9de87eea
MV
719 pthread_attr_t attr;
720 void *start, *end;
721 size_t size;
722
9de87eea
MV
723 pthread_getattr_np (pthread_self (), &attr);
724 pthread_attr_getstack (&attr, &start, &size);
725 end = (char *)start + size;
726
2b829bbb
KR
727 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
728 for the main thread, but we can use scm_get_stack_base in that
729 case.
730 */
731
732#ifndef PTHREAD_ATTR_GETSTACK_WORKS
9de87eea 733 if ((void *)&attr < start || (void *)&attr >= end)
071e0d93 734 return (SCM_STACKITEM *) GC_stackbottom;
9de87eea 735 else
2b829bbb 736#endif
9de87eea
MV
737 {
738#if SCM_STACK_GROWS_UP
739 return start;
740#else
741 return end;
742#endif
743 }
a54a94b3
MV
744}
745
56a3dcd4 746#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
23d72566
KR
747/* This method for MacOS X.
748 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
749 but as of 2006 there's nothing obvious at apple.com. */
750#define HAVE_GET_THREAD_STACK_BASE
751static SCM_STACKITEM *
752get_thread_stack_base ()
753{
754 return pthread_get_stackaddr_np (pthread_self ());
755}
756
757#elif defined (__MINGW32__)
758/* This method for mingw. In mingw the basic scm_get_stack_base can be used
759 in any thread. We don't like hard-coding the name of a system, but there
760 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
761 work. */
762#define HAVE_GET_THREAD_STACK_BASE
763static SCM_STACKITEM *
764get_thread_stack_base ()
765{
071e0d93 766 return (SCM_STACKITEM *) GC_stackbottom;
23d72566
KR
767}
768
769#endif /* pthread methods of get_thread_stack_base */
9de87eea
MV
770
771#else /* !SCM_USE_PTHREAD_THREADS */
772
773#define HAVE_GET_THREAD_STACK_BASE
774
775static SCM_STACKITEM *
776get_thread_stack_base ()
a54a94b3 777{
071e0d93 778 return (SCM_STACKITEM *) GC_stackbottom;
d823b11b
MV
779}
780
9de87eea 781#endif /* !SCM_USE_PTHREAD_THREADS */
9de87eea
MV
782
783#ifdef HAVE_GET_THREAD_STACK_BASE
784
785void
786scm_init_guile ()
d823b11b 787{
9de87eea
MV
788 scm_i_init_thread_for_guile (get_thread_stack_base (),
789 scm_i_default_dynamic_state);
d823b11b
MV
790}
791
9de87eea
MV
792#endif
793
794void *
795scm_with_guile (void *(*func)(void *), void *data)
796{
797 return scm_i_with_guile_and_parent (func, data,
798 scm_i_default_dynamic_state);
799}
800
70eca635 801SCM_UNUSED static void
2e77f720
LC
802scm_leave_guile_cleanup (void *x)
803{
47b6e9bd 804 on_thread_exit (SCM_I_CURRENT_THREAD);
2e77f720
LC
805}
806
cde24ce1
AW
807struct with_guile_trampoline_args
808{
809 GC_fn_type func;
810 void *data;
811};
812
813static void *
814with_guile_trampoline (void *data)
815{
816 struct with_guile_trampoline_args *args = data;
817
818 return scm_c_with_continuation_barrier (args->func, args->data);
819}
820
9de87eea 821void *
2e77f720 822scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
9de87eea
MV
823{
824 void *res;
cde24ce1
AW
825 int new_thread;
826 scm_i_thread *t;
9de87eea 827 SCM_STACKITEM base_item;
2e77f720 828
cde24ce1
AW
829 new_thread = scm_i_init_thread_for_guile (&base_item, parent);
830 t = SCM_I_CURRENT_THREAD;
831 if (new_thread)
2e77f720 832 {
cde24ce1
AW
833 /* We are in Guile mode. */
834 assert (t->guile_mode);
835
2e77f720 836 res = scm_c_with_continuation_barrier (func, data);
cde24ce1
AW
837
838 /* Leave Guile mode. */
839 t->guile_mode = 0;
840 }
841 else if (t->guile_mode)
842 {
843 /* Already in Guile mode. */
844 res = scm_c_with_continuation_barrier (func, data);
2e77f720 845 }
74926120 846 else
cde24ce1
AW
847 {
848 struct with_guile_trampoline_args args;
849 args.func = func;
850 args.data = data;
72e6b608 851
cde24ce1
AW
852 /* We are not in Guile mode, either because we are not within a
853 scm_with_guile, or because we are within a scm_without_guile.
72e6b608 854
cde24ce1
AW
855 This call to scm_with_guile() could happen from anywhere on the
856 stack, and in particular lower on the stack than when it was
857 when this thread was first guilified. Thus, `base' must be
858 updated. */
859#if SCM_STACK_GROWS_UP
860 if (SCM_STACK_PTR (&base_item) < t->base)
861 t->base = SCM_STACK_PTR (&base_item);
862#else
863 if (SCM_STACK_PTR (&base_item) > t->base)
864 t->base = SCM_STACK_PTR (&base_item);
865#endif
72e6b608 866
cde24ce1
AW
867 t->guile_mode = 1;
868 res = with_gc_active (with_guile_trampoline, &args);
869 t->guile_mode = 0;
870 }
871 return res;
72e6b608 872}
3d1af79f 873
9de87eea
MV
874void *
875scm_without_guile (void *(*func)(void *), void *data)
d823b11b 876{
72e6b608 877 void *result;
cde24ce1 878 scm_i_thread *t = SCM_I_CURRENT_THREAD;
72e6b608 879
cde24ce1 880 if (t->guile_mode)
72e6b608 881 {
cde24ce1
AW
882 SCM_I_CURRENT_THREAD->guile_mode = 0;
883 result = with_gc_inactive (func, data);
884 SCM_I_CURRENT_THREAD->guile_mode = 1;
72e6b608
LC
885 }
886 else
cde24ce1 887 /* Otherwise we're not in guile mode, so nothing to do. */
72e6b608
LC
888 result = func (data);
889
890 return result;
9de87eea
MV
891}
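/* Illustrative sketch (not part of threads.c): typical nesting of the two
   entry points above.  A foreign thread enters Guile with scm_with_guile
   and drops out of guile mode around blocking system calls with
   scm_without_guile, so the collector is not held up.  The helper names
   below are hypothetical. */
#if 0
static void *
do_blocking_io (void *data)
{
  /* Runs outside guile mode; must not touch SCM values or allocate. */
  char buf[1];
  full_read (*(int *) data, buf, sizeof buf);
  return NULL;
}

static void *
in_guile (void *data)
{
  int fd = *(int *) data;
  scm_without_guile (do_blocking_io, &fd);   /* GC stays active elsewhere */
  return NULL;
}

static void
foreign_thread_entry (int fd)
{
  scm_with_guile (in_guile, &fd);
}
#endif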
892
72e6b608 893\f
9de87eea
MV
894/*** Thread creation */
895
896typedef struct {
897 SCM parent;
898 SCM thunk;
899 SCM handler;
76da80e7 900 SCM thread;
9de87eea
MV
901 scm_i_pthread_mutex_t mutex;
902 scm_i_pthread_cond_t cond;
903} launch_data;
d823b11b 904
9de87eea
MV
905static void *
906really_launch (void *d)
907{
908 launch_data *data = (launch_data *)d;
909 SCM thunk = data->thunk, handler = data->handler;
910 scm_i_thread *t;
d823b11b 911
9de87eea 912 t = SCM_I_CURRENT_THREAD;
a54a94b3 913
9de87eea
MV
914 scm_i_scm_pthread_mutex_lock (&data->mutex);
915 data->thread = scm_current_thread ();
916 scm_i_pthread_cond_signal (&data->cond);
917 scm_i_pthread_mutex_unlock (&data->mutex);
918
919 if (SCM_UNBNDP (handler))
920 t->result = scm_call_0 (thunk);
921 else
922 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
923
924 return 0;
d823b11b
MV
925}
926
9de87eea
MV
927static void *
928launch_thread (void *d)
929{
930 launch_data *data = (launch_data *)d;
931 scm_i_pthread_detach (scm_i_pthread_self ());
932 scm_i_with_guile_and_parent (really_launch, d, data->parent);
933 return NULL;
934}
935
936SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
d823b11b 937 (SCM thunk, SCM handler),
9de87eea
MV
938 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
939 "returning a new thread object representing the thread. The procedure\n"
940 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
941 "\n"
942 "When @var{handler} is specified, then @var{thunk} is called from\n"
943 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
944 "handler. This catch is established inside the continuation barrier.\n"
945 "\n"
946 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
947 "the @emph{exit value} of the thread and the thread is terminated.")
d823b11b
MV
948#define FUNC_NAME s_scm_call_with_new_thread
949{
9de87eea
MV
950 launch_data data;
951 scm_i_pthread_t id;
952 int err;
d823b11b 953
9de87eea
MV
954 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
955 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
956 handler, SCM_ARG2, FUNC_NAME);
957
958 data.parent = scm_current_dynamic_state ();
959 data.thunk = thunk;
960 data.handler = handler;
961 data.thread = SCM_BOOL_F;
962 scm_i_pthread_mutex_init (&data.mutex, NULL);
963 scm_i_pthread_cond_init (&data.cond, NULL);
964
965 scm_i_scm_pthread_mutex_lock (&data.mutex);
966 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
967 if (err)
968 {
969 scm_i_pthread_mutex_unlock (&data.mutex);
970 errno = err;
971 scm_syserror (NULL);
972 }
973 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
974 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 975
9de87eea 976 return data.thread;
d823b11b
MV
977}
978#undef FUNC_NAME
979
9de87eea
MV
980typedef struct {
981 SCM parent;
982 scm_t_catch_body body;
983 void *body_data;
984 scm_t_catch_handler handler;
985 void *handler_data;
986 SCM thread;
987 scm_i_pthread_mutex_t mutex;
988 scm_i_pthread_cond_t cond;
989} spawn_data;
990
991static void *
992really_spawn (void *d)
993{
994 spawn_data *data = (spawn_data *)d;
995 scm_t_catch_body body = data->body;
996 void *body_data = data->body_data;
997 scm_t_catch_handler handler = data->handler;
998 void *handler_data = data->handler_data;
999 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1000
1001 scm_i_scm_pthread_mutex_lock (&data->mutex);
1002 data->thread = scm_current_thread ();
1003 scm_i_pthread_cond_signal (&data->cond);
1004 scm_i_pthread_mutex_unlock (&data->mutex);
1005
1006 if (handler == NULL)
1007 t->result = body (body_data);
1008 else
1009 t->result = scm_internal_catch (SCM_BOOL_T,
1010 body, body_data,
1011 handler, handler_data);
1012
1013 return 0;
1014}
1015
1016static void *
1017spawn_thread (void *d)
1018{
1019 spawn_data *data = (spawn_data *)d;
1020 scm_i_pthread_detach (scm_i_pthread_self ());
1021 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1022 return NULL;
1023}
1024
1025SCM
1026scm_spawn_thread (scm_t_catch_body body, void *body_data,
1027 scm_t_catch_handler handler, void *handler_data)
1028{
1029 spawn_data data;
1030 scm_i_pthread_t id;
1031 int err;
1032
1033 data.parent = scm_current_dynamic_state ();
1034 data.body = body;
1035 data.body_data = body_data;
1036 data.handler = handler;
1037 data.handler_data = handler_data;
1038 data.thread = SCM_BOOL_F;
1039 scm_i_pthread_mutex_init (&data.mutex, NULL);
1040 scm_i_pthread_cond_init (&data.cond, NULL);
1041
1042 scm_i_scm_pthread_mutex_lock (&data.mutex);
1043 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1044 if (err)
1045 {
1046 scm_i_pthread_mutex_unlock (&data.mutex);
1047 errno = err;
1048 scm_syserror (NULL);
1049 }
1050 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1051 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1052
9de87eea
MV
1053 return data.thread;
1054}
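/* Illustrative sketch (not part of threads.c): spawning a Guile thread from
   C with scm_spawn_thread.  The body and handler names are hypothetical;
   their signatures follow scm_t_catch_body and scm_t_catch_handler. */
#if 0
static SCM
my_body (void *data)
{
  return scm_from_int (*(int *) data);
}

static SCM
my_handler (void *data, SCM key, SCM args)
{
  return SCM_BOOL_F;            /* becomes the thread's exit value */
}

static SCM
spawn_example (void)
{
  static int arg = 42;          /* must outlive the spawned thread's start */
  return scm_spawn_thread (my_body, &arg, my_handler, NULL);
}
#endif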
1055
29717c89
MD
1056SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1057 (),
1058"Move the calling thread to the end of the scheduling queue.")
1059#define FUNC_NAME s_scm_yield
1060{
9de87eea 1061 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1062}
1063#undef FUNC_NAME
1064
2e77f720
LC
1065SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1066 (SCM thread),
1067"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1068"cannot be the current thread, and if @var{thread} has already terminated or "
1069"been signaled to terminate, this function is a no-op.")
1070#define FUNC_NAME s_scm_cancel_thread
1071{
1072 scm_i_thread *t = NULL;
1073
1074 SCM_VALIDATE_THREAD (1, thread);
1075 t = SCM_I_THREAD_DATA (thread);
86a597f8 1076 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1077 if (!t->canceled)
1078 {
1079 t->canceled = 1;
86a597f8 1080 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1081 scm_i_pthread_cancel (t->pthread);
1082 }
1083 else
86a597f8 1084 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1085
1086 return SCM_UNSPECIFIED;
1087}
1088#undef FUNC_NAME
1089
1090SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1091 (SCM thread, SCM proc),
1092"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1093"This handler will be called when the thread exits.")
1094#define FUNC_NAME s_scm_set_thread_cleanup_x
1095{
1096 scm_i_thread *t;
1097
1098 SCM_VALIDATE_THREAD (1, thread);
1099 if (!scm_is_false (proc))
1100 SCM_VALIDATE_THUNK (2, proc);
1101
2e77f720 1102 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1103 scm_i_pthread_mutex_lock (&t->admin_mutex);
1104
2e77f720
LC
1105 if (!(t->exited || t->canceled))
1106 t->cleanup_handler = proc;
1107
86a597f8 1108 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1109
1110 return SCM_UNSPECIFIED;
1111}
1112#undef FUNC_NAME
1113
1114SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1115 (SCM thread),
1116"Return the cleanup handler installed for the thread @var{thread}.")
1117#define FUNC_NAME s_scm_thread_cleanup
1118{
1119 scm_i_thread *t;
1120 SCM ret;
1121
1122 SCM_VALIDATE_THREAD (1, thread);
1123
2e77f720 1124 t = SCM_I_THREAD_DATA (thread);
86a597f8 1125 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1126 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1127 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1128
1129 return ret;
1130}
1131#undef FUNC_NAME
1132
6180e336
NJ
1133SCM scm_join_thread (SCM thread)
1134{
1135 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1136}
1137
1138SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1139 (SCM thread, SCM timeout, SCM timeoutval),
d823b11b
MV
1140"Suspend execution of the calling thread until the target @var{thread} "
1141"terminates, unless the target @var{thread} has already terminated. ")
6180e336 1142#define FUNC_NAME s_scm_join_thread_timed
5f05c406 1143{
9de87eea 1144 scm_i_thread *t;
6180e336
NJ
1145 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1146 SCM res = SCM_BOOL_F;
1147
1148 if (! (SCM_UNBNDP (timeoutval)))
1149 res = timeoutval;
d823b11b
MV
1150
1151 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1152 if (scm_is_eq (scm_current_thread (), thread))
2e77f720 1153 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
d823b11b 1154
9de87eea 1155 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1156 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1157
6180e336
NJ
1158 if (! SCM_UNBNDP (timeout))
1159 {
1160 to_timespec (timeout, &ctimeout);
1161 timeout_ptr = &ctimeout;
1162 }
1163
1164 if (t->exited)
1165 res = t->result;
1166 else
d823b11b 1167 {
9de87eea
MV
1168 while (1)
1169 {
74926120 1170 int err = block_self (t->join_queue, thread, &t->admin_mutex,
6180e336
NJ
1171 timeout_ptr);
1172 if (err == 0)
1173 {
1174 if (t->exited)
1175 {
1176 res = t->result;
1177 break;
1178 }
1179 }
1180 else if (err == ETIMEDOUT)
9de87eea 1181 break;
6180e336 1182
86a597f8 1183 scm_i_pthread_mutex_unlock (&t->admin_mutex);
9de87eea 1184 SCM_TICK;
86a597f8 1185 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
21346c4f
NJ
1186
1187 /* Check for exit again, since we just released and
1188 reacquired the admin mutex, before the next block_self
1189 call (which would block forever if t has already
1190 exited). */
1191 if (t->exited)
1192 {
1193 res = t->result;
1194 break;
1195 }
9de87eea 1196 }
d823b11b 1197 }
9de87eea 1198
86a597f8 1199 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 1200
d823b11b 1201 return res;
5f05c406
MV
1202}
1203#undef FUNC_NAME
1204
6180e336
NJ
1205SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1206 (SCM obj),
1207 "Return @code{#t} if @var{obj} is a thread.")
1208#define FUNC_NAME s_scm_thread_p
1209{
1210 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1211}
1212#undef FUNC_NAME
5f05c406 1213
4079f87e 1214
9de87eea
MV
1215static size_t
1216fat_mutex_free (SCM mx)
76da80e7 1217{
9de87eea
MV
1218 fat_mutex *m = SCM_MUTEX_DATA (mx);
1219 scm_i_pthread_mutex_destroy (&m->lock);
76da80e7
MV
1220 return 0;
1221}
1222
1223static int
9de87eea 1224fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1225{
9de87eea
MV
1226 fat_mutex *m = SCM_MUTEX_DATA (mx);
1227 scm_puts ("#<mutex ", port);
1228 scm_uintprint ((scm_t_bits)m, 16, port);
1229 scm_puts (">", port);
1230 return 1;
76da80e7
MV
1231}
1232
76da80e7 1233static SCM
6180e336 1234make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1235{
9de87eea
MV
1236 fat_mutex *m;
1237 SCM mx;
1238
1239 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1240 scm_i_pthread_mutex_init (&m->lock, NULL);
1241 m->owner = SCM_BOOL_F;
adc085f1 1242 m->level = 0;
6180e336 1243
adc085f1 1244 m->recursive = recursive;
6180e336
NJ
1245 m->unchecked_unlock = unchecked_unlock;
1246 m->allow_external_unlock = external_unlock;
1247
9de87eea
MV
1248 m->waiting = SCM_EOL;
1249 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1250 m->waiting = make_queue ();
1251 return mx;
76da80e7
MV
1252}
1253
6180e336
NJ
1254SCM scm_make_mutex (void)
1255{
1256 return scm_make_mutex_with_flags (SCM_EOL);
1257}
1258
2a1d0688
NJ
1259SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1260SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1261SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1262
1263SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1264 (SCM flags),
9de87eea 1265 "Create a new mutex. ")
6180e336 1266#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1267{
6180e336
NJ
1268 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1269
1270 SCM ptr = flags;
1271 while (! scm_is_null (ptr))
1272 {
1273 SCM flag = SCM_CAR (ptr);
1274 if (scm_is_eq (flag, unchecked_unlock_sym))
1275 unchecked_unlock = 1;
1276 else if (scm_is_eq (flag, allow_external_unlock_sym))
1277 external_unlock = 1;
1278 else if (scm_is_eq (flag, recursive_sym))
1279 recursive = 1;
74926120 1280 else
2a1d0688 1281 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1282 ptr = SCM_CDR (ptr);
1283 }
1284 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
76da80e7
MV
1285}
1286#undef FUNC_NAME
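/* Illustrative sketch (not part of threads.c): requesting mutex flavours
   from C through the flags list of scm_make_mutex_with_flags.  The
   function name is hypothetical. */
#if 0
static SCM
make_mutexes_example (void)
{
  /* Plain standard mutex. */
  SCM m1 = scm_make_mutex ();

  /* Recursive mutex, equivalent to scm_make_recursive_mutex below. */
  SCM m2 = scm_make_mutex_with_flags
    (scm_list_1 (scm_from_locale_symbol ("recursive")));

  /* Mutex that tolerates unlocking from a thread other than the owner. */
  SCM m3 = scm_make_mutex_with_flags
    (scm_list_1 (scm_from_locale_symbol ("allow-external-unlock")));

  return scm_list_3 (m1, m2, m3);
}
#endif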
1287
9de87eea 1288SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
9bc4701c 1289 (void),
9de87eea
MV
1290 "Create a new recursive mutex. ")
1291#define FUNC_NAME s_scm_make_recursive_mutex
9bc4701c 1292{
6180e336 1293 return make_fat_mutex (1, 0, 0);
9bc4701c
MD
1294}
1295#undef FUNC_NAME
1296
6180e336
NJ
1297SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1298
1299static SCM
adc085f1 1300fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
9de87eea
MV
1301{
1302 fat_mutex *m = SCM_MUTEX_DATA (mutex);
6180e336 1303
adc085f1 1304 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
6180e336
NJ
1305 SCM err = SCM_BOOL_F;
1306
1307 struct timeval current_time;
9de87eea
MV
1308
1309 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1310
1311 while (1)
9de87eea 1312 {
adc085f1 1313 if (m->level == 0)
6180e336 1314 {
adc085f1 1315 m->owner = new_owner;
6180e336 1316 m->level++;
74926120 1317
adc085f1 1318 if (SCM_I_IS_THREAD (new_owner))
6180e336 1319 {
adc085f1 1320 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
6180e336 1321 scm_i_pthread_mutex_lock (&t->admin_mutex);
a0faf7dd
LC
1322
1323 /* Only keep a weak reference to MUTEX so that it's not
f57fdf07
LC
1324 retained when not referenced elsewhere (bug #27450).
1325 The weak pair itself is eventually removed when MUTEX
1326 is unlocked. Note that `t->mutexes' lists mutexes
1327 currently held by T, so it should be small. */
a0faf7dd
LC
1328 t->mutexes = scm_weak_car_pair (mutex, t->mutexes);
1329
6180e336 1330 scm_i_pthread_mutex_unlock (&t->admin_mutex);
6180e336 1331 }
adc085f1
JG
1332 *ret = 1;
1333 break;
1334 }
1335 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1336 {
1337 m->owner = new_owner;
1338 err = scm_cons (scm_abandoned_mutex_error_key,
1339 scm_from_locale_string ("lock obtained on abandoned "
1340 "mutex"));
1341 *ret = 1;
1342 break;
1343 }
1344 else if (scm_is_eq (m->owner, new_owner))
1345 {
1346 if (m->recursive)
1347 {
1348 m->level++;
74926120 1349 *ret = 1;
adc085f1
JG
1350 }
1351 else
6180e336 1352 {
adc085f1
JG
1353 err = scm_cons (scm_misc_error_key,
1354 scm_from_locale_string ("mutex already locked "
1355 "by thread"));
1356 *ret = 0;
1357 }
74926120 1358 break;
adc085f1 1359 }
9de87eea 1360 else
9de87eea 1361 {
74926120 1362 if (timeout != NULL)
adc085f1
JG
1363 {
1364 gettimeofday (&current_time, NULL);
1365 if (current_time.tv_sec > timeout->tv_sec ||
1366 (current_time.tv_sec == timeout->tv_sec &&
1367 current_time.tv_usec * 1000 > timeout->tv_nsec))
6180e336 1368 {
adc085f1
JG
1369 *ret = 0;
1370 break;
6180e336 1371 }
6180e336 1372 }
37a52039 1373 block_self (m->waiting, mutex, &m->lock, timeout);
9de87eea
MV
1374 scm_i_pthread_mutex_unlock (&m->lock);
1375 SCM_TICK;
1376 scm_i_scm_pthread_mutex_lock (&m->lock);
1377 }
1378 }
1379 scm_i_pthread_mutex_unlock (&m->lock);
6180e336 1380 return err;
9de87eea
MV
1381}
1382
6180e336
NJ
1383SCM scm_lock_mutex (SCM mx)
1384{
adc085f1 1385 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
6180e336
NJ
1386}
1387
adc085f1
JG
1388SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1389 (SCM m, SCM timeout, SCM owner),
9bc4701c
MD
1390"Lock @var{mutex}. If the mutex is already locked, the calling thread "
1391"blocks until the mutex becomes available. The function returns when "
1392"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1393"a thread already owns will succeed right away and will not block the "
1394"thread. That is, Guile's mutexes are @emph{recursive}. ")
6180e336 1395#define FUNC_NAME s_scm_lock_mutex_timed
9bc4701c 1396{
6180e336
NJ
1397 SCM exception;
1398 int ret = 0;
1399 scm_t_timespec cwaittime, *waittime = NULL;
76da80e7 1400
6180e336
NJ
1401 SCM_VALIDATE_MUTEX (1, m);
1402
1403 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1404 {
1405 to_timespec (timeout, &cwaittime);
1406 waittime = &cwaittime;
1407 }
1408
adc085f1 1409 exception = fat_mutex_lock (m, waittime, owner, &ret);
6180e336
NJ
1410 if (!scm_is_false (exception))
1411 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1412 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c 1413}
76da80e7 1414#undef FUNC_NAME
9bc4701c 1415
a4d106c7 1416void
661ae7ab 1417scm_dynwind_lock_mutex (SCM mutex)
a4d106c7 1418{
661ae7ab
MV
1419 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
1420 SCM_F_WIND_EXPLICITLY);
1421 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
1422 SCM_F_WIND_EXPLICITLY);
a4d106c7
MV
1423}
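/* Illustrative sketch (not part of threads.c): scm_dynwind_lock_mutex is
   meant to be called inside a dynwind context, so the mutex is released
   however the context is left.  The function name is hypothetical. */
#if 0
static SCM
with_mutex_example (SCM mutex)
{
  SCM result;

  scm_dynwind_begin (0);
  scm_dynwind_lock_mutex (mutex);
  result = SCM_BOOL_T;          /* critical section goes here */
  scm_dynwind_end ();           /* unlocks MUTEX; also unlocked on
                                   non-local exit through the dynwind */
  return result;
}
#endif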
1424
9bc4701c 1425SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
ba1b7223 1426 (SCM mutex),
9bc4701c
MD
1427"Try to lock @var{mutex}. If the mutex is already locked by someone "
1428"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1429#define FUNC_NAME s_scm_try_mutex
1430{
6180e336
NJ
1431 SCM exception;
1432 int ret = 0;
1433 scm_t_timespec cwaittime, *waittime = NULL;
9de87eea 1434
ba1b7223 1435 SCM_VALIDATE_MUTEX (1, mutex);
6180e336
NJ
1436
1437 to_timespec (scm_from_int(0), &cwaittime);
1438 waittime = &cwaittime;
74926120 1439
adc085f1 1440 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
6180e336
NJ
1441 if (!scm_is_false (exception))
1442 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1443 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9de87eea
MV
1444}
1445#undef FUNC_NAME
76da80e7 1446
6180e336
NJ
1447/*** Fat condition variables */
1448
1449typedef struct {
1450 scm_i_pthread_mutex_t lock;
1451 SCM waiting; /* the threads waiting for this condition. */
1452} fat_cond;
1453
1454#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1455#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1456
1457static int
1458fat_mutex_unlock (SCM mutex, SCM cond,
1459 const scm_t_timespec *waittime, int relock)
9de87eea 1460{
7f991c7d 1461 SCM owner;
6180e336
NJ
1462 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1463 fat_cond *c = NULL;
1464 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1465 int err = 0, ret = 0;
9de87eea
MV
1466
1467 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1 1468
7f991c7d 1469 owner = m->owner;
adc085f1 1470
d31ae2c3 1471 if (!scm_is_eq (owner, t->handle))
9bc4701c 1472 {
adc085f1 1473 if (m->level == 0)
6180e336
NJ
1474 {
1475 if (!m->unchecked_unlock)
2a1d0688
NJ
1476 {
1477 scm_i_pthread_mutex_unlock (&m->lock);
1478 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1479 }
d31ae2c3 1480 owner = t->handle;
6180e336
NJ
1481 }
1482 else if (!m->allow_external_unlock)
2a1d0688
NJ
1483 {
1484 scm_i_pthread_mutex_unlock (&m->lock);
1485 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1486 }
6180e336
NJ
1487 }
1488
1489 if (! (SCM_UNBNDP (cond)))
1490 {
6180e336
NJ
1491 c = SCM_CONDVAR_DATA (cond);
1492 while (1)
1493 {
1494 int brk = 0;
1495
6180e336
NJ
1496 if (m->level > 0)
1497 m->level--;
adc085f1 1498 if (m->level == 0)
f57fdf07
LC
1499 {
1500 /* Change the owner of MUTEX. */
1501 t->mutexes = scm_delq_x (mutex, t->mutexes);
1502 m->owner = unblock_from_queue (m->waiting);
1503 }
adc085f1 1504
6180e336 1505 t->block_asyncs++;
74926120 1506
d2a51087
NJ
1507 err = block_self (c->waiting, cond, &m->lock, waittime);
1508 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
1509
1510 if (err == 0)
1511 {
1512 ret = 1;
1513 brk = 1;
1514 }
1515 else if (err == ETIMEDOUT)
1516 {
1517 ret = 0;
1518 brk = 1;
1519 }
1520 else if (err != EINTR)
74926120 1521 {
6180e336 1522 errno = err;
6180e336 1523 scm_syserror (NULL);
74926120 1524 }
6180e336
NJ
1525
1526 if (brk)
1527 {
1528 if (relock)
adc085f1 1529 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
4201062d 1530 t->block_asyncs--;
6180e336
NJ
1531 break;
1532 }
74926120 1533
6180e336
NJ
1534 t->block_asyncs--;
1535 scm_async_click ();
74926120 1536
6180e336
NJ
1537 scm_remember_upto_here_2 (cond, mutex);
1538
1539 scm_i_scm_pthread_mutex_lock (&m->lock);
1540 }
9bc4701c 1541 }
9de87eea 1542 else
6180e336
NJ
1543 {
1544 if (m->level > 0)
1545 m->level--;
74926120 1546 if (m->level == 0)
f57fdf07
LC
1547 {
1548 /* Change the owner of MUTEX. */
1549 t->mutexes = scm_delq_x (mutex, t->mutexes);
1550 m->owner = unblock_from_queue (m->waiting);
1551 }
74926120 1552
6180e336
NJ
1553 scm_i_pthread_mutex_unlock (&m->lock);
1554 ret = 1;
1555 }
9de87eea 1556
6180e336 1557 return ret;
9bc4701c 1558}
9bc4701c 1559
6180e336
NJ
1560SCM scm_unlock_mutex (SCM mx)
1561{
1562 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
9bc4701c 1563}
9bc4701c 1564
6180e336
NJ
1565SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1566 (SCM mx, SCM cond, SCM timeout),
9bc4701c
MD
1567"Unlocks @var{mutex} if the calling thread owns the lock on "
1568"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1569"thread results in undefined behaviour. Once a mutex has been unlocked, "
1570"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1571"lock. Every call to @code{lock-mutex} by this thread must be matched "
1572"with a call to @code{unlock-mutex}. Only the last call to "
1573"@code{unlock-mutex} will actually unlock the mutex. ")
6180e336 1574#define FUNC_NAME s_scm_unlock_mutex_timed
9bc4701c 1575{
6180e336
NJ
1576 scm_t_timespec cwaittime, *waittime = NULL;
1577
9bc4701c 1578 SCM_VALIDATE_MUTEX (1, mx);
6180e336
NJ
1579 if (! (SCM_UNBNDP (cond)))
1580 {
1581 SCM_VALIDATE_CONDVAR (2, cond);
1582
1583 if (! (SCM_UNBNDP (timeout)))
1584 {
1585 to_timespec (timeout, &cwaittime);
1586 waittime = &cwaittime;
1587 }
1588 }
1589
1590 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c
MD
1591}
1592#undef FUNC_NAME
1593
6180e336
NJ
1594SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1595 (SCM obj),
1596 "Return @code{#t} if @var{obj} is a mutex.")
1597#define FUNC_NAME s_scm_mutex_p
1598{
1599 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1600}
74926120 1601#undef FUNC_NAME
9de87eea
MV
1602
1603SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1604 (SCM mx),
1605 "Return the thread owning @var{mx}, or @code{#f}.")
1606#define FUNC_NAME s_scm_mutex_owner
1607{
adc085f1
JG
1608 SCM owner;
1609 fat_mutex *m = NULL;
1610
9de87eea 1611 SCM_VALIDATE_MUTEX (1, mx);
adc085f1
JG
1612 m = SCM_MUTEX_DATA (mx);
1613 scm_i_pthread_mutex_lock (&m->lock);
1614 owner = m->owner;
1615 scm_i_pthread_mutex_unlock (&m->lock);
1616
1617 return owner;
9de87eea
MV
1618}
1619#undef FUNC_NAME
1620
1621SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1622 (SCM mx),
adc085f1 1623 "Return the lock level of mutex @var{mx}.")
9de87eea
MV
1624#define FUNC_NAME s_scm_mutex_level
1625{
1626 SCM_VALIDATE_MUTEX (1, mx);
1627 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1628}
1629#undef FUNC_NAME
1630
adc085f1
JG
1631SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1632 (SCM mx),
1633 "Returns @code{#t} if the mutex @var{mx} is locked.")
1634#define FUNC_NAME s_scm_mutex_locked_p
1635{
1636 SCM_VALIDATE_MUTEX (1, mx);
1637 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1638}
1639#undef FUNC_NAME
9de87eea 1640
9de87eea
MV
1641static int
1642fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1643{
1644 fat_cond *c = SCM_CONDVAR_DATA (cv);
1645 scm_puts ("#<condition-variable ", port);
1646 scm_uintprint ((scm_t_bits)c, 16, port);
1647 scm_puts (">", port);
1648 return 1;
1649}
9bc4701c 1650
d823b11b
MV
1651SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1652 (void),
1653 "Make a new condition variable.")
1654#define FUNC_NAME s_scm_make_condition_variable
5f05c406 1655{
9de87eea
MV
1656 fat_cond *c;
1657 SCM cv;
1658
1659 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
9de87eea
MV
1660 c->waiting = SCM_EOL;
1661 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1662 c->waiting = make_queue ();
d823b11b 1663 return cv;
5f05c406 1664}
d823b11b 1665#undef FUNC_NAME
5f05c406 1666
d823b11b
MV
1667SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1668 (SCM cv, SCM mx, SCM t),
1669"Wait until @var{cond-var} has been signalled. While waiting, "
1670"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1671"is locked again when this function returns. When @var{time} is given, "
1672"it specifies a point in time where the waiting should be aborted. It "
1673"can be either a integer as returned by @code{current-time} or a pair "
1674"as returned by @code{gettimeofday}. When the waiting is aborted the "
1675"mutex is locked and @code{#f} is returned. When the condition "
1676"variable is in fact signalled, the mutex is also locked and @code{#t} "
1677"is returned. ")
1678#define FUNC_NAME s_scm_timed_wait_condition_variable
5f05c406 1679{
9de87eea 1680 scm_t_timespec waittime, *waitptr = NULL;
d823b11b
MV
1681
1682 SCM_VALIDATE_CONDVAR (1, cv);
1683 SCM_VALIDATE_MUTEX (2, mx);
74926120 1684
d823b11b
MV
1685 if (!SCM_UNBNDP (t))
1686 {
6180e336 1687 to_timespec (t, &waittime);
9de87eea 1688 waitptr = &waittime;
d823b11b
MV
1689 }
1690
2a1d0688 1691 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
5f05c406 1692}
d823b11b 1693#undef FUNC_NAME
5f05c406 1694
9de87eea
MV
1695static void
1696fat_cond_signal (fat_cond *c)
1697{
9de87eea 1698 unblock_from_queue (c->waiting);
9de87eea
MV
1699}
1700
d823b11b
MV
1701SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1702 (SCM cv),
1703 "Wake up one thread that is waiting for @var{cv}")
1704#define FUNC_NAME s_scm_signal_condition_variable
5f05c406 1705{
d823b11b 1706 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1707 fat_cond_signal (SCM_CONDVAR_DATA (cv));
d823b11b 1708 return SCM_BOOL_T;
5f05c406 1709}
d823b11b 1710#undef FUNC_NAME
5f05c406 1711
9de87eea
MV
1712static void
1713fat_cond_broadcast (fat_cond *c)
1714{
9de87eea
MV
1715 while (scm_is_true (unblock_from_queue (c->waiting)))
1716 ;
9de87eea
MV
1717}
1718
d823b11b
MV
1719SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1720 (SCM cv),
1721 "Wake up all threads that are waiting for @var{cv}. ")
1722#define FUNC_NAME s_scm_broadcast_condition_variable
5f05c406 1723{
d823b11b 1724 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1725 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
d823b11b 1726 return SCM_BOOL_T;
5f05c406 1727}
d823b11b 1728#undef FUNC_NAME
5f05c406 1729
6180e336
NJ
1730SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1731 (SCM obj),
1732 "Return @code{#t} if @var{obj} is a condition variable.")
1733#define FUNC_NAME s_scm_condition_variable_p
1734{
1735 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1736}
1737#undef FUNC_NAME
1738
6087fad9 1739
8c2b3143 1740\f
d823b11b
MV
1741/*** Select */
1742
8c2b3143
LC
1743struct select_args
1744{
1745 int nfds;
1746 SELECT_TYPE *read_fds;
1747 SELECT_TYPE *write_fds;
1748 SELECT_TYPE *except_fds;
1749 struct timeval *timeout;
1750
1751 int result;
1752 int errno_value;
1753};
1754
1755static void *
1756do_std_select (void *args)
1757{
1758 struct select_args *select_args;
1759
1760 select_args = (struct select_args *) args;
1761
1762 select_args->result =
1763 select (select_args->nfds,
1764 select_args->read_fds, select_args->write_fds,
1765 select_args->except_fds, select_args->timeout);
1766 select_args->errno_value = errno;
1767
1768 return NULL;
1769}
1770
911782b7 1771int
9de87eea
MV
1772scm_std_select (int nfds,
1773 SELECT_TYPE *readfds,
1774 SELECT_TYPE *writefds,
1775 SELECT_TYPE *exceptfds,
1776 struct timeval *timeout)
1777{
1778 fd_set my_readfds;
1779 int res, eno, wakeup_fd;
1780 scm_i_thread *t = SCM_I_CURRENT_THREAD;
8c2b3143 1781 struct select_args args;
9de87eea
MV
1782
1783 if (readfds == NULL)
1784 {
1785 FD_ZERO (&my_readfds);
1786 readfds = &my_readfds;
1787 }
1788
1789 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1790 SCM_TICK;
1791
1792 wakeup_fd = t->sleep_pipe[0];
9de87eea
MV
1793 FD_SET (wakeup_fd, readfds);
1794 if (wakeup_fd >= nfds)
1795 nfds = wakeup_fd+1;
9de87eea 1796
8c2b3143
LC
1797 args.nfds = nfds;
1798 args.read_fds = readfds;
1799 args.write_fds = writefds;
1800 args.except_fds = exceptfds;
1801 args.timeout = timeout;
1802
1803 /* Explicitly cooperate with the GC. */
1804 scm_without_guile (do_std_select, &args);
1805
1806 res = args.result;
1807 eno = args.errno_value;
1808
1809 t->sleep_fd = -1;
9de87eea
MV
1810 scm_i_reset_sleep (t);
1811
1812 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1813 {
1814 char dummy;
634aa8de
LC
1815 full_read (wakeup_fd, &dummy, 1);
1816
9de87eea
MV
1817 FD_CLR (wakeup_fd, readfds);
1818 res -= 1;
1819 if (res == 0)
1820 {
1821 eno = EINTR;
1822 res = -1;
1823 }
1824 }
d823b11b
MV
1825 errno = eno;
1826 return res;
5f05c406
MV
1827}
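/* Illustrative sketch (not part of threads.c): scm_std_select is a drop-in
   replacement for select(2) for code running in guile mode.  It leaves
   guile mode around the actual select call and returns -1 with errno set
   to EINTR when the wait was interrupted so that a pending async can run.
   The function name is hypothetical. */
#if 0
static int
wait_for_input_example (int fd, long seconds)
{
  fd_set readfds;
  struct timeval tv;

  FD_ZERO (&readfds);
  FD_SET (fd, &readfds);
  tv.tv_sec = seconds;
  tv.tv_usec = 0;

  /* Returns like select(2); on -1/EINTR the caller typically runs
     SCM_TICK and retries. */
  return scm_std_select (fd + 1, &readfds, NULL, NULL, &tv);
}
#endif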
1828
9de87eea 1829/* Convenience API for blocking while in guile mode. */
76da80e7 1830
9de87eea 1831#if SCM_USE_PTHREAD_THREADS
92e64b87 1832
2956b071
LC
1833/* It seems reasonable to not run procedures related to mutex and condition
1834 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1835 without it, and (ii) the only potential gain would be GC latency. See
1836 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1837 for a discussion of the pros and cons. */
1838
9bc4701c 1839int
9de87eea 1840scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
9bc4701c 1841{
9de87eea 1842 int res = scm_i_pthread_mutex_lock (mutex);
9bc4701c
MD
1843 return res;
1844}
1845
9de87eea 1846static void
2b829bbb 1847do_unlock (void *data)
28d52ebb 1848{
9de87eea 1849 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
28d52ebb
MD
1850}
1851
1852void
661ae7ab 1853scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
28d52ebb 1854{
9de87eea 1855 scm_i_scm_pthread_mutex_lock (mutex);
2b829bbb 1856 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
28d52ebb
MD
1857}
1858
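/* Illustrative sketch, not part of the original source: pairing
   scm_dynwind_pthread_mutex_lock with a dynwind context so the mutex is
   released even on a non-local exit.  `stats_lock' and `update_stats'
   are hypothetical.  */
#if 0
static scm_i_pthread_mutex_t stats_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

static void
with_stats_lock (void)
{
  scm_dynwind_begin (0);
  scm_dynwind_pthread_mutex_lock (&stats_lock);
  update_stats ();          /* may throw; the unwind handler still unlocks */
  scm_dynwind_end ();
}
#endif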
9bc4701c 1859int
9de87eea 1860scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
9bc4701c 1861{
4cf72f0b
LC
1862 int res;
1863 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1864
1865 t->held_mutex = mutex;
1866 res = scm_i_pthread_cond_wait (cond, mutex);
1867 t->held_mutex = NULL;
1868
9bc4701c
MD
1869 return res;
1870}
9bc4701c 1871
76da80e7 1872int
9de87eea
MV
1873scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1874 scm_i_pthread_mutex_t *mutex,
1875 const scm_t_timespec *wt)
76da80e7 1876{
4cf72f0b
LC
1877 int res;
1878 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1879
1880 t->held_mutex = mutex;
1881 res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1882 t->held_mutex = NULL;
1883
9de87eea 1884 return res;
76da80e7
MV
1885}
1886
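/* Illustrative sketch, not part of the original source: the standard
   predicate loop around scm_pthread_cond_wait.  Recording the mutex in
   `held_mutex' above lets the rest of Guile know what this thread holds
   while it waits; the loop itself guards against spurious wakeups.  The
   `ready' flag and its mutex/condvar are hypothetical.  */
#if 0
static scm_i_pthread_mutex_t ready_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_pthread_cond_t ready_cond;  /* scm_i_pthread_cond_init'd elsewhere */
static int ready = 0;

static void
wait_until_ready (void)
{
  scm_i_scm_pthread_mutex_lock (&ready_mutex);
  while (!ready)
    scm_pthread_cond_wait (&ready_cond, &ready_mutex);
  scm_i_pthread_mutex_unlock (&ready_mutex);
}
#endif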
9de87eea 1887#endif
76da80e7 1888
d823b11b 1889unsigned long
9de87eea 1890scm_std_usleep (unsigned long usecs)
5f05c406 1891{
d823b11b
MV
1892 struct timeval tv;
1893 tv.tv_usec = usecs % 1000000;
1894 tv.tv_sec = usecs / 1000000;
9de87eea
MV
1895 scm_std_select (0, NULL, NULL, NULL, &tv);
1896 return tv.tv_sec * 1000000 + tv.tv_usec;
5f05c406
MV
1897}
1898
9de87eea
MV
1899unsigned int
1900scm_std_sleep (unsigned int secs)
6c214b62 1901{
d823b11b
MV
1902 struct timeval tv;
1903 tv.tv_usec = 0;
1904 tv.tv_sec = secs;
9de87eea 1905 scm_std_select (0, NULL, NULL, NULL, &tv);
d823b11b 1906 return tv.tv_sec;
6c214b62
MD
1907}
1908
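/* Illustrative sketch, not part of the original source: like sleep(3),
   scm_std_sleep reports how much of the request was left unslept when an
   async woke it early, at least on systems where select(2) updates its
   timeout argument.  */
#if 0
static void
nap_and_report (unsigned int secs)
{
  unsigned int left = scm_std_sleep (secs);
  if (left > 0)
    fprintf (stderr, "woken %u seconds early\n", left);
}
#endif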
d823b11b
MV
1909/*** Misc */
1910
1911SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1912 (void),
1913 "Return the thread that called this function.")
1914#define FUNC_NAME s_scm_current_thread
1915{
9de87eea 1916 return SCM_I_CURRENT_THREAD->handle;
d823b11b
MV
1917}
1918#undef FUNC_NAME
1919
9de87eea
MV
1920static SCM
1921scm_c_make_list (size_t n, SCM fill)
1922{
1923 SCM res = SCM_EOL;
1924 while (n-- > 0)
1925 res = scm_cons (fill, res);
1926 return res;
1927}
1928
d823b11b
MV
1929SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
1930 (void),
1931 "Return a list of all threads.")
9bc4701c 1932#define FUNC_NAME s_scm_all_threads
d823b11b 1933{
9de87eea
MV
 1934   /* We cannot allocate while holding the thread_admin_mutex because
1935 of the way GC is done.
1936 */
1937 int n = thread_count;
1938 scm_i_thread *t;
1939 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
d823b11b 1940
9de87eea
MV
1941 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1942 l = &list;
1943 for (t = all_threads; t && n > 0; t = t->next_thread)
1944 {
2e77f720
LC
1945 if (t != scm_i_signal_delivery_thread)
1946 {
1947 SCM_SETCAR (*l, t->handle);
1948 l = SCM_CDRLOC (*l);
1949 }
9de87eea
MV
1950 n--;
1951 }
1952 *l = SCM_EOL;
1953 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1954 return list;
d823b11b 1955}
9de87eea 1956#undef FUNC_NAME
d823b11b
MV
1957
1958SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1959 (SCM thread),
1960 "Return @code{#t} iff @var{thread} has exited.\n")
1961#define FUNC_NAME s_scm_thread_exited_p
1962{
7888309b 1963 return scm_from_bool (scm_c_thread_exited_p (thread));
d823b11b
MV
1964}
1965#undef FUNC_NAME
1966
911782b7 1967int
d823b11b
MV
1968scm_c_thread_exited_p (SCM thread)
1969#define FUNC_NAME s_scm_thread_exited_p
5f05c406 1970{
9de87eea 1971 scm_i_thread *t;
d823b11b 1972 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1973 t = SCM_I_THREAD_DATA (thread);
d823b11b 1974 return t->exited;
5f05c406 1975}
d823b11b 1976#undef FUNC_NAME
5f05c406 1977
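/* Illustrative sketch, not part of the original source: counting the
   threads that are still running by walking the list returned by
   scm_all_threads and filtering with scm_thread_exited_p.  */
#if 0
static long
count_running_threads (void)
{
  long n = 0;
  SCM threads = scm_all_threads ();
  for (; scm_is_pair (threads); threads = scm_cdr (threads))
    if (scm_is_false (scm_thread_exited_p (scm_car (threads))))
      n++;
  return n;
}
#endif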
9de87eea 1978static scm_i_pthread_cond_t wake_up_cond;
9bc4701c
MD
1979static int threads_initialized_p = 0;
1980
9bc4701c 1981
a4d106c7
MV
1982/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
1983 */
d1138028 1984scm_i_pthread_mutex_t scm_i_critical_section_mutex;
a4d106c7 1985
661ae7ab 1986static SCM dynwind_critical_section_mutex;
a54a94b3 1987
9bc4701c 1988void
661ae7ab 1989scm_dynwind_critical_section (SCM mutex)
76da80e7 1990{
a4d106c7 1991 if (scm_is_false (mutex))
661ae7ab
MV
1992 mutex = dynwind_critical_section_mutex;
1993 scm_dynwind_lock_mutex (mutex);
1994 scm_dynwind_block_asyncs ();
9de87eea
MV
1995}
1996
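/* Illustrative sketch, not part of the original source: entering a
   critical section from C.  Passing SCM_BOOL_F selects the shared
   recursive mutex above, and asyncs stay blocked until the enclosing
   dynwind context ends.  `mutate_shared_state' is hypothetical.  */
#if 0
static void
with_critical_section (void)
{
  scm_dynwind_begin (0);
  scm_dynwind_critical_section (SCM_BOOL_F);
  mutate_shared_state ();   /* runs with the mutex held and asyncs blocked */
  scm_dynwind_end ();
}
#endif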
1997/*** Initialization */
1998
9de87eea
MV
1999scm_i_pthread_mutex_t scm_i_misc_mutex;
2000
d1138028
MV
2001#if SCM_USE_PTHREAD_THREADS
2002pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2003#endif
2004
9de87eea
MV
2005void
2006scm_threads_prehistory (SCM_STACKITEM *base)
2007{
d1138028
MV
2008#if SCM_USE_PTHREAD_THREADS
2009 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
2010 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
2011 PTHREAD_MUTEX_RECURSIVE);
2012#endif
2013
2014 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
2015 scm_i_pthread_mutexattr_recursive);
9de87eea
MV
2016 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
2017 scm_i_pthread_cond_init (&wake_up_cond, NULL);
74926120 2018
9de87eea 2019 guilify_self_1 (base);
9bc4701c
MD
2020}
2021
d823b11b
MV
2022scm_t_bits scm_tc16_thread;
2023scm_t_bits scm_tc16_mutex;
2024scm_t_bits scm_tc16_condvar;
7bfd3b9e 2025
7bfd3b9e 2026void
9de87eea 2027scm_init_threads ()
7bfd3b9e 2028{
9de87eea 2029 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
d823b11b 2030 scm_set_smob_print (scm_tc16_thread, thread_print);
d823b11b 2031
9de87eea 2032 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
9de87eea
MV
2033 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
2034 scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
9bc4701c 2035
9de87eea
MV
2036 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
2037 sizeof (fat_cond));
9de87eea 2038 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
d823b11b 2039
9de87eea
MV
2040 scm_i_default_dynamic_state = SCM_BOOL_F;
2041 guilify_self_2 (SCM_BOOL_F);
9bc4701c 2042 threads_initialized_p = 1;
a4d106c7 2043
f39448c5 2044 dynwind_critical_section_mutex = scm_make_recursive_mutex ();
7bfd3b9e 2045}
89e00824 2046
5f05c406 2047void
9de87eea 2048scm_init_threads_default_dynamic_state ()
5f05c406 2049{
9de87eea 2050 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
f39448c5 2051 scm_i_default_dynamic_state = state;
5f05c406
MV
2052}
2053
d823b11b 2054void
9de87eea 2055scm_init_thread_procs ()
d823b11b 2056{
9de87eea 2057#include "libguile/threads.x"
d823b11b
MV
2058}
2059
3c13664e
LC
2060\f
2061/* IA64-specific things. */
2062
2063#ifdef __ia64__
2064# ifdef __hpux
2065# include <sys/param.h>
2066# include <sys/pstat.h>
2067void *
2068scm_ia64_register_backing_store_base (void)
2069{
2070 struct pst_vm_status vm_status;
2071 int i = 0;
2072 while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
2073 if (vm_status.pst_type == PS_RSESTACK)
2074 return (void *) vm_status.pst_vaddr;
2075 abort ();
2076}
2077void *
2078scm_ia64_ar_bsp (const void *ctx)
2079{
2080 uint64_t bsp;
2081 __uc_get_ar_bsp (ctx, &bsp);
2082 return (void *) bsp;
2083}
2084# endif /* hpux */
2085# ifdef linux
2086# include <ucontext.h>
2087void *
2088scm_ia64_register_backing_store_base (void)
2089{
2090 extern void *__libc_ia64_register_backing_store_base;
2091 return __libc_ia64_register_backing_store_base;
2092}
2093void *
2094scm_ia64_ar_bsp (const void *opaque)
2095{
2096 const ucontext_t *ctx = opaque;
2097 return (void *) ctx->uc_mcontext.sc_ar_bsp;
2098}
2099# endif /* linux */
2100#endif /* __ia64__ */
2101
2102
89e00824
ML
2103/*
2104 Local Variables:
2105 c-file-style: "gnu"
2106 End:
2107*/