libguile/threads.c
/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
 *   2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
 *   Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */


\f
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "libguile/bdw-gc.h"
#include "libguile/_scm.h"

#include <stdlib.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>

#ifdef HAVE_STRING_H
#include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
#endif

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#if HAVE_PTHREAD_NP_H
# include <pthread_np.h>
#endif

#include <assert.h>
#include <fcntl.h>
#include <nproc.h>

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"

#include <full-read.h>


\f

72/* First some libgc shims. */
73
74/* Make sure GC_fn_type is defined; it is missing from the public
75 headers of GC 7.1 and earlier. */
76#ifndef HAVE_GC_FN_TYPE
77typedef void * (* GC_fn_type) (void *);
78#endif
79
80
5f0d2951
AW
81#ifndef GC_SUCCESS
82#define GC_SUCCESS 0
83#endif
84
85#ifndef GC_UNIMPLEMENTED
86#define GC_UNIMPLEMENTED 3
87#endif
88
89/* Likewise struct GC_stack_base is missing before 7.1. */
90#ifndef HAVE_GC_STACK_BASE
12c1d861
AW
91struct GC_stack_base {
92 void * mem_base; /* Base of memory stack. */
93#ifdef __ia64__
94 void * reg_base; /* Base of separate register stack. */
95#endif
96};
5f0d2951
AW
97
98static int
987b8160 99GC_register_my_thread (struct GC_stack_base *stack_base)
5f0d2951
AW
100{
101 return GC_UNIMPLEMENTED;
102}
103
104static void
105GC_unregister_my_thread ()
106{
107}
108
12c1d861
AW
109#if !SCM_USE_PTHREAD_THREADS
110/* No threads; we can just use GC_stackbottom. */
5f0d2951 111static void *
12c1d861
AW
112get_thread_stack_base ()
113{
114 return GC_stackbottom;
115}
116
117#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
118 && defined PTHREAD_ATTR_GETSTACK_WORKS
/* This method is for GNU/Linux and perhaps some other systems.
   It's not for Mac OS X or Solaris 10, since pthread_getattr_np is not
   available on them.  */
122static void *
123get_thread_stack_base ()
124{
125 pthread_attr_t attr;
126 void *start, *end;
127 size_t size;
128
129 pthread_getattr_np (pthread_self (), &attr);
130 pthread_attr_getstack (&attr, &start, &size);
131 end = (char *)start + size;
132
133#if SCM_STACK_GROWS_UP
134 return start;
135#else
136 return end;
137#endif
138}
139
140#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
/* This method is for Mac OS X.
   It would be nice if there were some documentation on
   pthread_get_stackaddr_np, but as of 2006 there's nothing obvious at
   apple.com.  */
144static void *
145get_thread_stack_base ()
146{
147 return pthread_get_stackaddr_np (pthread_self ());
148}
149
c1d5d6d7
AW
150#elif HAVE_PTHREAD_ATTR_GET_NP
151/* This one is for FreeBSD 9. */
152static void *
153get_thread_stack_base ()
154{
155 pthread_attr_t attr;
156 void *start, *end;
157 size_t size;
158
159 pthread_attr_init (&attr);
160 pthread_attr_get_np (pthread_self (), &attr);
161 pthread_attr_getstack (&attr, &start, &size);
162 pthread_attr_destroy (&attr);
163
164 end = (char *)start + size;
165
166#if SCM_STACK_GROWS_UP
167 return start;
168#else
169 return end;
170#endif
171}
172
12c1d861
AW
173#else
174#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
175#endif
176
177static int
987b8160 178GC_get_stack_base (struct GC_stack_base *stack_base)
5f0d2951 179{
12c1d861
AW
180 stack_base->mem_base = get_thread_stack_base ();
181#ifdef __ia64__
182 /* Calculate and store off the base of this thread's register
183 backing store (RBS). Unfortunately our implementation(s) of
184 scm_ia64_register_backing_store_base are only reliable for the
185 main thread. For other threads, therefore, find out the current
186 top of the RBS, and use that as a maximum. */
187 stack_base->reg_base = scm_ia64_register_backing_store_base ();
188 {
189 ucontext_t ctx;
190 void *bsp;
191 getcontext (&ctx);
192 bsp = scm_ia64_ar_bsp (&ctx);
193 if (stack_base->reg_base > bsp)
194 stack_base->reg_base = bsp;
195 }
196#endif
197 return GC_SUCCESS;
5f0d2951 198}
12c1d861
AW
199
200static void *
201GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
202{
203 struct GC_stack_base stack_base;
204
205 stack_base.mem_base = (void*)&stack_base;
206#ifdef __ia64__
207 /* FIXME: Untested. */
208 {
209 ucontext_t ctx;
210 getcontext (&ctx);
211 stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
212 }
5f0d2951
AW
213#endif
214
12c1d861
AW
215 return fn (&stack_base, arg);
216}
217#endif /* HAVE_GC_STACK_BASE */
218
5f0d2951 219
cde24ce1
AW
220/* Now define with_gc_active and with_gc_inactive. */
221
222#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
223
224/* We have a sufficiently new libgc (7.2 or newer). */
225
226static void*
227with_gc_inactive (GC_fn_type func, void *data)
228{
229 return GC_do_blocking (func, data);
230}
231
232static void*
233with_gc_active (GC_fn_type func, void *data)
234{
235 return GC_call_with_gc_active (func, data);
236}
237
238#else
239
240/* libgc not new enough, so never actually deactivate GC.
241
242 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
243 GC_call_with_gc_active. */
244
245static void*
246with_gc_inactive (GC_fn_type func, void *data)
247{
248 return func (data);
249}
250
251static void*
252with_gc_active (GC_fn_type func, void *data)
253{
254 return func (data);
255}
256
257#endif /* HAVE_GC_DO_BLOCKING */
258
259
634aa8de 260\f
6180e336
NJ
261static void
262to_timespec (SCM t, scm_t_timespec *waittime)
263{
264 if (scm_is_pair (t))
265 {
266 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
267 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
268 }
269 else
270 {
271 double time = scm_to_double (t);
272 double sec = scm_c_truncate (time);
273
274 waittime->tv_sec = (long) sec;
2a1d0688 275 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
6180e336
NJ
276 }
277}
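
/* Illustrative sketch, not part of the original source: the two timeout
   representations accepted by to_timespec above.  The values in the
   trailing comments follow from the conversion rules in the function.

   scm_t_timespec ts;

   to_timespec (scm_from_double (1.5), &ts);
   // => ts.tv_sec == 1, ts.tv_nsec == 500000000

   to_timespec (scm_cons (scm_from_ulong (1), scm_from_ulong (500000)),
                &ts);
   // (secs . usecs) pair, as returned by gettimeofday
   // => ts.tv_sec == 1, ts.tv_nsec == 500000000
*/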
278
c058db8a 279\f
d823b11b 280/*** Queues */
7bfd3b9e 281
c058db8a
LC
/* Note: We annotate with "GC-robust" those assignments whose purpose is to
   avoid the risk of false references leading to unbounded retained space,
   as described in "Bounding Space Usage of Conservative Garbage
   Collectors", H. J. Boehm, 2001.  */
286
9de87eea
MV
287/* Make an empty queue data structure.
288 */
d823b11b
MV
289static SCM
290make_queue ()
291{
292 return scm_cons (SCM_EOL, SCM_EOL);
293}
7bfd3b9e 294
9de87eea
MV
295/* Put T at the back of Q and return a handle that can be used with
296 remqueue to remove T from Q again.
297 */
d823b11b
MV
298static SCM
299enqueue (SCM q, SCM t)
300{
301 SCM c = scm_cons (t, SCM_EOL);
d2a51087 302 SCM_CRITICAL_SECTION_START;
d2e53ed6 303 if (scm_is_null (SCM_CDR (q)))
d823b11b
MV
304 SCM_SETCDR (q, c);
305 else
306 SCM_SETCDR (SCM_CAR (q), c);
307 SCM_SETCAR (q, c);
d2a51087 308 SCM_CRITICAL_SECTION_END;
d823b11b
MV
309 return c;
310}
7bfd3b9e 311
9de87eea
MV
312/* Remove the element that the handle C refers to from the queue Q. C
313 must have been returned from a call to enqueue. The return value
314 is zero when the element referred to by C has already been removed.
315 Otherwise, 1 is returned.
316*/
317static int
d823b11b
MV
318remqueue (SCM q, SCM c)
319{
320 SCM p, prev = q;
d2a51087 321 SCM_CRITICAL_SECTION_START;
d2e53ed6 322 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
d823b11b 323 {
bc36d050 324 if (scm_is_eq (p, c))
d823b11b 325 {
bc36d050 326 if (scm_is_eq (c, SCM_CAR (q)))
d823b11b
MV
327 SCM_SETCAR (q, SCM_CDR (c));
328 SCM_SETCDR (prev, SCM_CDR (c));
c058db8a
LC
329
330 /* GC-robust */
331 SCM_SETCDR (c, SCM_EOL);
332
d2a51087 333 SCM_CRITICAL_SECTION_END;
9de87eea 334 return 1;
d823b11b
MV
335 }
336 prev = p;
337 }
d2a51087 338 SCM_CRITICAL_SECTION_END;
9de87eea 339 return 0;
d823b11b
MV
340}
341
9de87eea
MV
342/* Remove the front-most element from the queue Q and return it.
343 Return SCM_BOOL_F when Q is empty.
344*/
d823b11b
MV
345static SCM
346dequeue (SCM q)
347{
d2a51087
NJ
348 SCM c;
349 SCM_CRITICAL_SECTION_START;
350 c = SCM_CDR (q);
d2e53ed6 351 if (scm_is_null (c))
d2a51087
NJ
352 {
353 SCM_CRITICAL_SECTION_END;
354 return SCM_BOOL_F;
355 }
d823b11b
MV
356 else
357 {
358 SCM_SETCDR (q, SCM_CDR (c));
d2e53ed6 359 if (scm_is_null (SCM_CDR (q)))
d823b11b 360 SCM_SETCAR (q, SCM_EOL);
d2a51087 361 SCM_CRITICAL_SECTION_END;
c058db8a
LC
362
363 /* GC-robust */
364 SCM_SETCDR (c, SCM_EOL);
365
d823b11b
MV
366 return SCM_CAR (c);
367 }
368}
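
/* Illustrative sketch, not part of the original source, of how the queue
   helpers above compose; `item' is a stand-in for any SCM value.

   SCM q = make_queue ();
   SCM handle = enqueue (q, item);   // item is now at the back of q
   ...
   if (remqueue (q, handle) == 0)
     ;                               // item had already been removed
   SCM front = dequeue (q);          // SCM_BOOL_F when q is empty
*/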
7bfd3b9e 369
9de87eea 370/*** Thread smob routines */
76da80e7 371
d823b11b
MV
372
373static int
374thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
375{
23d72566
KR
  /* On a GNU system pthread_t is an unsigned long, but on MinGW it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error
     in the struct case, hence we go via a union, and extract according
     to the size of pthread_t.  */
380 union {
381 scm_i_pthread_t p;
382 unsigned short us;
383 unsigned int ui;
384 unsigned long ul;
385 scm_t_uintmax um;
386 } u;
9de87eea 387 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
23d72566
KR
388 scm_i_pthread_t p = t->pthread;
389 scm_t_uintmax id;
390 u.p = p;
391 if (sizeof (p) == sizeof (unsigned short))
392 id = u.us;
393 else if (sizeof (p) == sizeof (unsigned int))
394 id = u.ui;
395 else if (sizeof (p) == sizeof (unsigned long))
396 id = u.ul;
397 else
398 id = u.um;
399
0607ebbf 400 scm_puts_unlocked ("#<thread ", port);
23d72566 401 scm_uintprint (id, 10, port);
0607ebbf 402 scm_puts_unlocked (" (", port);
0345e278 403 scm_uintprint ((scm_t_bits)t, 16, port);
0607ebbf 404 scm_puts_unlocked (")>", port);
d823b11b
MV
405 return 1;
406}
407
706846f6 408\f
9de87eea 409/*** Blocking on queues. */
f7eca35d 410
9de87eea
MV
/* See also scm_i_queue_async_cell for how such a block is
   interrupted.  */
d823b11b 414
9de87eea
MV
415/* Put the current thread on QUEUE and go to sleep, waiting for it to
416 be woken up by a call to 'unblock_from_queue', or to be
417 interrupted. Upon return of this function, the current thread is
418 no longer on QUEUE, even when the sleep has been interrupted.
419
d2a51087 420 The caller of block_self must hold MUTEX. It will be atomically
9de87eea
MV
421 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
422
423 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
424 as MUTEX is needed.
425
426 When WAITTIME is not NULL, the sleep will be aborted at that time.
427
428 The return value of block_self is an errno value. It will be zero
429 when the sleep has been successfully completed by a call to
430 unblock_from_queue, EINTR when it has been interrupted by the
431 delivery of a system async, and ETIMEDOUT when the timeout has
432 expired.
433
434 The system asyncs themselves are not executed by block_self.
435*/
436static int
437block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
438 const scm_t_timespec *waittime)
76da80e7 439{
9de87eea
MV
440 scm_i_thread *t = SCM_I_CURRENT_THREAD;
441 SCM q_handle;
442 int err;
443
444 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
445 err = EINTR;
446 else
447 {
448 t->block_asyncs++;
449 q_handle = enqueue (queue, t->handle);
450 if (waittime == NULL)
451 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
452 else
453 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
454
455 /* When we are still on QUEUE, we have been interrupted. We
456 report this only when no other error (such as a timeout) has
457 happened above.
458 */
459 if (remqueue (queue, q_handle) && err == 0)
460 err = EINTR;
461 t->block_asyncs--;
462 scm_i_reset_sleep (t);
463 }
464
465 return err;
76da80e7 466}
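
/* Illustrative sketch, not part of the original source, of the block_self
   calling convention; `wait_queue' and `some_mutex' are hypothetical.
   The caller holds SOME_MUTEX, which is atomically released while
   sleeping and re-acquired before block_self returns.

   scm_i_scm_pthread_mutex_lock (&some_mutex);
   err = block_self (wait_queue, SCM_BOOL_F, &some_mutex, NULL);
   if (err == EINTR)
     ;   // interrupted by a system async; run asyncs after unlocking
   scm_i_pthread_mutex_unlock (&some_mutex);
*/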
9de87eea 467
d2a51087
NJ
468/* Wake up the first thread on QUEUE, if any. The awoken thread is
469 returned, or #f if the queue was empty.
9de87eea
MV
470 */
471static SCM
472unblock_from_queue (SCM queue)
473{
474 SCM thread = dequeue (queue);
475 if (scm_is_true (thread))
476 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
477 return thread;
478}
479
45f15cac 480\f
9de87eea
MV
481/* Getting into and out of guile mode.
482 */
483
f60a7648
AW
484/* Key used to attach a cleanup handler to a given thread. Also, if
485 thread-local storage is unavailable, this key is used to retrieve the
486 current thread with `pthread_getspecific ()'. */
487scm_i_pthread_key_t scm_i_thread_key;
488
489
705edb95
LC
490#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
491
492/* When thread-local storage (TLS) is available, a pointer to the
493 current-thread object is kept in TLS. Note that storing the thread-object
494 itself in TLS (rather than a pointer to some malloc'd memory) is not
495 possible since thread objects may live longer than the actual thread they
496 represent. */
497SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
498
f60a7648 499#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
705edb95 500
d823b11b 501
9de87eea 502static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
6a97b1f9 503SCM_PTHREAD_ATFORK_LOCK_STATIC_MUTEX (thread_admin_mutex);
9de87eea
MV
504static scm_i_thread *all_threads = NULL;
505static int thread_count;
506
507static SCM scm_i_default_dynamic_state;
508
0b77014f
AW
509/* Run when a fluid is collected. */
510void
aafb4ed7 511scm_i_reset_fluid (size_t n)
0b77014f
AW
512{
513 scm_i_thread *t;
514
515 scm_i_pthread_mutex_lock (&thread_admin_mutex);
516 for (t = all_threads; t; t = t->next_thread)
517 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
518 {
519 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
520
521 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
aafb4ed7 522 SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
0b77014f
AW
523 }
524 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
525}
526
9de87eea 527/* Perform first stage of thread initialisation, in non-guile mode.
d823b11b 528 */
9de87eea 529static void
12c1d861 530guilify_self_1 (struct GC_stack_base *base)
d823b11b 531{
7f22442b
AW
532 scm_i_thread t;
533
534 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
535 before allocating anything in this thread, because allocation could
536 cause GC to run, and GC could cause finalizers, which could invoke
537 Scheme functions, which need the current thread to be set. */
538
539 t.pthread = scm_i_pthread_self ();
540 t.handle = SCM_BOOL_F;
541 t.result = SCM_BOOL_F;
542 t.cleanup_handler = SCM_BOOL_F;
543 t.mutexes = SCM_EOL;
544 t.held_mutex = NULL;
545 t.join_queue = SCM_EOL;
546 t.dynamic_state = SCM_BOOL_F;
547 t.dynwinds = SCM_EOL;
548 t.active_asyncs = SCM_EOL;
549 t.block_asyncs = 1;
550 t.pending_asyncs = 1;
551 t.critical_section_level = 0;
552 t.base = base->mem_base;
346e4402 553#ifdef __ia64__
bbd21051 554 t.register_backing_store_base = base->reg_base;
346e4402 555#endif
7f22442b
AW
556 t.continuation_root = SCM_EOL;
557 t.continuation_base = t.base;
558 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
559 t.sleep_mutex = NULL;
560 t.sleep_object = SCM_BOOL_F;
561 t.sleep_fd = -1;
562
32f94bf2 563 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
634aa8de
LC
564 /* FIXME: Error conditions during the initialization phase are handled
565 gracelessly since public functions such as `scm_init_guile ()'
566 currently have type `void'. */
567 abort ();
568
7f22442b
AW
569 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
570 t.current_mark_stack_ptr = NULL;
571 t.current_mark_stack_limit = NULL;
572 t.canceled = 0;
573 t.exited = 0;
574 t.guile_mode = 0;
9de87eea 575
7f22442b
AW
576 /* The switcheroo. */
577 {
578 scm_i_thread *t_ptr = &t;
579
580 GC_disable ();
581 t_ptr = GC_malloc (sizeof (scm_i_thread));
582 memcpy (t_ptr, &t, sizeof t);
583
584 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
f60a7648
AW
585
586#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
7f22442b
AW
587 /* Cache the current thread in TLS for faster lookup. */
588 scm_i_current_thread = t_ptr;
f60a7648 589#endif
9de87eea 590
7f22442b
AW
591 scm_i_pthread_mutex_lock (&thread_admin_mutex);
592 t_ptr->next_thread = all_threads;
593 all_threads = t_ptr;
594 thread_count++;
595 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
596
597 GC_enable ();
598 }
d823b11b
MV
599}
600
9de87eea 601/* Perform second stage of thread initialisation, in guile mode.
d823b11b 602 */
9de87eea
MV
603static void
604guilify_self_2 (SCM parent)
d823b11b 605{
9de87eea
MV
606 scm_i_thread *t = SCM_I_CURRENT_THREAD;
607
72e6b608
LC
608 t->guile_mode = 1;
609
9de87eea 610 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
c812243b 611
9de87eea
MV
612 t->continuation_root = scm_cons (t->handle, SCM_EOL);
613 t->continuation_base = t->base;
2bbe1533 614 t->vm = SCM_BOOL_F;
9de87eea
MV
615
616 if (scm_is_true (parent))
617 t->dynamic_state = scm_make_dynamic_state (parent);
618 else
619 t->dynamic_state = scm_i_make_initial_dynamic_state ();
620
621 t->join_queue = make_queue ();
622 t->block_asyncs = 0;
d823b11b
MV
623}
624
6180e336
NJ
625\f
626/*** Fat mutexes */
627
628/* We implement our own mutex type since we want them to be 'fair', we
629 want to do fancy things while waiting for them (like running
630 asyncs) and we might want to add things that are nice for
631 debugging.
632*/
633
634typedef struct {
635 scm_i_pthread_mutex_t lock;
636 SCM owner;
adc085f1 637 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
6180e336 638
adc085f1 639 int recursive; /* allow recursive locking? */
6180e336
NJ
640 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
641 int allow_external_unlock; /* is it an error to unlock a mutex that is not
642 owned by the current thread? */
643
644 SCM waiting; /* the threads waiting for this mutex. */
645} fat_mutex;
646
647#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
648#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
649
2002f1f8
AW
650static SCM
651call_cleanup (void *data)
652{
653 SCM *proc_p = data;
654 return scm_call_0 (*proc_p);
655}
656
9de87eea 657/* Perform thread tear-down, in guile mode.
d823b11b 658 */
9de87eea
MV
659static void *
660do_thread_exit (void *v)
661{
2e77f720
LC
662 scm_i_thread *t = (scm_i_thread *) v;
663
664 if (!scm_is_false (t->cleanup_handler))
665 {
666 SCM ptr = t->cleanup_handler;
667
668 t->cleanup_handler = SCM_BOOL_F;
669 t->result = scm_internal_catch (SCM_BOOL_T,
2002f1f8 670 call_cleanup, &ptr,
2e77f720
LC
671 scm_handle_by_message_noexit, NULL);
672 }
9de87eea 673
86a597f8 674 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
9de87eea
MV
675
676 t->exited = 1;
0c97d7dd
MV
677 close (t->sleep_pipe[0]);
678 close (t->sleep_pipe[1]);
9de87eea
MV
679 while (scm_is_true (unblock_from_queue (t->join_queue)))
680 ;
9de87eea 681
74926120 682 while (!scm_is_null (t->mutexes))
6180e336 683 {
a141db86 684 SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);
74926120 685
a141db86 686 if (scm_is_true (mutex))
a0faf7dd
LC
687 {
688 fat_mutex *m = SCM_MUTEX_DATA (mutex);
689
690 scm_i_pthread_mutex_lock (&m->lock);
f57fdf07
LC
691
692 /* Since MUTEX is in `t->mutexes', T must be its owner. */
693 assert (scm_is_eq (m->owner, t->handle));
694
a0faf7dd 695 unblock_from_queue (m->waiting);
f57fdf07 696
a0faf7dd
LC
697 scm_i_pthread_mutex_unlock (&m->lock);
698 }
6180e336 699
a141db86 700 t->mutexes = scm_cdr (t->mutexes);
6180e336
NJ
701 }
702
86a597f8 703 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 704
9de87eea
MV
705 return NULL;
706}
707
f60a7648
AW
708static void *
709do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
710{
7f22442b 711 /* Won't hurt if we are already registered. */
8e76ce94 712#if SCM_USE_PTHREAD_THREADS
7f22442b 713 GC_register_my_thread (sb);
8e76ce94 714#endif
f60a7648 715
7f22442b 716 return scm_with_guile (do_thread_exit, v);
f60a7648
AW
717}
718
d823b11b 719static void
9de87eea 720on_thread_exit (void *v)
d823b11b 721{
29776e85 722 /* This handler is executed in non-guile mode. */
2e77f720 723 scm_i_thread *t = (scm_i_thread *) v, **tp;
0c97d7dd 724
b8441577
LC
725 /* If we were canceled, we were unable to clear `t->guile_mode', so do
726 it here. */
727 t->guile_mode = 0;
728
d2a51087
NJ
729 /* If this thread was cancelled while doing a cond wait, it will
730 still have a mutex locked, so we unlock it here. */
731 if (t->held_mutex)
732 {
733 scm_i_pthread_mutex_unlock (t->held_mutex);
734 t->held_mutex = NULL;
735 }
736
f60a7648
AW
737 /* Reinstate the current thread for purposes of scm_with_guile
738 guile-mode cleanup handlers. Only really needed in the non-TLS
739 case but it doesn't hurt to be consistent. */
740 scm_i_pthread_setspecific (scm_i_thread_key, t);
0c97d7dd 741
7f22442b
AW
742 /* Scheme-level thread finalizers and other cleanup needs to happen in
743 guile mode. */
744 GC_call_with_stack_base (do_thread_exit_trampoline, t);
0c97d7dd
MV
745
746 /* Removing ourself from the list of all threads needs to happen in
747 non-guile mode since all SCM values on our stack become
29776e85 748 unprotected once we are no longer in the list. */
0c97d7dd
MV
749 scm_i_pthread_mutex_lock (&thread_admin_mutex);
750 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
751 if (*tp == t)
752 {
753 *tp = t->next_thread;
c058db8a
LC
754
755 /* GC-robust */
756 t->next_thread = NULL;
757
0c97d7dd
MV
758 break;
759 }
760 thread_count--;
2e77f720
LC
761
762 /* If there's only one other thread, it could be the signal delivery
763 thread, so we need to notify it to shut down by closing its read pipe.
764 If it's not the signal delivery thread, then closing the read pipe isn't
765 going to hurt. */
766 if (thread_count <= 1)
767 scm_i_close_signal_pipe ();
768
0c97d7dd
MV
769 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
770
f60a7648 771 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
7f22442b 772
8e76ce94 773#if SCM_USE_PTHREAD_THREADS
7f22442b 774 GC_unregister_my_thread ();
653ccd78 775#endif
d823b11b
MV
776}
777
9de87eea 778static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
d823b11b 779
9de87eea
MV
780static void
781init_thread_key (void)
782{
f60a7648 783 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
9de87eea 784}
d823b11b 785
cde24ce1
AW
786/* Perform any initializations necessary to make the current thread
787 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
788 if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F, in
   which case the default dynamic state is used.

   Returns zero when the thread was already known to Guile; otherwise
   returns 1.

   Note that it could be the case that the thread was known to Guile,
   but not in guile mode (because we are within a scm_without_guile
   call).  Check SCM_I_CURRENT_THREAD->guile_mode to be sure.  New
   threads are put into guile mode implicitly. */
9de87eea
MV
802
803static int
12c1d861 804scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
d823b11b 805{
9de87eea
MV
806 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
807
cde24ce1
AW
808 if (SCM_I_CURRENT_THREAD)
809 {
810 /* Thread is already known to Guile.
811 */
812 return 0;
813 }
814 else
9de87eea
MV
815 {
816 /* This thread has not been guilified yet.
817 */
818
819 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
820 if (scm_initialized_p == 0)
821 {
822 /* First thread ever to enter Guile. Run the full
823 initialization.
824 */
825 scm_i_init_guile (base);
12c1d861 826
8e76ce94 827#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
12c1d861
AW
828 /* Allow other threads to come in later. */
829 GC_allow_register_threads ();
4000d064 830#endif
12c1d861 831
9de87eea
MV
832 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
833 }
834 else
835 {
836 /* Guile is already initialized, but this thread enters it for
837 the first time. Only initialize this thread.
838 */
839 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
12c1d861
AW
840
841 /* Register this thread with libgc. */
8e76ce94 842#if SCM_USE_PTHREAD_THREADS
12c1d861 843 GC_register_my_thread (base);
8e76ce94 844#endif
12c1d861 845
9de87eea
MV
846 guilify_self_1 (base);
847 guilify_self_2 (parent);
848 }
849 return 1;
850 }
d823b11b
MV
851}
852
12c1d861
AW
853void
854scm_init_guile ()
d823b11b 855{
12c1d861
AW
856 struct GC_stack_base stack_base;
857
858 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
859 scm_i_init_thread_for_guile (&stack_base,
860 scm_i_default_dynamic_state);
9de87eea
MV
861 else
862 {
12c1d861 863 fprintf (stderr, "Failed to get stack base for current thread.\n");
4765b28f 864 exit (EXIT_FAILURE);
9de87eea 865 }
a54a94b3
MV
866}
867
12c1d861 868struct with_guile_args
cde24ce1
AW
869{
870 GC_fn_type func;
871 void *data;
12c1d861 872 SCM parent;
cde24ce1
AW
873};
874
875static void *
876with_guile_trampoline (void *data)
877{
12c1d861 878 struct with_guile_args *args = data;
cde24ce1
AW
879
880 return scm_c_with_continuation_barrier (args->func, args->data);
881}
882
12c1d861
AW
883static void *
884with_guile_and_parent (struct GC_stack_base *base, void *data)
9de87eea
MV
885{
886 void *res;
cde24ce1
AW
887 int new_thread;
888 scm_i_thread *t;
12c1d861 889 struct with_guile_args *args = data;
2e77f720 890
12c1d861 891 new_thread = scm_i_init_thread_for_guile (base, args->parent);
cde24ce1
AW
892 t = SCM_I_CURRENT_THREAD;
893 if (new_thread)
2e77f720 894 {
cde24ce1
AW
895 /* We are in Guile mode. */
896 assert (t->guile_mode);
897
12c1d861 898 res = scm_c_with_continuation_barrier (args->func, args->data);
cde24ce1
AW
899
900 /* Leave Guile mode. */
901 t->guile_mode = 0;
902 }
903 else if (t->guile_mode)
904 {
905 /* Already in Guile mode. */
12c1d861 906 res = scm_c_with_continuation_barrier (args->func, args->data);
2e77f720 907 }
74926120 908 else
cde24ce1 909 {
cde24ce1
AW
910 /* We are not in Guile mode, either because we are not within a
911 scm_with_guile, or because we are within a scm_without_guile.
72e6b608 912
cde24ce1
AW
913 This call to scm_with_guile() could happen from anywhere on the
914 stack, and in particular lower on the stack than when it was
915 when this thread was first guilified. Thus, `base' must be
916 updated. */
917#if SCM_STACK_GROWS_UP
12c1d861
AW
918 if (SCM_STACK_PTR (base->mem_base) < t->base)
919 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 920#else
12c1d861
AW
921 if (SCM_STACK_PTR (base->mem_base) > t->base)
922 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 923#endif
72e6b608 924
cde24ce1 925 t->guile_mode = 1;
12c1d861 926 res = with_gc_active (with_guile_trampoline, args);
cde24ce1
AW
927 t->guile_mode = 0;
928 }
929 return res;
72e6b608 930}
3d1af79f 931
12c1d861
AW
932static void *
933scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
934{
935 struct with_guile_args args;
936
937 args.func = func;
938 args.data = data;
939 args.parent = parent;
940
941 return GC_call_with_stack_base (with_guile_and_parent, &args);
942}
943
944void *
945scm_with_guile (void *(*func)(void *), void *data)
946{
947 return scm_i_with_guile_and_parent (func, data,
948 scm_i_default_dynamic_state);
949}
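
/* Illustrative sketch, not part of the original source: entering Guile
   mode from plain C code via scm_with_guile.  The helper name
   `do_scheme_work' is hypothetical.

   static void *
   do_scheme_work (void *data)
   {
     // Safe to allocate SCM objects and call libguile from here.
     scm_c_eval_string ("(display \"hello from a guile-mode thread\n\")");
     return NULL;
   }

   ...
   scm_with_guile (do_scheme_work, NULL);
*/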
950
9de87eea
MV
951void *
952scm_without_guile (void *(*func)(void *), void *data)
d823b11b 953{
72e6b608 954 void *result;
cde24ce1 955 scm_i_thread *t = SCM_I_CURRENT_THREAD;
72e6b608 956
cde24ce1 957 if (t->guile_mode)
72e6b608 958 {
cde24ce1
AW
959 SCM_I_CURRENT_THREAD->guile_mode = 0;
960 result = with_gc_inactive (func, data);
961 SCM_I_CURRENT_THREAD->guile_mode = 1;
72e6b608
LC
962 }
963 else
cde24ce1 964 /* Otherwise we're not in guile mode, so nothing to do. */
72e6b608
LC
965 result = func (data);
966
967 return result;
9de87eea
MV
968}
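
/* Illustrative sketch, not part of the original source: temporarily
   leaving Guile mode around a blocking system call so the collector is
   not held up by this thread; `fd' and `buf' are hypothetical.

   static void *
   do_blocking_read (void *data)
   {
     // No SCM allocation or libguile calls in here.
     read (fd, buf, sizeof buf);
     return NULL;
   }

   ...
   scm_without_guile (do_blocking_read, NULL);
*/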
969
72e6b608 970\f
9de87eea
MV
971/*** Thread creation */
972
973typedef struct {
974 SCM parent;
975 SCM thunk;
976 SCM handler;
76da80e7 977 SCM thread;
9de87eea
MV
978 scm_i_pthread_mutex_t mutex;
979 scm_i_pthread_cond_t cond;
980} launch_data;
d823b11b 981
9de87eea
MV
982static void *
983really_launch (void *d)
984{
985 launch_data *data = (launch_data *)d;
986 SCM thunk = data->thunk, handler = data->handler;
987 scm_i_thread *t;
d823b11b 988
9de87eea 989 t = SCM_I_CURRENT_THREAD;
a54a94b3 990
9de87eea
MV
991 scm_i_scm_pthread_mutex_lock (&data->mutex);
992 data->thread = scm_current_thread ();
993 scm_i_pthread_cond_signal (&data->cond);
994 scm_i_pthread_mutex_unlock (&data->mutex);
995
996 if (SCM_UNBNDP (handler))
997 t->result = scm_call_0 (thunk);
998 else
999 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
1000
1001 return 0;
d823b11b
MV
1002}
1003
9de87eea
MV
1004static void *
1005launch_thread (void *d)
1006{
1007 launch_data *data = (launch_data *)d;
1008 scm_i_pthread_detach (scm_i_pthread_self ());
1009 scm_i_with_guile_and_parent (really_launch, d, data->parent);
1010 return NULL;
1011}
1012
1013SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
d823b11b 1014 (SCM thunk, SCM handler),
9de87eea
MV
1015 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
1016 "returning a new thread object representing the thread. The procedure\n"
1017 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
1018 "\n"
1019 "When @var{handler} is specified, then @var{thunk} is called from\n"
1020 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
1021 "handler. This catch is established inside the continuation barrier.\n"
1022 "\n"
1023 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
1024 "the @emph{exit value} of the thread and the thread is terminated.")
d823b11b
MV
1025#define FUNC_NAME s_scm_call_with_new_thread
1026{
9de87eea
MV
1027 launch_data data;
1028 scm_i_pthread_t id;
1029 int err;
d823b11b 1030
9de87eea
MV
1031 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
1032 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
1033 handler, SCM_ARG2, FUNC_NAME);
1034
9f7537dc 1035 GC_collect_a_little ();
9de87eea
MV
1036 data.parent = scm_current_dynamic_state ();
1037 data.thunk = thunk;
1038 data.handler = handler;
1039 data.thread = SCM_BOOL_F;
1040 scm_i_pthread_mutex_init (&data.mutex, NULL);
1041 scm_i_pthread_cond_init (&data.cond, NULL);
1042
1043 scm_i_scm_pthread_mutex_lock (&data.mutex);
1044 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
1045 if (err)
1046 {
1047 scm_i_pthread_mutex_unlock (&data.mutex);
1048 errno = err;
1049 scm_syserror (NULL);
1050 }
1051 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1052 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1053
9de87eea 1054 return data.thread;
d823b11b
MV
1055}
1056#undef FUNC_NAME
1057
9de87eea
MV
1058typedef struct {
1059 SCM parent;
1060 scm_t_catch_body body;
1061 void *body_data;
1062 scm_t_catch_handler handler;
1063 void *handler_data;
1064 SCM thread;
1065 scm_i_pthread_mutex_t mutex;
1066 scm_i_pthread_cond_t cond;
1067} spawn_data;
1068
1069static void *
1070really_spawn (void *d)
1071{
1072 spawn_data *data = (spawn_data *)d;
1073 scm_t_catch_body body = data->body;
1074 void *body_data = data->body_data;
1075 scm_t_catch_handler handler = data->handler;
1076 void *handler_data = data->handler_data;
1077 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1078
1079 scm_i_scm_pthread_mutex_lock (&data->mutex);
1080 data->thread = scm_current_thread ();
1081 scm_i_pthread_cond_signal (&data->cond);
1082 scm_i_pthread_mutex_unlock (&data->mutex);
1083
1084 if (handler == NULL)
1085 t->result = body (body_data);
1086 else
1087 t->result = scm_internal_catch (SCM_BOOL_T,
1088 body, body_data,
1089 handler, handler_data);
1090
1091 return 0;
1092}
1093
1094static void *
1095spawn_thread (void *d)
1096{
1097 spawn_data *data = (spawn_data *)d;
1098 scm_i_pthread_detach (scm_i_pthread_self ());
1099 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1100 return NULL;
1101}
1102
1103SCM
1104scm_spawn_thread (scm_t_catch_body body, void *body_data,
1105 scm_t_catch_handler handler, void *handler_data)
1106{
1107 spawn_data data;
1108 scm_i_pthread_t id;
1109 int err;
1110
1111 data.parent = scm_current_dynamic_state ();
1112 data.body = body;
1113 data.body_data = body_data;
1114 data.handler = handler;
1115 data.handler_data = handler_data;
1116 data.thread = SCM_BOOL_F;
1117 scm_i_pthread_mutex_init (&data.mutex, NULL);
1118 scm_i_pthread_cond_init (&data.cond, NULL);
1119
1120 scm_i_scm_pthread_mutex_lock (&data.mutex);
1121 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1122 if (err)
1123 {
1124 scm_i_pthread_mutex_unlock (&data.mutex);
1125 errno = err;
1126 scm_syserror (NULL);
1127 }
1128 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1129 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1130
0f4f2d9a
LC
1131 assert (SCM_I_IS_THREAD (data.thread));
1132
9de87eea
MV
1133 return data.thread;
1134}
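
/* Illustrative sketch, not part of the original source, of spawning a
   thread from C; it assumes the usual scm_t_catch_body and
   scm_t_catch_handler signatures, and the helper names are hypothetical.

   static SCM
   worker (void *data)
   {
     return scm_from_int (42);
   }

   static SCM
   worker_handler (void *data, SCM key, SCM args)
   {
     return SCM_BOOL_F;   // value returned if `worker' throws
   }

   ...
   SCM thread = scm_spawn_thread (worker, NULL, worker_handler, NULL);
   SCM result = scm_join_thread (thread);
*/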
1135
29717c89
MD
1136SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1137 (),
1138"Move the calling thread to the end of the scheduling queue.")
1139#define FUNC_NAME s_scm_yield
1140{
9de87eea 1141 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1142}
1143#undef FUNC_NAME
1144
2e77f720
LC
1145SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1146 (SCM thread),
1147"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1148"cannot be the current thread, and if @var{thread} has already terminated or "
1149"been signaled to terminate, this function is a no-op.")
1150#define FUNC_NAME s_scm_cancel_thread
1151{
1152 scm_i_thread *t = NULL;
1153
1154 SCM_VALIDATE_THREAD (1, thread);
1155 t = SCM_I_THREAD_DATA (thread);
86a597f8 1156 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1157 if (!t->canceled)
1158 {
1159 t->canceled = 1;
86a597f8 1160 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1161 scm_i_pthread_cancel (t->pthread);
1162 }
1163 else
86a597f8 1164 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1165
1166 return SCM_UNSPECIFIED;
1167}
1168#undef FUNC_NAME
1169
1170SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1171 (SCM thread, SCM proc),
1172"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1173"This handler will be called when the thread exits.")
1174#define FUNC_NAME s_scm_set_thread_cleanup_x
1175{
1176 scm_i_thread *t;
1177
1178 SCM_VALIDATE_THREAD (1, thread);
1179 if (!scm_is_false (proc))
1180 SCM_VALIDATE_THUNK (2, proc);
1181
2e77f720 1182 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1183 scm_i_pthread_mutex_lock (&t->admin_mutex);
1184
2e77f720
LC
1185 if (!(t->exited || t->canceled))
1186 t->cleanup_handler = proc;
1187
86a597f8 1188 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1189
1190 return SCM_UNSPECIFIED;
1191}
1192#undef FUNC_NAME
1193
1194SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1195 (SCM thread),
1196"Return the cleanup handler installed for the thread @var{thread}.")
1197#define FUNC_NAME s_scm_thread_cleanup
1198{
1199 scm_i_thread *t;
1200 SCM ret;
1201
1202 SCM_VALIDATE_THREAD (1, thread);
1203
2e77f720 1204 t = SCM_I_THREAD_DATA (thread);
86a597f8 1205 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1206 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1207 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1208
1209 return ret;
1210}
1211#undef FUNC_NAME
1212
6180e336
NJ
1213SCM scm_join_thread (SCM thread)
1214{
1215 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1216}
1217
1218SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1219 (SCM thread, SCM timeout, SCM timeoutval),
d823b11b
MV
1220"Suspend execution of the calling thread until the target @var{thread} "
1221"terminates, unless the target @var{thread} has already terminated. ")
6180e336 1222#define FUNC_NAME s_scm_join_thread_timed
5f05c406 1223{
9de87eea 1224 scm_i_thread *t;
6180e336
NJ
1225 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1226 SCM res = SCM_BOOL_F;
1227
1228 if (! (SCM_UNBNDP (timeoutval)))
1229 res = timeoutval;
d823b11b
MV
1230
1231 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1232 if (scm_is_eq (scm_current_thread (), thread))
2e77f720 1233 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
d823b11b 1234
9de87eea 1235 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1236 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1237
6180e336
NJ
1238 if (! SCM_UNBNDP (timeout))
1239 {
1240 to_timespec (timeout, &ctimeout);
1241 timeout_ptr = &ctimeout;
1242 }
1243
1244 if (t->exited)
1245 res = t->result;
1246 else
d823b11b 1247 {
9de87eea
MV
1248 while (1)
1249 {
74926120 1250 int err = block_self (t->join_queue, thread, &t->admin_mutex,
6180e336
NJ
1251 timeout_ptr);
1252 if (err == 0)
1253 {
1254 if (t->exited)
1255 {
1256 res = t->result;
1257 break;
1258 }
1259 }
1260 else if (err == ETIMEDOUT)
9de87eea 1261 break;
6180e336 1262
86a597f8 1263 scm_i_pthread_mutex_unlock (&t->admin_mutex);
9de87eea 1264 SCM_TICK;
86a597f8 1265 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
21346c4f
NJ
1266
1267 /* Check for exit again, since we just released and
1268 reacquired the admin mutex, before the next block_self
1269 call (which would block forever if t has already
1270 exited). */
1271 if (t->exited)
1272 {
1273 res = t->result;
1274 break;
1275 }
9de87eea 1276 }
d823b11b 1277 }
9de87eea 1278
86a597f8 1279 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 1280
d823b11b 1281 return res;
5f05c406
MV
1282}
1283#undef FUNC_NAME
1284
6180e336
NJ
1285SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1286 (SCM obj),
1287 "Return @code{#t} if @var{obj} is a thread.")
1288#define FUNC_NAME s_scm_thread_p
1289{
1290 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1291}
1292#undef FUNC_NAME
5f05c406 1293
4079f87e 1294
9de87eea
MV
1295static size_t
1296fat_mutex_free (SCM mx)
76da80e7 1297{
9de87eea
MV
1298 fat_mutex *m = SCM_MUTEX_DATA (mx);
1299 scm_i_pthread_mutex_destroy (&m->lock);
76da80e7
MV
1300 return 0;
1301}
1302
1303static int
9de87eea 1304fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1305{
9de87eea 1306 fat_mutex *m = SCM_MUTEX_DATA (mx);
0607ebbf 1307 scm_puts_unlocked ("#<mutex ", port);
9de87eea 1308 scm_uintprint ((scm_t_bits)m, 16, port);
0607ebbf 1309 scm_puts_unlocked (">", port);
9de87eea 1310 return 1;
76da80e7
MV
1311}
1312
76da80e7 1313static SCM
6180e336 1314make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1315{
9de87eea
MV
1316 fat_mutex *m;
1317 SCM mx;
1318
1319 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1320 scm_i_pthread_mutex_init (&m->lock, NULL);
1321 m->owner = SCM_BOOL_F;
adc085f1 1322 m->level = 0;
6180e336 1323
adc085f1 1324 m->recursive = recursive;
6180e336
NJ
1325 m->unchecked_unlock = unchecked_unlock;
1326 m->allow_external_unlock = external_unlock;
1327
9de87eea
MV
1328 m->waiting = SCM_EOL;
1329 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1330 m->waiting = make_queue ();
1331 return mx;
76da80e7
MV
1332}
1333
6180e336
NJ
1334SCM scm_make_mutex (void)
1335{
1336 return scm_make_mutex_with_flags (SCM_EOL);
1337}
1338
2a1d0688
NJ
1339SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1340SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1341SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1342
1343SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1344 (SCM flags),
9de87eea 1345 "Create a new mutex. ")
6180e336 1346#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1347{
6180e336
NJ
1348 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1349
1350 SCM ptr = flags;
1351 while (! scm_is_null (ptr))
1352 {
1353 SCM flag = SCM_CAR (ptr);
1354 if (scm_is_eq (flag, unchecked_unlock_sym))
1355 unchecked_unlock = 1;
1356 else if (scm_is_eq (flag, allow_external_unlock_sym))
1357 external_unlock = 1;
1358 else if (scm_is_eq (flag, recursive_sym))
1359 recursive = 1;
74926120 1360 else
2a1d0688 1361 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1362 ptr = SCM_CDR (ptr);
1363 }
1364 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
76da80e7
MV
1365}
1366#undef FUNC_NAME
1367
9de87eea 1368SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
9bc4701c 1369 (void),
9de87eea
MV
1370 "Create a new recursive mutex. ")
1371#define FUNC_NAME s_scm_make_recursive_mutex
9bc4701c 1372{
6180e336 1373 return make_fat_mutex (1, 0, 0);
9bc4701c
MD
1374}
1375#undef FUNC_NAME
1376
6180e336
NJ
1377SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1378
1379static SCM
adc085f1 1380fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
9de87eea
MV
1381{
1382 fat_mutex *m = SCM_MUTEX_DATA (mutex);
6180e336 1383
adc085f1 1384 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
6180e336
NJ
1385 SCM err = SCM_BOOL_F;
1386
1387 struct timeval current_time;
9de87eea
MV
1388
1389 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1390
1391 while (1)
9de87eea 1392 {
adc085f1 1393 if (m->level == 0)
6180e336 1394 {
adc085f1 1395 m->owner = new_owner;
6180e336 1396 m->level++;
74926120 1397
adc085f1 1398 if (SCM_I_IS_THREAD (new_owner))
6180e336 1399 {
adc085f1 1400 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
ccb80964 1401
4f39f31e
LC
1402 /* FIXME: The order in which `t->admin_mutex' and
1403 `m->lock' are taken differs from that in
1404 `on_thread_exit', potentially leading to deadlocks. */
6180e336 1405 scm_i_pthread_mutex_lock (&t->admin_mutex);
a0faf7dd
LC
1406
1407 /* Only keep a weak reference to MUTEX so that it's not
f57fdf07
LC
1408 retained when not referenced elsewhere (bug #27450).
1409 The weak pair itself is eventually removed when MUTEX
1410 is unlocked. Note that `t->mutexes' lists mutexes
1411 currently held by T, so it should be small. */
a141db86
AW
1412 t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
1413 t->mutexes);
a0faf7dd 1414
6180e336 1415 scm_i_pthread_mutex_unlock (&t->admin_mutex);
6180e336 1416 }
adc085f1
JG
1417 *ret = 1;
1418 break;
1419 }
1420 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1421 {
1422 m->owner = new_owner;
1423 err = scm_cons (scm_abandoned_mutex_error_key,
1424 scm_from_locale_string ("lock obtained on abandoned "
1425 "mutex"));
1426 *ret = 1;
1427 break;
1428 }
1429 else if (scm_is_eq (m->owner, new_owner))
1430 {
1431 if (m->recursive)
1432 {
1433 m->level++;
74926120 1434 *ret = 1;
adc085f1
JG
1435 }
1436 else
6180e336 1437 {
adc085f1
JG
1438 err = scm_cons (scm_misc_error_key,
1439 scm_from_locale_string ("mutex already locked "
1440 "by thread"));
1441 *ret = 0;
1442 }
74926120 1443 break;
adc085f1 1444 }
9de87eea 1445 else
9de87eea 1446 {
74926120 1447 if (timeout != NULL)
adc085f1
JG
1448 {
1449 gettimeofday (&current_time, NULL);
1450 if (current_time.tv_sec > timeout->tv_sec ||
1451 (current_time.tv_sec == timeout->tv_sec &&
1452 current_time.tv_usec * 1000 > timeout->tv_nsec))
6180e336 1453 {
adc085f1
JG
1454 *ret = 0;
1455 break;
6180e336 1456 }
6180e336 1457 }
37a52039 1458 block_self (m->waiting, mutex, &m->lock, timeout);
9de87eea
MV
1459 scm_i_pthread_mutex_unlock (&m->lock);
1460 SCM_TICK;
1461 scm_i_scm_pthread_mutex_lock (&m->lock);
1462 }
1463 }
1464 scm_i_pthread_mutex_unlock (&m->lock);
6180e336 1465 return err;
9de87eea
MV
1466}
1467
6180e336
NJ
1468SCM scm_lock_mutex (SCM mx)
1469{
adc085f1 1470 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
6180e336
NJ
1471}
1472
adc085f1
JG
1473SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1474 (SCM m, SCM timeout, SCM owner),
b7e64f8b
BT
1475 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1476 "thread blocks until the mutex becomes available. The function\n"
1477 "returns when the calling thread owns the lock on @var{m}.\n"
1478 "Locking a mutex that a thread already owns will succeed right\n"
1479 "away and will not block the thread. That is, Guile's mutexes\n"
1480 "are @emph{recursive}.")
6180e336 1481#define FUNC_NAME s_scm_lock_mutex_timed
9bc4701c 1482{
6180e336
NJ
1483 SCM exception;
1484 int ret = 0;
1485 scm_t_timespec cwaittime, *waittime = NULL;
76da80e7 1486
6180e336
NJ
1487 SCM_VALIDATE_MUTEX (1, m);
1488
1489 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1490 {
1491 to_timespec (timeout, &cwaittime);
1492 waittime = &cwaittime;
1493 }
1494
97ec95b7
LC
1495 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1496 SCM_VALIDATE_THREAD (3, owner);
1497
adc085f1 1498 exception = fat_mutex_lock (m, waittime, owner, &ret);
6180e336
NJ
1499 if (!scm_is_false (exception))
1500 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1501 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c 1502}
76da80e7 1503#undef FUNC_NAME
9bc4701c 1504
9ec1573d
MW
1505static void
1506lock_mutex_return_void (SCM mx)
1507{
1508 (void) scm_lock_mutex (mx);
1509}
1510
1511static void
1512unlock_mutex_return_void (SCM mx)
1513{
1514 (void) scm_unlock_mutex (mx);
1515}
1516
a4d106c7 1517void
661ae7ab 1518scm_dynwind_lock_mutex (SCM mutex)
a4d106c7 1519{
9ec1573d 1520 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
661ae7ab 1521 SCM_F_WIND_EXPLICITLY);
9ec1573d 1522 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
661ae7ab 1523 SCM_F_WIND_EXPLICITLY);
a4d106c7
MV
1524}
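
/* Illustrative sketch, not part of the original source: holding a mutex
   for the extent of a dynwind context so it is released even on a
   non-local exit; `mutex' is a hypothetical fat mutex.

   scm_dynwind_begin (0);
   scm_dynwind_lock_mutex (mutex);
   ...   // code that may throw or invoke continuations
   scm_dynwind_end ();
*/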
1525
9bc4701c 1526SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
ba1b7223 1527 (SCM mutex),
9bc4701c
MD
1528"Try to lock @var{mutex}. If the mutex is already locked by someone "
1529"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1530#define FUNC_NAME s_scm_try_mutex
1531{
6180e336
NJ
1532 SCM exception;
1533 int ret = 0;
1534 scm_t_timespec cwaittime, *waittime = NULL;
9de87eea 1535
ba1b7223 1536 SCM_VALIDATE_MUTEX (1, mutex);
6180e336
NJ
1537
1538 to_timespec (scm_from_int(0), &cwaittime);
1539 waittime = &cwaittime;
74926120 1540
adc085f1 1541 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
6180e336
NJ
1542 if (!scm_is_false (exception))
1543 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1544 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9de87eea
MV
1545}
1546#undef FUNC_NAME
76da80e7 1547
6180e336
NJ
1548/*** Fat condition variables */
1549
1550typedef struct {
1551 scm_i_pthread_mutex_t lock;
1552 SCM waiting; /* the threads waiting for this condition. */
1553} fat_cond;
1554
1555#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1556#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1557
a141db86
AW
1558static void
1559remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
1560{
1561 SCM walk, prev;
1562
1563 for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
1564 walk = SCM_CDR (walk))
1565 {
1566 if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
1567 {
1568 if (scm_is_pair (prev))
1569 SCM_SETCDR (prev, SCM_CDR (walk));
1570 else
1571 t->mutexes = SCM_CDR (walk);
1572 break;
1573 }
1574 }
1575}
1576
6180e336
NJ
1577static int
1578fat_mutex_unlock (SCM mutex, SCM cond,
1579 const scm_t_timespec *waittime, int relock)
9de87eea 1580{
7f991c7d 1581 SCM owner;
6180e336
NJ
1582 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1583 fat_cond *c = NULL;
1584 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1585 int err = 0, ret = 0;
9de87eea
MV
1586
1587 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1 1588
7f991c7d 1589 owner = m->owner;
adc085f1 1590
d31ae2c3 1591 if (!scm_is_eq (owner, t->handle))
9bc4701c 1592 {
adc085f1 1593 if (m->level == 0)
6180e336
NJ
1594 {
1595 if (!m->unchecked_unlock)
2a1d0688
NJ
1596 {
1597 scm_i_pthread_mutex_unlock (&m->lock);
1598 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1599 }
d31ae2c3 1600 owner = t->handle;
6180e336
NJ
1601 }
1602 else if (!m->allow_external_unlock)
2a1d0688
NJ
1603 {
1604 scm_i_pthread_mutex_unlock (&m->lock);
1605 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1606 }
6180e336
NJ
1607 }
1608
1609 if (! (SCM_UNBNDP (cond)))
1610 {
6180e336
NJ
1611 c = SCM_CONDVAR_DATA (cond);
1612 while (1)
1613 {
1614 int brk = 0;
1615
6180e336
NJ
1616 if (m->level > 0)
1617 m->level--;
adc085f1 1618 if (m->level == 0)
f57fdf07
LC
1619 {
1620 /* Change the owner of MUTEX. */
a141db86 1621 remove_mutex_from_thread (mutex, t);
f57fdf07
LC
1622 m->owner = unblock_from_queue (m->waiting);
1623 }
adc085f1 1624
6180e336 1625 t->block_asyncs++;
74926120 1626
d2a51087
NJ
1627 err = block_self (c->waiting, cond, &m->lock, waittime);
1628 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
1629
1630 if (err == 0)
1631 {
1632 ret = 1;
1633 brk = 1;
1634 }
1635 else if (err == ETIMEDOUT)
1636 {
1637 ret = 0;
1638 brk = 1;
1639 }
1640 else if (err != EINTR)
74926120 1641 {
6180e336 1642 errno = err;
6180e336 1643 scm_syserror (NULL);
74926120 1644 }
6180e336
NJ
1645
1646 if (brk)
1647 {
1648 if (relock)
adc085f1 1649 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
4201062d 1650 t->block_asyncs--;
6180e336
NJ
1651 break;
1652 }
74926120 1653
6180e336 1654 t->block_asyncs--;
27c6ebcb 1655 scm_async_tick ();
74926120 1656
6180e336
NJ
1657 scm_remember_upto_here_2 (cond, mutex);
1658
1659 scm_i_scm_pthread_mutex_lock (&m->lock);
1660 }
9bc4701c 1661 }
9de87eea 1662 else
6180e336
NJ
1663 {
1664 if (m->level > 0)
1665 m->level--;
74926120 1666 if (m->level == 0)
f57fdf07
LC
1667 {
1668 /* Change the owner of MUTEX. */
a141db86 1669 remove_mutex_from_thread (mutex, t);
f57fdf07
LC
1670 m->owner = unblock_from_queue (m->waiting);
1671 }
74926120 1672
6180e336
NJ
1673 scm_i_pthread_mutex_unlock (&m->lock);
1674 ret = 1;
1675 }
9de87eea 1676
6180e336 1677 return ret;
9bc4701c 1678}
9bc4701c 1679
6180e336
NJ
1680SCM scm_unlock_mutex (SCM mx)
1681{
1682 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
9bc4701c 1683}
9bc4701c 1684
6180e336
NJ
1685SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1686 (SCM mx, SCM cond, SCM timeout),
9bc4701c
MD
1687"Unlocks @var{mutex} if the calling thread owns the lock on "
1688"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1689"thread results in undefined behaviour. Once a mutex has been unlocked, "
1690"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1691"lock. Every call to @code{lock-mutex} by this thread must be matched "
1692"with a call to @code{unlock-mutex}. Only the last call to "
1693"@code{unlock-mutex} will actually unlock the mutex. ")
6180e336 1694#define FUNC_NAME s_scm_unlock_mutex_timed
9bc4701c 1695{
6180e336
NJ
1696 scm_t_timespec cwaittime, *waittime = NULL;
1697
9bc4701c 1698 SCM_VALIDATE_MUTEX (1, mx);
6180e336
NJ
1699 if (! (SCM_UNBNDP (cond)))
1700 {
1701 SCM_VALIDATE_CONDVAR (2, cond);
1702
1703 if (! (SCM_UNBNDP (timeout)))
1704 {
1705 to_timespec (timeout, &cwaittime);
1706 waittime = &cwaittime;
1707 }
1708 }
1709
1710 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c
MD
1711}
1712#undef FUNC_NAME
1713
6180e336
NJ
1714SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1715 (SCM obj),
1716 "Return @code{#t} if @var{obj} is a mutex.")
1717#define FUNC_NAME s_scm_mutex_p
1718{
1719 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1720}
74926120 1721#undef FUNC_NAME
9de87eea
MV
1722
1723SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1724 (SCM mx),
1725 "Return the thread owning @var{mx}, or @code{#f}.")
1726#define FUNC_NAME s_scm_mutex_owner
1727{
adc085f1
JG
1728 SCM owner;
1729 fat_mutex *m = NULL;
1730
9de87eea 1731 SCM_VALIDATE_MUTEX (1, mx);
adc085f1
JG
1732 m = SCM_MUTEX_DATA (mx);
1733 scm_i_pthread_mutex_lock (&m->lock);
1734 owner = m->owner;
1735 scm_i_pthread_mutex_unlock (&m->lock);
1736
1737 return owner;
9de87eea
MV
1738}
1739#undef FUNC_NAME
1740
1741SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1742 (SCM mx),
adc085f1 1743 "Return the lock level of mutex @var{mx}.")
9de87eea
MV
1744#define FUNC_NAME s_scm_mutex_level
1745{
1746 SCM_VALIDATE_MUTEX (1, mx);
1747 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1748}
1749#undef FUNC_NAME
1750
adc085f1
JG
1751SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1752 (SCM mx),
1753 "Returns @code{#t} if the mutex @var{mx} is locked.")
1754#define FUNC_NAME s_scm_mutex_locked_p
1755{
1756 SCM_VALIDATE_MUTEX (1, mx);
1757 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1758}
1759#undef FUNC_NAME
9de87eea 1760
9de87eea
MV
1761static int
1762fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1763{
1764 fat_cond *c = SCM_CONDVAR_DATA (cv);
0607ebbf 1765 scm_puts_unlocked ("#<condition-variable ", port);
9de87eea 1766 scm_uintprint ((scm_t_bits)c, 16, port);
0607ebbf 1767 scm_puts_unlocked (">", port);
9de87eea
MV
1768 return 1;
1769}
9bc4701c 1770
d823b11b
MV
1771SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1772 (void),
1773 "Make a new condition variable.")
1774#define FUNC_NAME s_scm_make_condition_variable
5f05c406 1775{
9de87eea
MV
1776 fat_cond *c;
1777 SCM cv;
1778
1779 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
9de87eea
MV
1780 c->waiting = SCM_EOL;
1781 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1782 c->waiting = make_queue ();
d823b11b 1783 return cv;
5f05c406 1784}
d823b11b 1785#undef FUNC_NAME
5f05c406 1786
d823b11b
MV
1787SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1788 (SCM cv, SCM mx, SCM t),
b7e64f8b
BT
1789"Wait until condition variable @var{cv} has been signalled. While waiting, "
1790"mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1791"is locked again when this function returns. When @var{t} is given, "
d823b11b
MV
1792"it specifies a point in time where the waiting should be aborted. It "
1793"can be either a integer as returned by @code{current-time} or a pair "
1794"as returned by @code{gettimeofday}. When the waiting is aborted the "
1795"mutex is locked and @code{#f} is returned. When the condition "
1796"variable is in fact signalled, the mutex is also locked and @code{#t} "
1797"is returned. ")
1798#define FUNC_NAME s_scm_timed_wait_condition_variable
5f05c406 1799{
9de87eea 1800 scm_t_timespec waittime, *waitptr = NULL;
d823b11b
MV
1801
1802 SCM_VALIDATE_CONDVAR (1, cv);
1803 SCM_VALIDATE_MUTEX (2, mx);
74926120 1804
d823b11b
MV
1805 if (!SCM_UNBNDP (t))
1806 {
6180e336 1807 to_timespec (t, &waittime);
9de87eea 1808 waitptr = &waittime;
d823b11b
MV
1809 }
1810
2a1d0688 1811 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
5f05c406 1812}
d823b11b 1813#undef FUNC_NAME

static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}

SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up one thread that is waiting for @var{cv}.")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

static void
fat_cond_broadcast (fat_cond *c)
{
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
}

SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up all threads that are waiting for @var{cv}.")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a condition variable.")
#define FUNC_NAME s_scm_condition_variable_p
{
  return SCM_CONDVARP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME


\f
/*** Select */

struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  int result;
  int errno_value;
};

static void *
do_std_select (void *args)
{
  struct select_args *select_args;

  select_args = (struct select_args *) args;

  select_args->result =
    select (select_args->nfds,
            select_args->read_fds, select_args->write_fds,
            select_args->except_fds, select_args->timeout);
  select_args->errno_value = errno;

  return NULL;
}

int
scm_std_select (int nfds,
                SELECT_TYPE *readfds,
                SELECT_TYPE *writefds,
                SELECT_TYPE *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  /* Also watch the thread's sleep pipe, so that other threads can
     interrupt this wait by writing to it.  */
  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd + 1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}
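
/* A rough sketch of a caller, kept in a comment; `fd' stands for any
   descriptor the caller owns.  Because the sleep pipe is folded into
   the read set above, the wait can be cut short by asyncs, in which
   case -1 is returned with errno set to EINTR, just as for a signal:

     fd_set fds;
     FD_ZERO (&fds);
     FD_SET (fd, &fds);
     if (scm_std_select (fd + 1, &fds, NULL, NULL, NULL) < 0
         && errno == EINTR)
       ... the wait was interrupted; let asyncs run and retry ...
*/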

/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

/* It seems reasonable not to run the procedures related to mutexes and
   condition variables within `GC_do_blocking ()', since (i) the GC can
   operate even without it, and (ii) the only potential gain would be in
   GC latency.  See
   http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
   for a discussion of the pros and cons.  */

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  int res = scm_i_pthread_mutex_lock (mutex);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *) data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
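
/* Illustrative sketch (comment only): the lock taken here is released
   however the enclosing dynwind context is left, including via a
   non-local exit such as an error throw.  `some_mutex' stands for any
   scm_i_pthread_mutex_t the caller owns:

     scm_dynwind_begin (0);
     scm_dynwind_pthread_mutex_lock (&some_mutex);
     ... code that may throw ...
     scm_dynwind_end ();
*/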

int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
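
/* Sketch of the intended use, in a comment; `m', `c' and `data_ready'
   are hypothetical.  Recording the mutex in `held_mutex' above lets
   cleanup code elsewhere in this file release the lock if the thread
   is cancelled while it waits:

     scm_pthread_mutex_lock (&m);
     while (!data_ready)
       scm_pthread_cond_wait (&c, &m);
     scm_i_pthread_mutex_unlock (&m);
*/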

#endif

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We cannot allocate while holding the thread_admin_mutex because
     of the way GC is done.  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
            (void),
            "Return the total number of processors of the machine, which\n"
            "is guaranteed to be at least 1. A ``processor'' here is a\n"
            "thread execution unit, which can be either:\n\n"
            "@itemize\n"
            "@item an execution core in a (possibly multi-core) chip, in a\n"
            "  (possibly multi-chip) module, in a single computer, or\n"
            "@item a thread execution unit inside a core in the case of\n"
            "  @dfn{hyper-threaded} CPUs.\n"
            "@end itemize\n\n"
            "Which of the two definitions is used is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME

SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
            (void),
            "Like @code{total-processor-count}, but return the number of\n"
            "processors available to the current process. See\n"
            "@code{setaffinity} and @code{getaffinity} for more\n"
            "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
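
/* Illustrative sketch (comment only): sizing a pool of worker threads
   from C.  `spawn_worker' is a hypothetical helper; the point is just
   that current-processor-count is the value to consult for how many
   threads can usefully run right now:

     unsigned long i, n;

     n = scm_to_ulong (scm_current_processor_count ());
     for (i = 0; i < n; i++)
       spawn_worker (i);
*/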


\f

static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.  */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
SCM_PTHREAD_ATFORK_LOCK_STATIC_MUTEX (scm_i_critical_section_mutex);

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
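
/* Illustrative sketch (comment only): a dynwind critical section
   around code that must not be reentered from asyncs.  Passing
   SCM_BOOL_F selects the shared recursive mutex that is set up in
   scm_init_threads below:

     scm_dynwind_begin (0);
     scm_dynwind_critical_section (SCM_BOOL_F);
     ... code that must run with asyncs blocked ...
     scm_dynwind_end ();
*/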

/*** Initialization */

scm_i_pthread_mutex_t scm_i_misc_mutex;
SCM_PTHREAD_ATFORK_LOCK_STATIC_MUTEX (scm_i_misc_mutex);

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

\f
/* IA64-specific things. */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *) 0x8000000000000000;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) (ctx->uc_mcontext.mc_special.bspstore
                   + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */


/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/