failing to load module in psyntax indicates an identifier is not macro
[bpt/guile.git] / libguile / threads.c
CommitLineData
b3da54d1
LC
1/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 * Free Software Foundation, Inc.
74926120 4 *
73be1d9e 5 * This library is free software; you can redistribute it and/or
53befeb7
NJ
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
7bfd3b9e 9 *
53befeb7
NJ
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
73be1d9e
MV
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
7bfd3b9e 14 *
73be1d9e
MV
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
53befeb7
NJ
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA
73be1d9e 19 */
1bbd0b84 20
1bbd0b84 21
7bfd3b9e 22\f
dbb605f5
LC
23#ifdef HAVE_CONFIG_H
24# include <config.h>
25#endif
7bfd3b9e 26
1c44468d 27#include "libguile/bdw-gc.h"
1810dc4e
RB
28#include "libguile/_scm.h"
29
b3da54d1 30#include <stdlib.h>
fcc5d734 31#if HAVE_UNISTD_H
d823b11b 32#include <unistd.h>
fcc5d734 33#endif
d823b11b 34#include <stdio.h>
8ab3d8a0
KR
35
36#ifdef HAVE_STRING_H
37#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
38#endif
39
fcc5d734 40#if HAVE_SYS_TIME_H
d823b11b 41#include <sys/time.h>
fcc5d734 42#endif
5f05c406 43
c1d5d6d7
AW
44#if HAVE_PTHREAD_NP_H
45# include <pthread_np.h>
46#endif
47
6ab4de61
AW
48#include <sys/select.h>
49
f57fdf07 50#include <assert.h>
32f94bf2 51#include <fcntl.h>
d20912e6 52#include <nproc.h>
f57fdf07 53
d823b11b
MV
54#include "libguile/validate.h"
55#include "libguile/root.h"
56#include "libguile/eval.h"
57#include "libguile/async.h"
58#include "libguile/ports.h"
59#include "libguile/threads.h"
a0599745 60#include "libguile/dynwind.h"
d823b11b 61#include "libguile/iselect.h"
9de87eea
MV
62#include "libguile/fluids.h"
63#include "libguile/continuations.h"
2b829bbb 64#include "libguile/gc.h"
9de87eea 65#include "libguile/init.h"
2e77f720 66#include "libguile/scmsigs.h"
6180e336 67#include "libguile/strings.h"
a0faf7dd 68#include "libguile/weaks.h"
7bfd3b9e 69
634aa8de
LC
70#include <full-read.h>
71
cde24ce1
AW
72
73\f
74
75/* First some libgc shims. */
76
77/* Make sure GC_fn_type is defined; it is missing from the public
78 headers of GC 7.1 and earlier. */
79#ifndef HAVE_GC_FN_TYPE
80typedef void * (* GC_fn_type) (void *);
81#endif
82
83
5f0d2951
AW
84#ifndef GC_SUCCESS
85#define GC_SUCCESS 0
86#endif
87
88#ifndef GC_UNIMPLEMENTED
89#define GC_UNIMPLEMENTED 3
90#endif
91
92/* Likewise struct GC_stack_base is missing before 7.1. */
93#ifndef HAVE_GC_STACK_BASE
12c1d861
AW
94struct GC_stack_base {
95 void * mem_base; /* Base of memory stack. */
96#ifdef __ia64__
97 void * reg_base; /* Base of separate register stack. */
98#endif
99};
5f0d2951
AW
100
101static int
987b8160 102GC_register_my_thread (struct GC_stack_base *stack_base)
5f0d2951
AW
103{
104 return GC_UNIMPLEMENTED;
105}
106
107static void
108GC_unregister_my_thread ()
109{
110}
111
12c1d861
AW
112#if !SCM_USE_PTHREAD_THREADS
113/* No threads; we can just use GC_stackbottom. */
5f0d2951 114static void *
12c1d861
AW
115get_thread_stack_base ()
116{
117 return GC_stackbottom;
118}
119
120#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
121 && defined PTHREAD_ATTR_GETSTACK_WORKS
122/* This method for GNU/Linux and perhaps some other systems.
123 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
124 available on them. */
125static void *
126get_thread_stack_base ()
127{
128 pthread_attr_t attr;
129 void *start, *end;
130 size_t size;
131
132 pthread_getattr_np (pthread_self (), &attr);
133 pthread_attr_getstack (&attr, &start, &size);
134 end = (char *)start + size;
135
136#if SCM_STACK_GROWS_UP
137 return start;
138#else
139 return end;
140#endif
141}
142
143#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
144/* This method for MacOS X.
145 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
146 but as of 2006 there's nothing obvious at apple.com. */
147static void *
148get_thread_stack_base ()
149{
150 return pthread_get_stackaddr_np (pthread_self ());
151}
152
c1d5d6d7
AW
153#elif HAVE_PTHREAD_ATTR_GET_NP
154/* This one is for FreeBSD 9. */
155static void *
156get_thread_stack_base ()
157{
158 pthread_attr_t attr;
159 void *start, *end;
160 size_t size;
161
162 pthread_attr_init (&attr);
163 pthread_attr_get_np (pthread_self (), &attr);
164 pthread_attr_getstack (&attr, &start, &size);
165 pthread_attr_destroy (&attr);
166
167 end = (char *)start + size;
168
169#if SCM_STACK_GROWS_UP
170 return start;
171#else
172 return end;
173#endif
174}
175
12c1d861
AW
176#else
177#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
178#endif
179
180static int
987b8160 181GC_get_stack_base (struct GC_stack_base *stack_base)
5f0d2951 182{
12c1d861
AW
183 stack_base->mem_base = get_thread_stack_base ();
184#ifdef __ia64__
185 /* Calculate and store off the base of this thread's register
186 backing store (RBS). Unfortunately our implementation(s) of
187 scm_ia64_register_backing_store_base are only reliable for the
188 main thread. For other threads, therefore, find out the current
189 top of the RBS, and use that as a maximum. */
190 stack_base->reg_base = scm_ia64_register_backing_store_base ();
191 {
192 ucontext_t ctx;
193 void *bsp;
194 getcontext (&ctx);
195 bsp = scm_ia64_ar_bsp (&ctx);
196 if (stack_base->reg_base > bsp)
197 stack_base->reg_base = bsp;
198 }
199#endif
200 return GC_SUCCESS;
5f0d2951 201}
12c1d861
AW
202
203static void *
204GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
205{
206 struct GC_stack_base stack_base;
207
208 stack_base.mem_base = (void*)&stack_base;
209#ifdef __ia64__
210 /* FIXME: Untested. */
211 {
212 ucontext_t ctx;
213 getcontext (&ctx);
214 stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
215 }
5f0d2951
AW
216#endif
217
12c1d861
AW
218 return fn (&stack_base, arg);
219}
220#endif /* HAVE_GC_STACK_BASE */
221
5f0d2951 222
cde24ce1
AW
223/* Now define with_gc_active and with_gc_inactive. */
224
225#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
226
227/* We have a sufficiently new libgc (7.2 or newer). */
228
229static void*
230with_gc_inactive (GC_fn_type func, void *data)
231{
232 return GC_do_blocking (func, data);
233}
234
235static void*
236with_gc_active (GC_fn_type func, void *data)
237{
238 return GC_call_with_gc_active (func, data);
239}
240
241#else
242
243/* libgc not new enough, so never actually deactivate GC.
244
245 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
246 GC_call_with_gc_active. */
247
248static void*
249with_gc_inactive (GC_fn_type func, void *data)
250{
251 return func (data);
252}
253
254static void*
255with_gc_active (GC_fn_type func, void *data)
256{
257 return func (data);
258}
259
260#endif /* HAVE_GC_DO_BLOCKING */
261
262
634aa8de 263\f
6180e336
NJ
264static void
265to_timespec (SCM t, scm_t_timespec *waittime)
266{
267 if (scm_is_pair (t))
268 {
269 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
270 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
271 }
272 else
273 {
274 double time = scm_to_double (t);
275 double sec = scm_c_truncate (time);
276
277 waittime->tv_sec = (long) sec;
2a1d0688 278 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
6180e336
NJ
279 }
280}
281
c058db8a 282\f
d823b11b 283/*** Queues */
7bfd3b9e 284
c058db8a
LC
285/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
286 the risk of false references leading to unbounded retained space as
287 described in "Bounding Space Usage of Conservative Garbage Collectors",
288 H.J. Boehm, 2001. */
289
9de87eea
MV
290/* Make an empty queue data structure.
291 */
d823b11b
MV
292static SCM
293make_queue ()
294{
295 return scm_cons (SCM_EOL, SCM_EOL);
296}
7bfd3b9e 297
9de87eea
MV
298/* Put T at the back of Q and return a handle that can be used with
299 remqueue to remove T from Q again.
300 */
d823b11b
MV
301static SCM
302enqueue (SCM q, SCM t)
303{
304 SCM c = scm_cons (t, SCM_EOL);
d2a51087 305 SCM_CRITICAL_SECTION_START;
d2e53ed6 306 if (scm_is_null (SCM_CDR (q)))
d823b11b
MV
307 SCM_SETCDR (q, c);
308 else
309 SCM_SETCDR (SCM_CAR (q), c);
310 SCM_SETCAR (q, c);
d2a51087 311 SCM_CRITICAL_SECTION_END;
d823b11b
MV
312 return c;
313}
7bfd3b9e 314
9de87eea
MV
315/* Remove the element that the handle C refers to from the queue Q. C
316 must have been returned from a call to enqueue. The return value
317 is zero when the element referred to by C has already been removed.
318 Otherwise, 1 is returned.
319*/
320static int
d823b11b
MV
321remqueue (SCM q, SCM c)
322{
323 SCM p, prev = q;
d2a51087 324 SCM_CRITICAL_SECTION_START;
d2e53ed6 325 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
d823b11b 326 {
bc36d050 327 if (scm_is_eq (p, c))
d823b11b 328 {
bc36d050 329 if (scm_is_eq (c, SCM_CAR (q)))
d823b11b
MV
330 SCM_SETCAR (q, SCM_CDR (c));
331 SCM_SETCDR (prev, SCM_CDR (c));
c058db8a
LC
332
333 /* GC-robust */
334 SCM_SETCDR (c, SCM_EOL);
335
d2a51087 336 SCM_CRITICAL_SECTION_END;
9de87eea 337 return 1;
d823b11b
MV
338 }
339 prev = p;
340 }
d2a51087 341 SCM_CRITICAL_SECTION_END;
9de87eea 342 return 0;
d823b11b
MV
343}
344
9de87eea
MV
345/* Remove the front-most element from the queue Q and return it.
346 Return SCM_BOOL_F when Q is empty.
347*/
d823b11b
MV
348static SCM
349dequeue (SCM q)
350{
d2a51087
NJ
351 SCM c;
352 SCM_CRITICAL_SECTION_START;
353 c = SCM_CDR (q);
d2e53ed6 354 if (scm_is_null (c))
d2a51087
NJ
355 {
356 SCM_CRITICAL_SECTION_END;
357 return SCM_BOOL_F;
358 }
d823b11b
MV
359 else
360 {
361 SCM_SETCDR (q, SCM_CDR (c));
d2e53ed6 362 if (scm_is_null (SCM_CDR (q)))
d823b11b 363 SCM_SETCAR (q, SCM_EOL);
d2a51087 364 SCM_CRITICAL_SECTION_END;
c058db8a
LC
365
366 /* GC-robust */
367 SCM_SETCDR (c, SCM_EOL);
368
d823b11b
MV
369 return SCM_CAR (c);
370 }
371}
7bfd3b9e 372
9de87eea 373/*** Thread smob routines */
76da80e7 374
d823b11b
MV
375
376static int
377thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
378{
23d72566
KR
379 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
380 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
381 the struct case, hence we go via a union, and extract according to the
382 size of pthread_t. */
383 union {
384 scm_i_pthread_t p;
385 unsigned short us;
386 unsigned int ui;
387 unsigned long ul;
388 scm_t_uintmax um;
389 } u;
9de87eea 390 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
23d72566
KR
391 scm_i_pthread_t p = t->pthread;
392 scm_t_uintmax id;
393 u.p = p;
394 if (sizeof (p) == sizeof (unsigned short))
395 id = u.us;
396 else if (sizeof (p) == sizeof (unsigned int))
397 id = u.ui;
398 else if (sizeof (p) == sizeof (unsigned long))
399 id = u.ul;
400 else
401 id = u.um;
402
d823b11b 403 scm_puts ("#<thread ", port);
23d72566 404 scm_uintprint (id, 10, port);
1b92fb6b 405 scm_puts (" (", port);
0345e278 406 scm_uintprint ((scm_t_bits)t, 16, port);
1b92fb6b 407 scm_puts (")>", port);
d823b11b
MV
408 return 1;
409}
410
706846f6 411\f
9de87eea 412/*** Blocking on queues. */
f7eca35d 413
9de87eea
MV
414/* See also scm_i_queue_async_cell for how such a block is
415 interrputed.
416*/
d823b11b 417
9de87eea
MV
418/* Put the current thread on QUEUE and go to sleep, waiting for it to
419 be woken up by a call to 'unblock_from_queue', or to be
420 interrupted. Upon return of this function, the current thread is
421 no longer on QUEUE, even when the sleep has been interrupted.
422
d2a51087 423 The caller of block_self must hold MUTEX. It will be atomically
9de87eea
MV
424 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
425
426 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
427 as MUTEX is needed.
428
429 When WAITTIME is not NULL, the sleep will be aborted at that time.
430
431 The return value of block_self is an errno value. It will be zero
432 when the sleep has been successfully completed by a call to
433 unblock_from_queue, EINTR when it has been interrupted by the
434 delivery of a system async, and ETIMEDOUT when the timeout has
435 expired.
436
437 The system asyncs themselves are not executed by block_self.
438*/
439static int
440block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
441 const scm_t_timespec *waittime)
76da80e7 442{
9de87eea
MV
443 scm_i_thread *t = SCM_I_CURRENT_THREAD;
444 SCM q_handle;
445 int err;
446
447 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
448 err = EINTR;
449 else
450 {
451 t->block_asyncs++;
452 q_handle = enqueue (queue, t->handle);
453 if (waittime == NULL)
454 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
455 else
456 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
457
458 /* When we are still on QUEUE, we have been interrupted. We
459 report this only when no other error (such as a timeout) has
460 happened above.
461 */
462 if (remqueue (queue, q_handle) && err == 0)
463 err = EINTR;
464 t->block_asyncs--;
465 scm_i_reset_sleep (t);
466 }
467
468 return err;
76da80e7 469}
9de87eea 470
d2a51087
NJ
471/* Wake up the first thread on QUEUE, if any. The awoken thread is
472 returned, or #f if the queue was empty.
9de87eea
MV
473 */
474static SCM
475unblock_from_queue (SCM queue)
476{
477 SCM thread = dequeue (queue);
478 if (scm_is_true (thread))
479 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
480 return thread;
481}
482
45f15cac 483\f
9de87eea
MV
484/* Getting into and out of guile mode.
485 */
486
f60a7648
AW
487/* Key used to attach a cleanup handler to a given thread. Also, if
488 thread-local storage is unavailable, this key is used to retrieve the
489 current thread with `pthread_getspecific ()'. */
490scm_i_pthread_key_t scm_i_thread_key;
491
492
705edb95
LC
493#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
494
495/* When thread-local storage (TLS) is available, a pointer to the
496 current-thread object is kept in TLS. Note that storing the thread-object
497 itself in TLS (rather than a pointer to some malloc'd memory) is not
498 possible since thread objects may live longer than the actual thread they
499 represent. */
500SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
501
f60a7648 502#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
705edb95 503
d823b11b 504
9de87eea
MV
505static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
506static scm_i_thread *all_threads = NULL;
507static int thread_count;
508
509static SCM scm_i_default_dynamic_state;
510
0b77014f
AW
511/* Run when a fluid is collected. */
512void
aafb4ed7 513scm_i_reset_fluid (size_t n)
0b77014f
AW
514{
515 scm_i_thread *t;
516
517 scm_i_pthread_mutex_lock (&thread_admin_mutex);
518 for (t = all_threads; t; t = t->next_thread)
519 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
520 {
521 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
522
523 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
aafb4ed7 524 SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
0b77014f
AW
525 }
526 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
527}
528
9de87eea 529/* Perform first stage of thread initialisation, in non-guile mode.
d823b11b 530 */
9de87eea 531static void
12c1d861 532guilify_self_1 (struct GC_stack_base *base)
d823b11b 533{
7f22442b
AW
534 scm_i_thread t;
535
536 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
537 before allocating anything in this thread, because allocation could
538 cause GC to run, and GC could cause finalizers, which could invoke
539 Scheme functions, which need the current thread to be set. */
540
541 t.pthread = scm_i_pthread_self ();
542 t.handle = SCM_BOOL_F;
543 t.result = SCM_BOOL_F;
544 t.cleanup_handler = SCM_BOOL_F;
545 t.mutexes = SCM_EOL;
546 t.held_mutex = NULL;
547 t.join_queue = SCM_EOL;
548 t.dynamic_state = SCM_BOOL_F;
549 t.dynwinds = SCM_EOL;
550 t.active_asyncs = SCM_EOL;
551 t.block_asyncs = 1;
552 t.pending_asyncs = 1;
553 t.critical_section_level = 0;
554 t.base = base->mem_base;
346e4402 555#ifdef __ia64__
bbd21051 556 t.register_backing_store_base = base->reg_base;
346e4402 557#endif
7f22442b
AW
558 t.continuation_root = SCM_EOL;
559 t.continuation_base = t.base;
560 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
561 t.sleep_mutex = NULL;
562 t.sleep_object = SCM_BOOL_F;
563 t.sleep_fd = -1;
564
32f94bf2 565 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
634aa8de
LC
566 /* FIXME: Error conditions during the initialization phase are handled
567 gracelessly since public functions such as `scm_init_guile ()'
568 currently have type `void'. */
569 abort ();
570
7f22442b
AW
571 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
572 t.current_mark_stack_ptr = NULL;
573 t.current_mark_stack_limit = NULL;
574 t.canceled = 0;
575 t.exited = 0;
576 t.guile_mode = 0;
9de87eea 577
7f22442b
AW
578 /* The switcheroo. */
579 {
580 scm_i_thread *t_ptr = &t;
581
582 GC_disable ();
583 t_ptr = GC_malloc (sizeof (scm_i_thread));
584 memcpy (t_ptr, &t, sizeof t);
585
586 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
f60a7648
AW
587
588#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
7f22442b
AW
589 /* Cache the current thread in TLS for faster lookup. */
590 scm_i_current_thread = t_ptr;
f60a7648 591#endif
9de87eea 592
7f22442b
AW
593 scm_i_pthread_mutex_lock (&thread_admin_mutex);
594 t_ptr->next_thread = all_threads;
595 all_threads = t_ptr;
596 thread_count++;
597 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
598
599 GC_enable ();
600 }
d823b11b
MV
601}
602
9de87eea 603/* Perform second stage of thread initialisation, in guile mode.
d823b11b 604 */
9de87eea
MV
605static void
606guilify_self_2 (SCM parent)
d823b11b 607{
9de87eea
MV
608 scm_i_thread *t = SCM_I_CURRENT_THREAD;
609
72e6b608
LC
610 t->guile_mode = 1;
611
9de87eea 612 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
c812243b 613
9de87eea
MV
614 t->continuation_root = scm_cons (t->handle, SCM_EOL);
615 t->continuation_base = t->base;
2bbe1533 616 t->vm = SCM_BOOL_F;
9de87eea
MV
617
618 if (scm_is_true (parent))
619 t->dynamic_state = scm_make_dynamic_state (parent);
620 else
621 t->dynamic_state = scm_i_make_initial_dynamic_state ();
622
623 t->join_queue = make_queue ();
624 t->block_asyncs = 0;
f740445a
AW
625
626 /* See note in finalizers.c:queue_finalizer_async(). */
627 GC_invoke_finalizers ();
d823b11b
MV
628}
629
6180e336
NJ
630\f
631/*** Fat mutexes */
632
633/* We implement our own mutex type since we want them to be 'fair', we
634 want to do fancy things while waiting for them (like running
635 asyncs) and we might want to add things that are nice for
636 debugging.
637*/
638
639typedef struct {
640 scm_i_pthread_mutex_t lock;
641 SCM owner;
adc085f1 642 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
6180e336 643
adc085f1 644 int recursive; /* allow recursive locking? */
6180e336
NJ
645 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
646 int allow_external_unlock; /* is it an error to unlock a mutex that is not
647 owned by the current thread? */
648
649 SCM waiting; /* the threads waiting for this mutex. */
650} fat_mutex;
651
652#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
653#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
654
2002f1f8
AW
655static SCM
656call_cleanup (void *data)
657{
658 SCM *proc_p = data;
659 return scm_call_0 (*proc_p);
660}
661
9de87eea 662/* Perform thread tear-down, in guile mode.
d823b11b 663 */
9de87eea
MV
664static void *
665do_thread_exit (void *v)
666{
2e77f720
LC
667 scm_i_thread *t = (scm_i_thread *) v;
668
91230883
LC
669 /* Ensure the signal handling thread has been launched, because we might be
670 shutting it down. This needs to be done in Guile mode. */
671 scm_i_ensure_signal_delivery_thread ();
672
2e77f720
LC
673 if (!scm_is_false (t->cleanup_handler))
674 {
675 SCM ptr = t->cleanup_handler;
676
677 t->cleanup_handler = SCM_BOOL_F;
678 t->result = scm_internal_catch (SCM_BOOL_T,
2002f1f8 679 call_cleanup, &ptr,
2e77f720
LC
680 scm_handle_by_message_noexit, NULL);
681 }
9de87eea 682
86a597f8 683 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
9de87eea
MV
684
685 t->exited = 1;
0c97d7dd
MV
686 close (t->sleep_pipe[0]);
687 close (t->sleep_pipe[1]);
9de87eea
MV
688 while (scm_is_true (unblock_from_queue (t->join_queue)))
689 ;
9de87eea 690
74926120 691 while (!scm_is_null (t->mutexes))
6180e336 692 {
a0faf7dd 693 SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);
74926120 694
a0faf7dd
LC
695 if (!SCM_UNBNDP (mutex))
696 {
697 fat_mutex *m = SCM_MUTEX_DATA (mutex);
698
699 scm_i_pthread_mutex_lock (&m->lock);
f57fdf07 700
02a362a6
LC
701 /* Check whether T owns MUTEX. This is usually the case, unless
702 T abandoned MUTEX; in that case, T is no longer its owner (see
703 `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
704 if (scm_is_eq (m->owner, t->handle))
705 unblock_from_queue (m->waiting);
f57fdf07 706
a0faf7dd
LC
707 scm_i_pthread_mutex_unlock (&m->lock);
708 }
6180e336 709
a0faf7dd 710 t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
6180e336
NJ
711 }
712
86a597f8 713 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 714
9de87eea
MV
715 return NULL;
716}
717
f60a7648
AW
718static void *
719do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
720{
7f22442b 721 /* Won't hurt if we are already registered. */
8e76ce94 722#if SCM_USE_PTHREAD_THREADS
7f22442b 723 GC_register_my_thread (sb);
8e76ce94 724#endif
f60a7648 725
7f22442b 726 return scm_with_guile (do_thread_exit, v);
f60a7648
AW
727}
728
d823b11b 729static void
9de87eea 730on_thread_exit (void *v)
d823b11b 731{
29776e85 732 /* This handler is executed in non-guile mode. */
2e77f720 733 scm_i_thread *t = (scm_i_thread *) v, **tp;
0c97d7dd 734
b8441577
LC
735 /* If we were canceled, we were unable to clear `t->guile_mode', so do
736 it here. */
737 t->guile_mode = 0;
738
d2a51087
NJ
739 /* If this thread was cancelled while doing a cond wait, it will
740 still have a mutex locked, so we unlock it here. */
741 if (t->held_mutex)
742 {
743 scm_i_pthread_mutex_unlock (t->held_mutex);
744 t->held_mutex = NULL;
745 }
746
f60a7648
AW
747 /* Reinstate the current thread for purposes of scm_with_guile
748 guile-mode cleanup handlers. Only really needed in the non-TLS
749 case but it doesn't hurt to be consistent. */
750 scm_i_pthread_setspecific (scm_i_thread_key, t);
0c97d7dd 751
7f22442b
AW
752 /* Scheme-level thread finalizers and other cleanup needs to happen in
753 guile mode. */
754 GC_call_with_stack_base (do_thread_exit_trampoline, t);
0c97d7dd
MV
755
756 /* Removing ourself from the list of all threads needs to happen in
757 non-guile mode since all SCM values on our stack become
29776e85 758 unprotected once we are no longer in the list. */
0c97d7dd
MV
759 scm_i_pthread_mutex_lock (&thread_admin_mutex);
760 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
761 if (*tp == t)
762 {
763 *tp = t->next_thread;
c058db8a
LC
764
765 /* GC-robust */
766 t->next_thread = NULL;
767
0c97d7dd
MV
768 break;
769 }
770 thread_count--;
2e77f720
LC
771
772 /* If there's only one other thread, it could be the signal delivery
773 thread, so we need to notify it to shut down by closing its read pipe.
774 If it's not the signal delivery thread, then closing the read pipe isn't
775 going to hurt. */
776 if (thread_count <= 1)
777 scm_i_close_signal_pipe ();
778
0c97d7dd
MV
779 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
780
f60a7648 781 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
7f22442b 782
8e76ce94 783#if SCM_USE_PTHREAD_THREADS
7f22442b 784 GC_unregister_my_thread ();
653ccd78 785#endif
d823b11b
MV
786}
787
9de87eea 788static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
d823b11b 789
9de87eea
MV
790static void
791init_thread_key (void)
792{
f60a7648 793 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
9de87eea 794}
d823b11b 795
cde24ce1
AW
796/* Perform any initializations necessary to make the current thread
797 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
798 if necessary.
a54a94b3 799
9de87eea
MV
800 BASE is the stack base to use with GC.
801
802 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
803 which case the default dynamic state is used.
804
cde24ce1 805 Returns zero when the thread was known to guile already; otherwise
9de87eea 806 return 1.
cde24ce1
AW
807
808 Note that it could be the case that the thread was known
809 to Guile, but not in guile mode (because we are within a
810 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
811 be sure. New threads are put into guile mode implicitly. */
9de87eea
MV
812
813static int
12c1d861 814scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
d823b11b 815{
9de87eea
MV
816 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
817
cde24ce1
AW
818 if (SCM_I_CURRENT_THREAD)
819 {
820 /* Thread is already known to Guile.
821 */
822 return 0;
823 }
824 else
9de87eea
MV
825 {
826 /* This thread has not been guilified yet.
827 */
828
829 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
830 if (scm_initialized_p == 0)
831 {
832 /* First thread ever to enter Guile. Run the full
833 initialization.
834 */
835 scm_i_init_guile (base);
12c1d861 836
8e76ce94 837#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
12c1d861
AW
838 /* Allow other threads to come in later. */
839 GC_allow_register_threads ();
4000d064 840#endif
12c1d861 841
9de87eea
MV
842 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
843 }
844 else
845 {
846 /* Guile is already initialized, but this thread enters it for
847 the first time. Only initialize this thread.
848 */
849 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
12c1d861
AW
850
851 /* Register this thread with libgc. */
8e76ce94 852#if SCM_USE_PTHREAD_THREADS
12c1d861 853 GC_register_my_thread (base);
8e76ce94 854#endif
12c1d861 855
9de87eea
MV
856 guilify_self_1 (base);
857 guilify_self_2 (parent);
858 }
859 return 1;
860 }
d823b11b
MV
861}
862
12c1d861
AW
863void
864scm_init_guile ()
d823b11b 865{
12c1d861
AW
866 struct GC_stack_base stack_base;
867
868 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
869 scm_i_init_thread_for_guile (&stack_base,
870 scm_i_default_dynamic_state);
9de87eea
MV
871 else
872 {
12c1d861 873 fprintf (stderr, "Failed to get stack base for current thread.\n");
b3da54d1 874 exit (EXIT_FAILURE);
9de87eea 875 }
a54a94b3
MV
876}
877
12c1d861 878struct with_guile_args
cde24ce1
AW
879{
880 GC_fn_type func;
881 void *data;
12c1d861 882 SCM parent;
cde24ce1
AW
883};
884
885static void *
886with_guile_trampoline (void *data)
887{
12c1d861 888 struct with_guile_args *args = data;
cde24ce1
AW
889
890 return scm_c_with_continuation_barrier (args->func, args->data);
891}
892
12c1d861
AW
893static void *
894with_guile_and_parent (struct GC_stack_base *base, void *data)
9de87eea
MV
895{
896 void *res;
cde24ce1
AW
897 int new_thread;
898 scm_i_thread *t;
12c1d861 899 struct with_guile_args *args = data;
2e77f720 900
12c1d861 901 new_thread = scm_i_init_thread_for_guile (base, args->parent);
cde24ce1
AW
902 t = SCM_I_CURRENT_THREAD;
903 if (new_thread)
2e77f720 904 {
cde24ce1
AW
905 /* We are in Guile mode. */
906 assert (t->guile_mode);
907
12c1d861 908 res = scm_c_with_continuation_barrier (args->func, args->data);
cde24ce1
AW
909
910 /* Leave Guile mode. */
911 t->guile_mode = 0;
912 }
913 else if (t->guile_mode)
914 {
915 /* Already in Guile mode. */
12c1d861 916 res = scm_c_with_continuation_barrier (args->func, args->data);
2e77f720 917 }
74926120 918 else
cde24ce1 919 {
cde24ce1
AW
920 /* We are not in Guile mode, either because we are not within a
921 scm_with_guile, or because we are within a scm_without_guile.
72e6b608 922
cde24ce1
AW
923 This call to scm_with_guile() could happen from anywhere on the
924 stack, and in particular lower on the stack than when it was
925 when this thread was first guilified. Thus, `base' must be
926 updated. */
927#if SCM_STACK_GROWS_UP
12c1d861
AW
928 if (SCM_STACK_PTR (base->mem_base) < t->base)
929 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 930#else
12c1d861
AW
931 if (SCM_STACK_PTR (base->mem_base) > t->base)
932 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 933#endif
72e6b608 934
cde24ce1 935 t->guile_mode = 1;
12c1d861 936 res = with_gc_active (with_guile_trampoline, args);
cde24ce1
AW
937 t->guile_mode = 0;
938 }
939 return res;
72e6b608 940}
3d1af79f 941
12c1d861
AW
942static void *
943scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
944{
945 struct with_guile_args args;
946
947 args.func = func;
948 args.data = data;
949 args.parent = parent;
950
951 return GC_call_with_stack_base (with_guile_and_parent, &args);
952}
953
954void *
955scm_with_guile (void *(*func)(void *), void *data)
956{
957 return scm_i_with_guile_and_parent (func, data,
958 scm_i_default_dynamic_state);
959}
960
9de87eea
MV
961void *
962scm_without_guile (void *(*func)(void *), void *data)
d823b11b 963{
72e6b608 964 void *result;
cde24ce1 965 scm_i_thread *t = SCM_I_CURRENT_THREAD;
72e6b608 966
cde24ce1 967 if (t->guile_mode)
72e6b608 968 {
cde24ce1
AW
969 SCM_I_CURRENT_THREAD->guile_mode = 0;
970 result = with_gc_inactive (func, data);
971 SCM_I_CURRENT_THREAD->guile_mode = 1;
72e6b608
LC
972 }
973 else
cde24ce1 974 /* Otherwise we're not in guile mode, so nothing to do. */
72e6b608
LC
975 result = func (data);
976
977 return result;
9de87eea
MV
978}
979
72e6b608 980\f
9de87eea
MV
981/*** Thread creation */
982
983typedef struct {
984 SCM parent;
985 SCM thunk;
986 SCM handler;
76da80e7 987 SCM thread;
9de87eea
MV
988 scm_i_pthread_mutex_t mutex;
989 scm_i_pthread_cond_t cond;
990} launch_data;
d823b11b 991
9de87eea
MV
992static void *
993really_launch (void *d)
994{
995 launch_data *data = (launch_data *)d;
996 SCM thunk = data->thunk, handler = data->handler;
997 scm_i_thread *t;
d823b11b 998
9de87eea 999 t = SCM_I_CURRENT_THREAD;
a54a94b3 1000
9de87eea
MV
1001 scm_i_scm_pthread_mutex_lock (&data->mutex);
1002 data->thread = scm_current_thread ();
1003 scm_i_pthread_cond_signal (&data->cond);
1004 scm_i_pthread_mutex_unlock (&data->mutex);
1005
1006 if (SCM_UNBNDP (handler))
1007 t->result = scm_call_0 (thunk);
1008 else
1009 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
1010
1011 return 0;
d823b11b
MV
1012}
1013
9de87eea
MV
1014static void *
1015launch_thread (void *d)
1016{
1017 launch_data *data = (launch_data *)d;
1018 scm_i_pthread_detach (scm_i_pthread_self ());
1019 scm_i_with_guile_and_parent (really_launch, d, data->parent);
1020 return NULL;
1021}
1022
1023SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
d823b11b 1024 (SCM thunk, SCM handler),
9de87eea
MV
1025 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
1026 "returning a new thread object representing the thread. The procedure\n"
1027 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
1028 "\n"
1029 "When @var{handler} is specified, then @var{thunk} is called from\n"
1030 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
1031 "handler. This catch is established inside the continuation barrier.\n"
1032 "\n"
1033 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
1034 "the @emph{exit value} of the thread and the thread is terminated.")
d823b11b
MV
1035#define FUNC_NAME s_scm_call_with_new_thread
1036{
9de87eea
MV
1037 launch_data data;
1038 scm_i_pthread_t id;
1039 int err;
d823b11b 1040
9de87eea
MV
1041 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
1042 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
1043 handler, SCM_ARG2, FUNC_NAME);
1044
9f7537dc 1045 GC_collect_a_little ();
9de87eea
MV
1046 data.parent = scm_current_dynamic_state ();
1047 data.thunk = thunk;
1048 data.handler = handler;
1049 data.thread = SCM_BOOL_F;
1050 scm_i_pthread_mutex_init (&data.mutex, NULL);
1051 scm_i_pthread_cond_init (&data.cond, NULL);
1052
1053 scm_i_scm_pthread_mutex_lock (&data.mutex);
1054 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
1055 if (err)
1056 {
1057 scm_i_pthread_mutex_unlock (&data.mutex);
1058 errno = err;
1059 scm_syserror (NULL);
1060 }
1061 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1062 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1063
9de87eea 1064 return data.thread;
d823b11b
MV
1065}
1066#undef FUNC_NAME
1067
9de87eea
MV
1068typedef struct {
1069 SCM parent;
1070 scm_t_catch_body body;
1071 void *body_data;
1072 scm_t_catch_handler handler;
1073 void *handler_data;
1074 SCM thread;
1075 scm_i_pthread_mutex_t mutex;
1076 scm_i_pthread_cond_t cond;
1077} spawn_data;
1078
1079static void *
1080really_spawn (void *d)
1081{
1082 spawn_data *data = (spawn_data *)d;
1083 scm_t_catch_body body = data->body;
1084 void *body_data = data->body_data;
1085 scm_t_catch_handler handler = data->handler;
1086 void *handler_data = data->handler_data;
1087 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1088
1089 scm_i_scm_pthread_mutex_lock (&data->mutex);
1090 data->thread = scm_current_thread ();
1091 scm_i_pthread_cond_signal (&data->cond);
1092 scm_i_pthread_mutex_unlock (&data->mutex);
1093
1094 if (handler == NULL)
1095 t->result = body (body_data);
1096 else
1097 t->result = scm_internal_catch (SCM_BOOL_T,
1098 body, body_data,
1099 handler, handler_data);
1100
1101 return 0;
1102}
1103
1104static void *
1105spawn_thread (void *d)
1106{
1107 spawn_data *data = (spawn_data *)d;
1108 scm_i_pthread_detach (scm_i_pthread_self ());
1109 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1110 return NULL;
1111}
1112
1113SCM
1114scm_spawn_thread (scm_t_catch_body body, void *body_data,
1115 scm_t_catch_handler handler, void *handler_data)
1116{
1117 spawn_data data;
1118 scm_i_pthread_t id;
1119 int err;
1120
1121 data.parent = scm_current_dynamic_state ();
1122 data.body = body;
1123 data.body_data = body_data;
1124 data.handler = handler;
1125 data.handler_data = handler_data;
1126 data.thread = SCM_BOOL_F;
1127 scm_i_pthread_mutex_init (&data.mutex, NULL);
1128 scm_i_pthread_cond_init (&data.cond, NULL);
1129
1130 scm_i_scm_pthread_mutex_lock (&data.mutex);
1131 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1132 if (err)
1133 {
1134 scm_i_pthread_mutex_unlock (&data.mutex);
1135 errno = err;
1136 scm_syserror (NULL);
1137 }
1138 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1139 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1140
0f4f2d9a
LC
1141 assert (SCM_I_IS_THREAD (data.thread));
1142
9de87eea
MV
1143 return data.thread;
1144}
1145
29717c89
MD
1146SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1147 (),
1148"Move the calling thread to the end of the scheduling queue.")
1149#define FUNC_NAME s_scm_yield
1150{
9de87eea 1151 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1152}
1153#undef FUNC_NAME
1154
2e77f720
LC
1155SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1156 (SCM thread),
1157"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1158"cannot be the current thread, and if @var{thread} has already terminated or "
1159"been signaled to terminate, this function is a no-op.")
1160#define FUNC_NAME s_scm_cancel_thread
1161{
1162 scm_i_thread *t = NULL;
1163
1164 SCM_VALIDATE_THREAD (1, thread);
1165 t = SCM_I_THREAD_DATA (thread);
86a597f8 1166 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1167 if (!t->canceled)
1168 {
1169 t->canceled = 1;
86a597f8 1170 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1171 scm_i_pthread_cancel (t->pthread);
1172 }
1173 else
86a597f8 1174 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1175
1176 return SCM_UNSPECIFIED;
1177}
1178#undef FUNC_NAME
1179
1180SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1181 (SCM thread, SCM proc),
1182"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1183"This handler will be called when the thread exits.")
1184#define FUNC_NAME s_scm_set_thread_cleanup_x
1185{
1186 scm_i_thread *t;
1187
1188 SCM_VALIDATE_THREAD (1, thread);
1189 if (!scm_is_false (proc))
1190 SCM_VALIDATE_THUNK (2, proc);
1191
2e77f720 1192 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1193 scm_i_pthread_mutex_lock (&t->admin_mutex);
1194
2e77f720
LC
1195 if (!(t->exited || t->canceled))
1196 t->cleanup_handler = proc;
1197
86a597f8 1198 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1199
1200 return SCM_UNSPECIFIED;
1201}
1202#undef FUNC_NAME
1203
1204SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1205 (SCM thread),
1206"Return the cleanup handler installed for the thread @var{thread}.")
1207#define FUNC_NAME s_scm_thread_cleanup
1208{
1209 scm_i_thread *t;
1210 SCM ret;
1211
1212 SCM_VALIDATE_THREAD (1, thread);
1213
2e77f720 1214 t = SCM_I_THREAD_DATA (thread);
86a597f8 1215 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1216 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1217 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1218
1219 return ret;
1220}
1221#undef FUNC_NAME
1222
6180e336
NJ
1223SCM scm_join_thread (SCM thread)
1224{
1225 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1226}
1227
1228SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1229 (SCM thread, SCM timeout, SCM timeoutval),
d823b11b
MV
1230"Suspend execution of the calling thread until the target @var{thread} "
1231"terminates, unless the target @var{thread} has already terminated. ")
6180e336 1232#define FUNC_NAME s_scm_join_thread_timed
5f05c406 1233{
9de87eea 1234 scm_i_thread *t;
6180e336
NJ
1235 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1236 SCM res = SCM_BOOL_F;
1237
1238 if (! (SCM_UNBNDP (timeoutval)))
1239 res = timeoutval;
d823b11b
MV
1240
1241 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1242 if (scm_is_eq (scm_current_thread (), thread))
2e77f720 1243 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
d823b11b 1244
9de87eea 1245 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1246 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1247
6180e336
NJ
1248 if (! SCM_UNBNDP (timeout))
1249 {
1250 to_timespec (timeout, &ctimeout);
1251 timeout_ptr = &ctimeout;
1252 }
1253
1254 if (t->exited)
1255 res = t->result;
1256 else
d823b11b 1257 {
9de87eea
MV
1258 while (1)
1259 {
74926120 1260 int err = block_self (t->join_queue, thread, &t->admin_mutex,
6180e336
NJ
1261 timeout_ptr);
1262 if (err == 0)
1263 {
1264 if (t->exited)
1265 {
1266 res = t->result;
1267 break;
1268 }
1269 }
1270 else if (err == ETIMEDOUT)
9de87eea 1271 break;
6180e336 1272
86a597f8 1273 scm_i_pthread_mutex_unlock (&t->admin_mutex);
9de87eea 1274 SCM_TICK;
86a597f8 1275 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
21346c4f
NJ
1276
1277 /* Check for exit again, since we just released and
1278 reacquired the admin mutex, before the next block_self
1279 call (which would block forever if t has already
1280 exited). */
1281 if (t->exited)
1282 {
1283 res = t->result;
1284 break;
1285 }
9de87eea 1286 }
d823b11b 1287 }
9de87eea 1288
86a597f8 1289 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 1290
d823b11b 1291 return res;
5f05c406
MV
1292}
1293#undef FUNC_NAME
1294
6180e336
NJ
1295SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1296 (SCM obj),
1297 "Return @code{#t} if @var{obj} is a thread.")
1298#define FUNC_NAME s_scm_thread_p
1299{
1300 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1301}
1302#undef FUNC_NAME
5f05c406 1303
4079f87e 1304
9de87eea
MV
1305static size_t
1306fat_mutex_free (SCM mx)
76da80e7 1307{
9de87eea
MV
1308 fat_mutex *m = SCM_MUTEX_DATA (mx);
1309 scm_i_pthread_mutex_destroy (&m->lock);
76da80e7
MV
1310 return 0;
1311}
1312
1313static int
9de87eea 1314fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1315{
9de87eea
MV
1316 fat_mutex *m = SCM_MUTEX_DATA (mx);
1317 scm_puts ("#<mutex ", port);
1318 scm_uintprint ((scm_t_bits)m, 16, port);
1319 scm_puts (">", port);
1320 return 1;
76da80e7
MV
1321}
1322
76da80e7 1323static SCM
6180e336 1324make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1325{
9de87eea
MV
1326 fat_mutex *m;
1327 SCM mx;
1328
1329 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1330 scm_i_pthread_mutex_init (&m->lock, NULL);
1331 m->owner = SCM_BOOL_F;
adc085f1 1332 m->level = 0;
6180e336 1333
adc085f1 1334 m->recursive = recursive;
6180e336
NJ
1335 m->unchecked_unlock = unchecked_unlock;
1336 m->allow_external_unlock = external_unlock;
1337
9de87eea
MV
1338 m->waiting = SCM_EOL;
1339 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1340 m->waiting = make_queue ();
1341 return mx;
76da80e7
MV
1342}
1343
6180e336
NJ
1344SCM scm_make_mutex (void)
1345{
1346 return scm_make_mutex_with_flags (SCM_EOL);
1347}
1348
2a1d0688
NJ
1349SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1350SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1351SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1352
1353SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1354 (SCM flags),
9de87eea 1355 "Create a new mutex. ")
6180e336 1356#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1357{
6180e336
NJ
1358 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1359
1360 SCM ptr = flags;
1361 while (! scm_is_null (ptr))
1362 {
1363 SCM flag = SCM_CAR (ptr);
1364 if (scm_is_eq (flag, unchecked_unlock_sym))
1365 unchecked_unlock = 1;
1366 else if (scm_is_eq (flag, allow_external_unlock_sym))
1367 external_unlock = 1;
1368 else if (scm_is_eq (flag, recursive_sym))
1369 recursive = 1;
74926120 1370 else
2a1d0688 1371 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1372 ptr = SCM_CDR (ptr);
1373 }
1374 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
76da80e7
MV
1375}
1376#undef FUNC_NAME
1377
9de87eea 1378SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
9bc4701c 1379 (void),
9de87eea
MV
1380 "Create a new recursive mutex. ")
1381#define FUNC_NAME s_scm_make_recursive_mutex
9bc4701c 1382{
6180e336 1383 return make_fat_mutex (1, 0, 0);
9bc4701c
MD
1384}
1385#undef FUNC_NAME
1386
6180e336
NJ
1387SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1388
1389static SCM
adc085f1 1390fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
9de87eea
MV
1391{
1392 fat_mutex *m = SCM_MUTEX_DATA (mutex);
6180e336 1393
adc085f1 1394 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
6180e336
NJ
1395 SCM err = SCM_BOOL_F;
1396
1397 struct timeval current_time;
9de87eea
MV
1398
1399 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1400
1401 while (1)
9de87eea 1402 {
adc085f1 1403 if (m->level == 0)
6180e336 1404 {
adc085f1 1405 m->owner = new_owner;
6180e336 1406 m->level++;
74926120 1407
adc085f1 1408 if (SCM_I_IS_THREAD (new_owner))
6180e336 1409 {
adc085f1 1410 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
ccb80964 1411
4f39f31e
LC
1412 /* FIXME: The order in which `t->admin_mutex' and
1413 `m->lock' are taken differs from that in
1414 `on_thread_exit', potentially leading to deadlocks. */
6180e336 1415 scm_i_pthread_mutex_lock (&t->admin_mutex);
a0faf7dd
LC
1416
1417 /* Only keep a weak reference to MUTEX so that it's not
f57fdf07
LC
1418 retained when not referenced elsewhere (bug #27450).
1419 The weak pair itself is eventually removed when MUTEX
1420 is unlocked. Note that `t->mutexes' lists mutexes
1421 currently held by T, so it should be small. */
a0faf7dd
LC
1422 t->mutexes = scm_weak_car_pair (mutex, t->mutexes);
1423
6180e336 1424 scm_i_pthread_mutex_unlock (&t->admin_mutex);
6180e336 1425 }
adc085f1
JG
1426 *ret = 1;
1427 break;
1428 }
1429 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1430 {
1431 m->owner = new_owner;
1432 err = scm_cons (scm_abandoned_mutex_error_key,
1433 scm_from_locale_string ("lock obtained on abandoned "
1434 "mutex"));
1435 *ret = 1;
1436 break;
1437 }
1438 else if (scm_is_eq (m->owner, new_owner))
1439 {
1440 if (m->recursive)
1441 {
1442 m->level++;
74926120 1443 *ret = 1;
adc085f1
JG
1444 }
1445 else
6180e336 1446 {
adc085f1
JG
1447 err = scm_cons (scm_misc_error_key,
1448 scm_from_locale_string ("mutex already locked "
1449 "by thread"));
1450 *ret = 0;
1451 }
74926120 1452 break;
adc085f1 1453 }
9de87eea 1454 else
9de87eea 1455 {
74926120 1456 if (timeout != NULL)
adc085f1
JG
1457 {
1458 gettimeofday (&current_time, NULL);
1459 if (current_time.tv_sec > timeout->tv_sec ||
1460 (current_time.tv_sec == timeout->tv_sec &&
1461 current_time.tv_usec * 1000 > timeout->tv_nsec))
6180e336 1462 {
adc085f1
JG
1463 *ret = 0;
1464 break;
6180e336 1465 }
6180e336 1466 }
37a52039 1467 block_self (m->waiting, mutex, &m->lock, timeout);
9de87eea
MV
1468 scm_i_pthread_mutex_unlock (&m->lock);
1469 SCM_TICK;
1470 scm_i_scm_pthread_mutex_lock (&m->lock);
1471 }
1472 }
1473 scm_i_pthread_mutex_unlock (&m->lock);
6180e336 1474 return err;
9de87eea
MV
1475}
1476
6180e336
NJ
1477SCM scm_lock_mutex (SCM mx)
1478{
adc085f1 1479 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
6180e336
NJ
1480}
1481
adc085f1
JG
1482SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1483 (SCM m, SCM timeout, SCM owner),
b7e64f8b
BT
1484 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1485 "thread blocks until the mutex becomes available. The function\n"
1486 "returns when the calling thread owns the lock on @var{m}.\n"
1487 "Locking a mutex that a thread already owns will succeed right\n"
1488 "away and will not block the thread. That is, Guile's mutexes\n"
1489 "are @emph{recursive}.")
6180e336 1490#define FUNC_NAME s_scm_lock_mutex_timed
9bc4701c 1491{
6180e336
NJ
1492 SCM exception;
1493 int ret = 0;
1494 scm_t_timespec cwaittime, *waittime = NULL;
76da80e7 1495
6180e336
NJ
1496 SCM_VALIDATE_MUTEX (1, m);
1497
1498 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1499 {
1500 to_timespec (timeout, &cwaittime);
1501 waittime = &cwaittime;
1502 }
1503
97ec95b7
LC
1504 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1505 SCM_VALIDATE_THREAD (3, owner);
1506
adc085f1 1507 exception = fat_mutex_lock (m, waittime, owner, &ret);
6180e336
NJ
1508 if (!scm_is_false (exception))
1509 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1510 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c 1511}
76da80e7 1512#undef FUNC_NAME
9bc4701c 1513
2a3db25e
MW
1514static void
1515lock_mutex_return_void (SCM mx)
1516{
1517 (void) scm_lock_mutex (mx);
1518}
1519
1520static void
1521unlock_mutex_return_void (SCM mx)
1522{
1523 (void) scm_unlock_mutex (mx);
1524}
1525
a4d106c7 1526void
661ae7ab 1527scm_dynwind_lock_mutex (SCM mutex)
a4d106c7 1528{
2a3db25e 1529 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
661ae7ab 1530 SCM_F_WIND_EXPLICITLY);
2a3db25e 1531 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
661ae7ab 1532 SCM_F_WIND_EXPLICITLY);
a4d106c7
MV
1533}
1534
9bc4701c 1535SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
ba1b7223 1536 (SCM mutex),
9bc4701c
MD
1537"Try to lock @var{mutex}. If the mutex is already locked by someone "
1538"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1539#define FUNC_NAME s_scm_try_mutex
1540{
6180e336
NJ
1541 SCM exception;
1542 int ret = 0;
1543 scm_t_timespec cwaittime, *waittime = NULL;
9de87eea 1544
ba1b7223 1545 SCM_VALIDATE_MUTEX (1, mutex);
6180e336
NJ
1546
1547 to_timespec (scm_from_int(0), &cwaittime);
1548 waittime = &cwaittime;
74926120 1549
adc085f1 1550 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
6180e336
NJ
1551 if (!scm_is_false (exception))
1552 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1553 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9de87eea
MV
1554}
1555#undef FUNC_NAME
76da80e7 1556
6180e336
NJ
1557/*** Fat condition variables */
1558
1559typedef struct {
1560 scm_i_pthread_mutex_t lock;
1561 SCM waiting; /* the threads waiting for this condition. */
1562} fat_cond;
1563
1564#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1565#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1566
1567static int
1568fat_mutex_unlock (SCM mutex, SCM cond,
1569 const scm_t_timespec *waittime, int relock)
9de87eea 1570{
7f991c7d 1571 SCM owner;
6180e336
NJ
1572 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1573 fat_cond *c = NULL;
1574 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1575 int err = 0, ret = 0;
9de87eea
MV
1576
1577 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1 1578
7f991c7d 1579 owner = m->owner;
adc085f1 1580
d31ae2c3 1581 if (!scm_is_eq (owner, t->handle))
9bc4701c 1582 {
adc085f1 1583 if (m->level == 0)
6180e336
NJ
1584 {
1585 if (!m->unchecked_unlock)
2a1d0688
NJ
1586 {
1587 scm_i_pthread_mutex_unlock (&m->lock);
1588 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1589 }
d31ae2c3 1590 owner = t->handle;
6180e336
NJ
1591 }
1592 else if (!m->allow_external_unlock)
2a1d0688
NJ
1593 {
1594 scm_i_pthread_mutex_unlock (&m->lock);
1595 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1596 }
6180e336
NJ
1597 }
1598
1599 if (! (SCM_UNBNDP (cond)))
1600 {
6180e336
NJ
1601 c = SCM_CONDVAR_DATA (cond);
1602 while (1)
1603 {
1604 int brk = 0;
1605
6180e336
NJ
1606 if (m->level > 0)
1607 m->level--;
adc085f1 1608 if (m->level == 0)
f57fdf07
LC
1609 {
1610 /* Change the owner of MUTEX. */
1611 t->mutexes = scm_delq_x (mutex, t->mutexes);
1612 m->owner = unblock_from_queue (m->waiting);
1613 }
adc085f1 1614
6180e336 1615 t->block_asyncs++;
74926120 1616
d2a51087
NJ
1617 err = block_self (c->waiting, cond, &m->lock, waittime);
1618 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
1619
1620 if (err == 0)
1621 {
1622 ret = 1;
1623 brk = 1;
1624 }
1625 else if (err == ETIMEDOUT)
1626 {
1627 ret = 0;
1628 brk = 1;
1629 }
1630 else if (err != EINTR)
74926120 1631 {
6180e336 1632 errno = err;
6180e336 1633 scm_syserror (NULL);
74926120 1634 }
6180e336
NJ
1635
1636 if (brk)
1637 {
1638 if (relock)
adc085f1 1639 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
4201062d 1640 t->block_asyncs--;
6180e336
NJ
1641 break;
1642 }
74926120 1643
6180e336
NJ
1644 t->block_asyncs--;
1645 scm_async_click ();
74926120 1646
6180e336
NJ
1647 scm_remember_upto_here_2 (cond, mutex);
1648
1649 scm_i_scm_pthread_mutex_lock (&m->lock);
1650 }
9bc4701c 1651 }
9de87eea 1652 else
6180e336
NJ
1653 {
1654 if (m->level > 0)
1655 m->level--;
74926120 1656 if (m->level == 0)
f57fdf07
LC
1657 {
1658 /* Change the owner of MUTEX. */
1659 t->mutexes = scm_delq_x (mutex, t->mutexes);
1660 m->owner = unblock_from_queue (m->waiting);
1661 }
74926120 1662
6180e336
NJ
1663 scm_i_pthread_mutex_unlock (&m->lock);
1664 ret = 1;
1665 }
9de87eea 1666
6180e336 1667 return ret;
9bc4701c 1668}
9bc4701c 1669
6180e336
NJ
1670SCM scm_unlock_mutex (SCM mx)
1671{
1672 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
9bc4701c 1673}
9bc4701c 1674
6180e336
NJ
1675SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1676 (SCM mx, SCM cond, SCM timeout),
9bc4701c
MD
1677"Unlocks @var{mutex} if the calling thread owns the lock on "
1678"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1679"thread results in undefined behaviour. Once a mutex has been unlocked, "
1680"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1681"lock. Every call to @code{lock-mutex} by this thread must be matched "
1682"with a call to @code{unlock-mutex}. Only the last call to "
1683"@code{unlock-mutex} will actually unlock the mutex. ")
6180e336 1684#define FUNC_NAME s_scm_unlock_mutex_timed
9bc4701c 1685{
6180e336
NJ
1686 scm_t_timespec cwaittime, *waittime = NULL;
1687
9bc4701c 1688 SCM_VALIDATE_MUTEX (1, mx);
6180e336
NJ
1689 if (! (SCM_UNBNDP (cond)))
1690 {
1691 SCM_VALIDATE_CONDVAR (2, cond);
1692
1693 if (! (SCM_UNBNDP (timeout)))
1694 {
1695 to_timespec (timeout, &cwaittime);
1696 waittime = &cwaittime;
1697 }
1698 }
1699
1700 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c
MD
1701}
1702#undef FUNC_NAME
1703
6180e336
NJ
1704SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1705 (SCM obj),
1706 "Return @code{#t} if @var{obj} is a mutex.")
1707#define FUNC_NAME s_scm_mutex_p
1708{
1709 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1710}
74926120 1711#undef FUNC_NAME
9de87eea
MV
1712
1713SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1714 (SCM mx),
1715 "Return the thread owning @var{mx}, or @code{#f}.")
1716#define FUNC_NAME s_scm_mutex_owner
1717{
adc085f1
JG
1718 SCM owner;
1719 fat_mutex *m = NULL;
1720
9de87eea 1721 SCM_VALIDATE_MUTEX (1, mx);
adc085f1
JG
1722 m = SCM_MUTEX_DATA (mx);
1723 scm_i_pthread_mutex_lock (&m->lock);
1724 owner = m->owner;
1725 scm_i_pthread_mutex_unlock (&m->lock);
1726
1727 return owner;
9de87eea
MV
1728}
1729#undef FUNC_NAME
1730
1731SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1732 (SCM mx),
adc085f1 1733 "Return the lock level of mutex @var{mx}.")
9de87eea
MV
1734#define FUNC_NAME s_scm_mutex_level
1735{
1736 SCM_VALIDATE_MUTEX (1, mx);
1737 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1738}
1739#undef FUNC_NAME
1740
adc085f1
JG
1741SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1742 (SCM mx),
1743 "Returns @code{#t} if the mutex @var{mx} is locked.")
1744#define FUNC_NAME s_scm_mutex_locked_p
1745{
1746 SCM_VALIDATE_MUTEX (1, mx);
1747 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1748}
1749#undef FUNC_NAME
9de87eea 1750
9de87eea
MV
1751static int
1752fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1753{
1754 fat_cond *c = SCM_CONDVAR_DATA (cv);
1755 scm_puts ("#<condition-variable ", port);
1756 scm_uintprint ((scm_t_bits)c, 16, port);
1757 scm_puts (">", port);
1758 return 1;
1759}
9bc4701c 1760
d823b11b
MV
1761SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1762 (void),
1763 "Make a new condition variable.")
1764#define FUNC_NAME s_scm_make_condition_variable
5f05c406 1765{
9de87eea
MV
1766 fat_cond *c;
1767 SCM cv;
1768
1769 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
9de87eea
MV
1770 c->waiting = SCM_EOL;
1771 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1772 c->waiting = make_queue ();
d823b11b 1773 return cv;
5f05c406 1774}
d823b11b 1775#undef FUNC_NAME
5f05c406 1776
d823b11b
MV
1777SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1778 (SCM cv, SCM mx, SCM t),
b7e64f8b
BT
1779"Wait until condition variable @var{cv} has been signalled. While waiting, "
1780"mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1781"is locked again when this function returns. When @var{t} is given, "
d823b11b
MV
1782"it specifies a point in time where the waiting should be aborted. It "
1783"can be either a integer as returned by @code{current-time} or a pair "
1784"as returned by @code{gettimeofday}. When the waiting is aborted the "
1785"mutex is locked and @code{#f} is returned. When the condition "
1786"variable is in fact signalled, the mutex is also locked and @code{#t} "
1787"is returned. ")
1788#define FUNC_NAME s_scm_timed_wait_condition_variable
5f05c406 1789{
9de87eea 1790 scm_t_timespec waittime, *waitptr = NULL;
d823b11b
MV
1791
1792 SCM_VALIDATE_CONDVAR (1, cv);
1793 SCM_VALIDATE_MUTEX (2, mx);
74926120 1794
d823b11b
MV
1795 if (!SCM_UNBNDP (t))
1796 {
6180e336 1797 to_timespec (t, &waittime);
9de87eea 1798 waitptr = &waittime;
d823b11b
MV
1799 }
1800
2a1d0688 1801 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
5f05c406 1802}
d823b11b 1803#undef FUNC_NAME
5f05c406 1804
9de87eea
MV
1805static void
1806fat_cond_signal (fat_cond *c)
1807{
9de87eea 1808 unblock_from_queue (c->waiting);
9de87eea
MV
1809}
1810
d823b11b
MV
1811SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1812 (SCM cv),
1813 "Wake up one thread that is waiting for @var{cv}")
1814#define FUNC_NAME s_scm_signal_condition_variable
5f05c406 1815{
d823b11b 1816 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1817 fat_cond_signal (SCM_CONDVAR_DATA (cv));
d823b11b 1818 return SCM_BOOL_T;
5f05c406 1819}
d823b11b 1820#undef FUNC_NAME
5f05c406 1821
static void
fat_cond_broadcast (fat_cond *c)
{
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
}

SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up all threads that are waiting for @var{cv}.")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

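/* Illustrative sketch, not part of the original threads.c: the matching
   producer side for the wait pattern shown earlier.  Signal while holding
   the same mutex so a waiter cannot miss the wakeup; broadcast when several
   waiters may be eligible.  All `example_*' names are hypothetical.  */
static void
example_publish_item (SCM example_queue_mutex, SCM example_queue_condvar,
                      int wake_all)
{
  scm_lock_mutex (example_queue_mutex);
  /* ... enqueue the item here ... */
  if (wake_all)
    scm_broadcast_condition_variable (example_queue_condvar);
  else
    scm_signal_condition_variable (example_queue_condvar);
  scm_unlock_mutex (example_queue_mutex);
}
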
SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a condition variable.")
#define FUNC_NAME s_scm_condition_variable_p
{
  return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME


\f
/*** Select */

struct select_args
{
  int nfds;
  fd_set *read_fds;
  fd_set *write_fds;
  fd_set *except_fds;
  struct timeval *timeout;

  int result;
  int errno_value;
};

static void *
do_std_select (void *args)
{
  struct select_args *select_args;

  select_args = (struct select_args *) args;

  select_args->result =
    select (select_args->nfds,
            select_args->read_fds, select_args->write_fds,
            select_args->except_fds, select_args->timeout);
  select_args->errno_value = errno;

  return NULL;
}

#if !SCM_HAVE_SYS_SELECT_H
static int scm_std_select (int nfds,
                           fd_set *readfds,
                           fd_set *writefds,
                           fd_set *exceptfds,
                           struct timeval *timeout);
#endif

int
scm_std_select (int nfds,
                fd_set *readfds,
                fd_set *writefds,
                fd_set *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC. */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}

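/* Illustrative sketch, not part of the original threads.c: waiting for a
   single descriptor with scm_std_select, which keeps the calling thread
   interruptible (system asyncs wake it through the sleep pipe handled
   above).  The name `example_wait_readable' is hypothetical.  */
static int
example_wait_readable (int fd, long timeout_usecs)
{
  fd_set readfds;
  struct timeval tv;

  FD_ZERO (&readfds);
  FD_SET (fd, &readfds);
  tv.tv_sec = timeout_usecs / 1000000;
  tv.tv_usec = timeout_usecs % 1000000;

  /* Returns > 0 when FD is readable, 0 on timeout, or -1 with errno set
     (EINTR when the thread was woken up, e.g. to run an async).  */
  return scm_std_select (fd + 1, &readfds, NULL, NULL, &tv);
}
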
/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

/* It seems reasonable to not run procedures related to mutex and condition
   variables within `GC_do_blocking ()' since, (i) the GC can operate even
   without it, and (ii) the only potential gain would be GC latency.  See
   http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
   for a discussion of the pros and cons. */

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  int res = scm_i_pthread_mutex_lock (mutex);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

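/* Illustrative sketch, not part of the original threads.c: using the
   dynwind variant above so the raw pthread mutex is released even if the
   protected code throws.  `example_state_mutex' and `example_update_state'
   are hypothetical, and the use of SCM_I_PTHREAD_MUTEX_INITIALIZER (an
   internal convenience) is an assumption for illustration.  */
static scm_i_pthread_mutex_t example_state_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

static void
example_locked_update (void (*example_update_state) (void))
{
  scm_dynwind_begin (0);
  scm_dynwind_pthread_mutex_lock (&example_state_mutex);
  example_update_state ();   /* may throw; the unwind handler unlocks */
  scm_dynwind_end ();
}
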
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}

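/* Illustrative sketch, not part of the original threads.c: a timed wait
   loop built on the wrappers above, which record the mutex being waited on
   in the thread's `held_mutex' field while blocked.  All `example_*' names,
   and the use of the internal scm_i_scm_pthread_mutex_lock and
   scm_i_pthread_mutex_unlock macros, are assumptions for illustration.  */
static int
example_timed_wait (scm_i_pthread_cond_t *cond,
                    scm_i_pthread_mutex_t *mutex,
                    const scm_t_timespec *deadline,
                    int (*example_ready_p) (void))
{
  int err = 0;

  scm_i_scm_pthread_mutex_lock (mutex);
  while (!example_ready_p () && err != ETIMEDOUT)
    err = scm_pthread_cond_timedwait (cond, mutex, deadline);
  scm_i_pthread_mutex_unlock (mutex);
  return err;
}
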
#endif

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

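/* Illustrative sketch, not part of the original threads.c: these sleeps go
   through scm_std_select, so a system async directed at the thread can cut
   them short.  `example_nap' is hypothetical; on systems where select
   updates its timeout, the return value approximates the unslept part.  */
static unsigned int
example_nap (void)
{
  /* Sleep for five seconds; a non-zero result means the sleep was cut
     short with roughly that many seconds left.  */
  return scm_std_sleep (5);
}
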
/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

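/* Illustrative sketch, not part of the original threads.c: walking the list
   returned by all-threads from C and comparing each entry against
   current-thread.  `example_count_other_threads' is hypothetical.  */
static long
example_count_other_threads (void)
{
  SCM self = scm_current_thread ();
  SCM threads = scm_all_threads ();
  long others = 0;

  for (; scm_is_pair (threads); threads = scm_cdr (threads))
    if (!scm_is_eq (scm_car (threads), self))
      others++;
  return others;
}
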
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

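/* Illustrative sketch, not part of the original threads.c: observing
   thread-exited? from C after joining a worker created with
   scm_spawn_thread.  `example_worker' and `example_spawn_and_check' are
   hypothetical.  */
static SCM
example_worker (void *data SCM_UNUSED)
{
  return SCM_BOOL_T;
}

static int
example_spawn_and_check (void)
{
  SCM thread = scm_spawn_thread (example_worker, NULL, NULL, NULL);
  scm_join_thread (thread);
  /* After join-thread returns, the thread has exited.  */
  return scm_c_thread_exited_p (thread);
}
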
SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
            (void),
            "Return the total number of processors of the machine, which\n"
            "is guaranteed to be at least 1. A ``processor'' here is a\n"
            "thread execution unit, which can be either:\n\n"
            "@itemize\n"
            "@item an execution core in a (possibly multi-core) chip, in a\n"
            "  (possibly multi-chip) module, in a single computer, or\n"
            "@item a thread execution unit inside a core in the case of\n"
            "  @dfn{hyper-threaded} CPUs.\n"
            "@end itemize\n\n"
            "Which of the two definitions is used is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME

SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
            (void),
            "Like @code{total-processor-count}, but return the number of\n"
            "processors available to the current process. See\n"
            "@code{setaffinity} and @code{getaffinity} for more\n"
            "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME

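/* Illustrative sketch, not part of the original threads.c: sizing a worker
   pool from the processor counts defined above.  `example_worker_pool_size'
   and the `max_workers' cap are hypothetical.  */
static unsigned long
example_worker_pool_size (unsigned long max_workers)
{
  unsigned long n = scm_to_ulong (scm_current_processor_count ());
  return n < max_workers ? n : max_workers;
}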

\f

static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

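/* Illustrative sketch, not part of the original threads.c: a critical
   section protected by the default recursive mutex; asyncs are blocked for
   its extent and the mutex is released on normal or non-local exit.
   `example_with_critical_section' and `example_touch_shared_state' are
   hypothetical.  */
static void
example_with_critical_section (void (*example_touch_shared_state) (void))
{
  scm_dynwind_begin (0);
  scm_dynwind_critical_section (SCM_BOOL_F);  /* #f selects the default mutex */
  example_touch_shared_state ();
  scm_dynwind_end ();
}
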
/*** Initialization */

scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

\f
/* IA64-specific things. */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
                  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */


/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/