Fix tests for 'scm_c_bind_keyword_arguments'.
[bpt/guile.git] / libguile / threads.c
b3da54d1 1/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
dbab8aaa 2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
b3da54d1 3 * Free Software Foundation, Inc.
74926120 4 *
73be1d9e 5 * This library is free software; you can redistribute it and/or
53befeb7
NJ
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
7bfd3b9e 9 *
53befeb7
NJ
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
73be1d9e
MV
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
7bfd3b9e 14 *
73be1d9e
MV
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
53befeb7
NJ
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA
73be1d9e 19 */
1bbd0b84 20
1bbd0b84 21
7bfd3b9e 22\f
dbb605f5
LC
23#ifdef HAVE_CONFIG_H
24# include <config.h>
25#endif
7bfd3b9e 26
1c44468d 27#include "libguile/bdw-gc.h"
1810dc4e
RB
28#include "libguile/_scm.h"
29
b3da54d1 30#include <stdlib.h>
fcc5d734 31#if HAVE_UNISTD_H
d823b11b 32#include <unistd.h>
fcc5d734 33#endif
d823b11b 34#include <stdio.h>
8ab3d8a0
KR
35
36#ifdef HAVE_STRING_H
37#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
38#endif
39
fcc5d734 40#if HAVE_SYS_TIME_H
d823b11b 41#include <sys/time.h>
fcc5d734 42#endif
5f05c406 43
c1d5d6d7
AW
44#if HAVE_PTHREAD_NP_H
45# include <pthread_np.h>
46#endif
47
6ab4de61
AW
48#include <sys/select.h>
49
f57fdf07 50#include <assert.h>
32f94bf2 51#include <fcntl.h>
d20912e6 52#include <nproc.h>
f57fdf07 53
d823b11b
MV
54#include "libguile/validate.h"
55#include "libguile/root.h"
56#include "libguile/eval.h"
57#include "libguile/async.h"
58#include "libguile/ports.h"
59#include "libguile/threads.h"
a0599745 60#include "libguile/dynwind.h"
d823b11b 61#include "libguile/iselect.h"
9de87eea
MV
62#include "libguile/fluids.h"
63#include "libguile/continuations.h"
2b829bbb 64#include "libguile/gc.h"
9de87eea 65#include "libguile/init.h"
2e77f720 66#include "libguile/scmsigs.h"
6180e336 67#include "libguile/strings.h"
a0faf7dd 68#include "libguile/weaks.h"
7bfd3b9e 69
634aa8de
LC
70#include <full-read.h>
71
cde24ce1
AW
72
73\f
74
75/* First some libgc shims. */
76
77/* Make sure GC_fn_type is defined; it is missing from the public
78 headers of GC 7.1 and earlier. */
79#ifndef HAVE_GC_FN_TYPE
80typedef void * (* GC_fn_type) (void *);
81#endif
82
83
5f0d2951
AW
84#ifndef GC_SUCCESS
85#define GC_SUCCESS 0
86#endif
87
88#ifndef GC_UNIMPLEMENTED
89#define GC_UNIMPLEMENTED 3
90#endif
91
92/* Likewise struct GC_stack_base is missing before 7.1. */
93#ifndef HAVE_GC_STACK_BASE
12c1d861
AW
94struct GC_stack_base {
95 void * mem_base; /* Base of memory stack. */
96#ifdef __ia64__
97 void * reg_base; /* Base of separate register stack. */
98#endif
99};
5f0d2951
AW
100
101static int
987b8160 102GC_register_my_thread (struct GC_stack_base *stack_base)
5f0d2951
AW
103{
104 return GC_UNIMPLEMENTED;
105}
106
107static void
108GC_unregister_my_thread ()
109{
110}
111
12c1d861
AW
112#if !SCM_USE_PTHREAD_THREADS
113/* No threads; we can just use GC_stackbottom. */
5f0d2951 114static void *
12c1d861
AW
115get_thread_stack_base ()
116{
117 return GC_stackbottom;
118}
119
120#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
121 && defined PTHREAD_ATTR_GETSTACK_WORKS
 122/* This method is for GNU/Linux and perhaps some other systems.
123 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
124 available on them. */
125static void *
126get_thread_stack_base ()
127{
128 pthread_attr_t attr;
129 void *start, *end;
130 size_t size;
131
132 pthread_getattr_np (pthread_self (), &attr);
133 pthread_attr_getstack (&attr, &start, &size);
134 end = (char *)start + size;
135
136#if SCM_STACK_GROWS_UP
137 return start;
138#else
139 return end;
140#endif
141}
142
143#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
 144/* This method is for MacOS X.
145 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
146 but as of 2006 there's nothing obvious at apple.com. */
147static void *
148get_thread_stack_base ()
149{
150 return pthread_get_stackaddr_np (pthread_self ());
151}
152
c1d5d6d7
AW
153#elif HAVE_PTHREAD_ATTR_GET_NP
154/* This one is for FreeBSD 9. */
155static void *
156get_thread_stack_base ()
157{
158 pthread_attr_t attr;
159 void *start, *end;
160 size_t size;
161
162 pthread_attr_init (&attr);
163 pthread_attr_get_np (pthread_self (), &attr);
164 pthread_attr_getstack (&attr, &start, &size);
165 pthread_attr_destroy (&attr);
166
167 end = (char *)start + size;
168
169#if SCM_STACK_GROWS_UP
170 return start;
171#else
172 return end;
173#endif
174}
175
12c1d861
AW
176#else
177#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
178#endif
179
180static int
987b8160 181GC_get_stack_base (struct GC_stack_base *stack_base)
5f0d2951 182{
12c1d861
AW
183 stack_base->mem_base = get_thread_stack_base ();
184#ifdef __ia64__
185 /* Calculate and store off the base of this thread's register
186 backing store (RBS). Unfortunately our implementation(s) of
187 scm_ia64_register_backing_store_base are only reliable for the
188 main thread. For other threads, therefore, find out the current
189 top of the RBS, and use that as a maximum. */
190 stack_base->reg_base = scm_ia64_register_backing_store_base ();
191 {
192 ucontext_t ctx;
193 void *bsp;
194 getcontext (&ctx);
195 bsp = scm_ia64_ar_bsp (&ctx);
196 if (stack_base->reg_base > bsp)
197 stack_base->reg_base = bsp;
198 }
199#endif
200 return GC_SUCCESS;
5f0d2951 201}
12c1d861
AW
202
203static void *
204GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
205{
206 struct GC_stack_base stack_base;
207
208 stack_base.mem_base = (void*)&stack_base;
209#ifdef __ia64__
210 /* FIXME: Untested. */
211 {
212 ucontext_t ctx;
213 getcontext (&ctx);
214 stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
215 }
5f0d2951
AW
216#endif
217
12c1d861
AW
218 return fn (&stack_base, arg);
219}
220#endif /* HAVE_GC_STACK_BASE */
221
5f0d2951 222
cde24ce1
AW
223/* Now define with_gc_active and with_gc_inactive. */
224
225#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
226
227/* We have a sufficiently new libgc (7.2 or newer). */
228
229static void*
230with_gc_inactive (GC_fn_type func, void *data)
231{
232 return GC_do_blocking (func, data);
233}
234
235static void*
236with_gc_active (GC_fn_type func, void *data)
237{
238 return GC_call_with_gc_active (func, data);
239}
240
241#else
242
243/* libgc not new enough, so never actually deactivate GC.
244
245 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
246 GC_call_with_gc_active. */
247
248static void*
249with_gc_inactive (GC_fn_type func, void *data)
250{
251 return func (data);
252}
253
254static void*
255with_gc_active (GC_fn_type func, void *data)
256{
257 return func (data);
258}
259
260#endif /* HAVE_GC_DO_BLOCKING */
261
262
634aa8de 263\f
6180e336
NJ
264static void
265to_timespec (SCM t, scm_t_timespec *waittime)
266{
267 if (scm_is_pair (t))
268 {
269 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
270 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
271 }
272 else
273 {
274 double time = scm_to_double (t);
275 double sec = scm_c_truncate (time);
276
277 waittime->tv_sec = (long) sec;
2a1d0688 278 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
6180e336
NJ
279 }
280}
281
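/* Illustrative sketch added by the editor (not part of the original
   threads.c): both timeout representations accepted by to_timespec map
   onto the same scm_t_timespec.  A timeout of 2.25 seconds may be given
   either as the real number 2.25 or as the pair (2 . 250000), i.e.
   seconds and microseconds as returned by `gettimeofday'. */
#if 0 /* example only */
static void
to_timespec_example (void)
{
  scm_t_timespec a, b;

  to_timespec (scm_from_double (2.25), &a);
  to_timespec (scm_cons (scm_from_ulong (2), scm_from_ulong (250000)), &b);

  /* Both calls yield tv_sec == 2 and tv_nsec == 250000000.  */
}
#endif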
c058db8a 282\f
d823b11b 283/*** Queues */
7bfd3b9e 284
c058db8a
LC
 285/* Note: We annotate as "GC-robust" those assignments whose purpose is to avoid
286 the risk of false references leading to unbounded retained space as
287 described in "Bounding Space Usage of Conservative Garbage Collectors",
288 H.J. Boehm, 2001. */
289
9de87eea
MV
290/* Make an empty queue data structure.
291 */
d823b11b
MV
292static SCM
293make_queue ()
294{
295 return scm_cons (SCM_EOL, SCM_EOL);
296}
7bfd3b9e 297
9de87eea
MV
298/* Put T at the back of Q and return a handle that can be used with
299 remqueue to remove T from Q again.
300 */
d823b11b
MV
301static SCM
302enqueue (SCM q, SCM t)
303{
304 SCM c = scm_cons (t, SCM_EOL);
d2a51087 305 SCM_CRITICAL_SECTION_START;
d2e53ed6 306 if (scm_is_null (SCM_CDR (q)))
d823b11b
MV
307 SCM_SETCDR (q, c);
308 else
309 SCM_SETCDR (SCM_CAR (q), c);
310 SCM_SETCAR (q, c);
d2a51087 311 SCM_CRITICAL_SECTION_END;
d823b11b
MV
312 return c;
313}
7bfd3b9e 314
9de87eea
MV
315/* Remove the element that the handle C refers to from the queue Q. C
316 must have been returned from a call to enqueue. The return value
317 is zero when the element referred to by C has already been removed.
318 Otherwise, 1 is returned.
319*/
320static int
d823b11b
MV
321remqueue (SCM q, SCM c)
322{
323 SCM p, prev = q;
d2a51087 324 SCM_CRITICAL_SECTION_START;
d2e53ed6 325 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
d823b11b 326 {
bc36d050 327 if (scm_is_eq (p, c))
d823b11b 328 {
bc36d050 329 if (scm_is_eq (c, SCM_CAR (q)))
d823b11b
MV
330 SCM_SETCAR (q, SCM_CDR (c));
331 SCM_SETCDR (prev, SCM_CDR (c));
c058db8a
LC
332
333 /* GC-robust */
334 SCM_SETCDR (c, SCM_EOL);
335
d2a51087 336 SCM_CRITICAL_SECTION_END;
9de87eea 337 return 1;
d823b11b
MV
338 }
339 prev = p;
340 }
d2a51087 341 SCM_CRITICAL_SECTION_END;
9de87eea 342 return 0;
d823b11b
MV
343}
344
9de87eea
MV
345/* Remove the front-most element from the queue Q and return it.
346 Return SCM_BOOL_F when Q is empty.
347*/
d823b11b
MV
348static SCM
349dequeue (SCM q)
350{
d2a51087
NJ
351 SCM c;
352 SCM_CRITICAL_SECTION_START;
353 c = SCM_CDR (q);
d2e53ed6 354 if (scm_is_null (c))
d2a51087
NJ
355 {
356 SCM_CRITICAL_SECTION_END;
357 return SCM_BOOL_F;
358 }
d823b11b
MV
359 else
360 {
361 SCM_SETCDR (q, SCM_CDR (c));
d2e53ed6 362 if (scm_is_null (SCM_CDR (q)))
d823b11b 363 SCM_SETCAR (q, SCM_EOL);
d2a51087 364 SCM_CRITICAL_SECTION_END;
c058db8a
LC
365
366 /* GC-robust */
367 SCM_SETCDR (c, SCM_EOL);
368
d823b11b
MV
369 return SCM_CAR (c);
370 }
371}
7bfd3b9e 372
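/* Illustrative sketch added by the editor (not part of the original
   threads.c): how the queue primitives above fit together.  The handle
   returned by enqueue is what lets a sleeping thread take itself off a
   wait queue again when it is woken by something other than dequeue. */
#if 0 /* example only */
static void
queue_example (SCM item)
{
  SCM q = make_queue ();
  SCM handle = enqueue (q, item);   /* ITEM now sits at the back of Q */

  /* remqueue returns 1 because ITEM is still queued; it would return 0
     if dequeue had already removed it. */
  if (!remqueue (q, handle))
    abort ();

  /* Q is empty again, so dequeue reports that with #f. */
  if (scm_is_false (dequeue (q)))
    return;
}
#endif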
9de87eea 373/*** Thread smob routines */
76da80e7 374
d823b11b
MV
375
376static int
377thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
378{
23d72566
KR
 379 /* On a GNU system pthread_t is an unsigned long, but on MinGW it's a
380 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
381 the struct case, hence we go via a union, and extract according to the
382 size of pthread_t. */
383 union {
384 scm_i_pthread_t p;
385 unsigned short us;
386 unsigned int ui;
387 unsigned long ul;
388 scm_t_uintmax um;
389 } u;
9de87eea 390 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
23d72566
KR
391 scm_i_pthread_t p = t->pthread;
392 scm_t_uintmax id;
393 u.p = p;
394 if (sizeof (p) == sizeof (unsigned short))
395 id = u.us;
396 else if (sizeof (p) == sizeof (unsigned int))
397 id = u.ui;
398 else if (sizeof (p) == sizeof (unsigned long))
399 id = u.ul;
400 else
401 id = u.um;
402
d823b11b 403 scm_puts ("#<thread ", port);
23d72566 404 scm_uintprint (id, 10, port);
1b92fb6b 405 scm_puts (" (", port);
0345e278 406 scm_uintprint ((scm_t_bits)t, 16, port);
1b92fb6b 407 scm_puts (")>", port);
d823b11b
MV
408 return 1;
409}
410
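/* Editor's note (illustration, not part of the original source): with the
   printer above a thread object displays as, for example,

     #<thread 140737354094336 (7ffff58dc700)>

   where the first number is the pthread identifier in decimal and the
   parenthesized number is the address of the scm_i_thread structure in
   hexadecimal; the values shown here are hypothetical. */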
706846f6 411\f
9de87eea 412/*** Blocking on queues. */
f7eca35d 413
9de87eea
MV
414/* See also scm_i_queue_async_cell for how such a block is
 415 interrupted.
416*/
d823b11b 417
9de87eea
MV
418/* Put the current thread on QUEUE and go to sleep, waiting for it to
419 be woken up by a call to 'unblock_from_queue', or to be
420 interrupted. Upon return of this function, the current thread is
421 no longer on QUEUE, even when the sleep has been interrupted.
422
d2a51087 423 The caller of block_self must hold MUTEX. It will be atomically
9de87eea
MV
424 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
425
426 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
427 as MUTEX is needed.
428
429 When WAITTIME is not NULL, the sleep will be aborted at that time.
430
431 The return value of block_self is an errno value. It will be zero
432 when the sleep has been successfully completed by a call to
433 unblock_from_queue, EINTR when it has been interrupted by the
434 delivery of a system async, and ETIMEDOUT when the timeout has
435 expired.
436
437 The system asyncs themselves are not executed by block_self.
438*/
439static int
440block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
441 const scm_t_timespec *waittime)
76da80e7 442{
9de87eea
MV
443 scm_i_thread *t = SCM_I_CURRENT_THREAD;
444 SCM q_handle;
445 int err;
446
447 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
448 err = EINTR;
449 else
450 {
451 t->block_asyncs++;
452 q_handle = enqueue (queue, t->handle);
453 if (waittime == NULL)
454 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
455 else
456 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
457
458 /* When we are still on QUEUE, we have been interrupted. We
459 report this only when no other error (such as a timeout) has
460 happened above.
461 */
462 if (remqueue (queue, q_handle) && err == 0)
463 err = EINTR;
464 t->block_asyncs--;
465 scm_i_reset_sleep (t);
466 }
467
468 return err;
76da80e7 469}
9de87eea 470
d2a51087
NJ
471/* Wake up the first thread on QUEUE, if any. The awoken thread is
472 returned, or #f if the queue was empty.
9de87eea
MV
473 */
474static SCM
475unblock_from_queue (SCM queue)
476{
477 SCM thread = dequeue (queue);
478 if (scm_is_true (thread))
479 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
480 return thread;
481}
482
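/* Illustrative sketch added by the editor (not part of the original
   threads.c): the canonical calling pattern for block_self, modelled on
   `join-thread' and `fat_mutex_lock' below.  The caller already holds
   MUTEX; QUEUE, PROTECTOR and the woken-up condition are placeholders
   for the caller's own state. */
#if 0 /* example only */
static int
block_self_example (SCM queue, SCM protector, scm_i_pthread_mutex_t *mutex,
                    const scm_t_timespec *waittime)
{
  while (1)
    {
      int err = block_self (queue, protector, mutex, waittime);

      if (err == 0)
        return 1;                  /* woken by unblock_from_queue */
      else if (err == ETIMEDOUT)
        return 0;                  /* the timeout expired */

      /* EINTR: a system async is pending.  Release the lock, let the
         async run, then retry. */
      scm_i_pthread_mutex_unlock (mutex);
      SCM_TICK;
      scm_i_scm_pthread_mutex_lock (mutex);
    }
}
#endif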
45f15cac 483\f
9de87eea
MV
484/* Getting into and out of guile mode.
485 */
486
f60a7648
AW
487/* Key used to attach a cleanup handler to a given thread. Also, if
488 thread-local storage is unavailable, this key is used to retrieve the
489 current thread with `pthread_getspecific ()'. */
490scm_i_pthread_key_t scm_i_thread_key;
491
492
705edb95
LC
493#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
494
495/* When thread-local storage (TLS) is available, a pointer to the
496 current-thread object is kept in TLS. Note that storing the thread-object
497 itself in TLS (rather than a pointer to some malloc'd memory) is not
498 possible since thread objects may live longer than the actual thread they
499 represent. */
500SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
501
f60a7648 502#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
705edb95 503
d823b11b 504
9de87eea
MV
505static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
506static scm_i_thread *all_threads = NULL;
507static int thread_count;
508
509static SCM scm_i_default_dynamic_state;
510
0b77014f
AW
511/* Run when a fluid is collected. */
512void
aafb4ed7 513scm_i_reset_fluid (size_t n)
0b77014f
AW
514{
515 scm_i_thread *t;
516
517 scm_i_pthread_mutex_lock (&thread_admin_mutex);
518 for (t = all_threads; t; t = t->next_thread)
519 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
520 {
521 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
522
523 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
aafb4ed7 524 SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
0b77014f
AW
525 }
526 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
527}
528
9de87eea 529/* Perform first stage of thread initialisation, in non-guile mode.
d823b11b 530 */
9de87eea 531static void
12c1d861 532guilify_self_1 (struct GC_stack_base *base)
d823b11b 533{
7f22442b
AW
534 scm_i_thread t;
535
536 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
537 before allocating anything in this thread, because allocation could
538 cause GC to run, and GC could cause finalizers, which could invoke
539 Scheme functions, which need the current thread to be set. */
540
541 t.pthread = scm_i_pthread_self ();
542 t.handle = SCM_BOOL_F;
543 t.result = SCM_BOOL_F;
544 t.cleanup_handler = SCM_BOOL_F;
545 t.mutexes = SCM_EOL;
546 t.held_mutex = NULL;
547 t.join_queue = SCM_EOL;
548 t.dynamic_state = SCM_BOOL_F;
549 t.dynwinds = SCM_EOL;
550 t.active_asyncs = SCM_EOL;
551 t.block_asyncs = 1;
552 t.pending_asyncs = 1;
553 t.critical_section_level = 0;
554 t.base = base->mem_base;
346e4402 555#ifdef __ia64__
bbd21051 556 t.register_backing_store_base = base->reg_base;
346e4402 557#endif
7f22442b
AW
558 t.continuation_root = SCM_EOL;
559 t.continuation_base = t.base;
560 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
561 t.sleep_mutex = NULL;
562 t.sleep_object = SCM_BOOL_F;
563 t.sleep_fd = -1;
564
32f94bf2 565 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
634aa8de
LC
566 /* FIXME: Error conditions during the initialization phase are handled
567 gracelessly since public functions such as `scm_init_guile ()'
568 currently have type `void'. */
569 abort ();
570
7f22442b
AW
571 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
572 t.current_mark_stack_ptr = NULL;
573 t.current_mark_stack_limit = NULL;
574 t.canceled = 0;
575 t.exited = 0;
576 t.guile_mode = 0;
9de87eea 577
7f22442b
AW
578 /* The switcheroo. */
579 {
580 scm_i_thread *t_ptr = &t;
581
582 GC_disable ();
583 t_ptr = GC_malloc (sizeof (scm_i_thread));
584 memcpy (t_ptr, &t, sizeof t);
585
586 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
f60a7648
AW
587
588#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
7f22442b
AW
589 /* Cache the current thread in TLS for faster lookup. */
590 scm_i_current_thread = t_ptr;
f60a7648 591#endif
9de87eea 592
7f22442b
AW
593 scm_i_pthread_mutex_lock (&thread_admin_mutex);
594 t_ptr->next_thread = all_threads;
595 all_threads = t_ptr;
596 thread_count++;
597 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
598
599 GC_enable ();
600 }
d823b11b
MV
601}
602
9de87eea 603/* Perform second stage of thread initialisation, in guile mode.
d823b11b 604 */
9de87eea
MV
605static void
606guilify_self_2 (SCM parent)
d823b11b 607{
9de87eea
MV
608 scm_i_thread *t = SCM_I_CURRENT_THREAD;
609
72e6b608
LC
610 t->guile_mode = 1;
611
9de87eea 612 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
c812243b 613
9de87eea
MV
614 t->continuation_root = scm_cons (t->handle, SCM_EOL);
615 t->continuation_base = t->base;
2bbe1533 616 t->vm = SCM_BOOL_F;
9de87eea
MV
617
618 if (scm_is_true (parent))
619 t->dynamic_state = scm_make_dynamic_state (parent);
620 else
621 t->dynamic_state = scm_i_make_initial_dynamic_state ();
622
623 t->join_queue = make_queue ();
624 t->block_asyncs = 0;
f740445a
AW
625
626 /* See note in finalizers.c:queue_finalizer_async(). */
627 GC_invoke_finalizers ();
d823b11b
MV
628}
629
6180e336
NJ
630\f
631/*** Fat mutexes */
632
633/* We implement our own mutex type since we want them to be 'fair', we
634 want to do fancy things while waiting for them (like running
635 asyncs) and we might want to add things that are nice for
636 debugging.
637*/
638
639typedef struct {
640 scm_i_pthread_mutex_t lock;
641 SCM owner;
adc085f1 642 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
6180e336 643
adc085f1 644 int recursive; /* allow recursive locking? */
6180e336
NJ
645 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
646 int allow_external_unlock; /* is it an error to unlock a mutex that is not
647 owned by the current thread? */
648
649 SCM waiting; /* the threads waiting for this mutex. */
650} fat_mutex;
651
652#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
653#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
654
2002f1f8
AW
655static SCM
656call_cleanup (void *data)
657{
658 SCM *proc_p = data;
659 return scm_call_0 (*proc_p);
660}
661
9de87eea 662/* Perform thread tear-down, in guile mode.
d823b11b 663 */
9de87eea
MV
664static void *
665do_thread_exit (void *v)
666{
2e77f720
LC
667 scm_i_thread *t = (scm_i_thread *) v;
668
91230883
LC
669 /* Ensure the signal handling thread has been launched, because we might be
670 shutting it down. This needs to be done in Guile mode. */
671 scm_i_ensure_signal_delivery_thread ();
672
2e77f720
LC
673 if (!scm_is_false (t->cleanup_handler))
674 {
675 SCM ptr = t->cleanup_handler;
676
677 t->cleanup_handler = SCM_BOOL_F;
678 t->result = scm_internal_catch (SCM_BOOL_T,
2002f1f8 679 call_cleanup, &ptr,
2e77f720
LC
680 scm_handle_by_message_noexit, NULL);
681 }
9de87eea 682
86a597f8 683 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
9de87eea
MV
684
685 t->exited = 1;
0c97d7dd
MV
686 close (t->sleep_pipe[0]);
687 close (t->sleep_pipe[1]);
9de87eea
MV
688 while (scm_is_true (unblock_from_queue (t->join_queue)))
689 ;
9de87eea 690
74926120 691 while (!scm_is_null (t->mutexes))
6180e336 692 {
a0faf7dd 693 SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);
74926120 694
a0faf7dd
LC
695 if (!SCM_UNBNDP (mutex))
696 {
697 fat_mutex *m = SCM_MUTEX_DATA (mutex);
698
699 scm_i_pthread_mutex_lock (&m->lock);
f57fdf07 700
02a362a6
LC
701 /* Check whether T owns MUTEX. This is usually the case, unless
702 T abandoned MUTEX; in that case, T is no longer its owner (see
703 `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
704 if (scm_is_eq (m->owner, t->handle))
705 unblock_from_queue (m->waiting);
f57fdf07 706
a0faf7dd
LC
707 scm_i_pthread_mutex_unlock (&m->lock);
708 }
6180e336 709
a0faf7dd 710 t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
6180e336
NJ
711 }
712
86a597f8 713 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 714
9de87eea
MV
715 return NULL;
716}
717
f60a7648
AW
718static void *
719do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
720{
7f22442b 721 /* Won't hurt if we are already registered. */
8e76ce94 722#if SCM_USE_PTHREAD_THREADS
7f22442b 723 GC_register_my_thread (sb);
8e76ce94 724#endif
f60a7648 725
7f22442b 726 return scm_with_guile (do_thread_exit, v);
f60a7648
AW
727}
728
d823b11b 729static void
9de87eea 730on_thread_exit (void *v)
d823b11b 731{
29776e85 732 /* This handler is executed in non-guile mode. */
2e77f720 733 scm_i_thread *t = (scm_i_thread *) v, **tp;
0c97d7dd 734
b8441577
LC
735 /* If we were canceled, we were unable to clear `t->guile_mode', so do
736 it here. */
737 t->guile_mode = 0;
738
d2a51087
NJ
739 /* If this thread was cancelled while doing a cond wait, it will
740 still have a mutex locked, so we unlock it here. */
741 if (t->held_mutex)
742 {
743 scm_i_pthread_mutex_unlock (t->held_mutex);
744 t->held_mutex = NULL;
745 }
746
f60a7648
AW
747 /* Reinstate the current thread for purposes of scm_with_guile
748 guile-mode cleanup handlers. Only really needed in the non-TLS
749 case but it doesn't hurt to be consistent. */
750 scm_i_pthread_setspecific (scm_i_thread_key, t);
0c97d7dd 751
7f22442b
AW
752 /* Scheme-level thread finalizers and other cleanup needs to happen in
753 guile mode. */
754 GC_call_with_stack_base (do_thread_exit_trampoline, t);
0c97d7dd
MV
755
756 /* Removing ourself from the list of all threads needs to happen in
757 non-guile mode since all SCM values on our stack become
29776e85 758 unprotected once we are no longer in the list. */
0c97d7dd
MV
759 scm_i_pthread_mutex_lock (&thread_admin_mutex);
760 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
761 if (*tp == t)
762 {
763 *tp = t->next_thread;
c058db8a
LC
764
765 /* GC-robust */
766 t->next_thread = NULL;
767
0c97d7dd
MV
768 break;
769 }
770 thread_count--;
2e77f720
LC
771
772 /* If there's only one other thread, it could be the signal delivery
773 thread, so we need to notify it to shut down by closing its read pipe.
774 If it's not the signal delivery thread, then closing the read pipe isn't
775 going to hurt. */
776 if (thread_count <= 1)
777 scm_i_close_signal_pipe ();
778
0c97d7dd
MV
779 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
780
f60a7648 781 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
7f22442b 782
8e76ce94 783#if SCM_USE_PTHREAD_THREADS
7f22442b 784 GC_unregister_my_thread ();
653ccd78 785#endif
d823b11b
MV
786}
787
9de87eea 788static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
d823b11b 789
9de87eea
MV
790static void
791init_thread_key (void)
792{
f60a7648 793 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
9de87eea 794}
d823b11b 795
cde24ce1
AW
796/* Perform any initializations necessary to make the current thread
797 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
798 if necessary.
a54a94b3 799
9de87eea
MV
800 BASE is the stack base to use with GC.
801
 802 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
803 which case the default dynamic state is used.
804
cde24ce1 805 Returns zero when the thread was already known to Guile; otherwise
9de87eea 806 returns 1.
cde24ce1
AW
807
808 Note that it could be the case that the thread was known
809 to Guile, but not in guile mode (because we are within a
810 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
811 be sure. New threads are put into guile mode implicitly. */
9de87eea
MV
812
813static int
12c1d861 814scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
d823b11b 815{
9de87eea
MV
816 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
817
cde24ce1
AW
818 if (SCM_I_CURRENT_THREAD)
819 {
820 /* Thread is already known to Guile.
821 */
822 return 0;
823 }
824 else
9de87eea
MV
825 {
826 /* This thread has not been guilified yet.
827 */
828
829 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
830 if (scm_initialized_p == 0)
831 {
832 /* First thread ever to enter Guile. Run the full
833 initialization.
834 */
835 scm_i_init_guile (base);
12c1d861 836
8e76ce94 837#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
12c1d861
AW
838 /* Allow other threads to come in later. */
839 GC_allow_register_threads ();
4000d064 840#endif
12c1d861 841
9de87eea
MV
842 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
843 }
844 else
845 {
846 /* Guile is already initialized, but this thread enters it for
847 the first time. Only initialize this thread.
848 */
849 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
12c1d861
AW
850
851 /* Register this thread with libgc. */
8e76ce94 852#if SCM_USE_PTHREAD_THREADS
12c1d861 853 GC_register_my_thread (base);
8e76ce94 854#endif
12c1d861 855
9de87eea
MV
856 guilify_self_1 (base);
857 guilify_self_2 (parent);
858 }
859 return 1;
860 }
d823b11b
MV
861}
862
12c1d861
AW
863void
864scm_init_guile ()
d823b11b 865{
12c1d861
AW
866 struct GC_stack_base stack_base;
867
868 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
869 scm_i_init_thread_for_guile (&stack_base,
870 scm_i_default_dynamic_state);
9de87eea
MV
871 else
872 {
12c1d861 873 fprintf (stderr, "Failed to get stack base for current thread.\n");
b3da54d1 874 exit (EXIT_FAILURE);
9de87eea 875 }
a54a94b3
MV
876}
877
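/* Illustrative sketch added by the editor (not part of the original
   threads.c): typical use of scm_init_guile from an application's
   main(), as an alternative to scm_with_guile when the whole program
   stays in Guile mode afterwards.  The evaluated string is just an
   example. */
#if 0 /* example only */
#include <libguile.h>

int
main (int argc, char **argv)
{
  scm_init_guile ();     /* the calling thread enters Guile mode */
  scm_c_eval_string ("(display \"hello from Guile\\n\")");
  return 0;
}
#endif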
12c1d861 878struct with_guile_args
cde24ce1
AW
879{
880 GC_fn_type func;
881 void *data;
12c1d861 882 SCM parent;
cde24ce1
AW
883};
884
885static void *
886with_guile_trampoline (void *data)
887{
12c1d861 888 struct with_guile_args *args = data;
cde24ce1
AW
889
890 return scm_c_with_continuation_barrier (args->func, args->data);
891}
892
12c1d861
AW
893static void *
894with_guile_and_parent (struct GC_stack_base *base, void *data)
9de87eea
MV
895{
896 void *res;
cde24ce1
AW
897 int new_thread;
898 scm_i_thread *t;
12c1d861 899 struct with_guile_args *args = data;
2e77f720 900
12c1d861 901 new_thread = scm_i_init_thread_for_guile (base, args->parent);
cde24ce1
AW
902 t = SCM_I_CURRENT_THREAD;
903 if (new_thread)
2e77f720 904 {
cde24ce1
AW
905 /* We are in Guile mode. */
906 assert (t->guile_mode);
907
12c1d861 908 res = scm_c_with_continuation_barrier (args->func, args->data);
cde24ce1
AW
909
910 /* Leave Guile mode. */
911 t->guile_mode = 0;
912 }
913 else if (t->guile_mode)
914 {
915 /* Already in Guile mode. */
12c1d861 916 res = scm_c_with_continuation_barrier (args->func, args->data);
2e77f720 917 }
74926120 918 else
cde24ce1 919 {
cde24ce1
AW
920 /* We are not in Guile mode, either because we are not within a
921 scm_with_guile, or because we are within a scm_without_guile.
72e6b608 922
cde24ce1
AW
923 This call to scm_with_guile() could happen from anywhere on the
924 stack, and in particular lower on the stack than when it was
925 when this thread was first guilified. Thus, `base' must be
926 updated. */
927#if SCM_STACK_GROWS_UP
12c1d861
AW
928 if (SCM_STACK_PTR (base->mem_base) < t->base)
929 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 930#else
12c1d861
AW
931 if (SCM_STACK_PTR (base->mem_base) > t->base)
932 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 933#endif
72e6b608 934
cde24ce1 935 t->guile_mode = 1;
12c1d861 936 res = with_gc_active (with_guile_trampoline, args);
cde24ce1
AW
937 t->guile_mode = 0;
938 }
939 return res;
72e6b608 940}
3d1af79f 941
12c1d861
AW
942static void *
943scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
944{
945 struct with_guile_args args;
946
947 args.func = func;
948 args.data = data;
949 args.parent = parent;
950
951 return GC_call_with_stack_base (with_guile_and_parent, &args);
952}
953
954void *
955scm_with_guile (void *(*func)(void *), void *data)
956{
957 return scm_i_with_guile_and_parent (func, data,
958 scm_i_default_dynamic_state);
959}
960
9de87eea
MV
961void *
962scm_without_guile (void *(*func)(void *), void *data)
d823b11b 963{
72e6b608 964 void *result;
cde24ce1 965 scm_i_thread *t = SCM_I_CURRENT_THREAD;
72e6b608 966
cde24ce1 967 if (t->guile_mode)
72e6b608 968 {
cde24ce1
AW
969 SCM_I_CURRENT_THREAD->guile_mode = 0;
970 result = with_gc_inactive (func, data);
971 SCM_I_CURRENT_THREAD->guile_mode = 1;
72e6b608
LC
972 }
973 else
cde24ce1 974 /* Otherwise we're not in guile mode, so nothing to do. */
72e6b608
LC
975 result = func (data);
976
977 return result;
9de87eea
MV
978}
979
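/* Illustrative sketch added by the editor (not part of the original
   threads.c): pairing scm_with_guile and scm_without_guile.  A foreign
   thread enters Guile mode around work that touches SCM values and
   drops back out of it around a long blocking operation, so the
   collector neither scans nor stops the thread meanwhile.  The helper
   names are hypothetical. */
#if 0 /* example only */
static void *
blocking_io_example (void *data)
{
  /* No SCM values may be created or used here. */
  return NULL;
}

static void *
scheme_work_example (void *data)
{
  /* SCM values are fine here ... */
  scm_without_guile (blocking_io_example, data);  /* ... but not here */
  return NULL;
}

static void
foreign_thread_example (void *data)
{
  scm_with_guile (scheme_work_example, data);
}
#endif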
72e6b608 980\f
9de87eea
MV
981/*** Thread creation */
982
983typedef struct {
984 SCM parent;
985 SCM thunk;
986 SCM handler;
76da80e7 987 SCM thread;
9de87eea
MV
988 scm_i_pthread_mutex_t mutex;
989 scm_i_pthread_cond_t cond;
990} launch_data;
d823b11b 991
9de87eea
MV
992static void *
993really_launch (void *d)
994{
995 launch_data *data = (launch_data *)d;
996 SCM thunk = data->thunk, handler = data->handler;
997 scm_i_thread *t;
d823b11b 998
9de87eea 999 t = SCM_I_CURRENT_THREAD;
a54a94b3 1000
9de87eea
MV
1001 scm_i_scm_pthread_mutex_lock (&data->mutex);
1002 data->thread = scm_current_thread ();
1003 scm_i_pthread_cond_signal (&data->cond);
1004 scm_i_pthread_mutex_unlock (&data->mutex);
1005
1006 if (SCM_UNBNDP (handler))
1007 t->result = scm_call_0 (thunk);
1008 else
1009 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
1010
1011 return 0;
d823b11b
MV
1012}
1013
9de87eea
MV
1014static void *
1015launch_thread (void *d)
1016{
1017 launch_data *data = (launch_data *)d;
1018 scm_i_pthread_detach (scm_i_pthread_self ());
1019 scm_i_with_guile_and_parent (really_launch, d, data->parent);
1020 return NULL;
1021}
1022
1023SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
d823b11b 1024 (SCM thunk, SCM handler),
9de87eea
MV
1025 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
1026 "returning a new thread object representing the thread. The procedure\n"
1027 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
1028 "\n"
1029 "When @var{handler} is specified, then @var{thunk} is called from\n"
1030 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
1031 "handler. This catch is established inside the continuation barrier.\n"
1032 "\n"
1033 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
1034 "the @emph{exit value} of the thread and the thread is terminated.")
d823b11b
MV
1035#define FUNC_NAME s_scm_call_with_new_thread
1036{
9de87eea
MV
1037 launch_data data;
1038 scm_i_pthread_t id;
1039 int err;
d823b11b 1040
9de87eea
MV
1041 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
1042 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
1043 handler, SCM_ARG2, FUNC_NAME);
1044
9f7537dc 1045 GC_collect_a_little ();
9de87eea
MV
1046 data.parent = scm_current_dynamic_state ();
1047 data.thunk = thunk;
1048 data.handler = handler;
1049 data.thread = SCM_BOOL_F;
1050 scm_i_pthread_mutex_init (&data.mutex, NULL);
1051 scm_i_pthread_cond_init (&data.cond, NULL);
1052
1053 scm_i_scm_pthread_mutex_lock (&data.mutex);
1054 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
1055 if (err)
1056 {
1057 scm_i_pthread_mutex_unlock (&data.mutex);
1058 errno = err;
1059 scm_syserror (NULL);
1060 }
dbab8aaa
AW
1061
1062 while (scm_is_false (data.thread))
1063 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1064
9de87eea 1065 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1066
9de87eea 1067 return data.thread;
d823b11b
MV
1068}
1069#undef FUNC_NAME
1070
9de87eea
MV
1071typedef struct {
1072 SCM parent;
1073 scm_t_catch_body body;
1074 void *body_data;
1075 scm_t_catch_handler handler;
1076 void *handler_data;
1077 SCM thread;
1078 scm_i_pthread_mutex_t mutex;
1079 scm_i_pthread_cond_t cond;
1080} spawn_data;
1081
1082static void *
1083really_spawn (void *d)
1084{
1085 spawn_data *data = (spawn_data *)d;
1086 scm_t_catch_body body = data->body;
1087 void *body_data = data->body_data;
1088 scm_t_catch_handler handler = data->handler;
1089 void *handler_data = data->handler_data;
1090 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1091
1092 scm_i_scm_pthread_mutex_lock (&data->mutex);
1093 data->thread = scm_current_thread ();
1094 scm_i_pthread_cond_signal (&data->cond);
1095 scm_i_pthread_mutex_unlock (&data->mutex);
1096
1097 if (handler == NULL)
1098 t->result = body (body_data);
1099 else
1100 t->result = scm_internal_catch (SCM_BOOL_T,
1101 body, body_data,
1102 handler, handler_data);
1103
1104 return 0;
1105}
1106
1107static void *
1108spawn_thread (void *d)
1109{
1110 spawn_data *data = (spawn_data *)d;
1111 scm_i_pthread_detach (scm_i_pthread_self ());
1112 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1113 return NULL;
1114}
1115
1116SCM
1117scm_spawn_thread (scm_t_catch_body body, void *body_data,
1118 scm_t_catch_handler handler, void *handler_data)
1119{
1120 spawn_data data;
1121 scm_i_pthread_t id;
1122 int err;
1123
1124 data.parent = scm_current_dynamic_state ();
1125 data.body = body;
1126 data.body_data = body_data;
1127 data.handler = handler;
1128 data.handler_data = handler_data;
1129 data.thread = SCM_BOOL_F;
1130 scm_i_pthread_mutex_init (&data.mutex, NULL);
1131 scm_i_pthread_cond_init (&data.cond, NULL);
1132
1133 scm_i_scm_pthread_mutex_lock (&data.mutex);
1134 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1135 if (err)
1136 {
1137 scm_i_pthread_mutex_unlock (&data.mutex);
1138 errno = err;
1139 scm_syserror (NULL);
1140 }
dbab8aaa
AW
1141
1142 while (scm_is_false (data.thread))
1143 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1144
9de87eea 1145 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1146
0f4f2d9a
LC
1147 assert (SCM_I_IS_THREAD (data.thread));
1148
9de87eea
MV
1149 return data.thread;
1150}
1151
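/* Illustrative sketch added by the editor (not part of the original
   threads.c): spawning a thread from C with scm_spawn_thread, giving it
   a body and an error handler, then collecting its exit value with
   scm_join_thread.  The body and handler shown are hypothetical. */
#if 0 /* example only */
static SCM
spawn_body_example (void *data)
{
  return scm_from_int (42);
}

static SCM
spawn_handler_example (void *data, SCM key, SCM args)
{
  return SCM_BOOL_F;      /* value returned if the body throws */
}

static SCM
spawn_example (void)
{
  SCM t = scm_spawn_thread (spawn_body_example, NULL,
                            spawn_handler_example, NULL);
  return scm_join_thread (t);   /* => 42, the thread's exit value */
}
#endif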
29717c89
MD
1152SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1153 (),
1154"Move the calling thread to the end of the scheduling queue.")
1155#define FUNC_NAME s_scm_yield
1156{
9de87eea 1157 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1158}
1159#undef FUNC_NAME
1160
2e77f720
LC
1161SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1162 (SCM thread),
1163"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1164"cannot be the current thread, and if @var{thread} has already terminated or "
1165"been signaled to terminate, this function is a no-op.")
1166#define FUNC_NAME s_scm_cancel_thread
1167{
1168 scm_i_thread *t = NULL;
1169
1170 SCM_VALIDATE_THREAD (1, thread);
1171 t = SCM_I_THREAD_DATA (thread);
86a597f8 1172 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1173 if (!t->canceled)
1174 {
1175 t->canceled = 1;
86a597f8 1176 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1177 scm_i_pthread_cancel (t->pthread);
1178 }
1179 else
86a597f8 1180 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1181
1182 return SCM_UNSPECIFIED;
1183}
1184#undef FUNC_NAME
1185
1186SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1187 (SCM thread, SCM proc),
1188"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1189"This handler will be called when the thread exits.")
1190#define FUNC_NAME s_scm_set_thread_cleanup_x
1191{
1192 scm_i_thread *t;
1193
1194 SCM_VALIDATE_THREAD (1, thread);
1195 if (!scm_is_false (proc))
1196 SCM_VALIDATE_THUNK (2, proc);
1197
2e77f720 1198 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1199 scm_i_pthread_mutex_lock (&t->admin_mutex);
1200
2e77f720
LC
1201 if (!(t->exited || t->canceled))
1202 t->cleanup_handler = proc;
1203
86a597f8 1204 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1205
1206 return SCM_UNSPECIFIED;
1207}
1208#undef FUNC_NAME
1209
1210SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1211 (SCM thread),
1212"Return the cleanup handler installed for the thread @var{thread}.")
1213#define FUNC_NAME s_scm_thread_cleanup
1214{
1215 scm_i_thread *t;
1216 SCM ret;
1217
1218 SCM_VALIDATE_THREAD (1, thread);
1219
2e77f720 1220 t = SCM_I_THREAD_DATA (thread);
86a597f8 1221 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1222 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1223 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1224
1225 return ret;
1226}
1227#undef FUNC_NAME
1228
6180e336
NJ
1229SCM scm_join_thread (SCM thread)
1230{
1231 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1232}
1233
1234SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1235 (SCM thread, SCM timeout, SCM timeoutval),
d823b11b
MV
1236"Suspend execution of the calling thread until the target @var{thread} "
1237"terminates, unless the target @var{thread} has already terminated. ")
6180e336 1238#define FUNC_NAME s_scm_join_thread_timed
5f05c406 1239{
9de87eea 1240 scm_i_thread *t;
6180e336
NJ
1241 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1242 SCM res = SCM_BOOL_F;
1243
1244 if (! (SCM_UNBNDP (timeoutval)))
1245 res = timeoutval;
d823b11b
MV
1246
1247 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1248 if (scm_is_eq (scm_current_thread (), thread))
2e77f720 1249 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
d823b11b 1250
9de87eea 1251 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1252 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1253
6180e336
NJ
1254 if (! SCM_UNBNDP (timeout))
1255 {
1256 to_timespec (timeout, &ctimeout);
1257 timeout_ptr = &ctimeout;
1258 }
1259
1260 if (t->exited)
1261 res = t->result;
1262 else
d823b11b 1263 {
9de87eea
MV
1264 while (1)
1265 {
74926120 1266 int err = block_self (t->join_queue, thread, &t->admin_mutex,
6180e336
NJ
1267 timeout_ptr);
1268 if (err == 0)
1269 {
1270 if (t->exited)
1271 {
1272 res = t->result;
1273 break;
1274 }
1275 }
1276 else if (err == ETIMEDOUT)
9de87eea 1277 break;
6180e336 1278
86a597f8 1279 scm_i_pthread_mutex_unlock (&t->admin_mutex);
9de87eea 1280 SCM_TICK;
86a597f8 1281 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
21346c4f
NJ
1282
1283 /* Check for exit again, since we just released and
1284 reacquired the admin mutex, before the next block_self
1285 call (which would block forever if t has already
1286 exited). */
1287 if (t->exited)
1288 {
1289 res = t->result;
1290 break;
1291 }
9de87eea 1292 }
d823b11b 1293 }
9de87eea 1294
86a597f8 1295 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 1296
d823b11b 1297 return res;
5f05c406
MV
1298}
1299#undef FUNC_NAME
1300
6180e336
NJ
1301SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1302 (SCM obj),
1303 "Return @code{#t} if @var{obj} is a thread.")
1304#define FUNC_NAME s_scm_thread_p
1305{
1306 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1307}
1308#undef FUNC_NAME
5f05c406 1309
4079f87e 1310
9de87eea
MV
1311static size_t
1312fat_mutex_free (SCM mx)
76da80e7 1313{
9de87eea
MV
1314 fat_mutex *m = SCM_MUTEX_DATA (mx);
1315 scm_i_pthread_mutex_destroy (&m->lock);
76da80e7
MV
1316 return 0;
1317}
1318
1319static int
9de87eea 1320fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1321{
9de87eea
MV
1322 fat_mutex *m = SCM_MUTEX_DATA (mx);
1323 scm_puts ("#<mutex ", port);
1324 scm_uintprint ((scm_t_bits)m, 16, port);
1325 scm_puts (">", port);
1326 return 1;
76da80e7
MV
1327}
1328
76da80e7 1329static SCM
6180e336 1330make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1331{
9de87eea
MV
1332 fat_mutex *m;
1333 SCM mx;
1334
1335 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1336 scm_i_pthread_mutex_init (&m->lock, NULL);
1337 m->owner = SCM_BOOL_F;
adc085f1 1338 m->level = 0;
6180e336 1339
adc085f1 1340 m->recursive = recursive;
6180e336
NJ
1341 m->unchecked_unlock = unchecked_unlock;
1342 m->allow_external_unlock = external_unlock;
1343
9de87eea
MV
1344 m->waiting = SCM_EOL;
1345 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1346 m->waiting = make_queue ();
1347 return mx;
76da80e7
MV
1348}
1349
6180e336
NJ
1350SCM scm_make_mutex (void)
1351{
1352 return scm_make_mutex_with_flags (SCM_EOL);
1353}
1354
2a1d0688
NJ
1355SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1356SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1357SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1358
1359SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1360 (SCM flags),
9de87eea 1361 "Create a new mutex. ")
6180e336 1362#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1363{
6180e336
NJ
1364 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1365
1366 SCM ptr = flags;
1367 while (! scm_is_null (ptr))
1368 {
1369 SCM flag = SCM_CAR (ptr);
1370 if (scm_is_eq (flag, unchecked_unlock_sym))
1371 unchecked_unlock = 1;
1372 else if (scm_is_eq (flag, allow_external_unlock_sym))
1373 external_unlock = 1;
1374 else if (scm_is_eq (flag, recursive_sym))
1375 recursive = 1;
74926120 1376 else
2a1d0688 1377 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1378 ptr = SCM_CDR (ptr);
1379 }
1380 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
76da80e7
MV
1381}
1382#undef FUNC_NAME
1383
9de87eea 1384SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
9bc4701c 1385 (void),
9de87eea
MV
1386 "Create a new recursive mutex. ")
1387#define FUNC_NAME s_scm_make_recursive_mutex
9bc4701c 1388{
6180e336 1389 return make_fat_mutex (1, 0, 0);
9bc4701c
MD
1390}
1391#undef FUNC_NAME
1392
6180e336
NJ
1393SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1394
1395static SCM
adc085f1 1396fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
9de87eea
MV
1397{
1398 fat_mutex *m = SCM_MUTEX_DATA (mutex);
6180e336 1399
adc085f1 1400 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
6180e336
NJ
1401 SCM err = SCM_BOOL_F;
1402
1403 struct timeval current_time;
9de87eea
MV
1404
1405 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1406
1407 while (1)
9de87eea 1408 {
adc085f1 1409 if (m->level == 0)
6180e336 1410 {
adc085f1 1411 m->owner = new_owner;
6180e336 1412 m->level++;
74926120 1413
adc085f1 1414 if (SCM_I_IS_THREAD (new_owner))
6180e336 1415 {
adc085f1 1416 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
ccb80964 1417
4f39f31e
LC
1418 /* FIXME: The order in which `t->admin_mutex' and
1419 `m->lock' are taken differs from that in
1420 `on_thread_exit', potentially leading to deadlocks. */
6180e336 1421 scm_i_pthread_mutex_lock (&t->admin_mutex);
a0faf7dd
LC
1422
1423 /* Only keep a weak reference to MUTEX so that it's not
f57fdf07
LC
1424 retained when not referenced elsewhere (bug #27450).
1425 The weak pair itself is eventually removed when MUTEX
1426 is unlocked. Note that `t->mutexes' lists mutexes
1427 currently held by T, so it should be small. */
a0faf7dd
LC
1428 t->mutexes = scm_weak_car_pair (mutex, t->mutexes);
1429
6180e336 1430 scm_i_pthread_mutex_unlock (&t->admin_mutex);
6180e336 1431 }
adc085f1
JG
1432 *ret = 1;
1433 break;
1434 }
1435 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1436 {
1437 m->owner = new_owner;
1438 err = scm_cons (scm_abandoned_mutex_error_key,
1439 scm_from_locale_string ("lock obtained on abandoned "
1440 "mutex"));
1441 *ret = 1;
1442 break;
1443 }
1444 else if (scm_is_eq (m->owner, new_owner))
1445 {
1446 if (m->recursive)
1447 {
1448 m->level++;
74926120 1449 *ret = 1;
adc085f1
JG
1450 }
1451 else
6180e336 1452 {
adc085f1
JG
1453 err = scm_cons (scm_misc_error_key,
1454 scm_from_locale_string ("mutex already locked "
1455 "by thread"));
1456 *ret = 0;
1457 }
74926120 1458 break;
adc085f1 1459 }
9de87eea 1460 else
9de87eea 1461 {
74926120 1462 if (timeout != NULL)
adc085f1
JG
1463 {
1464 gettimeofday (&current_time, NULL);
1465 if (current_time.tv_sec > timeout->tv_sec ||
1466 (current_time.tv_sec == timeout->tv_sec &&
1467 current_time.tv_usec * 1000 > timeout->tv_nsec))
6180e336 1468 {
adc085f1
JG
1469 *ret = 0;
1470 break;
6180e336 1471 }
6180e336 1472 }
37a52039 1473 block_self (m->waiting, mutex, &m->lock, timeout);
9de87eea
MV
1474 scm_i_pthread_mutex_unlock (&m->lock);
1475 SCM_TICK;
1476 scm_i_scm_pthread_mutex_lock (&m->lock);
1477 }
1478 }
1479 scm_i_pthread_mutex_unlock (&m->lock);
6180e336 1480 return err;
9de87eea
MV
1481}
1482
6180e336
NJ
1483SCM scm_lock_mutex (SCM mx)
1484{
adc085f1 1485 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
6180e336
NJ
1486}
1487
adc085f1
JG
1488SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1489 (SCM m, SCM timeout, SCM owner),
b7e64f8b
BT
1490 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1491 "thread blocks until the mutex becomes available. The function\n"
1492 "returns when the calling thread owns the lock on @var{m}.\n"
1493 "Locking a mutex that a thread already owns will succeed right\n"
1494 "away and will not block the thread. That is, Guile's mutexes\n"
1495 "are @emph{recursive}.")
6180e336 1496#define FUNC_NAME s_scm_lock_mutex_timed
9bc4701c 1497{
6180e336
NJ
1498 SCM exception;
1499 int ret = 0;
1500 scm_t_timespec cwaittime, *waittime = NULL;
76da80e7 1501
6180e336
NJ
1502 SCM_VALIDATE_MUTEX (1, m);
1503
1504 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1505 {
1506 to_timespec (timeout, &cwaittime);
1507 waittime = &cwaittime;
1508 }
1509
97ec95b7
LC
1510 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1511 SCM_VALIDATE_THREAD (3, owner);
1512
adc085f1 1513 exception = fat_mutex_lock (m, waittime, owner, &ret);
6180e336
NJ
1514 if (!scm_is_false (exception))
1515 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1516 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c 1517}
76da80e7 1518#undef FUNC_NAME
9bc4701c 1519
2a3db25e
MW
1520static void
1521lock_mutex_return_void (SCM mx)
1522{
1523 (void) scm_lock_mutex (mx);
1524}
1525
1526static void
1527unlock_mutex_return_void (SCM mx)
1528{
1529 (void) scm_unlock_mutex (mx);
1530}
1531
a4d106c7 1532void
661ae7ab 1533scm_dynwind_lock_mutex (SCM mutex)
a4d106c7 1534{
2a3db25e 1535 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
661ae7ab 1536 SCM_F_WIND_EXPLICITLY);
2a3db25e 1537 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
661ae7ab 1538 SCM_F_WIND_EXPLICITLY);
a4d106c7
MV
1539}
1540
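/* Illustrative sketch added by the editor (not part of the original
   threads.c): scm_dynwind_lock_mutex in use.  The mutex is released
   again when the dynwind context is left, whether normally or through a
   non-local exit such as an error. */
#if 0 /* example only */
static void
dynwind_lock_example (SCM mutex)
{
  scm_dynwind_begin (0);
  scm_dynwind_lock_mutex (mutex);

  /* ... code that may throw; the mutex is unlocked on the way out ... */

  scm_dynwind_end ();
}
#endif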
9bc4701c 1541SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
ba1b7223 1542 (SCM mutex),
9bc4701c
MD
1543"Try to lock @var{mutex}. If the mutex is already locked by someone "
1544"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1545#define FUNC_NAME s_scm_try_mutex
1546{
6180e336
NJ
1547 SCM exception;
1548 int ret = 0;
1549 scm_t_timespec cwaittime, *waittime = NULL;
9de87eea 1550
ba1b7223 1551 SCM_VALIDATE_MUTEX (1, mutex);
6180e336
NJ
1552
1553 to_timespec (scm_from_int(0), &cwaittime);
1554 waittime = &cwaittime;
74926120 1555
adc085f1 1556 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
6180e336
NJ
1557 if (!scm_is_false (exception))
1558 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1559 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9de87eea
MV
1560}
1561#undef FUNC_NAME
76da80e7 1562
6180e336
NJ
1563/*** Fat condition variables */
1564
1565typedef struct {
1566 scm_i_pthread_mutex_t lock;
1567 SCM waiting; /* the threads waiting for this condition. */
1568} fat_cond;
1569
1570#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1571#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1572
1573static int
1574fat_mutex_unlock (SCM mutex, SCM cond,
1575 const scm_t_timespec *waittime, int relock)
9de87eea 1576{
7f991c7d 1577 SCM owner;
6180e336
NJ
1578 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1579 fat_cond *c = NULL;
1580 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1581 int err = 0, ret = 0;
9de87eea
MV
1582
1583 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1 1584
7f991c7d 1585 owner = m->owner;
adc085f1 1586
d31ae2c3 1587 if (!scm_is_eq (owner, t->handle))
9bc4701c 1588 {
adc085f1 1589 if (m->level == 0)
6180e336
NJ
1590 {
1591 if (!m->unchecked_unlock)
2a1d0688
NJ
1592 {
1593 scm_i_pthread_mutex_unlock (&m->lock);
1594 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1595 }
d31ae2c3 1596 owner = t->handle;
6180e336
NJ
1597 }
1598 else if (!m->allow_external_unlock)
2a1d0688
NJ
1599 {
1600 scm_i_pthread_mutex_unlock (&m->lock);
1601 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1602 }
6180e336
NJ
1603 }
1604
1605 if (! (SCM_UNBNDP (cond)))
1606 {
6180e336
NJ
1607 c = SCM_CONDVAR_DATA (cond);
1608 while (1)
1609 {
1610 int brk = 0;
1611
6180e336
NJ
1612 if (m->level > 0)
1613 m->level--;
adc085f1 1614 if (m->level == 0)
f57fdf07
LC
1615 {
1616 /* Change the owner of MUTEX. */
1617 t->mutexes = scm_delq_x (mutex, t->mutexes);
1618 m->owner = unblock_from_queue (m->waiting);
1619 }
adc085f1 1620
6180e336 1621 t->block_asyncs++;
74926120 1622
d2a51087
NJ
1623 err = block_self (c->waiting, cond, &m->lock, waittime);
1624 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
1625
1626 if (err == 0)
1627 {
1628 ret = 1;
1629 brk = 1;
1630 }
1631 else if (err == ETIMEDOUT)
1632 {
1633 ret = 0;
1634 brk = 1;
1635 }
1636 else if (err != EINTR)
74926120 1637 {
6180e336 1638 errno = err;
6180e336 1639 scm_syserror (NULL);
74926120 1640 }
6180e336
NJ
1641
1642 if (brk)
1643 {
1644 if (relock)
adc085f1 1645 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
4201062d 1646 t->block_asyncs--;
6180e336
NJ
1647 break;
1648 }
74926120 1649
6180e336
NJ
1650 t->block_asyncs--;
1651 scm_async_click ();
74926120 1652
6180e336
NJ
1653 scm_remember_upto_here_2 (cond, mutex);
1654
1655 scm_i_scm_pthread_mutex_lock (&m->lock);
1656 }
9bc4701c 1657 }
9de87eea 1658 else
6180e336
NJ
1659 {
1660 if (m->level > 0)
1661 m->level--;
74926120 1662 if (m->level == 0)
f57fdf07
LC
1663 {
1664 /* Change the owner of MUTEX. */
1665 t->mutexes = scm_delq_x (mutex, t->mutexes);
1666 m->owner = unblock_from_queue (m->waiting);
1667 }
74926120 1668
6180e336
NJ
1669 scm_i_pthread_mutex_unlock (&m->lock);
1670 ret = 1;
1671 }
9de87eea 1672
6180e336 1673 return ret;
9bc4701c 1674}
9bc4701c 1675
6180e336
NJ
1676SCM scm_unlock_mutex (SCM mx)
1677{
1678 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
9bc4701c 1679}
9bc4701c 1680
6180e336
NJ
1681SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1682 (SCM mx, SCM cond, SCM timeout),
9bc4701c
MD
1683"Unlocks @var{mutex} if the calling thread owns the lock on "
1684"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1685"thread results in undefined behaviour. Once a mutex has been unlocked, "
1686"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1687"lock. Every call to @code{lock-mutex} by this thread must be matched "
1688"with a call to @code{unlock-mutex}. Only the last call to "
1689"@code{unlock-mutex} will actually unlock the mutex. ")
6180e336 1690#define FUNC_NAME s_scm_unlock_mutex_timed
9bc4701c 1691{
6180e336
NJ
1692 scm_t_timespec cwaittime, *waittime = NULL;
1693
9bc4701c 1694 SCM_VALIDATE_MUTEX (1, mx);
6180e336
NJ
1695 if (! (SCM_UNBNDP (cond)))
1696 {
1697 SCM_VALIDATE_CONDVAR (2, cond);
1698
1699 if (! (SCM_UNBNDP (timeout)))
1700 {
1701 to_timespec (timeout, &cwaittime);
1702 waittime = &cwaittime;
1703 }
1704 }
1705
1706 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c
MD
1707}
1708#undef FUNC_NAME
1709
6180e336
NJ
1710SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1711 (SCM obj),
1712 "Return @code{#t} if @var{obj} is a mutex.")
1713#define FUNC_NAME s_scm_mutex_p
1714{
1715 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1716}
74926120 1717#undef FUNC_NAME
9de87eea
MV
1718
1719SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1720 (SCM mx),
1721 "Return the thread owning @var{mx}, or @code{#f}.")
1722#define FUNC_NAME s_scm_mutex_owner
1723{
adc085f1
JG
1724 SCM owner;
1725 fat_mutex *m = NULL;
1726
9de87eea 1727 SCM_VALIDATE_MUTEX (1, mx);
adc085f1
JG
1728 m = SCM_MUTEX_DATA (mx);
1729 scm_i_pthread_mutex_lock (&m->lock);
1730 owner = m->owner;
1731 scm_i_pthread_mutex_unlock (&m->lock);
1732
1733 return owner;
9de87eea
MV
1734}
1735#undef FUNC_NAME
1736
1737SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1738 (SCM mx),
adc085f1 1739 "Return the lock level of mutex @var{mx}.")
9de87eea
MV
1740#define FUNC_NAME s_scm_mutex_level
1741{
1742 SCM_VALIDATE_MUTEX (1, mx);
1743 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1744}
1745#undef FUNC_NAME
1746
adc085f1
JG
1747SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1748 (SCM mx),
1749 "Returns @code{#t} if the mutex @var{mx} is locked.")
1750#define FUNC_NAME s_scm_mutex_locked_p
1751{
1752 SCM_VALIDATE_MUTEX (1, mx);
1753 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1754}
1755#undef FUNC_NAME
9de87eea 1756
9de87eea
MV
1757static int
1758fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1759{
1760 fat_cond *c = SCM_CONDVAR_DATA (cv);
1761 scm_puts ("#<condition-variable ", port);
1762 scm_uintprint ((scm_t_bits)c, 16, port);
1763 scm_puts (">", port);
1764 return 1;
1765}
9bc4701c 1766
d823b11b
MV
1767SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1768 (void),
1769 "Make a new condition variable.")
1770#define FUNC_NAME s_scm_make_condition_variable
5f05c406 1771{
9de87eea
MV
1772 fat_cond *c;
1773 SCM cv;
1774
1775 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
9de87eea
MV
1776 c->waiting = SCM_EOL;
1777 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1778 c->waiting = make_queue ();
d823b11b 1779 return cv;
5f05c406 1780}
d823b11b 1781#undef FUNC_NAME
5f05c406 1782
d823b11b
MV
1783SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1784 (SCM cv, SCM mx, SCM t),
b7e64f8b
BT
1785"Wait until condition variable @var{cv} has been signalled. While waiting, "
1786"mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1787"is locked again when this function returns. When @var{t} is given, "
d823b11b
MV
1788"it specifies a point in time where the waiting should be aborted. It "
1789"can be either a integer as returned by @code{current-time} or a pair "
1790"as returned by @code{gettimeofday}. When the waiting is aborted the "
1791"mutex is locked and @code{#f} is returned. When the condition "
1792"variable is in fact signalled, the mutex is also locked and @code{#t} "
1793"is returned. ")
1794#define FUNC_NAME s_scm_timed_wait_condition_variable
5f05c406 1795{
9de87eea 1796 scm_t_timespec waittime, *waitptr = NULL;
d823b11b
MV
1797
1798 SCM_VALIDATE_CONDVAR (1, cv);
1799 SCM_VALIDATE_MUTEX (2, mx);
74926120 1800
d823b11b
MV
1801 if (!SCM_UNBNDP (t))
1802 {
6180e336 1803 to_timespec (t, &waittime);
9de87eea 1804 waitptr = &waittime;
d823b11b
MV
1805 }
1806
2a1d0688 1807 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
5f05c406 1808}
d823b11b 1809#undef FUNC_NAME
5f05c406 1810
9de87eea
MV
1811static void
1812fat_cond_signal (fat_cond *c)
1813{
9de87eea 1814 unblock_from_queue (c->waiting);
9de87eea
MV
1815}
1816
d823b11b
MV
1817SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1818 (SCM cv),
1819 "Wake up one thread that is waiting for @var{cv}")
1820#define FUNC_NAME s_scm_signal_condition_variable
5f05c406 1821{
d823b11b 1822 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1823 fat_cond_signal (SCM_CONDVAR_DATA (cv));
d823b11b 1824 return SCM_BOOL_T;
5f05c406 1825}
d823b11b 1826#undef FUNC_NAME
5f05c406 1827
9de87eea
MV
1828static void
1829fat_cond_broadcast (fat_cond *c)
1830{
9de87eea
MV
1831 while (scm_is_true (unblock_from_queue (c->waiting)))
1832 ;
9de87eea
MV
1833}
1834
d823b11b
MV
1835SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1836 (SCM cv),
1837 "Wake up all threads that are waiting for @var{cv}. ")
1838#define FUNC_NAME s_scm_broadcast_condition_variable
5f05c406 1839{
d823b11b 1840 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1841 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
d823b11b 1842 return SCM_BOOL_T;
5f05c406 1843}
d823b11b 1844#undef FUNC_NAME
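/* Illustrative sketch (not part of the original file): the matching
   producer side.  The state change happens under the same mutex the
   waiters hold, and then one waiter is woken;
   scm_broadcast_condition_variable would wake all of them instead.

     scm_lock_mutex (m);
     ready = 1;
     scm_signal_condition_variable (c);
     scm_unlock_mutex (m);
*/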
5f05c406 1845
6180e336
NJ
1846SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1847 (SCM obj),
1848 "Return @code{#t} if @var{obj} is a condition variable.")
1849#define FUNC_NAME s_scm_condition_variable_p
1850{
1851   return SCM_CONDVARP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1852}
1853#undef FUNC_NAME
1854
6087fad9 1855
8c2b3143 1856\f
d823b11b
MV
1857/*** Select */
1858
8c2b3143
LC
1859struct select_args
1860{
1861 int nfds;
6ab4de61
AW
1862 fd_set *read_fds;
1863 fd_set *write_fds;
1864 fd_set *except_fds;
8c2b3143
LC
1865 struct timeval *timeout;
1866
1867 int result;
1868 int errno_value;
1869};
1870
1871static void *
1872do_std_select (void *args)
1873{
1874 struct select_args *select_args;
1875
1876 select_args = (struct select_args *) args;
1877
1878 select_args->result =
1879 select (select_args->nfds,
1880 select_args->read_fds, select_args->write_fds,
1881 select_args->except_fds, select_args->timeout);
1882 select_args->errno_value = errno;
1883
1884 return NULL;
1885}
1886
6ab4de61
AW
1887#if !SCM_HAVE_SYS_SELECT_H
1888static int scm_std_select (int nfds,
1889 fd_set *readfds,
1890 fd_set *writefds,
1891 fd_set *exceptfds,
1892 struct timeval *timeout);
1893#endif
1894
911782b7 1895int
9de87eea 1896scm_std_select (int nfds,
6ab4de61
AW
1897 fd_set *readfds,
1898 fd_set *writefds,
1899 fd_set *exceptfds,
9de87eea
MV
1900 struct timeval *timeout)
1901{
1902 fd_set my_readfds;
1903 int res, eno, wakeup_fd;
1904 scm_i_thread *t = SCM_I_CURRENT_THREAD;
8c2b3143 1905 struct select_args args;
9de87eea
MV
1906
1907 if (readfds == NULL)
1908 {
1909 FD_ZERO (&my_readfds);
1910 readfds = &my_readfds;
1911 }
1912
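  /* Register this thread's sleep pipe with the thread machinery so
     that another thread can interrupt the select by writing a byte to
     its write end (for instance when delivering an async); the read
     end is then watched along with the caller's descriptors below.  If
     asyncs are already pending, SCM_TICK runs them before we commit to
     sleeping.  */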
1913 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1914 SCM_TICK;
1915
1916 wakeup_fd = t->sleep_pipe[0];
9de87eea
MV
1917 FD_SET (wakeup_fd, readfds);
1918 if (wakeup_fd >= nfds)
1919 nfds = wakeup_fd+1;
9de87eea 1920
8c2b3143
LC
1921 args.nfds = nfds;
1922 args.read_fds = readfds;
1923 args.write_fds = writefds;
1924 args.except_fds = exceptfds;
1925 args.timeout = timeout;
1926
1927 /* Explicitly cooperate with the GC. */
1928 scm_without_guile (do_std_select, &args);
1929
1930 res = args.result;
1931 eno = args.errno_value;
1932
1933 t->sleep_fd = -1;
9de87eea
MV
1934 scm_i_reset_sleep (t);
1935
1936 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1937 {
1938 char dummy;
634aa8de
LC
1939 full_read (wakeup_fd, &dummy, 1);
1940
9de87eea
MV
1941 FD_CLR (wakeup_fd, readfds);
1942 res -= 1;
1943 if (res == 0)
1944 {
1945 eno = EINTR;
1946 res = -1;
1947 }
1948 }
d823b11b
MV
1949 errno = eno;
1950 return res;
5f05c406
MV
1951}
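/* Illustrative sketch (not part of the original file): waiting for
   input on a descriptor while staying responsive to Guile-level
   interrupts.  `fd' is assumed to be an open file descriptor and
   handle_wakeup is a made-up helper.  A result of -1 with errno set to
   EINTR means the thread was woken early, e.g. by an async, and the
   caller decides whether to retry.

     fd_set fds;
     struct timeval tv = { 1, 0 };
     FD_ZERO (&fds);
     FD_SET (fd, &fds);
     if (scm_std_select (fd + 1, &fds, NULL, NULL, &tv) < 0
         && errno == EINTR)
       handle_wakeup ();
*/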
1952
9de87eea 1953/* Convenience API for blocking while in guile mode. */
76da80e7 1954
9de87eea 1955#if SCM_USE_PTHREAD_THREADS
92e64b87 1956
2956b071
LC
1957/* It seems reasonable not to run procedures related to mutex and condition
1958   variables within `GC_do_blocking ()' since (i) the GC can operate even
1959 without it, and (ii) the only potential gain would be GC latency. See
1960 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1961 for a discussion of the pros and cons. */
1962
9bc4701c 1963int
9de87eea 1964scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
9bc4701c 1965{
9de87eea 1966 int res = scm_i_pthread_mutex_lock (mutex);
9bc4701c
MD
1967 return res;
1968}
1969
9de87eea 1970static void
2b829bbb 1971do_unlock (void *data)
28d52ebb 1972{
9de87eea 1973 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
28d52ebb
MD
1974}
1975
1976void
661ae7ab 1977scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
28d52ebb 1978{
9de87eea 1979 scm_i_scm_pthread_mutex_lock (mutex);
2b829bbb 1980 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
28d52ebb
MD
1981}
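/* Illustrative sketch (not part of the original file): a typical use of
   scm_dynwind_pthread_mutex_lock from C code that may throw.  The
   mutex name is made up for the example.

     static scm_i_pthread_mutex_t my_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

     scm_dynwind_begin (0);
     scm_dynwind_pthread_mutex_lock (&my_lock);
     ... code that may allocate or raise an exception ...
     scm_dynwind_end ();

   The unwind handler installed above unlocks the mutex both on normal
   exit from the dynwind and when a non-local exit unwinds through it.  */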
1982
9bc4701c 1983int
9de87eea 1984scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
9bc4701c 1985{
4cf72f0b
LC
1986 int res;
1987 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1988
1989 t->held_mutex = mutex;
1990 res = scm_i_pthread_cond_wait (cond, mutex);
1991 t->held_mutex = NULL;
1992
9bc4701c
MD
1993 return res;
1994}
9bc4701c 1995
76da80e7 1996int
9de87eea
MV
1997scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1998 scm_i_pthread_mutex_t *mutex,
1999 const scm_t_timespec *wt)
76da80e7 2000{
4cf72f0b
LC
2001 int res;
2002 scm_i_thread *t = SCM_I_CURRENT_THREAD;
2003
2004 t->held_mutex = mutex;
2005 res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
2006 t->held_mutex = NULL;
2007
9de87eea 2008 return res;
76da80e7
MV
2009}
2010
9de87eea 2011#endif
76da80e7 2012
d823b11b 2013unsigned long
9de87eea 2014scm_std_usleep (unsigned long usecs)
5f05c406 2015{
d823b11b
MV
2016 struct timeval tv;
2017 tv.tv_usec = usecs % 1000000;
2018 tv.tv_sec = usecs / 1000000;
9de87eea
MV
2019 scm_std_select (0, NULL, NULL, NULL, &tv);
2020 return tv.tv_sec * 1000000 + tv.tv_usec;
5f05c406
MV
2021}
2022
9de87eea
MV
2023unsigned int
2024scm_std_sleep (unsigned int secs)
6c214b62 2025{
d823b11b
MV
2026 struct timeval tv;
2027 tv.tv_usec = 0;
2028 tv.tv_sec = secs;
9de87eea 2029 scm_std_select (0, NULL, NULL, NULL, &tv);
d823b11b 2030 return tv.tv_sec;
6c214b62
MD
2031}
2032
d823b11b
MV
2033/*** Misc */
2034
2035SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
2036 (void),
2037 "Return the thread that called this function.")
2038#define FUNC_NAME s_scm_current_thread
2039{
9de87eea 2040 return SCM_I_CURRENT_THREAD->handle;
d823b11b
MV
2041}
2042#undef FUNC_NAME
2043
9de87eea
MV
2044static SCM
2045scm_c_make_list (size_t n, SCM fill)
2046{
2047 SCM res = SCM_EOL;
2048 while (n-- > 0)
2049 res = scm_cons (fill, res);
2050 return res;
2051}
2052
d823b11b
MV
2053SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
2054 (void),
2055 "Return a list of all threads.")
9bc4701c 2056#define FUNC_NAME s_scm_all_threads
d823b11b 2057{
9de87eea
MV
2058   /* We cannot allocate while holding the thread_admin_mutex because
2059 of the way GC is done.
2060 */
2061 int n = thread_count;
2062 scm_i_thread *t;
2063 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
d823b11b 2064
9de87eea
MV
2065 scm_i_pthread_mutex_lock (&thread_admin_mutex);
2066 l = &list;
2067 for (t = all_threads; t && n > 0; t = t->next_thread)
2068 {
2e77f720
LC
2069 if (t != scm_i_signal_delivery_thread)
2070 {
2071 SCM_SETCAR (*l, t->handle);
2072 l = SCM_CDRLOC (*l);
2073 }
9de87eea
MV
2074 n--;
2075 }
2076 *l = SCM_EOL;
2077 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
2078 return list;
d823b11b 2079}
9de87eea 2080#undef FUNC_NAME
d823b11b
MV
2081
2082SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
2083 (SCM thread),
2084 "Return @code{#t} iff @var{thread} has exited.\n")
2085#define FUNC_NAME s_scm_thread_exited_p
2086{
7888309b 2087 return scm_from_bool (scm_c_thread_exited_p (thread));
d823b11b
MV
2088}
2089#undef FUNC_NAME
2090
911782b7 2091int
d823b11b
MV
2092scm_c_thread_exited_p (SCM thread)
2093#define FUNC_NAME s_scm_thread_exited_p
5f05c406 2094{
9de87eea 2095 scm_i_thread *t;
d823b11b 2096 SCM_VALIDATE_THREAD (1, thread);
9de87eea 2097 t = SCM_I_THREAD_DATA (thread);
d823b11b 2098 return t->exited;
5f05c406 2099}
d823b11b 2100#undef FUNC_NAME
5f05c406 2101
d20912e6
LC
2102SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
2103 (void),
2104 "Return the total number of processors of the machine, which\n"
2105 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2106 "thread execution unit, which can be either:\n\n"
2107 "@itemize\n"
2108 "@item an execution core in a (possibly multi-core) chip, in a\n"
2109 " (possibly multi- chip) module, in a single computer, or\n"
2110 "@item a thread execution unit inside a core in the case of\n"
2111 " @dfn{hyper-threaded} CPUs.\n"
2112 "@end itemize\n\n"
2113 "Which of the two definitions is used, is unspecified.\n")
2114#define FUNC_NAME s_scm_total_processor_count
2115{
2116 return scm_from_ulong (num_processors (NPROC_ALL));
2117}
2118#undef FUNC_NAME
2119
2120SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
2121 (void),
2122 "Like @code{total-processor-count}, but return the number of\n"
2123 "processors available to the current process. See\n"
2124 "@code{setaffinity} and @code{getaffinity} for more\n"
2125 "information.\n")
2126#define FUNC_NAME s_scm_current_processor_count
2127{
2128 return scm_from_ulong (num_processors (NPROC_CURRENT));
2129}
2130#undef FUNC_NAME
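/* Illustrative sketch (not part of the original file): sizing a worker
   pool from C using the binding above.  The helper name is made up.

     static unsigned long
     default_worker_count (void)
     {
       return scm_to_ulong (scm_current_processor_count ());
     }
*/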
2131
2132
2133\f
2134
9de87eea 2135static scm_i_pthread_cond_t wake_up_cond;
9bc4701c
MD
2136static int threads_initialized_p = 0;
2137
9bc4701c 2138
a4d106c7
MV
2139/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2140 */
d1138028 2141scm_i_pthread_mutex_t scm_i_critical_section_mutex;
a4d106c7 2142
661ae7ab 2143static SCM dynwind_critical_section_mutex;
a54a94b3 2144
9bc4701c 2145void
661ae7ab 2146scm_dynwind_critical_section (SCM mutex)
76da80e7 2147{
a4d106c7 2148 if (scm_is_false (mutex))
661ae7ab
MV
2149 mutex = dynwind_critical_section_mutex;
2150 scm_dynwind_lock_mutex (mutex);
2151 scm_dynwind_block_asyncs ();
9de87eea
MV
2152}
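/* Illustrative sketch (not part of the original file): entering a
   critical section from C.  Passing SCM_BOOL_F selects the shared
   dynwind_critical_section_mutex; asyncs stay blocked until the
   enclosing dynwind exits.

     scm_dynwind_begin (0);
     scm_dynwind_critical_section (SCM_BOOL_F);
     ... code that must not be interrupted by asyncs ...
     scm_dynwind_end ();
*/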
2153
2154/*** Initialization */
2155
9de87eea
MV
2156scm_i_pthread_mutex_t scm_i_misc_mutex;
2157
d1138028
MV
2158#if SCM_USE_PTHREAD_THREADS
2159pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2160#endif
2161
9de87eea 2162void
12c1d861 2163scm_threads_prehistory (void *base)
9de87eea 2164{
d1138028
MV
2165#if SCM_USE_PTHREAD_THREADS
2166 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
2167 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
2168 PTHREAD_MUTEX_RECURSIVE);
2169#endif
2170
2171 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
2172 scm_i_pthread_mutexattr_recursive);
9de87eea
MV
2173 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
2174 scm_i_pthread_cond_init (&wake_up_cond, NULL);
74926120 2175
12c1d861 2176 guilify_self_1 ((struct GC_stack_base *) base);
9bc4701c
MD
2177}
2178
d823b11b
MV
2179scm_t_bits scm_tc16_thread;
2180scm_t_bits scm_tc16_mutex;
2181scm_t_bits scm_tc16_condvar;
7bfd3b9e 2182
7bfd3b9e 2183void
9de87eea 2184scm_init_threads ()
7bfd3b9e 2185{
9de87eea 2186 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
d823b11b 2187 scm_set_smob_print (scm_tc16_thread, thread_print);
d823b11b 2188
9de87eea 2189 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
9de87eea
MV
2190 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
2191 scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
9bc4701c 2192
9de87eea
MV
2193 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
2194 sizeof (fat_cond));
9de87eea 2195 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
d823b11b 2196
9de87eea
MV
2197 scm_i_default_dynamic_state = SCM_BOOL_F;
2198 guilify_self_2 (SCM_BOOL_F);
9bc4701c 2199 threads_initialized_p = 1;
a4d106c7 2200
f39448c5 2201 dynwind_critical_section_mutex = scm_make_recursive_mutex ();
7bfd3b9e 2202}
89e00824 2203
5f05c406 2204void
9de87eea 2205scm_init_threads_default_dynamic_state ()
5f05c406 2206{
9de87eea 2207 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
f39448c5 2208 scm_i_default_dynamic_state = state;
5f05c406
MV
2209}
2210
d823b11b 2211void
9de87eea 2212scm_init_thread_procs ()
d823b11b 2213{
9de87eea 2214#include "libguile/threads.x"
d823b11b
MV
2215}
2216
3c13664e
LC
2217\f
2218/* IA64-specific things. */
2219
2220#ifdef __ia64__
2221# ifdef __hpux
2222# include <sys/param.h>
2223# include <sys/pstat.h>
2224void *
2225scm_ia64_register_backing_store_base (void)
2226{
2227 struct pst_vm_status vm_status;
2228 int i = 0;
2229 while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
2230 if (vm_status.pst_type == PS_RSESTACK)
2231 return (void *) vm_status.pst_vaddr;
2232 abort ();
2233}
2234void *
2235scm_ia64_ar_bsp (const void *ctx)
2236{
2237 uint64_t bsp;
2238 __uc_get_ar_bsp (ctx, &bsp);
2239 return (void *) bsp;
2240}
2241# endif /* hpux */
2242# ifdef linux
2243# include <ucontext.h>
2244void *
2245scm_ia64_register_backing_store_base (void)
2246{
2247 extern void *__libc_ia64_register_backing_store_base;
2248 return __libc_ia64_register_backing_store_base;
2249}
2250void *
2251scm_ia64_ar_bsp (const void *opaque)
2252{
2253 const ucontext_t *ctx = opaque;
2254 return (void *) ctx->uc_mcontext.sc_ar_bsp;
2255}
2256# endif /* linux */
ba20d262
AW
2257# ifdef __FreeBSD__
2258# include <ucontext.h>
2259void *
2260scm_ia64_register_backing_store_base (void)
2261{
2262 return (void *)0x8000000000000000;
2263}
2264void *
2265scm_ia64_ar_bsp (const void *opaque)
2266{
2267 const ucontext_t *ctx = opaque;
2268 return (void *)(ctx->uc_mcontext.mc_special.bspstore
2269 + ctx->uc_mcontext.mc_special.ndirty);
2270}
2271# endif /* __FreeBSD__ */
3c13664e
LC
2272#endif /* __ia64__ */
2273
2274
89e00824
ML
2275/*
2276 Local Variables:
2277 c-file-style: "gnu"
2278 End:
2279*/