remove weak pairs, rewrite weak vectors
[bpt/guile.git] / libguile / threads.c
cde24ce1 1/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
74926120 2 *
73be1d9e 3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7bfd3b9e 7 *
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
7bfd3b9e 12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
73be1d9e 17 */
1bbd0b84 18
1bbd0b84 19
7bfd3b9e 20\f
21#ifdef HAVE_CONFIG_H
22# include <config.h>
23#endif
7bfd3b9e 24
1c44468d 25#include "libguile/bdw-gc.h"
26#include "libguile/_scm.h"
27
fcc5d734 28#if HAVE_UNISTD_H
d823b11b 29#include <unistd.h>
fcc5d734 30#endif
d823b11b 31#include <stdio.h>
32
33#ifdef HAVE_STRING_H
34#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35#endif
36
fcc5d734 37#if HAVE_SYS_TIME_H
d823b11b 38#include <sys/time.h>
fcc5d734 39#endif
5f05c406 40
f57fdf07 41#include <assert.h>
32f94bf2 42#include <fcntl.h>
d20912e6 43#include <nproc.h>
f57fdf07 44
45#include "libguile/validate.h"
46#include "libguile/root.h"
47#include "libguile/eval.h"
48#include "libguile/async.h"
49#include "libguile/ports.h"
50#include "libguile/threads.h"
a0599745 51#include "libguile/dynwind.h"
d823b11b 52#include "libguile/iselect.h"
53#include "libguile/fluids.h"
54#include "libguile/continuations.h"
2b829bbb 55#include "libguile/gc.h"
9de87eea 56#include "libguile/init.h"
2e77f720 57#include "libguile/scmsigs.h"
6180e336 58#include "libguile/strings.h"
7bfd3b9e 59
60#include <full-read.h>
61
62
63\f
64
65/* First some libgc shims. */
66
67/* Make sure GC_fn_type is defined; it is missing from the public
68 headers of GC 7.1 and earlier. */
69#ifndef HAVE_GC_FN_TYPE
70typedef void * (* GC_fn_type) (void *);
71#endif
72
73
74#ifndef GC_SUCCESS
75#define GC_SUCCESS 0
76#endif
77
78#ifndef GC_UNIMPLEMENTED
79#define GC_UNIMPLEMENTED 3
80#endif
81
82/* Likewise struct GC_stack_base is missing before 7.1. */
83#ifndef HAVE_GC_STACK_BASE
84struct GC_stack_base {
85 void * mem_base; /* Base of memory stack. */
86#ifdef __ia64__
87 void * reg_base; /* Base of separate register stack. */
88#endif
89};
90
91static int
987b8160 92GC_register_my_thread (struct GC_stack_base *stack_base)
93{
94 return GC_UNIMPLEMENTED;
95}
96
97static void
98GC_unregister_my_thread ()
99{
100}
101
102#if !SCM_USE_PTHREAD_THREADS
103/* No threads; we can just use GC_stackbottom. */
5f0d2951 104static void *
105get_thread_stack_base ()
106{
107 return GC_stackbottom;
108}
109
110#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
111 && defined PTHREAD_ATTR_GETSTACK_WORKS
 112/* This method is for GNU/Linux and perhaps some other systems.
113 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
114 available on them. */
115static void *
116get_thread_stack_base ()
117{
118 pthread_attr_t attr;
119 void *start, *end;
120 size_t size;
121
122 pthread_getattr_np (pthread_self (), &attr);
123 pthread_attr_getstack (&attr, &start, &size);
124 end = (char *)start + size;
125
126#if SCM_STACK_GROWS_UP
127 return start;
128#else
129 return end;
130#endif
131}
132
133#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
 134/* This method is for MacOS X.
135 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
136 but as of 2006 there's nothing obvious at apple.com. */
137static void *
138get_thread_stack_base ()
139{
140 return pthread_get_stackaddr_np (pthread_self ());
141}
142
143#else
144#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
145#endif
146
147static int
987b8160 148GC_get_stack_base (struct GC_stack_base *stack_base)
5f0d2951 149{
150 stack_base->mem_base = get_thread_stack_base ();
151#ifdef __ia64__
152 /* Calculate and store off the base of this thread's register
153 backing store (RBS). Unfortunately our implementation(s) of
154 scm_ia64_register_backing_store_base are only reliable for the
155 main thread. For other threads, therefore, find out the current
156 top of the RBS, and use that as a maximum. */
157 stack_base->reg_base = scm_ia64_register_backing_store_base ();
158 {
159 ucontext_t ctx;
160 void *bsp;
161 getcontext (&ctx);
162 bsp = scm_ia64_ar_bsp (&ctx);
163 if (stack_base->reg_base > bsp)
164 stack_base->reg_base = bsp;
165 }
166#endif
167 return GC_SUCCESS;
5f0d2951 168}
169
170static void *
171GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
172{
173 struct GC_stack_base stack_base;
174
175 stack_base.mem_base = (void*)&stack_base;
176#ifdef __ia64__
177 /* FIXME: Untested. */
178 {
179 ucontext_t ctx;
180 getcontext (&ctx);
181 stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
182 }
183#endif
184
185 return fn (&stack_base, arg);
186}
187#endif /* HAVE_GC_STACK_BASE */
188
5f0d2951 189
190/* Now define with_gc_active and with_gc_inactive. */
191
192#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
193
194/* We have a sufficiently new libgc (7.2 or newer). */
195
196static void*
197with_gc_inactive (GC_fn_type func, void *data)
198{
199 return GC_do_blocking (func, data);
200}
201
202static void*
203with_gc_active (GC_fn_type func, void *data)
204{
205 return GC_call_with_gc_active (func, data);
206}
207
208#else
209
210/* libgc not new enough, so never actually deactivate GC.
211
212 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
213 GC_call_with_gc_active. */
214
215static void*
216with_gc_inactive (GC_fn_type func, void *data)
217{
218 return func (data);
219}
220
221static void*
222with_gc_active (GC_fn_type func, void *data)
223{
224 return func (data);
225}
226
227#endif /* HAVE_GC_DO_BLOCKING */
228
229
634aa8de 230\f
231static void
232to_timespec (SCM t, scm_t_timespec *waittime)
233{
234 if (scm_is_pair (t))
235 {
236 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
237 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
238 }
239 else
240 {
241 double time = scm_to_double (t);
242 double sec = scm_c_truncate (time);
243
244 waittime->tv_sec = (long) sec;
2a1d0688 245 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
246 }
247}
248
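/* Illustrative sketch, not part of the original source: the two timeout
   representations accepted by to_timespec above.  A (SECS . USECS) pair as
   returned by `gettimeofday', or a real number of seconds, both map onto a
   scm_t_timespec. */
#if 0
static void
to_timespec_example (void)
{
  scm_t_timespec ts;

  /* A pair of seconds and microseconds: 1 second, 250000 usec. */
  to_timespec (scm_cons (scm_from_ulong (1), scm_from_ulong (250000)), &ts);
  /* ts.tv_sec == 1, ts.tv_nsec == 250000000 */

  /* A real number of seconds. */
  to_timespec (scm_from_double (1.5), &ts);
  /* ts.tv_sec == 1, ts.tv_nsec == 500000000 */
}
#endif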
c058db8a 249\f
d823b11b 250/*** Queues */
7bfd3b9e 251
252/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
253 the risk of false references leading to unbounded retained space as
254 described in "Bounding Space Usage of Conservative Garbage Collectors",
255 H.J. Boehm, 2001. */
256
257/* Make an empty queue data structure.
258 */
259static SCM
260make_queue ()
261{
262 return scm_cons (SCM_EOL, SCM_EOL);
263}
7bfd3b9e 264
265/* Put T at the back of Q and return a handle that can be used with
266 remqueue to remove T from Q again.
267 */
268static SCM
269enqueue (SCM q, SCM t)
270{
271 SCM c = scm_cons (t, SCM_EOL);
d2a51087 272 SCM_CRITICAL_SECTION_START;
d2e53ed6 273 if (scm_is_null (SCM_CDR (q)))
274 SCM_SETCDR (q, c);
275 else
276 SCM_SETCDR (SCM_CAR (q), c);
277 SCM_SETCAR (q, c);
d2a51087 278 SCM_CRITICAL_SECTION_END;
279 return c;
280}
7bfd3b9e 281
282/* Remove the element that the handle C refers to from the queue Q. C
283 must have been returned from a call to enqueue. The return value
284 is zero when the element referred to by C has already been removed.
285 Otherwise, 1 is returned.
286*/
287static int
288remqueue (SCM q, SCM c)
289{
290 SCM p, prev = q;
d2a51087 291 SCM_CRITICAL_SECTION_START;
d2e53ed6 292 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
d823b11b 293 {
bc36d050 294 if (scm_is_eq (p, c))
d823b11b 295 {
bc36d050 296 if (scm_is_eq (c, SCM_CAR (q)))
297 SCM_SETCAR (q, SCM_CDR (c));
298 SCM_SETCDR (prev, SCM_CDR (c));
299
300 /* GC-robust */
301 SCM_SETCDR (c, SCM_EOL);
302
d2a51087 303 SCM_CRITICAL_SECTION_END;
9de87eea 304 return 1;
305 }
306 prev = p;
307 }
d2a51087 308 SCM_CRITICAL_SECTION_END;
9de87eea 309 return 0;
310}
311
312/* Remove the front-most element from the queue Q and return it.
313 Return SCM_BOOL_F when Q is empty.
314*/
315static SCM
316dequeue (SCM q)
317{
318 SCM c;
319 SCM_CRITICAL_SECTION_START;
320 c = SCM_CDR (q);
d2e53ed6 321 if (scm_is_null (c))
322 {
323 SCM_CRITICAL_SECTION_END;
324 return SCM_BOOL_F;
325 }
326 else
327 {
328 SCM_SETCDR (q, SCM_CDR (c));
d2e53ed6 329 if (scm_is_null (SCM_CDR (q)))
d823b11b 330 SCM_SETCAR (q, SCM_EOL);
d2a51087 331 SCM_CRITICAL_SECTION_END;
332
333 /* GC-robust */
334 SCM_SETCDR (c, SCM_EOL);
335
336 return SCM_CAR (c);
337 }
338}
7bfd3b9e 339
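/* Illustrative sketch, not part of the original source: how the queue
   primitives above fit together.  enqueue returns a handle that remqueue can
   later use to drop an element out of the middle of the queue; dequeue pops
   from the front and returns #f when the queue is empty. */
#if 0
static void
queue_example (void)
{
  SCM q, h1, h2, x, y;

  q = make_queue ();
  h1 = enqueue (q, scm_from_int (1));
  h2 = enqueue (q, scm_from_int (2));

  remqueue (q, h1);     /* drop the first element via its handle */
  x = dequeue (q);      /* => 2 */
  y = dequeue (q);      /* => #f, the queue is now empty */

  scm_remember_upto_here_2 (x, y);
  scm_remember_upto_here_1 (h2);
}
#endif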
9de87eea 340/*** Thread smob routines */
76da80e7 341
342
343static int
344thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
345{
 346 /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
347 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
348 the struct case, hence we go via a union, and extract according to the
349 size of pthread_t. */
350 union {
351 scm_i_pthread_t p;
352 unsigned short us;
353 unsigned int ui;
354 unsigned long ul;
355 scm_t_uintmax um;
356 } u;
9de87eea 357 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
358 scm_i_pthread_t p = t->pthread;
359 scm_t_uintmax id;
360 u.p = p;
361 if (sizeof (p) == sizeof (unsigned short))
362 id = u.us;
363 else if (sizeof (p) == sizeof (unsigned int))
364 id = u.ui;
365 else if (sizeof (p) == sizeof (unsigned long))
366 id = u.ul;
367 else
368 id = u.um;
369
d823b11b 370 scm_puts ("#<thread ", port);
23d72566 371 scm_uintprint (id, 10, port);
1b92fb6b 372 scm_puts (" (", port);
0345e278 373 scm_uintprint ((scm_t_bits)t, 16, port);
1b92fb6b 374 scm_puts (")>", port);
375 return 1;
376}
377
706846f6 378\f
9de87eea 379/*** Blocking on queues. */
f7eca35d 380
381/* See also scm_i_queue_async_cell for how such a block is
 382 interrupted.
383*/
d823b11b 384
385/* Put the current thread on QUEUE and go to sleep, waiting for it to
386 be woken up by a call to 'unblock_from_queue', or to be
387 interrupted. Upon return of this function, the current thread is
388 no longer on QUEUE, even when the sleep has been interrupted.
389
d2a51087 390 The caller of block_self must hold MUTEX. It will be atomically
391 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
392
393 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
394 as MUTEX is needed.
395
396 When WAITTIME is not NULL, the sleep will be aborted at that time.
397
398 The return value of block_self is an errno value. It will be zero
399 when the sleep has been successfully completed by a call to
400 unblock_from_queue, EINTR when it has been interrupted by the
401 delivery of a system async, and ETIMEDOUT when the timeout has
402 expired.
403
404 The system asyncs themselves are not executed by block_self.
405*/
406static int
407block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
408 const scm_t_timespec *waittime)
76da80e7 409{
410 scm_i_thread *t = SCM_I_CURRENT_THREAD;
411 SCM q_handle;
412 int err;
413
414 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
415 err = EINTR;
416 else
417 {
418 t->block_asyncs++;
419 q_handle = enqueue (queue, t->handle);
420 if (waittime == NULL)
421 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
422 else
423 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
424
425 /* When we are still on QUEUE, we have been interrupted. We
426 report this only when no other error (such as a timeout) has
427 happened above.
428 */
429 if (remqueue (queue, q_handle) && err == 0)
430 err = EINTR;
431 t->block_asyncs--;
432 scm_i_reset_sleep (t);
433 }
434
435 return err;
76da80e7 436}
9de87eea 437
438/* Wake up the first thread on QUEUE, if any. The awoken thread is
439 returned, or #f if the queue was empty.
440 */
441static SCM
442unblock_from_queue (SCM queue)
443{
444 SCM thread = dequeue (queue);
445 if (scm_is_true (thread))
446 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
447 return thread;
448}
449
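/* Illustrative sketch, not part of the original source: the sleeping pattern
   used throughout this file.  A waiter holds some mutex, calls block_self
   with a queue, that mutex and an optional absolute timeout, and interprets
   the errno-style result; a waker calls unblock_from_queue on the same
   queue.  The function name here is hypothetical. */
#if 0
static int
wait_on_queue_example (SCM queue, SCM obj, scm_i_pthread_mutex_t *lock,
                       const scm_t_timespec *deadline)
{
  int err;

  scm_i_scm_pthread_mutex_lock (lock);
  err = block_self (queue, obj, lock, deadline);
  scm_i_pthread_mutex_unlock (lock);

  if (err == EINTR)
    ;   /* interrupted by a system async; the caller should SCM_TICK */
  else if (err == ETIMEDOUT)
    ;   /* the deadline passed before unblock_from_queue woke us up */
  return err;
}
#endif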
45f15cac 450\f
451/* Getting into and out of guile mode.
452 */
453
454/* Key used to attach a cleanup handler to a given thread. Also, if
455 thread-local storage is unavailable, this key is used to retrieve the
456 current thread with `pthread_getspecific ()'. */
457scm_i_pthread_key_t scm_i_thread_key;
458
459
460#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
461
462/* When thread-local storage (TLS) is available, a pointer to the
463 current-thread object is kept in TLS. Note that storing the thread-object
464 itself in TLS (rather than a pointer to some malloc'd memory) is not
465 possible since thread objects may live longer than the actual thread they
466 represent. */
467SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
468
f60a7648 469#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
705edb95 470
d823b11b 471
472static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
473static scm_i_thread *all_threads = NULL;
474static int thread_count;
475
476static SCM scm_i_default_dynamic_state;
477
478/* Run when a fluid is collected. */
479void
480scm_i_reset_fluid (size_t n, SCM val)
481{
482 scm_i_thread *t;
483
484 scm_i_pthread_mutex_lock (&thread_admin_mutex);
485 for (t = all_threads; t; t = t->next_thread)
486 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
487 {
488 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
489
490 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
491 SCM_SIMPLE_VECTOR_SET (v, n, val);
492 }
493 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
494}
495
9de87eea 496/* Perform first stage of thread initialisation, in non-guile mode.
d823b11b 497 */
9de87eea 498static void
12c1d861 499guilify_self_1 (struct GC_stack_base *base)
d823b11b 500{
501 scm_i_thread t;
502
503 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
504 before allocating anything in this thread, because allocation could
505 cause GC to run, and GC could cause finalizers, which could invoke
506 Scheme functions, which need the current thread to be set. */
507
508 t.pthread = scm_i_pthread_self ();
509 t.handle = SCM_BOOL_F;
510 t.result = SCM_BOOL_F;
511 t.cleanup_handler = SCM_BOOL_F;
512 t.mutexes = SCM_EOL;
513 t.held_mutex = NULL;
514 t.join_queue = SCM_EOL;
515 t.dynamic_state = SCM_BOOL_F;
516 t.dynwinds = SCM_EOL;
517 t.active_asyncs = SCM_EOL;
518 t.block_asyncs = 1;
519 t.pending_asyncs = 1;
520 t.critical_section_level = 0;
521 t.base = base->mem_base;
346e4402 522#ifdef __ia64__
7f22442b 523 t.register_backing_store_base = base->reg_base;
346e4402 524#endif
525 t.continuation_root = SCM_EOL;
526 t.continuation_base = t.base;
527 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
528 t.sleep_mutex = NULL;
529 t.sleep_object = SCM_BOOL_F;
530 t.sleep_fd = -1;
531
32f94bf2 532 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
533 /* FIXME: Error conditions during the initialization phase are handled
534 gracelessly since public functions such as `scm_init_guile ()'
535 currently have type `void'. */
536 abort ();
537
538 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
539 t.current_mark_stack_ptr = NULL;
540 t.current_mark_stack_limit = NULL;
541 t.canceled = 0;
542 t.exited = 0;
543 t.guile_mode = 0;
9de87eea 544
545 /* The switcheroo. */
546 {
547 scm_i_thread *t_ptr = &t;
548
549 GC_disable ();
550 t_ptr = GC_malloc (sizeof (scm_i_thread));
551 memcpy (t_ptr, &t, sizeof t);
552
553 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
554
555#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
556 /* Cache the current thread in TLS for faster lookup. */
557 scm_i_current_thread = t_ptr;
f60a7648 558#endif
9de87eea 559
560 scm_i_pthread_mutex_lock (&thread_admin_mutex);
561 t_ptr->next_thread = all_threads;
562 all_threads = t_ptr;
563 thread_count++;
564 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
565
566 GC_enable ();
567 }
568}
569
9de87eea 570/* Perform second stage of thread initialisation, in guile mode.
d823b11b 571 */
572static void
573guilify_self_2 (SCM parent)
d823b11b 574{
575 scm_i_thread *t = SCM_I_CURRENT_THREAD;
576
577 t->guile_mode = 1;
578
9de87eea 579 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
c812243b 580
581 t->continuation_root = scm_cons (t->handle, SCM_EOL);
582 t->continuation_base = t->base;
2bbe1533 583 t->vm = SCM_BOOL_F;
584
585 if (scm_is_true (parent))
586 t->dynamic_state = scm_make_dynamic_state (parent);
587 else
588 t->dynamic_state = scm_i_make_initial_dynamic_state ();
589
590 t->join_queue = make_queue ();
591 t->block_asyncs = 0;
592}
593
594\f
595/*** Fat mutexes */
596
597/* We implement our own mutex type since we want them to be 'fair', we
598 want to do fancy things while waiting for them (like running
599 asyncs) and we might want to add things that are nice for
600 debugging.
601*/
602
603typedef struct {
604 scm_i_pthread_mutex_t lock;
605 SCM owner;
adc085f1 606 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
6180e336 607
adc085f1 608 int recursive; /* allow recursive locking? */
609 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
610 int allow_external_unlock; /* is it an error to unlock a mutex that is not
611 owned by the current thread? */
612
613 SCM waiting; /* the threads waiting for this mutex. */
614} fat_mutex;
615
616#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
617#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
618
619static SCM
620call_cleanup (void *data)
621{
622 SCM *proc_p = data;
623 return scm_call_0 (*proc_p);
624}
625
9de87eea 626/* Perform thread tear-down, in guile mode.
d823b11b 627 */
628static void *
629do_thread_exit (void *v)
630{
631 scm_i_thread *t = (scm_i_thread *) v;
632
633 if (!scm_is_false (t->cleanup_handler))
634 {
635 SCM ptr = t->cleanup_handler;
636
637 t->cleanup_handler = SCM_BOOL_F;
638 t->result = scm_internal_catch (SCM_BOOL_T,
2002f1f8 639 call_cleanup, &ptr,
640 scm_handle_by_message_noexit, NULL);
641 }
9de87eea 642
86a597f8 643 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
644
645 t->exited = 1;
646 close (t->sleep_pipe[0]);
647 close (t->sleep_pipe[1]);
648 while (scm_is_true (unblock_from_queue (t->join_queue)))
649 ;
9de87eea 650
74926120 651 while (!scm_is_null (t->mutexes))
6180e336 652 {
a141db86 653 SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);
74926120 654
a141db86 655 if (scm_is_true (mutex))
656 {
657 fat_mutex *m = SCM_MUTEX_DATA (mutex);
658
659 scm_i_pthread_mutex_lock (&m->lock);
660
661 /* Since MUTEX is in `t->mutexes', T must be its owner. */
662 assert (scm_is_eq (m->owner, t->handle));
663
a0faf7dd 664 unblock_from_queue (m->waiting);
f57fdf07 665
666 scm_i_pthread_mutex_unlock (&m->lock);
667 }
6180e336 668
a141db86 669 t->mutexes = scm_cdr (t->mutexes);
670 }
671
86a597f8 672 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 673
674 return NULL;
675}
676
677static void *
678do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
679{
7f22442b 680 /* Won't hurt if we are already registered. */
8e76ce94 681#if SCM_USE_PTHREAD_THREADS
7f22442b 682 GC_register_my_thread (sb);
8e76ce94 683#endif
f60a7648 684
7f22442b 685 return scm_with_guile (do_thread_exit, v);
686}
687
d823b11b 688static void
9de87eea 689on_thread_exit (void *v)
d823b11b 690{
29776e85 691 /* This handler is executed in non-guile mode. */
2e77f720 692 scm_i_thread *t = (scm_i_thread *) v, **tp;
0c97d7dd 693
694 /* If we were canceled, we were unable to clear `t->guile_mode', so do
695 it here. */
696 t->guile_mode = 0;
697
698 /* If this thread was cancelled while doing a cond wait, it will
699 still have a mutex locked, so we unlock it here. */
700 if (t->held_mutex)
701 {
702 scm_i_pthread_mutex_unlock (t->held_mutex);
703 t->held_mutex = NULL;
704 }
705
706 /* Reinstate the current thread for purposes of scm_with_guile
707 guile-mode cleanup handlers. Only really needed in the non-TLS
708 case but it doesn't hurt to be consistent. */
709 scm_i_pthread_setspecific (scm_i_thread_key, t);
0c97d7dd 710
711 /* Scheme-level thread finalizers and other cleanup needs to happen in
712 guile mode. */
713 GC_call_with_stack_base (do_thread_exit_trampoline, t);
714
715 /* Removing ourself from the list of all threads needs to happen in
716 non-guile mode since all SCM values on our stack become
29776e85 717 unprotected once we are no longer in the list. */
718 scm_i_pthread_mutex_lock (&thread_admin_mutex);
719 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
720 if (*tp == t)
721 {
722 *tp = t->next_thread;
723
724 /* GC-robust */
725 t->next_thread = NULL;
726
727 break;
728 }
729 thread_count--;
730
731 /* If there's only one other thread, it could be the signal delivery
732 thread, so we need to notify it to shut down by closing its read pipe.
733 If it's not the signal delivery thread, then closing the read pipe isn't
734 going to hurt. */
735 if (thread_count <= 1)
736 scm_i_close_signal_pipe ();
737
738 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
739
f60a7648 740 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
7f22442b 741
8e76ce94 742#if SCM_USE_PTHREAD_THREADS
7f22442b 743 GC_unregister_my_thread ();
653ccd78 744#endif
745}
746
9de87eea 747static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
d823b11b 748
749static void
750init_thread_key (void)
751{
f60a7648 752 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
9de87eea 753}
d823b11b 754
755/* Perform any initializations necessary to make the current thread
756 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
757 if necessary.
a54a94b3 758
759 BASE is the stack base to use with GC.
760
 761 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
762 which case the default dynamic state is used.
763
cde24ce1 764 Returns zero when the thread was known to guile already; otherwise
9de87eea 765 return 1.
766
767 Note that it could be the case that the thread was known
768 to Guile, but not in guile mode (because we are within a
769 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
770 be sure. New threads are put into guile mode implicitly. */
771
772static int
12c1d861 773scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
d823b11b 774{
775 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
776
777 if (SCM_I_CURRENT_THREAD)
778 {
779 /* Thread is already known to Guile.
780 */
781 return 0;
782 }
783 else
784 {
785 /* This thread has not been guilified yet.
786 */
787
788 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
789 if (scm_initialized_p == 0)
790 {
791 /* First thread ever to enter Guile. Run the full
792 initialization.
793 */
794 scm_i_init_guile (base);
12c1d861 795
8e76ce94 796#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
797 /* Allow other threads to come in later. */
798 GC_allow_register_threads ();
4000d064 799#endif
12c1d861 800
801 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
802 }
803 else
804 {
805 /* Guile is already initialized, but this thread enters it for
806 the first time. Only initialize this thread.
807 */
808 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
809
810 /* Register this thread with libgc. */
8e76ce94 811#if SCM_USE_PTHREAD_THREADS
12c1d861 812 GC_register_my_thread (base);
8e76ce94 813#endif
12c1d861 814
815 guilify_self_1 (base);
816 guilify_self_2 (parent);
817 }
818 return 1;
819 }
820}
821
822void
823scm_init_guile ()
d823b11b 824{
825 struct GC_stack_base stack_base;
826
827 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
828 scm_i_init_thread_for_guile (&stack_base,
829 scm_i_default_dynamic_state);
830 else
831 {
12c1d861 832 fprintf (stderr, "Failed to get stack base for current thread.\n");
4765b28f 833 exit (EXIT_FAILURE);
9de87eea 834 }
835}
836
12c1d861 837struct with_guile_args
838{
839 GC_fn_type func;
840 void *data;
12c1d861 841 SCM parent;
842};
843
844static void *
845with_guile_trampoline (void *data)
846{
12c1d861 847 struct with_guile_args *args = data;
848
849 return scm_c_with_continuation_barrier (args->func, args->data);
850}
851
852static void *
853with_guile_and_parent (struct GC_stack_base *base, void *data)
854{
855 void *res;
856 int new_thread;
857 scm_i_thread *t;
12c1d861 858 struct with_guile_args *args = data;
2e77f720 859
12c1d861 860 new_thread = scm_i_init_thread_for_guile (base, args->parent);
861 t = SCM_I_CURRENT_THREAD;
862 if (new_thread)
2e77f720 863 {
864 /* We are in Guile mode. */
865 assert (t->guile_mode);
866
12c1d861 867 res = scm_c_with_continuation_barrier (args->func, args->data);
868
869 /* Leave Guile mode. */
870 t->guile_mode = 0;
871 }
872 else if (t->guile_mode)
873 {
874 /* Already in Guile mode. */
12c1d861 875 res = scm_c_with_continuation_barrier (args->func, args->data);
2e77f720 876 }
74926120 877 else
cde24ce1 878 {
cde24ce1
AW
879 /* We are not in Guile mode, either because we are not within a
880 scm_with_guile, or because we are within a scm_without_guile.
72e6b608 881
cde24ce1
AW
882 This call to scm_with_guile() could happen from anywhere on the
883 stack, and in particular lower on the stack than when it was
884 when this thread was first guilified. Thus, `base' must be
885 updated. */
886#if SCM_STACK_GROWS_UP
12c1d861
AW
887 if (SCM_STACK_PTR (base->mem_base) < t->base)
888 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 889#else
12c1d861
AW
890 if (SCM_STACK_PTR (base->mem_base) > t->base)
891 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 892#endif
72e6b608 893
cde24ce1 894 t->guile_mode = 1;
12c1d861 895 res = with_gc_active (with_guile_trampoline, args);
cde24ce1
AW
896 t->guile_mode = 0;
897 }
898 return res;
72e6b608 899}
3d1af79f 900
12c1d861
AW
901static void *
902scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
903{
904 struct with_guile_args args;
905
906 args.func = func;
907 args.data = data;
908 args.parent = parent;
909
910 return GC_call_with_stack_base (with_guile_and_parent, &args);
911}
912
913void *
914scm_with_guile (void *(*func)(void *), void *data)
915{
916 return scm_i_with_guile_and_parent (func, data,
917 scm_i_default_dynamic_state);
918}
919
9de87eea
MV
920void *
921scm_without_guile (void *(*func)(void *), void *data)
d823b11b 922{
72e6b608 923 void *result;
cde24ce1 924 scm_i_thread *t = SCM_I_CURRENT_THREAD;
72e6b608 925
cde24ce1 926 if (t->guile_mode)
72e6b608 927 {
cde24ce1
AW
928 SCM_I_CURRENT_THREAD->guile_mode = 0;
929 result = with_gc_inactive (func, data);
930 SCM_I_CURRENT_THREAD->guile_mode = 1;
72e6b608
LC
931 }
932 else
cde24ce1 933 /* Otherwise we're not in guile mode, so nothing to do. */
72e6b608
LC
934 result = func (data);
935
936 return result;
9de87eea
MV
937}
938
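/* Illustrative sketch, not part of the original source: how a foreign thread
   typically uses the two entry points above.  Scheme work is wrapped in
   scm_with_guile; inside guile mode, a long blocking operation is wrapped in
   scm_without_guile so that it does not stall the collector.  The function
   names are hypothetical. */
#if 0
static void *
blocking_part (void *data)
{
  /* Plain C; this thread counts as inactive for the GC while here. */
  return NULL;
}

static void *
guile_part (void *data)
{
  scm_c_eval_string ("(display \"hello from a foreign thread\\n\")");
  scm_without_guile (blocking_part, NULL);
  return NULL;
}

static void
foreign_thread_entry (void)
{
  scm_with_guile (guile_part, NULL);
}
#endif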
72e6b608 939\f
9de87eea
MV
940/*** Thread creation */
941
942typedef struct {
943 SCM parent;
944 SCM thunk;
945 SCM handler;
76da80e7 946 SCM thread;
9de87eea
MV
947 scm_i_pthread_mutex_t mutex;
948 scm_i_pthread_cond_t cond;
949} launch_data;
d823b11b 950
9de87eea
MV
951static void *
952really_launch (void *d)
953{
954 launch_data *data = (launch_data *)d;
955 SCM thunk = data->thunk, handler = data->handler;
956 scm_i_thread *t;
d823b11b 957
9de87eea 958 t = SCM_I_CURRENT_THREAD;
a54a94b3 959
9de87eea
MV
960 scm_i_scm_pthread_mutex_lock (&data->mutex);
961 data->thread = scm_current_thread ();
962 scm_i_pthread_cond_signal (&data->cond);
963 scm_i_pthread_mutex_unlock (&data->mutex);
964
965 if (SCM_UNBNDP (handler))
966 t->result = scm_call_0 (thunk);
967 else
968 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
969
970 return 0;
d823b11b
MV
971}
972
9de87eea
MV
973static void *
974launch_thread (void *d)
975{
976 launch_data *data = (launch_data *)d;
977 scm_i_pthread_detach (scm_i_pthread_self ());
978 scm_i_with_guile_and_parent (really_launch, d, data->parent);
979 return NULL;
980}
981
982SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
d823b11b 983 (SCM thunk, SCM handler),
9de87eea
MV
984 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
985 "returning a new thread object representing the thread. The procedure\n"
986 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
987 "\n"
988 "When @var{handler} is specified, then @var{thunk} is called from\n"
989 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
990 "handler. This catch is established inside the continuation barrier.\n"
991 "\n"
992 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
993 "the @emph{exit value} of the thread and the thread is terminated.")
d823b11b
MV
994#define FUNC_NAME s_scm_call_with_new_thread
995{
9de87eea
MV
996 launch_data data;
997 scm_i_pthread_t id;
998 int err;
d823b11b 999
9de87eea
MV
1000 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
1001 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
1002 handler, SCM_ARG2, FUNC_NAME);
1003
1004 data.parent = scm_current_dynamic_state ();
1005 data.thunk = thunk;
1006 data.handler = handler;
1007 data.thread = SCM_BOOL_F;
1008 scm_i_pthread_mutex_init (&data.mutex, NULL);
1009 scm_i_pthread_cond_init (&data.cond, NULL);
1010
1011 scm_i_scm_pthread_mutex_lock (&data.mutex);
1012 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
1013 if (err)
1014 {
1015 scm_i_pthread_mutex_unlock (&data.mutex);
1016 errno = err;
1017 scm_syserror (NULL);
1018 }
1019 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1020 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1021
9de87eea 1022 return data.thread;
d823b11b
MV
1023}
1024#undef FUNC_NAME
1025
9de87eea
MV
1026typedef struct {
1027 SCM parent;
1028 scm_t_catch_body body;
1029 void *body_data;
1030 scm_t_catch_handler handler;
1031 void *handler_data;
1032 SCM thread;
1033 scm_i_pthread_mutex_t mutex;
1034 scm_i_pthread_cond_t cond;
1035} spawn_data;
1036
1037static void *
1038really_spawn (void *d)
1039{
1040 spawn_data *data = (spawn_data *)d;
1041 scm_t_catch_body body = data->body;
1042 void *body_data = data->body_data;
1043 scm_t_catch_handler handler = data->handler;
1044 void *handler_data = data->handler_data;
1045 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1046
1047 scm_i_scm_pthread_mutex_lock (&data->mutex);
1048 data->thread = scm_current_thread ();
1049 scm_i_pthread_cond_signal (&data->cond);
1050 scm_i_pthread_mutex_unlock (&data->mutex);
1051
1052 if (handler == NULL)
1053 t->result = body (body_data);
1054 else
1055 t->result = scm_internal_catch (SCM_BOOL_T,
1056 body, body_data,
1057 handler, handler_data);
1058
1059 return 0;
1060}
1061
1062static void *
1063spawn_thread (void *d)
1064{
1065 spawn_data *data = (spawn_data *)d;
1066 scm_i_pthread_detach (scm_i_pthread_self ());
1067 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1068 return NULL;
1069}
1070
1071SCM
1072scm_spawn_thread (scm_t_catch_body body, void *body_data,
1073 scm_t_catch_handler handler, void *handler_data)
1074{
1075 spawn_data data;
1076 scm_i_pthread_t id;
1077 int err;
1078
1079 data.parent = scm_current_dynamic_state ();
1080 data.body = body;
1081 data.body_data = body_data;
1082 data.handler = handler;
1083 data.handler_data = handler_data;
1084 data.thread = SCM_BOOL_F;
1085 scm_i_pthread_mutex_init (&data.mutex, NULL);
1086 scm_i_pthread_cond_init (&data.cond, NULL);
1087
1088 scm_i_scm_pthread_mutex_lock (&data.mutex);
1089 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1090 if (err)
1091 {
1092 scm_i_pthread_mutex_unlock (&data.mutex);
1093 errno = err;
1094 scm_syserror (NULL);
1095 }
1096 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1097 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1098
9de87eea
MV
1099 return data.thread;
1100}
1101
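/* Illustrative sketch, not part of the original source: spawning a thread
   from C with scm_spawn_thread.  The body runs in a fresh thread that is
   already in guile mode; the handler (which may be NULL) catches throws with
   tag #t.  The helper names are hypothetical. */
#if 0
static SCM
example_body (void *data)
{
  return scm_from_int (42);
}

static SCM
example_handler (void *data, SCM key, SCM args)
{
  return SCM_BOOL_F;
}

static SCM
spawn_example (void)
{
  SCM thread = scm_spawn_thread (example_body, NULL,
                                 example_handler, NULL);
  return scm_join_thread (thread);   /* => 42, or #f after a throw */
}
#endif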
29717c89
MD
1102SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1103 (),
1104"Move the calling thread to the end of the scheduling queue.")
1105#define FUNC_NAME s_scm_yield
1106{
9de87eea 1107 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1108}
1109#undef FUNC_NAME
1110
2e77f720
LC
1111SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1112 (SCM thread),
1113"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1114"cannot be the current thread, and if @var{thread} has already terminated or "
1115"been signaled to terminate, this function is a no-op.")
1116#define FUNC_NAME s_scm_cancel_thread
1117{
1118 scm_i_thread *t = NULL;
1119
1120 SCM_VALIDATE_THREAD (1, thread);
1121 t = SCM_I_THREAD_DATA (thread);
86a597f8 1122 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1123 if (!t->canceled)
1124 {
1125 t->canceled = 1;
86a597f8 1126 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1127 scm_i_pthread_cancel (t->pthread);
1128 }
1129 else
86a597f8 1130 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1131
1132 return SCM_UNSPECIFIED;
1133}
1134#undef FUNC_NAME
1135
1136SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1137 (SCM thread, SCM proc),
1138"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1139"This handler will be called when the thread exits.")
1140#define FUNC_NAME s_scm_set_thread_cleanup_x
1141{
1142 scm_i_thread *t;
1143
1144 SCM_VALIDATE_THREAD (1, thread);
1145 if (!scm_is_false (proc))
1146 SCM_VALIDATE_THUNK (2, proc);
1147
2e77f720 1148 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1149 scm_i_pthread_mutex_lock (&t->admin_mutex);
1150
2e77f720
LC
1151 if (!(t->exited || t->canceled))
1152 t->cleanup_handler = proc;
1153
86a597f8 1154 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1155
1156 return SCM_UNSPECIFIED;
1157}
1158#undef FUNC_NAME
1159
1160SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1161 (SCM thread),
1162"Return the cleanup handler installed for the thread @var{thread}.")
1163#define FUNC_NAME s_scm_thread_cleanup
1164{
1165 scm_i_thread *t;
1166 SCM ret;
1167
1168 SCM_VALIDATE_THREAD (1, thread);
1169
2e77f720 1170 t = SCM_I_THREAD_DATA (thread);
86a597f8 1171 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1172 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1173 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1174
1175 return ret;
1176}
1177#undef FUNC_NAME
1178
6180e336
NJ
1179SCM scm_join_thread (SCM thread)
1180{
1181 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1182}
1183
1184SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1185 (SCM thread, SCM timeout, SCM timeoutval),
d823b11b
MV
1186"Suspend execution of the calling thread until the target @var{thread} "
1187"terminates, unless the target @var{thread} has already terminated. ")
6180e336 1188#define FUNC_NAME s_scm_join_thread_timed
5f05c406 1189{
9de87eea 1190 scm_i_thread *t;
6180e336
NJ
1191 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1192 SCM res = SCM_BOOL_F;
1193
1194 if (! (SCM_UNBNDP (timeoutval)))
1195 res = timeoutval;
d823b11b
MV
1196
1197 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1198 if (scm_is_eq (scm_current_thread (), thread))
2e77f720 1199 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
d823b11b 1200
9de87eea 1201 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1202 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1203
6180e336
NJ
1204 if (! SCM_UNBNDP (timeout))
1205 {
1206 to_timespec (timeout, &ctimeout);
1207 timeout_ptr = &ctimeout;
1208 }
1209
1210 if (t->exited)
1211 res = t->result;
1212 else
d823b11b 1213 {
9de87eea
MV
1214 while (1)
1215 {
74926120 1216 int err = block_self (t->join_queue, thread, &t->admin_mutex,
6180e336
NJ
1217 timeout_ptr);
1218 if (err == 0)
1219 {
1220 if (t->exited)
1221 {
1222 res = t->result;
1223 break;
1224 }
1225 }
1226 else if (err == ETIMEDOUT)
9de87eea 1227 break;
6180e336 1228
86a597f8 1229 scm_i_pthread_mutex_unlock (&t->admin_mutex);
9de87eea 1230 SCM_TICK;
86a597f8 1231 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
21346c4f
NJ
1232
1233 /* Check for exit again, since we just released and
1234 reacquired the admin mutex, before the next block_self
1235 call (which would block forever if t has already
1236 exited). */
1237 if (t->exited)
1238 {
1239 res = t->result;
1240 break;
1241 }
9de87eea 1242 }
d823b11b 1243 }
9de87eea 1244
86a597f8 1245 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 1246
d823b11b 1247 return res;
5f05c406
MV
1248}
1249#undef FUNC_NAME
1250
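/* Illustrative sketch, not part of the original source: joining a thread
   with a timeout from C.  As with the other timed operations in this file,
   the timeout is an absolute point in time in one of the formats understood
   by to_timespec; when it expires, TIMEOUTVAL is returned instead of the
   thread's result.  The function name is hypothetical. */
#if 0
static SCM
join_with_timeout_example (SCM thread)
{
  /* "Five seconds from now", built from current-time. */
  SCM deadline = scm_sum (scm_current_time (), scm_from_int (5));

  return scm_join_thread_timed (thread, deadline,
                                scm_from_latin1_symbol ("timed-out"));
}
#endif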
6180e336
NJ
1251SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1252 (SCM obj),
1253 "Return @code{#t} if @var{obj} is a thread.")
1254#define FUNC_NAME s_scm_thread_p
1255{
1256 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1257}
1258#undef FUNC_NAME
5f05c406 1259
4079f87e 1260
9de87eea
MV
1261static size_t
1262fat_mutex_free (SCM mx)
76da80e7 1263{
9de87eea
MV
1264 fat_mutex *m = SCM_MUTEX_DATA (mx);
1265 scm_i_pthread_mutex_destroy (&m->lock);
76da80e7
MV
1266 return 0;
1267}
1268
1269static int
9de87eea 1270fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1271{
9de87eea
MV
1272 fat_mutex *m = SCM_MUTEX_DATA (mx);
1273 scm_puts ("#<mutex ", port);
1274 scm_uintprint ((scm_t_bits)m, 16, port);
1275 scm_puts (">", port);
1276 return 1;
76da80e7
MV
1277}
1278
76da80e7 1279static SCM
6180e336 1280make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1281{
9de87eea
MV
1282 fat_mutex *m;
1283 SCM mx;
1284
1285 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1286 scm_i_pthread_mutex_init (&m->lock, NULL);
1287 m->owner = SCM_BOOL_F;
adc085f1 1288 m->level = 0;
6180e336 1289
adc085f1 1290 m->recursive = recursive;
6180e336
NJ
1291 m->unchecked_unlock = unchecked_unlock;
1292 m->allow_external_unlock = external_unlock;
1293
9de87eea
MV
1294 m->waiting = SCM_EOL;
1295 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1296 m->waiting = make_queue ();
1297 return mx;
76da80e7
MV
1298}
1299
6180e336
NJ
1300SCM scm_make_mutex (void)
1301{
1302 return scm_make_mutex_with_flags (SCM_EOL);
1303}
1304
2a1d0688
NJ
1305SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1306SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1307SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1308
1309SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1310 (SCM flags),
9de87eea 1311 "Create a new mutex. ")
6180e336 1312#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1313{
6180e336
NJ
1314 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1315
1316 SCM ptr = flags;
1317 while (! scm_is_null (ptr))
1318 {
1319 SCM flag = SCM_CAR (ptr);
1320 if (scm_is_eq (flag, unchecked_unlock_sym))
1321 unchecked_unlock = 1;
1322 else if (scm_is_eq (flag, allow_external_unlock_sym))
1323 external_unlock = 1;
1324 else if (scm_is_eq (flag, recursive_sym))
1325 recursive = 1;
74926120 1326 else
2a1d0688 1327 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1328 ptr = SCM_CDR (ptr);
1329 }
1330 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
76da80e7
MV
1331}
1332#undef FUNC_NAME
1333
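/* Illustrative sketch, not part of the original source: the C-level
   counterpart of (make-mutex 'recursive) and friends, using the flag symbols
   parsed above. */
#if 0
static SCM
make_flagged_mutexes_example (void)
{
  SCM plain = scm_make_mutex ();
  SCM recursive =
    scm_make_mutex_with_flags
    (scm_list_1 (scm_from_latin1_symbol ("recursive")));
  SCM lenient =
    scm_make_mutex_with_flags
    (scm_list_2 (scm_from_latin1_symbol ("unchecked-unlock"),
                 scm_from_latin1_symbol ("allow-external-unlock")));

  return scm_list_3 (plain, recursive, lenient);
}
#endif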
9de87eea 1334SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
9bc4701c 1335 (void),
9de87eea
MV
1336 "Create a new recursive mutex. ")
1337#define FUNC_NAME s_scm_make_recursive_mutex
9bc4701c 1338{
6180e336 1339 return make_fat_mutex (1, 0, 0);
9bc4701c
MD
1340}
1341#undef FUNC_NAME
1342
6180e336
NJ
1343SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1344
1345static SCM
adc085f1 1346fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
9de87eea
MV
1347{
1348 fat_mutex *m = SCM_MUTEX_DATA (mutex);
6180e336 1349
adc085f1 1350 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
6180e336
NJ
1351 SCM err = SCM_BOOL_F;
1352
1353 struct timeval current_time;
9de87eea
MV
1354
1355 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1356
1357 while (1)
9de87eea 1358 {
adc085f1 1359 if (m->level == 0)
6180e336 1360 {
adc085f1 1361 m->owner = new_owner;
6180e336 1362 m->level++;
74926120 1363
adc085f1 1364 if (SCM_I_IS_THREAD (new_owner))
6180e336 1365 {
adc085f1 1366 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
ccb80964 1367
4f39f31e
LC
1368 /* FIXME: The order in which `t->admin_mutex' and
1369 `m->lock' are taken differs from that in
1370 `on_thread_exit', potentially leading to deadlocks. */
6180e336 1371 scm_i_pthread_mutex_lock (&t->admin_mutex);
a0faf7dd
LC
1372
1373 /* Only keep a weak reference to MUTEX so that it's not
f57fdf07
LC
1374 retained when not referenced elsewhere (bug #27450).
1375 The weak pair itself is eventually removed when MUTEX
1376 is unlocked. Note that `t->mutexes' lists mutexes
1377 currently held by T, so it should be small. */
a141db86
AW
1378 t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
1379 t->mutexes);
a0faf7dd 1380
6180e336 1381 scm_i_pthread_mutex_unlock (&t->admin_mutex);
6180e336 1382 }
adc085f1
JG
1383 *ret = 1;
1384 break;
1385 }
1386 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1387 {
1388 m->owner = new_owner;
1389 err = scm_cons (scm_abandoned_mutex_error_key,
1390 scm_from_locale_string ("lock obtained on abandoned "
1391 "mutex"));
1392 *ret = 1;
1393 break;
1394 }
1395 else if (scm_is_eq (m->owner, new_owner))
1396 {
1397 if (m->recursive)
1398 {
1399 m->level++;
74926120 1400 *ret = 1;
adc085f1
JG
1401 }
1402 else
6180e336 1403 {
adc085f1
JG
1404 err = scm_cons (scm_misc_error_key,
1405 scm_from_locale_string ("mutex already locked "
1406 "by thread"));
1407 *ret = 0;
1408 }
74926120 1409 break;
adc085f1 1410 }
9de87eea 1411 else
9de87eea 1412 {
74926120 1413 if (timeout != NULL)
adc085f1
JG
1414 {
1415 gettimeofday (&current_time, NULL);
1416 if (current_time.tv_sec > timeout->tv_sec ||
1417 (current_time.tv_sec == timeout->tv_sec &&
1418 current_time.tv_usec * 1000 > timeout->tv_nsec))
6180e336 1419 {
adc085f1
JG
1420 *ret = 0;
1421 break;
6180e336 1422 }
6180e336 1423 }
37a52039 1424 block_self (m->waiting, mutex, &m->lock, timeout);
9de87eea
MV
1425 scm_i_pthread_mutex_unlock (&m->lock);
1426 SCM_TICK;
1427 scm_i_scm_pthread_mutex_lock (&m->lock);
1428 }
1429 }
1430 scm_i_pthread_mutex_unlock (&m->lock);
6180e336 1431 return err;
9de87eea
MV
1432}
1433
6180e336
NJ
1434SCM scm_lock_mutex (SCM mx)
1435{
adc085f1 1436 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
6180e336
NJ
1437}
1438
adc085f1
JG
1439SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1440 (SCM m, SCM timeout, SCM owner),
9bc4701c
MD
1441"Lock @var{mutex}. If the mutex is already locked, the calling thread "
1442"blocks until the mutex becomes available. The function returns when "
1443"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1444"a thread already owns will succeed right away and will not block the "
1445"thread. That is, Guile's mutexes are @emph{recursive}. ")
6180e336 1446#define FUNC_NAME s_scm_lock_mutex_timed
9bc4701c 1447{
6180e336
NJ
1448 SCM exception;
1449 int ret = 0;
1450 scm_t_timespec cwaittime, *waittime = NULL;
76da80e7 1451
6180e336
NJ
1452 SCM_VALIDATE_MUTEX (1, m);
1453
1454 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1455 {
1456 to_timespec (timeout, &cwaittime);
1457 waittime = &cwaittime;
1458 }
1459
97ec95b7
LC
1460 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1461 SCM_VALIDATE_THREAD (3, owner);
1462
adc085f1 1463 exception = fat_mutex_lock (m, waittime, owner, &ret);
6180e336
NJ
1464 if (!scm_is_false (exception))
1465 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1466 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c 1467}
76da80e7 1468#undef FUNC_NAME
9bc4701c 1469
9ec1573d
MW
1470static void
1471lock_mutex_return_void (SCM mx)
1472{
1473 (void) scm_lock_mutex (mx);
1474}
1475
1476static void
1477unlock_mutex_return_void (SCM mx)
1478{
1479 (void) scm_unlock_mutex (mx);
1480}
1481
a4d106c7 1482void
661ae7ab 1483scm_dynwind_lock_mutex (SCM mutex)
a4d106c7 1484{
9ec1573d 1485 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
661ae7ab 1486 SCM_F_WIND_EXPLICITLY);
9ec1573d 1487 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
661ae7ab 1488 SCM_F_WIND_EXPLICITLY);
a4d106c7
MV
1489}
1490
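/* Illustrative sketch, not part of the original source: using
   scm_dynwind_lock_mutex so that the mutex is released however the dynwind
   context is left, including via a throw or a continuation.  The function
   name is hypothetical. */
#if 0
static void
with_mutex_example (SCM mutex)
{
  scm_dynwind_begin (0);
  scm_dynwind_lock_mutex (mutex);
  /* ... code that may throw; the unwind handler still unlocks ... */
  scm_dynwind_end ();
}
#endif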
9bc4701c 1491SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
ba1b7223 1492 (SCM mutex),
9bc4701c
MD
1493"Try to lock @var{mutex}. If the mutex is already locked by someone "
1494"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1495#define FUNC_NAME s_scm_try_mutex
1496{
6180e336
NJ
1497 SCM exception;
1498 int ret = 0;
1499 scm_t_timespec cwaittime, *waittime = NULL;
9de87eea 1500
ba1b7223 1501 SCM_VALIDATE_MUTEX (1, mutex);
6180e336
NJ
1502
1503 to_timespec (scm_from_int(0), &cwaittime);
1504 waittime = &cwaittime;
74926120 1505
adc085f1 1506 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
6180e336
NJ
1507 if (!scm_is_false (exception))
1508 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1509 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9de87eea
MV
1510}
1511#undef FUNC_NAME
76da80e7 1512
6180e336
NJ
1513/*** Fat condition variables */
1514
1515typedef struct {
1516 scm_i_pthread_mutex_t lock;
1517 SCM waiting; /* the threads waiting for this condition. */
1518} fat_cond;
1519
1520#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1521#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1522
a141db86
AW
1523static void
1524remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
1525{
1526 SCM walk, prev;
1527
1528 for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
1529 walk = SCM_CDR (walk))
1530 {
1531 if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
1532 {
1533 if (scm_is_pair (prev))
1534 SCM_SETCDR (prev, SCM_CDR (walk));
1535 else
1536 t->mutexes = SCM_CDR (walk);
1537 break;
1538 }
1539 }
1540}
1541
6180e336
NJ
1542static int
1543fat_mutex_unlock (SCM mutex, SCM cond,
1544 const scm_t_timespec *waittime, int relock)
9de87eea 1545{
7f991c7d 1546 SCM owner;
6180e336
NJ
1547 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1548 fat_cond *c = NULL;
1549 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1550 int err = 0, ret = 0;
9de87eea
MV
1551
1552 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1 1553
7f991c7d 1554 owner = m->owner;
adc085f1 1555
d31ae2c3 1556 if (!scm_is_eq (owner, t->handle))
9bc4701c 1557 {
adc085f1 1558 if (m->level == 0)
6180e336
NJ
1559 {
1560 if (!m->unchecked_unlock)
2a1d0688
NJ
1561 {
1562 scm_i_pthread_mutex_unlock (&m->lock);
1563 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1564 }
d31ae2c3 1565 owner = t->handle;
6180e336
NJ
1566 }
1567 else if (!m->allow_external_unlock)
2a1d0688
NJ
1568 {
1569 scm_i_pthread_mutex_unlock (&m->lock);
1570 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1571 }
6180e336
NJ
1572 }
1573
1574 if (! (SCM_UNBNDP (cond)))
1575 {
6180e336
NJ
1576 c = SCM_CONDVAR_DATA (cond);
1577 while (1)
1578 {
1579 int brk = 0;
1580
6180e336
NJ
1581 if (m->level > 0)
1582 m->level--;
adc085f1 1583 if (m->level == 0)
f57fdf07
LC
1584 {
1585 /* Change the owner of MUTEX. */
a141db86 1586 remove_mutex_from_thread (mutex, t);
f57fdf07
LC
1587 m->owner = unblock_from_queue (m->waiting);
1588 }
adc085f1 1589
6180e336 1590 t->block_asyncs++;
74926120 1591
d2a51087
NJ
1592 err = block_self (c->waiting, cond, &m->lock, waittime);
1593 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
1594
1595 if (err == 0)
1596 {
1597 ret = 1;
1598 brk = 1;
1599 }
1600 else if (err == ETIMEDOUT)
1601 {
1602 ret = 0;
1603 brk = 1;
1604 }
1605 else if (err != EINTR)
74926120 1606 {
6180e336 1607 errno = err;
6180e336 1608 scm_syserror (NULL);
74926120 1609 }
6180e336
NJ
1610
1611 if (brk)
1612 {
1613 if (relock)
adc085f1 1614 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
4201062d 1615 t->block_asyncs--;
6180e336
NJ
1616 break;
1617 }
74926120 1618
6180e336 1619 t->block_asyncs--;
27c6ebcb 1620 scm_async_tick ();
74926120 1621
6180e336
NJ
1622 scm_remember_upto_here_2 (cond, mutex);
1623
1624 scm_i_scm_pthread_mutex_lock (&m->lock);
1625 }
9bc4701c 1626 }
9de87eea 1627 else
6180e336
NJ
1628 {
1629 if (m->level > 0)
1630 m->level--;
74926120 1631 if (m->level == 0)
f57fdf07
LC
1632 {
1633 /* Change the owner of MUTEX. */
a141db86 1634 remove_mutex_from_thread (mutex, t);
f57fdf07
LC
1635 m->owner = unblock_from_queue (m->waiting);
1636 }
74926120 1637
6180e336
NJ
1638 scm_i_pthread_mutex_unlock (&m->lock);
1639 ret = 1;
1640 }
9de87eea 1641
6180e336 1642 return ret;
9bc4701c 1643}
9bc4701c 1644
6180e336
NJ
1645SCM scm_unlock_mutex (SCM mx)
1646{
1647 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
9bc4701c 1648}
9bc4701c 1649
6180e336
NJ
1650SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1651 (SCM mx, SCM cond, SCM timeout),
9bc4701c
MD
1652"Unlocks @var{mutex} if the calling thread owns the lock on "
1653"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1654"thread results in undefined behaviour. Once a mutex has been unlocked, "
1655"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1656"lock. Every call to @code{lock-mutex} by this thread must be matched "
1657"with a call to @code{unlock-mutex}. Only the last call to "
1658"@code{unlock-mutex} will actually unlock the mutex. ")
6180e336 1659#define FUNC_NAME s_scm_unlock_mutex_timed
9bc4701c 1660{
6180e336
NJ
1661 scm_t_timespec cwaittime, *waittime = NULL;
1662
9bc4701c 1663 SCM_VALIDATE_MUTEX (1, mx);
6180e336
NJ
1664 if (! (SCM_UNBNDP (cond)))
1665 {
1666 SCM_VALIDATE_CONDVAR (2, cond);
1667
1668 if (! (SCM_UNBNDP (timeout)))
1669 {
1670 to_timespec (timeout, &cwaittime);
1671 waittime = &cwaittime;
1672 }
1673 }
1674
1675 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c
MD
1676}
1677#undef FUNC_NAME
1678
6180e336
NJ
1679SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1680 (SCM obj),
1681 "Return @code{#t} if @var{obj} is a mutex.")
1682#define FUNC_NAME s_scm_mutex_p
1683{
1684 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1685}
74926120 1686#undef FUNC_NAME
9de87eea
MV
1687
1688SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1689 (SCM mx),
1690 "Return the thread owning @var{mx}, or @code{#f}.")
1691#define FUNC_NAME s_scm_mutex_owner
1692{
adc085f1
JG
1693 SCM owner;
1694 fat_mutex *m = NULL;
1695
9de87eea 1696 SCM_VALIDATE_MUTEX (1, mx);
adc085f1
JG
1697 m = SCM_MUTEX_DATA (mx);
1698 scm_i_pthread_mutex_lock (&m->lock);
1699 owner = m->owner;
1700 scm_i_pthread_mutex_unlock (&m->lock);
1701
1702 return owner;
9de87eea
MV
1703}
1704#undef FUNC_NAME
1705
1706SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1707 (SCM mx),
adc085f1 1708 "Return the lock level of mutex @var{mx}.")
9de87eea
MV
1709#define FUNC_NAME s_scm_mutex_level
1710{
1711 SCM_VALIDATE_MUTEX (1, mx);
1712 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1713}
1714#undef FUNC_NAME
1715
adc085f1
JG
1716SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1717 (SCM mx),
1718 "Returns @code{#t} if the mutex @var{mx} is locked.")
1719#define FUNC_NAME s_scm_mutex_locked_p
1720{
1721 SCM_VALIDATE_MUTEX (1, mx);
1722 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1723}
1724#undef FUNC_NAME
9de87eea 1725
9de87eea
MV
1726static int
1727fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1728{
1729 fat_cond *c = SCM_CONDVAR_DATA (cv);
1730 scm_puts ("#<condition-variable ", port);
1731 scm_uintprint ((scm_t_bits)c, 16, port);
1732 scm_puts (">", port);
1733 return 1;
1734}
9bc4701c 1735
d823b11b
MV
1736SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1737 (void),
1738 "Make a new condition variable.")
1739#define FUNC_NAME s_scm_make_condition_variable
5f05c406 1740{
9de87eea
MV
1741 fat_cond *c;
1742 SCM cv;
1743
1744 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
9de87eea
MV
1745 c->waiting = SCM_EOL;
1746 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1747 c->waiting = make_queue ();
d823b11b 1748 return cv;
5f05c406 1749}
d823b11b 1750#undef FUNC_NAME
5f05c406 1751
d823b11b
MV
1752SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1753 (SCM cv, SCM mx, SCM t),
1754"Wait until @var{cond-var} has been signalled. While waiting, "
1755"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1756"is locked again when this function returns. When @var{time} is given, "
1757"it specifies a point in time where the waiting should be aborted. It "
1758"can be either a integer as returned by @code{current-time} or a pair "
1759"as returned by @code{gettimeofday}. When the waiting is aborted the "
1760"mutex is locked and @code{#f} is returned. When the condition "
1761"variable is in fact signalled, the mutex is also locked and @code{#t} "
1762"is returned. ")
1763#define FUNC_NAME s_scm_timed_wait_condition_variable
5f05c406 1764{
9de87eea 1765 scm_t_timespec waittime, *waitptr = NULL;
d823b11b
MV
1766
1767 SCM_VALIDATE_CONDVAR (1, cv);
1768 SCM_VALIDATE_MUTEX (2, mx);
74926120 1769
d823b11b
MV
1770 if (!SCM_UNBNDP (t))
1771 {
6180e336 1772 to_timespec (t, &waittime);
9de87eea 1773 waitptr = &waittime;
d823b11b
MV
1774 }
1775
2a1d0688 1776 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
5f05c406 1777}
d823b11b 1778#undef FUNC_NAME
5f05c406 1779
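/* Illustrative sketch, not part of the original source: the usual wait loop
   around wait-condition-variable, driven from C.  The mutex is atomically
   released while waiting and re-locked before the call returns, so the
   predicate is re-checked under the lock.  READY_BOX is a hypothetical
   Scheme variable object acting as the condition's predicate. */
#if 0
static void
wait_until_ready_example (SCM cv, SCM mx, SCM ready_box)
{
  scm_lock_mutex (mx);
  while (scm_is_false (scm_variable_ref (ready_box)))
    scm_timed_wait_condition_variable (cv, mx, SCM_UNDEFINED);
  scm_unlock_mutex (mx);
}
#endif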
9de87eea
MV
1780static void
1781fat_cond_signal (fat_cond *c)
1782{
9de87eea 1783 unblock_from_queue (c->waiting);
9de87eea
MV
1784}
1785
d823b11b
MV
1786SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1787 (SCM cv),
1788 "Wake up one thread that is waiting for @var{cv}")
1789#define FUNC_NAME s_scm_signal_condition_variable
5f05c406 1790{
d823b11b 1791 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1792 fat_cond_signal (SCM_CONDVAR_DATA (cv));
d823b11b 1793 return SCM_BOOL_T;
5f05c406 1794}
d823b11b 1795#undef FUNC_NAME
5f05c406 1796
9de87eea
MV
1797static void
1798fat_cond_broadcast (fat_cond *c)
1799{
9de87eea
MV
1800 while (scm_is_true (unblock_from_queue (c->waiting)))
1801 ;
9de87eea
MV
1802}
1803
d823b11b
MV
1804SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1805 (SCM cv),
1806 "Wake up all threads that are waiting for @var{cv}. ")
1807#define FUNC_NAME s_scm_broadcast_condition_variable
5f05c406 1808{
d823b11b 1809 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1810 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
d823b11b 1811 return SCM_BOOL_T;
5f05c406 1812}
d823b11b 1813#undef FUNC_NAME
5f05c406 1814
6180e336
NJ
1815SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1816 (SCM obj),
1817 "Return @code{#t} if @var{obj} is a condition variable.")
1818#define FUNC_NAME s_scm_condition_variable_p
1819{
1820 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1821}
1822#undef FUNC_NAME


\f
/*** Select */

struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  int result;
  int errno_value;
};

static void *
do_std_select (void *args)
{
  struct select_args *select_args;

  select_args = (struct select_args *) args;

  select_args->result =
    select (select_args->nfds,
            select_args->read_fds, select_args->write_fds,
            select_args->except_fds, select_args->timeout);
  select_args->errno_value = errno;

  return NULL;
}

int
scm_std_select (int nfds,
                SELECT_TYPE *readfds,
                SELECT_TYPE *writefds,
                SELECT_TYPE *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd + 1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}

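/* Illustrative sketch, not part of the original file: waiting for input
   on a file descriptor from C while staying interruptible.  Because
   scm_std_select adds the calling thread's wakeup pipe to the read set
   and runs the real select() via scm_without_guile, the thread can
   still be woken up early; a plain select() call could not.  The helper
   name, FD, and the 5-second timeout are arbitrary.  */
#if 0
static int
wait_for_fd (int fd)
{
  SELECT_TYPE readfds;
  struct timeval tv;

  FD_ZERO (&readfds);
  FD_SET (fd, &readfds);
  tv.tv_sec = 5;
  tv.tv_usec = 0;

  /* Returns like select(): > 0 ready, 0 timeout, -1 with errno set
     (EINTR when interrupted via the wakeup pipe).  */
  return scm_std_select (fd + 1, &readfds, NULL, NULL, &tv);
}
#endif
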
/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

/* It seems reasonable not to run procedures related to mutexes and
   condition variables within `GC_do_blocking ()', since (i) the GC can
   operate even without it, and (ii) the only potential gain would be
   GC latency.  See
   http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
   for a discussion of the pros and cons.  */

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  int res = scm_i_pthread_mutex_lock (mutex);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *) data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

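/* Illustrative sketch, not part of the original file: taking a C-level
   mutex for the extent of a dynwind context, so that a non-local exit
   (error, continuation) releases it through the unwind handler
   installed above.  `my_mutex' and the body are hypothetical.  */
#if 0
static scm_i_pthread_mutex_t my_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

static void
update_shared_state (void)
{
  scm_dynwind_begin (0);
  scm_dynwind_pthread_mutex_lock (&my_mutex);
  /* ... touch the state protected by my_mutex; may throw ... */
  scm_dynwind_end ();   /* unlocks my_mutex */
}
#endif
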
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}

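/* Illustrative sketch, not part of the original file: these wrappers
   are used in the ordinary pthread pattern, with the addition that they
   record the mutex in the current thread's `held_mutex' field for the
   duration of the wait (see above).  `state_mutex', `state_cond' and
   `ready' are hypothetical; state_cond is assumed to have been set up
   with scm_i_pthread_cond_init.  */
#if 0
static scm_i_pthread_mutex_t state_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_pthread_cond_t state_cond;  /* initialized elsewhere */
static int ready = 0;

static void
wait_until_ready (void)
{
  scm_i_scm_pthread_mutex_lock (&state_mutex);
  while (!ready)
    scm_pthread_cond_wait (&state_cond, &state_mutex);
  scm_i_pthread_mutex_unlock (&state_mutex);
}
#endif
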
#endif

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We cannot allocate while holding the thread_admin_mutex because
     of the way GC is done.  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
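
/* Illustrative sketch, not part of the original file: consuming the
   list returned by scm_all_threads from C.  The caller only walks the
   freshly consed list, so no locking is needed on its side; the helper
   name is hypothetical.  */
#if 0
static void
print_all_threads (SCM port)
{
  SCM threads = scm_all_threads ();
  for (; scm_is_pair (threads); threads = scm_cdr (threads))
    {
      scm_display (scm_car (threads), port);
      scm_newline (port);
    }
}
#endif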

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
            (void),
            "Return the total number of processors of the machine, which\n"
            "is guaranteed to be at least 1.  A ``processor'' here is a\n"
            "thread execution unit, which can be either:\n\n"
            "@itemize\n"
            "@item an execution core in a (possibly multi-core) chip, in a\n"
            "  (possibly multi-chip) module, in a single computer, or\n"
            "@item a thread execution unit inside a core in the case of\n"
            "  @dfn{hyper-threaded} CPUs.\n"
            "@end itemize\n\n"
            "Which of the two definitions is used is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME

SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
            (void),
            "Like @code{total-processor-count}, but return the number of\n"
            "processors available to the current process.  See\n"
            "@code{setaffinity} and @code{getaffinity} for more\n"
            "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
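
/* Illustrative sketch, not part of the original file: a C caller sizing
   a worker pool from current-processor-count.  Both procedures return a
   Scheme integer, hence the scm_to_ulong conversion; the helper name
   and the clamp value are arbitrary.  */
#if 0
static unsigned long
choose_worker_count (void)
{
  unsigned long n = scm_to_ulong (scm_current_processor_count ());
  return n > 64 ? 64 : n;
}
#endif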


\f

static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.  */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

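/* Illustrative sketch, not part of the original file: a critical
   section around C code that must run with asyncs blocked.  Passing
   SCM_BOOL_F uses the shared recursive mutex created in
   scm_init_threads below; the helper name and body are hypothetical.  */
#if 0
static void
with_critical_section (void)
{
  scm_dynwind_begin (0);
  scm_dynwind_critical_section (SCM_BOOL_F);
  /* ... code that must not be interleaved with asyncs ... */
  scm_dynwind_end ();
}
#endif
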
/*** Initialization */

scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

\f
/* IA64-specific things.  */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */


/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/