Merge remote-tracking branch 'origin/stable-2.0'
[bpt/guile.git] / libguile / threads.c
CommitLineData
cde24ce1 1/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
74926120 2 *
73be1d9e 3 * This library is free software; you can redistribute it and/or
53befeb7
NJ
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7bfd3b9e 7 *
53befeb7
NJ
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
73be1d9e
MV
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
7bfd3b9e 12 *
73be1d9e
MV
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
53befeb7
NJ
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
73be1d9e 17 */
1bbd0b84 18
1bbd0b84 19
7bfd3b9e 20\f
dbb605f5
LC
21#ifdef HAVE_CONFIG_H
22# include <config.h>
23#endif
7bfd3b9e 24
1c44468d 25#include "libguile/bdw-gc.h"
1810dc4e
RB
26#include "libguile/_scm.h"
27
fcc5d734 28#if HAVE_UNISTD_H
d823b11b 29#include <unistd.h>
fcc5d734 30#endif
d823b11b 31#include <stdio.h>
8ab3d8a0
KR
32
33#ifdef HAVE_STRING_H
34#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35#endif
36
fcc5d734 37#if HAVE_SYS_TIME_H
d823b11b 38#include <sys/time.h>
fcc5d734 39#endif
5f05c406 40
f57fdf07 41#include <assert.h>
32f94bf2 42#include <fcntl.h>
d20912e6 43#include <nproc.h>
f57fdf07 44
d823b11b
MV
45#include "libguile/validate.h"
46#include "libguile/root.h"
47#include "libguile/eval.h"
48#include "libguile/async.h"
49#include "libguile/ports.h"
50#include "libguile/threads.h"
a0599745 51#include "libguile/dynwind.h"
d823b11b 52#include "libguile/iselect.h"
9de87eea
MV
53#include "libguile/fluids.h"
54#include "libguile/continuations.h"
2b829bbb 55#include "libguile/gc.h"
9de87eea 56#include "libguile/init.h"
2e77f720 57#include "libguile/scmsigs.h"
6180e336 58#include "libguile/strings.h"
7bfd3b9e 59
634aa8de
LC
60#include <full-read.h>
61
cde24ce1
AW
62
63\f
64
65/* First some libgc shims. */
66
67/* Make sure GC_fn_type is defined; it is missing from the public
68 headers of GC 7.1 and earlier. */
69#ifndef HAVE_GC_FN_TYPE
70typedef void * (* GC_fn_type) (void *);
71#endif
72
73
5f0d2951
AW
74#ifndef GC_SUCCESS
75#define GC_SUCCESS 0
76#endif
77
78#ifndef GC_UNIMPLEMENTED
79#define GC_UNIMPLEMENTED 3
80#endif
81
82/* Likewise struct GC_stack_base is missing before 7.1. */
83#ifndef HAVE_GC_STACK_BASE
12c1d861
AW
/* Replacement declaration for libgc < 7.1, which does not declare
   struct GC_stack_base itself.  */
struct GC_stack_base {
  void * mem_base; /* Base of memory stack. */
#ifdef __ia64__
  void * reg_base; /* Base of separate register stack. */
#endif
};
5f0d2951
AW
90
/* Shim for libgc < 7.1, which lacks GC_register_my_thread: report that
   per-thread registration is unsupported.  */
static int
GC_register_my_thread (struct GC_stack_base *stack_base)
{
  return GC_UNIMPLEMENTED;
}
96
/* Shim for libgc < 7.1: unregistering a thread is a no-op.  */
static void
GC_unregister_my_thread ()
{
}
101
12c1d861
AW
/* Fallback stack-base detection, used only with pre-7.1 libgc where
   GC_get_stack_base is unavailable.  Exactly one variant below is
   compiled in, selected by the preprocessor conditions.  */
#if !SCM_USE_PTHREAD_THREADS
/* No threads; we can just use GC_stackbottom. */
static void *
get_thread_stack_base ()
{
  return GC_stackbottom;
}

#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
  && defined PTHREAD_ATTR_GETSTACK_WORKS
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them. */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* The "base" for GC purposes is the end from which the stack grows
     away, so it depends on the growth direction.  */
#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com. */
static void *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#else
#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
#endif
146
/* Shim for libgc < 7.1: fill in *STACK_BASE for the current thread
   using the fallback get_thread_stack_base above.  Always reports
   GC_SUCCESS.  */
static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum. */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}
12c1d861
AW
169
/* Shim for libgc < 7.1: call FN with a stack base approximated by the
   address of a local variable in this frame, and ARG.  */
static void *
GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
{
  struct GC_stack_base stack_base;

  stack_base.mem_base = (void*)&stack_base;
#ifdef __ia64__
  /* FIXME: Untested. */
  {
    ucontext_t ctx;
    getcontext (&ctx);
    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
  }
#endif

  return fn (&stack_base, arg);
}
187#endif /* HAVE_GC_STACK_BASE */
188
5f0d2951 189
cde24ce1
AW
190/* Now define with_gc_active and with_gc_inactive. */
191
#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer). */

/* Run FUNC (DATA) with this thread's stack invisible to the collector,
   so a GC may proceed while FUNC blocks.  */
static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

/* Run FUNC (DATA) with this thread visible to the collector again.  */
static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active. */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */
228
229
634aa8de 230\f
6180e336
NJ
231static void
232to_timespec (SCM t, scm_t_timespec *waittime)
233{
234 if (scm_is_pair (t))
235 {
236 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
237 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
238 }
239 else
240 {
241 double time = scm_to_double (t);
242 double sec = scm_c_truncate (time);
243
244 waittime->tv_sec = (long) sec;
2a1d0688 245 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
6180e336
NJ
246 }
247}
248
c058db8a 249\f
d823b11b 250/*** Queues */
7bfd3b9e 251
c058db8a
LC
252/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
253 the risk of false references leading to unbounded retained space as
254 described in "Bounding Space Usage of Conservative Garbage Collectors",
255 H.J. Boehm, 2001. */
256
9de87eea
MV
/* Make an empty queue data structure.  A queue is a pair whose cdr is
   the list of elements and whose car points at the last cell of that
   list, giving O(1) access to both ends.  */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}
7bfd3b9e 264
9de87eea
MV
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.  Runs inside a critical section
   so concurrent enqueue/dequeue/remqueue calls do not corrupt Q.  */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell is the whole list.  */
    SCM_SETCDR (q, c);
  else
    /* Append after the current last cell (car of Q).  */
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}
7bfd3b9e 281
9de87eea
MV
282/* Remove the element that the handle C refers to from the queue Q. C
283 must have been returned from a call to enqueue. The return value
284 is zero when the element referred to by C has already been removed.
285 Otherwise, 1 is returned.
286*/
287static int
d823b11b
MV
288remqueue (SCM q, SCM c)
289{
290 SCM p, prev = q;
d2a51087 291 SCM_CRITICAL_SECTION_START;
d2e53ed6 292 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
d823b11b 293 {
bc36d050 294 if (scm_is_eq (p, c))
d823b11b 295 {
bc36d050 296 if (scm_is_eq (c, SCM_CAR (q)))
d823b11b
MV
297 SCM_SETCAR (q, SCM_CDR (c));
298 SCM_SETCDR (prev, SCM_CDR (c));
c058db8a
LC
299
300 /* GC-robust */
301 SCM_SETCDR (c, SCM_EOL);
302
d2a51087 303 SCM_CRITICAL_SECTION_END;
9de87eea 304 return 1;
d823b11b
MV
305 }
306 prev = p;
307 }
d2a51087 308 SCM_CRITICAL_SECTION_END;
9de87eea 309 return 0;
d823b11b
MV
310}
311
9de87eea
MV
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      /* If the list just became empty, reset the tail pointer too.  */
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}
7bfd3b9e 339
9de87eea 340/*** Thread smob routines */
76da80e7 341
d823b11b
MV
342
/* Smob print routine for thread objects: writes a representation like
   #<thread ID (ADDR)> to PORT, where ID is the pthread identifier and
   ADDR the scm_i_thread address in hex.  Returns 1 (printed OK).  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t. */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the union member whose size matches pthread_t.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts_unlocked ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts_unlocked (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts_unlocked (")>", port);
  return 1;
}
377
706846f6 378\f
9de87eea 379/*** Blocking on queues. */
f7eca35d 380
9de87eea
MV
/* See also scm_i_queue_async_cell for how such a block is
   interrputed.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    /* An async is already pending; report interruption immediately.  */
    err = EINTR;
  else
    {
      /* Block asyncs while queued so they cannot run until we have
         been removed from QUEUE again.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
9de87eea 437
d2a51087
NJ
438/* Wake up the first thread on QUEUE, if any. The awoken thread is
439 returned, or #f if the queue was empty.
9de87eea
MV
440 */
441static SCM
442unblock_from_queue (SCM queue)
443{
444 SCM thread = dequeue (queue);
445 if (scm_is_true (thread))
446 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
447 return thread;
448}
449
45f15cac 450\f
9de87eea
MV
451/* Getting into and out of guile mode.
452 */
453
f60a7648
AW
454/* Key used to attach a cleanup handler to a given thread. Also, if
455 thread-local storage is unavailable, this key is used to retrieve the
456 current thread with `pthread_getspecific ()'. */
457scm_i_pthread_key_t scm_i_thread_key;
458
459
705edb95
LC
460#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
461
462/* When thread-local storage (TLS) is available, a pointer to the
463 current-thread object is kept in TLS. Note that storing the thread-object
464 itself in TLS (rather than a pointer to some malloc'd memory) is not
465 possible since thread objects may live longer than the actual thread they
466 represent. */
467SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
468
f60a7648 469#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
705edb95 470
d823b11b 471
9de87eea
MV
472static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
473static scm_i_thread *all_threads = NULL;
474static int thread_count;
475
476static SCM scm_i_default_dynamic_state;
477
0b77014f
AW
/* Run when a fluid is collected. */
/* Clear slot N of every thread's dynamic-state fluid vector, so a
   collected fluid's cell can be reused.  Walks the global thread list
   under thread_admin_mutex.  */
void
scm_i_reset_fluid (size_t n)
{
  scm_i_thread *t;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (t = all_threads; t; t = t->next_thread)
    if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
      {
        SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);

        if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
          SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
      }
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
495
9de87eea 496/* Perform first stage of thread initialisation, in non-guile mode.
d823b11b 497 */
9de87eea 498static void
12c1d861 499guilify_self_1 (struct GC_stack_base *base)
d823b11b 500{
7f22442b
AW
501 scm_i_thread t;
502
503 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
504 before allocating anything in this thread, because allocation could
505 cause GC to run, and GC could cause finalizers, which could invoke
506 Scheme functions, which need the current thread to be set. */
507
508 t.pthread = scm_i_pthread_self ();
509 t.handle = SCM_BOOL_F;
510 t.result = SCM_BOOL_F;
511 t.cleanup_handler = SCM_BOOL_F;
512 t.mutexes = SCM_EOL;
513 t.held_mutex = NULL;
514 t.join_queue = SCM_EOL;
515 t.dynamic_state = SCM_BOOL_F;
516 t.dynwinds = SCM_EOL;
517 t.active_asyncs = SCM_EOL;
518 t.block_asyncs = 1;
519 t.pending_asyncs = 1;
520 t.critical_section_level = 0;
521 t.base = base->mem_base;
346e4402 522#ifdef __ia64__
7f22442b 523 t.register_backing_store_base = base->reg-base;
346e4402 524#endif
7f22442b
AW
525 t.continuation_root = SCM_EOL;
526 t.continuation_base = t.base;
527 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
528 t.sleep_mutex = NULL;
529 t.sleep_object = SCM_BOOL_F;
530 t.sleep_fd = -1;
531
32f94bf2 532 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
634aa8de
LC
533 /* FIXME: Error conditions during the initialization phase are handled
534 gracelessly since public functions such as `scm_init_guile ()'
535 currently have type `void'. */
536 abort ();
537
7f22442b
AW
538 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
539 t.current_mark_stack_ptr = NULL;
540 t.current_mark_stack_limit = NULL;
541 t.canceled = 0;
542 t.exited = 0;
543 t.guile_mode = 0;
9de87eea 544
7f22442b
AW
545 /* The switcheroo. */
546 {
547 scm_i_thread *t_ptr = &t;
548
549 GC_disable ();
550 t_ptr = GC_malloc (sizeof (scm_i_thread));
551 memcpy (t_ptr, &t, sizeof t);
552
553 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
f60a7648
AW
554
555#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
7f22442b
AW
556 /* Cache the current thread in TLS for faster lookup. */
557 scm_i_current_thread = t_ptr;
f60a7648 558#endif
9de87eea 559
7f22442b
AW
560 scm_i_pthread_mutex_lock (&thread_admin_mutex);
561 t_ptr->next_thread = all_threads;
562 all_threads = t_ptr;
563 thread_count++;
564 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
565
566 GC_enable ();
567 }
d823b11b
MV
568}
569
9de87eea 570/* Perform second stage of thread initialisation, in guile mode.
d823b11b 571 */
9de87eea
MV
572static void
573guilify_self_2 (SCM parent)
d823b11b 574{
9de87eea
MV
575 scm_i_thread *t = SCM_I_CURRENT_THREAD;
576
72e6b608
LC
577 t->guile_mode = 1;
578
9de87eea 579 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
c812243b 580
9de87eea
MV
581 t->continuation_root = scm_cons (t->handle, SCM_EOL);
582 t->continuation_base = t->base;
2bbe1533 583 t->vm = SCM_BOOL_F;
9de87eea
MV
584
585 if (scm_is_true (parent))
586 t->dynamic_state = scm_make_dynamic_state (parent);
587 else
588 t->dynamic_state = scm_i_make_initial_dynamic_state ();
589
590 t->join_queue = make_queue ();
591 t->block_asyncs = 0;
d823b11b
MV
592}
593
6180e336
NJ
594\f
595/*** Fat mutexes */
596
/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects all fields below */
  SCM owner;                    /* thread handle of the owner, or #f */
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
618
2002f1f8
AW
619static SCM
620call_cleanup (void *data)
621{
622 SCM *proc_p = data;
623 return scm_call_0 (*proc_p);
624}
625
/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Run the user-supplied cleanup handler, if any, catching all errors
     so that tear-down always completes.  */
  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      call_cleanup, &ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread joining on this one.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* Wake one waiter on each mutex this thread still owns.
     t->mutexes holds weak references, which may already be dead.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);

      if (scm_is_true (mutex))
	{
	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);

	  scm_i_pthread_mutex_lock (&m->lock);

	  /* Since MUTEX is in `t->mutexes', T must be its owner.  */
	  assert (scm_is_eq (m->owner, t->handle));

	  unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);
	}

      t->mutexes = scm_cdr (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
676
f60a7648
AW
/* Trampoline run via GC_call_with_stack_base from on_thread_exit:
   (re-)register the exiting thread with libgc, then run
   do_thread_exit in guile mode.  */
static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered. */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}
687
d823b11b 688static void
9de87eea 689on_thread_exit (void *v)
d823b11b 690{
29776e85 691 /* This handler is executed in non-guile mode. */
2e77f720 692 scm_i_thread *t = (scm_i_thread *) v, **tp;
0c97d7dd 693
b8441577
LC
694 /* If we were canceled, we were unable to clear `t->guile_mode', so do
695 it here. */
696 t->guile_mode = 0;
697
d2a51087
NJ
698 /* If this thread was cancelled while doing a cond wait, it will
699 still have a mutex locked, so we unlock it here. */
700 if (t->held_mutex)
701 {
702 scm_i_pthread_mutex_unlock (t->held_mutex);
703 t->held_mutex = NULL;
704 }
705
f60a7648
AW
706 /* Reinstate the current thread for purposes of scm_with_guile
707 guile-mode cleanup handlers. Only really needed in the non-TLS
708 case but it doesn't hurt to be consistent. */
709 scm_i_pthread_setspecific (scm_i_thread_key, t);
0c97d7dd 710
7f22442b
AW
711 /* Scheme-level thread finalizers and other cleanup needs to happen in
712 guile mode. */
713 GC_call_with_stack_base (do_thread_exit_trampoline, t);
0c97d7dd
MV
714
715 /* Removing ourself from the list of all threads needs to happen in
716 non-guile mode since all SCM values on our stack become
29776e85 717 unprotected once we are no longer in the list. */
0c97d7dd
MV
718 scm_i_pthread_mutex_lock (&thread_admin_mutex);
719 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
720 if (*tp == t)
721 {
722 *tp = t->next_thread;
c058db8a
LC
723
724 /* GC-robust */
725 t->next_thread = NULL;
726
0c97d7dd
MV
727 break;
728 }
729 thread_count--;
2e77f720
LC
730
731 /* If there's only one other thread, it could be the signal delivery
732 thread, so we need to notify it to shut down by closing its read pipe.
733 If it's not the signal delivery thread, then closing the read pipe isn't
734 going to hurt. */
735 if (thread_count <= 1)
736 scm_i_close_signal_pipe ();
737
0c97d7dd
MV
738 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
739
f60a7648 740 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
7f22442b 741
8e76ce94 742#if SCM_USE_PTHREAD_THREADS
7f22442b 743 GC_unregister_my_thread ();
653ccd78 744#endif
d823b11b
MV
745}
746
9de87eea 747static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
d823b11b 748
9de87eea
MV
/* Create the pthread key whose destructor, on_thread_exit, performs
   per-thread tear-down.  Run exactly once via scm_i_pthread_once.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
d823b11b 754
cde24ce1
AW
/* Perform any initializations necessary to make the current thread
   known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
   if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
   which case the default dynamic state is used.

   Returns zero when the thread was known to guile already; otherwise
   return 1.

   Note that it could be the case that the thread was known
   to Guile, but not in guile mode (because we are within a
   scm_without_guile call).  Check SCM_I_CURRENT_THREAD->guile_mode to
   be sure.  New threads are put into guile mode implicitly. */

static int
scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if (SCM_I_CURRENT_THREAD)
    {
      /* Thread is already known to Guile.
      */
      return 0;
    }
  else
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);

#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
          /* Allow other threads to come in later. */
          GC_allow_register_threads ();
#endif

	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);

          /* Register this thread with libgc. */
#if SCM_USE_PTHREAD_THREADS
          GC_register_my_thread (base);
#endif

	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
}
821
12c1d861
AW
822void
823scm_init_guile ()
d823b11b 824{
12c1d861
AW
825 struct GC_stack_base stack_base;
826
827 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
828 scm_i_init_thread_for_guile (&stack_base,
829 scm_i_default_dynamic_state);
9de87eea
MV
830 else
831 {
12c1d861 832 fprintf (stderr, "Failed to get stack base for current thread.\n");
4765b28f 833 exit (EXIT_FAILURE);
9de87eea 834 }
a54a94b3
MV
835}
836
/* Arguments threaded through GC_call_with_stack_base into
   with_guile_and_parent.  */
struct with_guile_args
{
  GC_fn_type func;   /* user function to run in guile mode */
  void *data;        /* opaque argument for FUNC */
  SCM parent;        /* parent dynamic state for new guile threads */
};
843
/* Trampoline used by with_guile_and_parent when re-activating GC:
   unpack the args and run the user function inside a continuation
   barrier.  */
static void *
with_guile_trampoline (void *data)
{
  struct with_guile_args *args = data;

  return scm_c_with_continuation_barrier (args->func, args->data);
}
851
12c1d861
AW
/* Run ARGS->func in guile mode, guilifying the thread first if
   necessary.  BASE is the current stack base as seen by
   GC_call_with_stack_base.  Returns the function's result.  */
static void *
with_guile_and_parent (struct GC_stack_base *base, void *data)
{
  void *res;
  int new_thread;
  scm_i_thread *t;
  struct with_guile_args *args = data;

  new_thread = scm_i_init_thread_for_guile (base, args->parent);
  t = SCM_I_CURRENT_THREAD;
  if (new_thread)
    {
      /* We are in Guile mode.  */
      assert (t->guile_mode);

      res = scm_c_with_continuation_barrier (args->func, args->data);

      /* Leave Guile mode.  */
      t->guile_mode = 0;
    }
  else if (t->guile_mode)
    {
      /* Already in Guile mode.  */
      res = scm_c_with_continuation_barrier (args->func, args->data);
    }
  else
    {
      /* We are not in Guile mode, either because we are not within a
	 scm_with_guile, or because we are within a scm_without_guile.

	 This call to scm_with_guile() could happen from anywhere on the
	 stack, and in particular lower on the stack than when it was
	 when this thread was first guilified.  Thus, `base' must be
	 updated.  */
#if SCM_STACK_GROWS_UP
      if (SCM_STACK_PTR (base->mem_base) < t->base)
	t->base = SCM_STACK_PTR (base->mem_base);
#else
      if (SCM_STACK_PTR (base->mem_base) > t->base)
	t->base = SCM_STACK_PTR (base->mem_base);
#endif

      t->guile_mode = 1;
      res = with_gc_active (with_guile_trampoline, args);
      t->guile_mode = 0;
    }
  return res;
}
72e6b608 899}
3d1af79f 900
12c1d861
AW
901static void *
902scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
903{
904 struct with_guile_args args;
905
906 args.func = func;
907 args.data = data;
908 args.parent = parent;
909
910 return GC_call_with_stack_base (with_guile_and_parent, &args);
911}
912
/* Public entry point: run FUNC (DATA) in guile mode with the default
   dynamic state as parent.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
919
9de87eea
MV
920void *
921scm_without_guile (void *(*func)(void *), void *data)
d823b11b 922{
72e6b608 923 void *result;
cde24ce1 924 scm_i_thread *t = SCM_I_CURRENT_THREAD;
72e6b608 925
cde24ce1 926 if (t->guile_mode)
72e6b608 927 {
cde24ce1
AW
928 SCM_I_CURRENT_THREAD->guile_mode = 0;
929 result = with_gc_inactive (func, data);
930 SCM_I_CURRENT_THREAD->guile_mode = 1;
72e6b608
LC
931 }
932 else
cde24ce1 933 /* Otherwise we're not in guile mode, so nothing to do. */
72e6b608
LC
934 result = func (data);
935
936 return result;
9de87eea
MV
937}
938
72e6b608 939\f
9de87eea
MV
940/*** Thread creation */
941
/* Handshake data shared between call-with-new-thread and the child
   thread.  MUTEX and COND guard THREAD, which the child fills in with
   its own thread object once it is running in guile mode.  */
typedef struct {
  SCM parent;                     /* parent dynamic state */
  SCM thunk;                      /* thunk to run in the new thread */
  SCM handler;                    /* catch handler, or SCM_UNDEFINED */
  SCM thread;                     /* set by the child: its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
d823b11b 950
9de87eea
MV
/* Body of a new Scheme thread, run in guile mode.  Signals the parent
   that startup is complete, then runs the user thunk (optionally
   inside a catch) and stores its value as the thread result.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the parent and wake it up.  DATA lives
     on the parent's stack and must not be used after this.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
972
9de87eea
MV
/* pthread entry point (non-guile mode): detach ourselves, then enter
   guile mode with the parent's dynamic state and run really_launch.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
981
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  GC_collect_a_little ();
  /* DATA lives on our stack; the child copies what it needs before we
     return.  */
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the child has published its thread object.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
1026
9de87eea
MV
/* Handshake data for scm_spawn_thread, the C-level analogue of
   launch_data: MUTEX and COND guard THREAD, filled in by the child.  */
typedef struct {
  SCM parent;                     /* parent dynamic state */
  scm_t_catch_body body;          /* C body to run in the new thread */
  void *body_data;
  scm_t_catch_handler handler;    /* optional C catch handler */
  void *handler_data;
  SCM thread;                     /* set by the child: its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
1037
/* Body run inside Guile mode in the new thread: publish the thread
   object to the spawner, then run the user body (optionally under a
   catch-all handler), storing the result as the thread's exit value.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy the fields out first: DATA is on the spawner's stack and may
     be invalid once the spawner has been signalled and returns.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
1062
1063static void *
1064spawn_thread (void *d)
1065{
1066 spawn_data *data = (spawn_data *)d;
1067 scm_i_pthread_detach (scm_i_pthread_self ());
1068 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1069 return NULL;
1070}
1071
/* Create a new thread running BODY (under HANDLER, if non-NULL) in
   Guile mode, inheriting the caller's dynamic state.  Blocks until the
   child has started and returns the child's thread object.  Raises a
   system error if thread creation fails.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex across creation so the child cannot signal before
     we wait (same handshake as call-with-new-thread).  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
1102
29717c89
MD
1103SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1104 (),
1105"Move the calling thread to the end of the scheduling queue.")
1106#define FUNC_NAME s_scm_yield
1107{
9de87eea 1108 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1109}
1110#undef FUNC_NAME
1111
2e77f720
LC
1112SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1113 (SCM thread),
1114"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1115"cannot be the current thread, and if @var{thread} has already terminated or "
1116"been signaled to terminate, this function is a no-op.")
1117#define FUNC_NAME s_scm_cancel_thread
1118{
1119 scm_i_thread *t = NULL;
1120
1121 SCM_VALIDATE_THREAD (1, thread);
1122 t = SCM_I_THREAD_DATA (thread);
86a597f8 1123 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1124 if (!t->canceled)
1125 {
1126 t->canceled = 1;
86a597f8 1127 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1128 scm_i_pthread_cancel (t->pthread);
1129 }
1130 else
86a597f8 1131 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1132
1133 return SCM_UNSPECIFIED;
1134}
1135#undef FUNC_NAME
1136
1137SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1138 (SCM thread, SCM proc),
1139"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1140"This handler will be called when the thread exits.")
1141#define FUNC_NAME s_scm_set_thread_cleanup_x
1142{
1143 scm_i_thread *t;
1144
1145 SCM_VALIDATE_THREAD (1, thread);
1146 if (!scm_is_false (proc))
1147 SCM_VALIDATE_THUNK (2, proc);
1148
2e77f720 1149 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1150 scm_i_pthread_mutex_lock (&t->admin_mutex);
1151
2e77f720
LC
1152 if (!(t->exited || t->canceled))
1153 t->cleanup_handler = proc;
1154
86a597f8 1155 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1156
1157 return SCM_UNSPECIFIED;
1158}
1159#undef FUNC_NAME
1160
1161SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1162 (SCM thread),
1163"Return the cleanup handler installed for the thread @var{thread}.")
1164#define FUNC_NAME s_scm_thread_cleanup
1165{
1166 scm_i_thread *t;
1167 SCM ret;
1168
1169 SCM_VALIDATE_THREAD (1, thread);
1170
2e77f720 1171 t = SCM_I_THREAD_DATA (thread);
86a597f8 1172 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1173 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1174 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1175
1176 return ret;
1177}
1178#undef FUNC_NAME
1179
6180e336
NJ
/* C convenience wrapper: join THREAD with no timeout and no timeout
   value (blocks until the thread exits).  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1184
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
"Suspend execution of the calling thread until the target @var{thread} "
"terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* On timeout we return TIMEOUTVAL if given, else #f.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
	{
	  /* Sleep on the target's join queue until woken, interrupted,
	     or timed out; block_self releases and reacquires the admin
	     mutex around the wait.  */
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  /* Spurious wakeup or EINTR: run pending asyncs, then retry.  */
	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

	  /* Check for exit again, since we just released and
	     reacquired the admin mutex, before the next block_self
	     call (which would block forever if t has already
	     exited). */
	  if (t->exited)
	    {
	      res = t->result;
	      break;
	    }
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1251
6180e336
NJ
1252SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1253 (SCM obj),
1254 "Return @code{#t} if @var{obj} is a thread.")
1255#define FUNC_NAME s_scm_thread_p
1256{
1257 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1258}
1259#undef FUNC_NAME
5f05c406 1260
4079f87e 1261
9de87eea
MV
1262static size_t
1263fat_mutex_free (SCM mx)
76da80e7 1264{
9de87eea
MV
1265 fat_mutex *m = SCM_MUTEX_DATA (mx);
1266 scm_i_pthread_mutex_destroy (&m->lock);
76da80e7
MV
1267 return 0;
1268}
1269
1270static int
9de87eea 1271fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1272{
9de87eea 1273 fat_mutex *m = SCM_MUTEX_DATA (mx);
0607ebbf 1274 scm_puts_unlocked ("#<mutex ", port);
9de87eea 1275 scm_uintprint ((scm_t_bits)m, 16, port);
0607ebbf 1276 scm_puts_unlocked (">", port);
9de87eea 1277 return 1;
76da80e7
MV
1278}
1279
76da80e7 1280static SCM
6180e336 1281make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1282{
9de87eea
MV
1283 fat_mutex *m;
1284 SCM mx;
1285
1286 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1287 scm_i_pthread_mutex_init (&m->lock, NULL);
1288 m->owner = SCM_BOOL_F;
adc085f1 1289 m->level = 0;
6180e336 1290
adc085f1 1291 m->recursive = recursive;
6180e336
NJ
1292 m->unchecked_unlock = unchecked_unlock;
1293 m->allow_external_unlock = external_unlock;
1294
9de87eea
MV
1295 m->waiting = SCM_EOL;
1296 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1297 m->waiting = make_queue ();
1298 return mx;
76da80e7
MV
1299}
1300
6180e336
NJ
/* C convenience wrapper: create a standard mutex with no option flags.  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1305
2a1d0688
NJ
1306SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1307SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1308SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1309
1310SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1311 (SCM flags),
9de87eea 1312 "Create a new mutex. ")
6180e336 1313#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1314{
6180e336
NJ
1315 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1316
1317 SCM ptr = flags;
1318 while (! scm_is_null (ptr))
1319 {
1320 SCM flag = SCM_CAR (ptr);
1321 if (scm_is_eq (flag, unchecked_unlock_sym))
1322 unchecked_unlock = 1;
1323 else if (scm_is_eq (flag, allow_external_unlock_sym))
1324 external_unlock = 1;
1325 else if (scm_is_eq (flag, recursive_sym))
1326 recursive = 1;
74926120 1327 else
2a1d0688 1328 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1329 ptr = SCM_CDR (ptr);
1330 }
1331 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
76da80e7
MV
1332}
1333#undef FUNC_NAME
1334
9de87eea 1335SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
9bc4701c 1336 (void),
9de87eea
MV
1337 "Create a new recursive mutex. ")
1338#define FUNC_NAME s_scm_make_recursive_mutex
9bc4701c 1339{
6180e336 1340 return make_fat_mutex (1, 0, 0);
9bc4701c
MD
1341}
1342#undef FUNC_NAME
1343
6180e336
NJ
1344SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1345
/* Acquire MUTEX on behalf of OWNER (defaulting to the current thread),
   optionally giving up at absolute time TIMEOUT.  On return *RET is 1
   if the lock was obtained.  Returns #f on success, or a (key . message)
   pair describing an exception the caller should throw.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unowned: take it.  */
	  m->owner = new_owner;
	  m->level++;

	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

	      /* FIXME: The order in which `t->admin_mutex' and
		 `m->lock' are taken differs from that in
		 `on_thread_exit', potentially leading to deadlocks. */
	      scm_i_pthread_mutex_lock (&t->admin_mutex);

	      /* Only keep a weak reference to MUTEX so that it's not
		 retained when not referenced elsewhere (bug #27450).
		 The weak pair itself is eventually removed when MUTEX
		 is unlocked.  Note that `t->mutexes' lists mutexes
		 currently held by T, so it should be small.  */
	      t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
				     t->mutexes);

	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* Previous owner died while holding the mutex: steal it but
	     report the abandonment to the caller.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Relock by the same owner: allowed only for recursive
	     mutexes.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Owned by someone else: give up if past the deadline,
	     otherwise wait on the mutex's queue and retry.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1434
6180e336
NJ
/* C convenience wrapper: lock MX with no timeout, owned by the current
   thread.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1439
adc085f1
JG
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
	    (SCM m, SCM timeout, SCM owner),
"Lock @var{mutex}. If the mutex is already locked, the calling thread "
"blocks until the mutex becomes available. The function returns when "
"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
"a thread already owns will succeed right away and will not block the "
"thread. That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  /* #f or absent means wait forever.  */
  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
    SCM_VALIDATE_THREAD (3, owner);

  /* fat_mutex_lock reports errors as a (key . message) pair rather
     than throwing itself; rethrow here.  */
  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
9bc4701c 1470
9ec1573d
MW
/* void-returning wrapper around scm_lock_mutex, for use as a dynwind
   rewind handler.  */
static void
lock_mutex_return_void (SCM mx)
{
  (void) scm_lock_mutex (mx);
}
1476
/* void-returning wrapper around scm_unlock_mutex, for use as a dynwind
   unwind handler.  */
static void
unlock_mutex_return_void (SCM mx)
{
  (void) scm_unlock_mutex (mx);
}
1482
a4d106c7 1483void
661ae7ab 1484scm_dynwind_lock_mutex (SCM mutex)
a4d106c7 1485{
9ec1573d 1486 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
661ae7ab 1487 SCM_F_WIND_EXPLICITLY);
9ec1573d 1488 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
661ae7ab 1489 SCM_F_WIND_EXPLICITLY);
a4d106c7
MV
1490}
1491
9bc4701c 1492SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
ba1b7223 1493 (SCM mutex),
9bc4701c
MD
1494"Try to lock @var{mutex}. If the mutex is already locked by someone "
1495"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1496#define FUNC_NAME s_scm_try_mutex
1497{
6180e336
NJ
1498 SCM exception;
1499 int ret = 0;
1500 scm_t_timespec cwaittime, *waittime = NULL;
9de87eea 1501
ba1b7223 1502 SCM_VALIDATE_MUTEX (1, mutex);
6180e336
NJ
1503
1504 to_timespec (scm_from_int(0), &cwaittime);
1505 waittime = &cwaittime;
74926120 1506
adc085f1 1507 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
6180e336
NJ
1508 if (!scm_is_false (exception))
1509 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1510 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9de87eea
MV
1511}
1512#undef FUNC_NAME
76da80e7 1513
6180e336
NJ
/*** Fat condition variables */

/* A condition variable is just a queue of waiting threads plus a lock;
   the actual blocking is done via block_self/unblock_from_queue.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1523
a141db86
AW
1524static void
1525remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
1526{
1527 SCM walk, prev;
1528
1529 for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
1530 walk = SCM_CDR (walk))
1531 {
1532 if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
1533 {
1534 if (scm_is_pair (prev))
1535 SCM_SETCDR (prev, SCM_CDR (walk));
1536 else
1537 t->mutexes = SCM_CDR (walk);
1538 break;
1539 }
1540 }
1541}
1542
6180e336
NJ
/* Unlock MUTEX; if COND is given, additionally wait on COND (up to
   WAITTIME) and, when RELOCK is nonzero, re-acquire MUTEX for the
   previous owner before returning.  Returns 1 on success, 0 on
   timeout.  Throws on bogus unlocks (unless the mutex was created with
   the corresponding permissive flags).  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      /* Unlocking a mutex we don't own: allowed only with the
	 `unchecked-unlock' (when not locked at all) or
	 `allow-external-unlock' flags.  */
      if (m->level == 0)
	{
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = t->handle;
	}
      else if (!m->allow_external_unlock)
	{
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      /* unlock-and-wait: drop one lock level, hand the mutex to a
	 waiter, then sleep on the condition variable until signalled,
	 timed out, or interrupted.  */
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    {
	      /* Change the owner of MUTEX.  */
	      remove_mutex_from_thread (mutex, t);
	      m->owner = unblock_from_queue (m->waiting);
	    }

	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      /* Re-acquire the mutex for the original owner before
		 re-enabling asyncs, so the wait/relock is atomic with
		 respect to async interrupts.  */
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: run pending asyncs and loop to wait again.  */
	  t->block_asyncs--;
	  scm_async_tick ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock: drop one level, wake one waiter when fully
	 released.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	{
	  /* Change the owner of MUTEX.  */
	  remove_mutex_from_thread (mutex, t);
	  m->owner = unblock_from_queue (m->waiting);
	}

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
9bc4701c 1645
6180e336
NJ
/* C convenience wrapper: plain unlock, no condition variable, no
   timeout.  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
9bc4701c 1650
6180e336
NJ
1651SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1652 (SCM mx, SCM cond, SCM timeout),
9bc4701c
MD
1653"Unlocks @var{mutex} if the calling thread owns the lock on "
1654"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1655"thread results in undefined behaviour. Once a mutex has been unlocked, "
1656"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1657"lock. Every call to @code{lock-mutex} by this thread must be matched "
1658"with a call to @code{unlock-mutex}. Only the last call to "
1659"@code{unlock-mutex} will actually unlock the mutex. ")
6180e336 1660#define FUNC_NAME s_scm_unlock_mutex_timed
9bc4701c 1661{
6180e336
NJ
1662 scm_t_timespec cwaittime, *waittime = NULL;
1663
9bc4701c 1664 SCM_VALIDATE_MUTEX (1, mx);
6180e336
NJ
1665 if (! (SCM_UNBNDP (cond)))
1666 {
1667 SCM_VALIDATE_CONDVAR (2, cond);
1668
1669 if (! (SCM_UNBNDP (timeout)))
1670 {
1671 to_timespec (timeout, &cwaittime);
1672 waittime = &cwaittime;
1673 }
1674 }
1675
1676 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c
MD
1677}
1678#undef FUNC_NAME
1679
6180e336
NJ
1680SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1681 (SCM obj),
1682 "Return @code{#t} if @var{obj} is a mutex.")
1683#define FUNC_NAME s_scm_mutex_p
1684{
1685 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1686}
74926120 1687#undef FUNC_NAME
9de87eea
MV
1688
1689SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1690 (SCM mx),
1691 "Return the thread owning @var{mx}, or @code{#f}.")
1692#define FUNC_NAME s_scm_mutex_owner
1693{
adc085f1
JG
1694 SCM owner;
1695 fat_mutex *m = NULL;
1696
9de87eea 1697 SCM_VALIDATE_MUTEX (1, mx);
adc085f1
JG
1698 m = SCM_MUTEX_DATA (mx);
1699 scm_i_pthread_mutex_lock (&m->lock);
1700 owner = m->owner;
1701 scm_i_pthread_mutex_unlock (&m->lock);
1702
1703 return owner;
9de87eea
MV
1704}
1705#undef FUNC_NAME
1706
SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  /* NOTE(review): `level' is read without taking the mutex's internal
     lock (unlike mutex-owner), so the value is only a snapshot.  */
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME
1716
adc085f1
JG
1717SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1718 (SCM mx),
1719 "Returns @code{#t} if the mutex @var{mx} is locked.")
1720#define FUNC_NAME s_scm_mutex_locked_p
1721{
1722 SCM_VALIDATE_MUTEX (1, mx);
1723 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1724}
1725#undef FUNC_NAME
9de87eea 1726
9de87eea
MV
1727static int
1728fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1729{
1730 fat_cond *c = SCM_CONDVAR_DATA (cv);
0607ebbf 1731 scm_puts_unlocked ("#<condition-variable ", port);
9de87eea 1732 scm_uintprint ((scm_t_bits)c, 16, port);
0607ebbf 1733 scm_puts_unlocked (">", port);
9de87eea
MV
1734 return 1;
1735}
9bc4701c 1736
d823b11b
MV
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* NOTE(review): `waiting' is set to SCM_EOL before the smob exists
     and replaced with the real queue after — same ordering as
     make_fat_mutex, presumably to keep the field valid across a
     possibly-GC'ing allocation; confirm before reordering.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
5f05c406 1752
d823b11b
MV
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
	    (SCM cv, SCM mx, SCM t),
"Wait until @var{cond-var} has been signalled. While waiting, "
"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
"is locked again when this function returns. When @var{time} is given, "
"it specifies a point in time where the waiting should be aborted. It "
"can be either a integer as returned by @code{current-time} or a pair "
"as returned by @code{gettimeofday}. When the waiting is aborted the "
"mutex is locked and @code{#f} is returned. When the condition "
"variable is in fact signalled, the mutex is also locked and @code{#t} "
"is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  /* fat_mutex_unlock with a condvar performs the atomic
     unlock-wait-relock (relock = 1).  */
  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
5f05c406 1780
9de87eea
MV
/* Wake at most one thread waiting on C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
1786
d823b11b
MV
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  /* Always returns #t; no indication whether a waiter was present.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
5f05c406 1797
9de87eea
MV
/* Wake every thread waiting on C by draining its queue until
   unblock_from_queue reports no more waiters.  */
static void
fat_cond_broadcast (fat_cond *c)
{
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
}
1804
d823b11b
MV
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  /* Always returns #t.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
5f05c406 1815
6180e336
NJ
1816SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1817 (SCM obj),
1818 "Return @code{#t} if @var{obj} is a condition variable.")
1819#define FUNC_NAME s_scm_condition_variable_p
1820{
1821 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1822}
1823#undef FUNC_NAME
1824
6087fad9 1825
8c2b3143 1826\f
d823b11b
MV
1827/*** Select */
1828
8c2b3143
LC
/* Argument/result bundle for running select(2) outside Guile mode via
   scm_without_guile (see do_std_select).  */
struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  int result;       /* return value of select(2) */
  int errno_value;  /* errno captured right after the call */
};
1840
1841static void *
1842do_std_select (void *args)
1843{
1844 struct select_args *select_args;
1845
1846 select_args = (struct select_args *) args;
1847
1848 select_args->result =
1849 select (select_args->nfds,
1850 select_args->read_fds, select_args->write_fds,
1851 select_args->except_fds, select_args->timeout);
1852 select_args->errno_value = errno;
1853
1854 return NULL;
1855}
1856
911782b7 1857int
9de87eea
MV
1858scm_std_select (int nfds,
1859 SELECT_TYPE *readfds,
1860 SELECT_TYPE *writefds,
1861 SELECT_TYPE *exceptfds,
1862 struct timeval *timeout)
1863{
1864 fd_set my_readfds;
1865 int res, eno, wakeup_fd;
1866 scm_i_thread *t = SCM_I_CURRENT_THREAD;
8c2b3143 1867 struct select_args args;
9de87eea
MV
1868
1869 if (readfds == NULL)
1870 {
1871 FD_ZERO (&my_readfds);
1872 readfds = &my_readfds;
1873 }
1874
1875 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1876 SCM_TICK;
1877
1878 wakeup_fd = t->sleep_pipe[0];
9de87eea
MV
1879 FD_SET (wakeup_fd, readfds);
1880 if (wakeup_fd >= nfds)
1881 nfds = wakeup_fd+1;
9de87eea 1882
8c2b3143
LC
1883 args.nfds = nfds;
1884 args.read_fds = readfds;
1885 args.write_fds = writefds;
1886 args.except_fds = exceptfds;
1887 args.timeout = timeout;
1888
1889 /* Explicitly cooperate with the GC. */
1890 scm_without_guile (do_std_select, &args);
1891
1892 res = args.result;
1893 eno = args.errno_value;
1894
1895 t->sleep_fd = -1;
9de87eea
MV
1896 scm_i_reset_sleep (t);
1897
1898 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1899 {
1900 char dummy;
634aa8de
LC
1901 full_read (wakeup_fd, &dummy, 1);
1902
9de87eea
MV
1903 FD_CLR (wakeup_fd, readfds);
1904 res -= 1;
1905 if (res == 0)
1906 {
1907 eno = EINTR;
1908 res = -1;
1909 }
1910 }
d823b11b
MV
1911 errno = eno;
1912 return res;
5f05c406
MV
1913}
1914
9de87eea 1915/* Convenience API for blocking while in guile mode. */
76da80e7 1916
9de87eea 1917#if SCM_USE_PTHREAD_THREADS
92e64b87 1918
2956b071
LC
1919/* It seems reasonable to not run procedures related to mutex and condition
1920 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1921 without it, and (ii) the only potential gain would be GC latency. See
1922 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1923 for a discussion of the pros and cons. */
1924
9bc4701c 1925int
9de87eea 1926scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
9bc4701c 1927{
9de87eea 1928 int res = scm_i_pthread_mutex_lock (mutex);
9bc4701c
MD
1929 return res;
1930}
1931
9de87eea 1932static void
2b829bbb 1933do_unlock (void *data)
28d52ebb 1934{
9de87eea 1935 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
28d52ebb
MD
1936}
1937
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is left.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1944
9bc4701c 1945int
9de87eea 1946scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
9bc4701c 1947{
4cf72f0b
LC
1948 int res;
1949 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1950
1951 t->held_mutex = mutex;
1952 res = scm_i_pthread_cond_wait (cond, mutex);
1953 t->held_mutex = NULL;
1954
9bc4701c
MD
1955 return res;
1956}
9bc4701c 1957
76da80e7 1958int
9de87eea
MV
1959scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1960 scm_i_pthread_mutex_t *mutex,
1961 const scm_t_timespec *wt)
76da80e7 1962{
4cf72f0b
LC
1963 int res;
1964 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1965
1966 t->held_mutex = mutex;
1967 res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1968 t->held_mutex = NULL;
1969
9de87eea 1970 return res;
76da80e7
MV
1971}
1972
9de87eea 1973#endif
76da80e7 1974
d823b11b 1975unsigned long
9de87eea 1976scm_std_usleep (unsigned long usecs)
5f05c406 1977{
d823b11b
MV
1978 struct timeval tv;
1979 tv.tv_usec = usecs % 1000000;
1980 tv.tv_sec = usecs / 1000000;
9de87eea
MV
1981 scm_std_select (0, NULL, NULL, NULL, &tv);
1982 return tv.tv_sec * 1000000 + tv.tv_usec;
5f05c406
MV
1983}
1984
9de87eea
MV
1985unsigned int
1986scm_std_sleep (unsigned int secs)
6c214b62 1987{
d823b11b
MV
1988 struct timeval tv;
1989 tv.tv_usec = 0;
1990 tv.tv_sec = secs;
9de87eea 1991 scm_std_select (0, NULL, NULL, NULL, &tv);
d823b11b 1992 return tv.tv_sec;
6c214b62
MD
1993}
1994
d823b11b
MV
1995/*** Misc */
1996
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The thread's Scheme-visible handle is cached on its scm_i_thread.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
2005
9de87eea
MV
2006static SCM
2007scm_c_make_list (size_t n, SCM fill)
2008{
2009 SCM res = SCM_EOL;
2010 while (n-- > 0)
2011 res = scm_cons (fill, res);
2012 return res;
2013}
2014
d823b11b
MV
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  /* Fill the pre-allocated list in place while walking the global
     thread list; the internal signal-delivery thread is skipped.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Truncate any unused cells (e.g. the skipped delivery thread).  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
d823b11b
MV
2043
2044SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
2045 (SCM thread),
2046 "Return @code{#t} iff @var{thread} has exited.\n")
2047#define FUNC_NAME s_scm_thread_exited_p
2048{
7888309b 2049 return scm_from_bool (scm_c_thread_exited_p (thread));
d823b11b
MV
2050}
2051#undef FUNC_NAME
2052
911782b7 2053int
d823b11b
MV
2054scm_c_thread_exited_p (SCM thread)
2055#define FUNC_NAME s_scm_thread_exited_p
5f05c406 2056{
9de87eea 2057 scm_i_thread *t;
d823b11b 2058 SCM_VALIDATE_THREAD (1, thread);
9de87eea 2059 t = SCM_I_THREAD_DATA (thread);
d823b11b 2060 return t->exited;
5f05c406 2061}
d823b11b 2062#undef FUNC_NAME
5f05c406 2063
d20912e6
LC
SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
	    (void),
	    "Return the total number of processors of the machine, which\n"
	    "is guaranteed to be at least 1. A ``processor'' here is a\n"
	    "thread execution unit, which can be either:\n\n"
	    "@itemize\n"
	    "@item an execution core in a (possibly multi-core) chip, in a\n"
	    " (possibly multi- chip) module, in a single computer, or\n"
	    "@item a thread execution unit inside a core in the case of\n"
	    " @dfn{hyper-threaded} CPUs.\n"
	    "@end itemize\n\n"
	    "Which of the two definitions is used, is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  /* num_processors comes from gnulib's nproc module.  */
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME
2081
SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
	    (void),
	    "Like @code{total-processor-count}, but return the number of\n"
	    "processors available to the current process.  See\n"
	    "@code{setaffinity} and @code{getaffinity} for more\n"
	    "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  /* NPROC_CURRENT restricts the count to processors this process may
     actually run on (e.g. after an affinity mask was set).  */
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
2093
2094
2095\f
2096
/* Condition variable initialized in scm_threads_prehistory; presumably
   signalled to wake blocked threads — confirm against its waiters
   elsewhere in this file.  */
static scm_i_pthread_cond_t wake_up_cond;

/* Set to 1 at the end of scm_init_threads, once the smob types and the
   initial thread state exist.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Fallback mutex used by scm_dynwind_critical_section when its caller
   passes #f; created (recursive) in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;
a54a94b3 2106
9bc4701c 2107void
661ae7ab 2108scm_dynwind_critical_section (SCM mutex)
76da80e7 2109{
a4d106c7 2110 if (scm_is_false (mutex))
661ae7ab
MV
2111 mutex = dynwind_critical_section_mutex;
2112 scm_dynwind_lock_mutex (mutex);
2113 scm_dynwind_block_asyncs ();
9de87eea
MV
2114}
2115
/*** Initialization */

/* Miscellaneous global lock; initialized in scm_threads_prehistory.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Shared attribute object for creating recursive pthread mutexes; set
   up once in scm_threads_prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
2123
/* Earliest thread-system bootstrap, run before the rest of Guile is
   initialized.  BASE is the stack base of the initial thread, handed
   on to the GC via guilify_self_1.  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  /* Prepare the shared recursive-mutex attribute before any mutex that
     needs it is created.  */
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex is created with the recursive
     attribute; the misc mutex and wake-up condvar use defaults.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}
2140
/* Smob type tags for threads, mutexes and condition variables;
   assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
/* Second-stage thread-system initialization: register the smob types,
   finish setting up the initial thread, and create the fallback
   critical-section mutex.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  /* Mutexes get a free function; threads and condvars do not.  */
  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
89e00824 2165
5f05c406 2166void
9de87eea 2167scm_init_threads_default_dynamic_state ()
5f05c406 2168{
9de87eea 2169 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
f39448c5 2170 scm_i_default_dynamic_state = state;
5f05c406
MV
2171}
2172
/* Register this file's Scheme-visible procedures; threads.x is
   presumably generated at build time from the SCM_DEFINE forms above
   (standard Guile snarfing) — confirm against the build rules.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2178
\f
/* IA64-specific things. */

/* On IA64 the register backing store is a second, separately-growing
   stack that the GC must also scan; these helpers locate it.  Each OS
   variant provides the same two entry points.  */
#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* Walk the process VM regions until the RSE stack region is found and
   return its base address; abort if the kernel does not report one.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Extract the ar.bsp register (top of the register backing store) from
   a ucontext CTX.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* glibc exports the backing-store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Read ar.bsp out of the Linux ucontext's machine context.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */
2220
2221
89e00824
ML
2222/*
2223 Local Variables:
2224 c-file-style: "gnu"
2225 End:
2226*/