adjust_gc_frequency before gc, for more precision
[bpt/guile.git] / libguile / threads.c
cde24ce1 1/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
74926120 2 *
73be1d9e 3 * This library is free software; you can redistribute it and/or
53befeb7
NJ
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
7bfd3b9e 7 *
53befeb7
NJ
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
73be1d9e
MV
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
7bfd3b9e 12 *
73be1d9e
MV
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
53befeb7
NJ
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301 USA
73be1d9e 17 */
1bbd0b84 18
1bbd0b84 19
7bfd3b9e 20\f
dbb605f5
LC
21#ifdef HAVE_CONFIG_H
22# include <config.h>
23#endif
7bfd3b9e 24
1c44468d 25#include "libguile/bdw-gc.h"
1810dc4e
RB
26#include "libguile/_scm.h"
27
fcc5d734 28#if HAVE_UNISTD_H
d823b11b 29#include <unistd.h>
fcc5d734 30#endif
d823b11b 31#include <stdio.h>
8ab3d8a0
KR
32
33#ifdef HAVE_STRING_H
34#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35#endif
36
fcc5d734 37#if HAVE_SYS_TIME_H
d823b11b 38#include <sys/time.h>
fcc5d734 39#endif
5f05c406 40
f57fdf07 41#include <assert.h>
32f94bf2 42#include <fcntl.h>
d20912e6 43#include <nproc.h>
f57fdf07 44
d823b11b
MV
45#include "libguile/validate.h"
46#include "libguile/root.h"
47#include "libguile/eval.h"
48#include "libguile/async.h"
49#include "libguile/ports.h"
50#include "libguile/threads.h"
a0599745 51#include "libguile/dynwind.h"
d823b11b 52#include "libguile/iselect.h"
9de87eea
MV
53#include "libguile/fluids.h"
54#include "libguile/continuations.h"
2b829bbb 55#include "libguile/gc.h"
9de87eea 56#include "libguile/init.h"
2e77f720 57#include "libguile/scmsigs.h"
6180e336 58#include "libguile/strings.h"
a0faf7dd 59#include "libguile/weaks.h"
7bfd3b9e 60
634aa8de
LC
61#include <full-read.h>
62
cde24ce1
AW
63
64\f
65
66/* First some libgc shims. */
67
68/* Make sure GC_fn_type is defined; it is missing from the public
69 headers of GC 7.1 and earlier. */
70#ifndef HAVE_GC_FN_TYPE
71typedef void * (* GC_fn_type) (void *);
72#endif
73
74
5f0d2951
AW
75#ifndef GC_SUCCESS
76#define GC_SUCCESS 0
77#endif
78
79#ifndef GC_UNIMPLEMENTED
80#define GC_UNIMPLEMENTED 3
81#endif
82
83/* Likewise struct GC_stack_base is missing before 7.1. */
84#ifndef HAVE_GC_STACK_BASE
12c1d861
AW
85struct GC_stack_base {
86 void * mem_base; /* Base of memory stack. */
87#ifdef __ia64__
88 void * reg_base; /* Base of separate register stack. */
89#endif
90};
5f0d2951
AW
91
92static int
987b8160 93GC_register_my_thread (struct GC_stack_base *stack_base)
5f0d2951
AW
94{
95 return GC_UNIMPLEMENTED;
96}
97
98static void
99GC_unregister_my_thread ()
100{
101}
102
12c1d861
AW
103#if !SCM_USE_PTHREAD_THREADS
104/* No threads; we can just use GC_stackbottom. */
5f0d2951 105static void *
12c1d861
AW
106get_thread_stack_base ()
107{
108 return GC_stackbottom;
109}
110
111#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
112 && defined PTHREAD_ATTR_GETSTACK_WORKS
113/* This method for GNU/Linux and perhaps some other systems.
114 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
115 available on them. */
116static void *
117get_thread_stack_base ()
118{
119 pthread_attr_t attr;
120 void *start, *end;
121 size_t size;
122
123 pthread_getattr_np (pthread_self (), &attr);
124 pthread_attr_getstack (&attr, &start, &size);
125 end = (char *)start + size;
126
127#if SCM_STACK_GROWS_UP
128 return start;
129#else
130 return end;
131#endif
132}
133
134#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
135/* This method for MacOS X.
136 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
137 but as of 2006 there's nothing obvious at apple.com. */
138static void *
139get_thread_stack_base ()
140{
141 return pthread_get_stackaddr_np (pthread_self ());
142}
143
144#else
145#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
146#endif
147
148static int
987b8160 149GC_get_stack_base (struct GC_stack_base *stack_base)
5f0d2951 150{
12c1d861
AW
151 stack_base->mem_base = get_thread_stack_base ();
152#ifdef __ia64__
153 /* Calculate and store off the base of this thread's register
154 backing store (RBS). Unfortunately our implementation(s) of
155 scm_ia64_register_backing_store_base are only reliable for the
156 main thread. For other threads, therefore, find out the current
157 top of the RBS, and use that as a maximum. */
158 stack_base->reg_base = scm_ia64_register_backing_store_base ();
159 {
160 ucontext_t ctx;
161 void *bsp;
162 getcontext (&ctx);
163 bsp = scm_ia64_ar_bsp (&ctx);
164 if (stack_base->reg_base > bsp)
165 stack_base->reg_base = bsp;
166 }
167#endif
168 return GC_SUCCESS;
5f0d2951 169}
12c1d861
AW
170
171static void *
172GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
173{
174 struct GC_stack_base stack_base;
175
176 stack_base.mem_base = (void*)&stack_base;
177#ifdef __ia64__
178 /* FIXME: Untested. */
179 {
180 ucontext_t ctx;
181 getcontext (&ctx);
182 stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
183 }
5f0d2951
AW
184#endif
185
12c1d861
AW
186 return fn (&stack_base, arg);
187}
188#endif /* HAVE_GC_STACK_BASE */
189
5f0d2951 190
cde24ce1
AW
191/* Now define with_gc_active and with_gc_inactive. */
192
193#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
194
195/* We have a sufficiently new libgc (7.2 or newer). */
196
197static void*
198with_gc_inactive (GC_fn_type func, void *data)
199{
200 return GC_do_blocking (func, data);
201}
202
203static void*
204with_gc_active (GC_fn_type func, void *data)
205{
206 return GC_call_with_gc_active (func, data);
207}
208
209#else
210
211/* libgc not new enough, so never actually deactivate GC.
212
213 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
214 GC_call_with_gc_active. */
215
216static void*
217with_gc_inactive (GC_fn_type func, void *data)
218{
219 return func (data);
220}
221
222static void*
223with_gc_active (GC_fn_type func, void *data)
224{
225 return func (data);
226}
227
228#endif /* HAVE_GC_DO_BLOCKING */
229
230
634aa8de 231\f
6180e336
NJ
232static void
233to_timespec (SCM t, scm_t_timespec *waittime)
234{
235 if (scm_is_pair (t))
236 {
237 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
238 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
239 }
240 else
241 {
242 double time = scm_to_double (t);
243 double sec = scm_c_truncate (time);
244
245 waittime->tv_sec = (long) sec;
2a1d0688 246 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
6180e336
NJ
247 }
248}
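/* Editor's note (illustrative sketch, not part of the original source):
   to_timespec accepts either a (seconds . microseconds) pair or a real
   number of seconds, so both calls below fill in the same scm_t_timespec,
   namely tv_sec == 5 and tv_nsec == 250000000:

       scm_t_timespec ts;

       to_timespec (scm_cons (scm_from_ulong (5), scm_from_ulong (250000)),
                    &ts);
       to_timespec (scm_from_double (5.25), &ts);
*/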
249
c058db8a 250\f
d823b11b 251/*** Queues */
7bfd3b9e 252
c058db8a
LC
253/* Note: We annotate with "GC-robust" those assignments whose purpose is to avoid
254 the risk of false references leading to unbounded retained space as
255 described in "Bounding Space Usage of Conservative Garbage Collectors",
256 H.J. Boehm, 2001. */
257
9de87eea
MV
258/* Make an empty queue data structure.
259 */
d823b11b
MV
260static SCM
261make_queue ()
262{
263 return scm_cons (SCM_EOL, SCM_EOL);
264}
7bfd3b9e 265
9de87eea
MV
266/* Put T at the back of Q and return a handle that can be used with
267 remqueue to remove T from Q again.
268 */
d823b11b
MV
269static SCM
270enqueue (SCM q, SCM t)
271{
272 SCM c = scm_cons (t, SCM_EOL);
d2a51087 273 SCM_CRITICAL_SECTION_START;
d2e53ed6 274 if (scm_is_null (SCM_CDR (q)))
d823b11b
MV
275 SCM_SETCDR (q, c);
276 else
277 SCM_SETCDR (SCM_CAR (q), c);
278 SCM_SETCAR (q, c);
d2a51087 279 SCM_CRITICAL_SECTION_END;
d823b11b
MV
280 return c;
281}
7bfd3b9e 282
9de87eea
MV
283/* Remove the element that the handle C refers to from the queue Q. C
284 must have been returned from a call to enqueue. The return value
285 is zero when the element referred to by C has already been removed.
286 Otherwise, 1 is returned.
287*/
288static int
d823b11b
MV
289remqueue (SCM q, SCM c)
290{
291 SCM p, prev = q;
d2a51087 292 SCM_CRITICAL_SECTION_START;
d2e53ed6 293 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
d823b11b 294 {
bc36d050 295 if (scm_is_eq (p, c))
d823b11b 296 {
bc36d050 297 if (scm_is_eq (c, SCM_CAR (q)))
d823b11b
MV
298 SCM_SETCAR (q, SCM_CDR (c));
299 SCM_SETCDR (prev, SCM_CDR (c));
c058db8a
LC
300
301 /* GC-robust */
302 SCM_SETCDR (c, SCM_EOL);
303
d2a51087 304 SCM_CRITICAL_SECTION_END;
9de87eea 305 return 1;
d823b11b
MV
306 }
307 prev = p;
308 }
d2a51087 309 SCM_CRITICAL_SECTION_END;
9de87eea 310 return 0;
d823b11b
MV
311}
312
9de87eea
MV
313/* Remove the front-most element from the queue Q and return it.
314 Return SCM_BOOL_F when Q is empty.
315*/
d823b11b
MV
316static SCM
317dequeue (SCM q)
318{
d2a51087
NJ
319 SCM c;
320 SCM_CRITICAL_SECTION_START;
321 c = SCM_CDR (q);
d2e53ed6 322 if (scm_is_null (c))
d2a51087
NJ
323 {
324 SCM_CRITICAL_SECTION_END;
325 return SCM_BOOL_F;
326 }
d823b11b
MV
327 else
328 {
329 SCM_SETCDR (q, SCM_CDR (c));
d2e53ed6 330 if (scm_is_null (SCM_CDR (q)))
d823b11b 331 SCM_SETCAR (q, SCM_EOL);
d2a51087 332 SCM_CRITICAL_SECTION_END;
c058db8a
LC
333
334 /* GC-robust */
335 SCM_SETCDR (c, SCM_EOL);
336
d823b11b
MV
337 return SCM_CAR (c);
338 }
339}
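/* Editor's note (illustrative sketch, not part of the original source):
   a queue is a cons cell whose cdr points at the first (oldest) entry and
   whose car points at the last one.  A typical round trip looks like:

       SCM q = make_queue ();
       SCM handle = enqueue (q, scm_from_int (42));
       int still_queued = remqueue (q, handle);
       SCM front = dequeue (q);

   Here still_queued is 1, since nothing removed the entry in between, and
   front is SCM_BOOL_F, since remqueue already emptied the queue.  dequeue
   returns the oldest element still present, or SCM_BOOL_F when empty.  */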
7bfd3b9e 340
9de87eea 341/*** Thread smob routines */
76da80e7 342
d823b11b
MV
343
344static int
345thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
346{
23d72566
KR
347 /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
348 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
349 the struct case, hence we go via a union, and extract according to the
350 size of pthread_t. */
351 union {
352 scm_i_pthread_t p;
353 unsigned short us;
354 unsigned int ui;
355 unsigned long ul;
356 scm_t_uintmax um;
357 } u;
9de87eea 358 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
23d72566
KR
359 scm_i_pthread_t p = t->pthread;
360 scm_t_uintmax id;
361 u.p = p;
362 if (sizeof (p) == sizeof (unsigned short))
363 id = u.us;
364 else if (sizeof (p) == sizeof (unsigned int))
365 id = u.ui;
366 else if (sizeof (p) == sizeof (unsigned long))
367 id = u.ul;
368 else
369 id = u.um;
370
d823b11b 371 scm_puts ("#<thread ", port);
23d72566 372 scm_uintprint (id, 10, port);
1b92fb6b 373 scm_puts (" (", port);
0345e278 374 scm_uintprint ((scm_t_bits)t, 16, port);
1b92fb6b 375 scm_puts (")>", port);
d823b11b
MV
376 return 1;
377}
378
706846f6 379\f
9de87eea 380/*** Blocking on queues. */
f7eca35d 381
9de87eea
MV
382/* See also scm_i_queue_async_cell for how such a block is
383 interrupted.
384*/
d823b11b 385
9de87eea
MV
386/* Put the current thread on QUEUE and go to sleep, waiting for it to
387 be woken up by a call to 'unblock_from_queue', or to be
388 interrupted. Upon return of this function, the current thread is
389 no longer on QUEUE, even when the sleep has been interrupted.
390
d2a51087 391 The caller of block_self must hold MUTEX. It will be atomically
9de87eea
MV
392 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
393
394 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
395 as MUTEX is needed.
396
397 When WAITTIME is not NULL, the sleep will be aborted at that time.
398
399 The return value of block_self is an errno value. It will be zero
400 when the sleep has been successfully completed by a call to
401 unblock_from_queue, EINTR when it has been interrupted by the
402 delivery of a system async, and ETIMEDOUT when the timeout has
403 expired.
404
405 The system asyncs themselves are not executed by block_self.
406*/
407static int
408block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
409 const scm_t_timespec *waittime)
76da80e7 410{
9de87eea
MV
411 scm_i_thread *t = SCM_I_CURRENT_THREAD;
412 SCM q_handle;
413 int err;
414
415 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
416 err = EINTR;
417 else
418 {
419 t->block_asyncs++;
420 q_handle = enqueue (queue, t->handle);
421 if (waittime == NULL)
422 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
423 else
424 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
425
426 /* When we are still on QUEUE, we have been interrupted. We
427 report this only when no other error (such as a timeout) has
428 happened above.
429 */
430 if (remqueue (queue, q_handle) && err == 0)
431 err = EINTR;
432 t->block_asyncs--;
433 scm_i_reset_sleep (t);
434 }
435
436 return err;
76da80e7 437}
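/* Editor's note (illustrative sketch, not part of the original source):
   with QUEUE, OBJ, MUTEX and WAITTIME standing for the caller's own
   values, and MUTEX already locked, a typical use of block_self is:

       int err = block_self (QUEUE, OBJ, MUTEX, WAITTIME);

       if (err == 0)
         proceed ();
       else if (err == EINTR)
         run_asyncs_and_retry ();
       else if (err == ETIMEDOUT)
         give_up ();

   proceed, run_asyncs_and_retry and give_up are placeholders; the real
   pattern, including the unlock/SCM_TICK/relock dance on EINTR, appears
   in scm_join_thread_timed further down.  */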
9de87eea 438
d2a51087
NJ
439/* Wake up the first thread on QUEUE, if any. The awoken thread is
440 returned, or #f if the queue was empty.
9de87eea
MV
441 */
442static SCM
443unblock_from_queue (SCM queue)
444{
445 SCM thread = dequeue (queue);
446 if (scm_is_true (thread))
447 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
448 return thread;
449}
450
45f15cac 451\f
9de87eea
MV
452/* Getting into and out of guile mode.
453 */
454
f60a7648
AW
455/* Key used to attach a cleanup handler to a given thread. Also, if
456 thread-local storage is unavailable, this key is used to retrieve the
457 current thread with `pthread_getspecific ()'. */
458scm_i_pthread_key_t scm_i_thread_key;
459
460
705edb95
LC
461#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
462
463/* When thread-local storage (TLS) is available, a pointer to the
464 current-thread object is kept in TLS. Note that storing the thread-object
465 itself in TLS (rather than a pointer to some malloc'd memory) is not
466 possible since thread objects may live longer than the actual thread they
467 represent. */
468SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
469
f60a7648 470#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
705edb95 471
d823b11b 472
9de87eea
MV
473static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
474static scm_i_thread *all_threads = NULL;
475static int thread_count;
476
477static SCM scm_i_default_dynamic_state;
478
0b77014f
AW
479/* Run when a fluid is collected. */
480void
aafb4ed7 481scm_i_reset_fluid (size_t n)
0b77014f
AW
482{
483 scm_i_thread *t;
484
485 scm_i_pthread_mutex_lock (&thread_admin_mutex);
486 for (t = all_threads; t; t = t->next_thread)
487 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
488 {
489 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
490
491 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
aafb4ed7 492 SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
0b77014f
AW
493 }
494 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
495}
496
9de87eea 497/* Perform first stage of thread initialisation, in non-guile mode.
d823b11b 498 */
9de87eea 499static void
12c1d861 500guilify_self_1 (struct GC_stack_base *base)
d823b11b 501{
7f22442b
AW
502 scm_i_thread t;
503
504 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
505 before allocating anything in this thread, because allocation could
506 cause GC to run, and GC could cause finalizers, which could invoke
507 Scheme functions, which need the current thread to be set. */
508
509 t.pthread = scm_i_pthread_self ();
510 t.handle = SCM_BOOL_F;
511 t.result = SCM_BOOL_F;
512 t.cleanup_handler = SCM_BOOL_F;
513 t.mutexes = SCM_EOL;
514 t.held_mutex = NULL;
515 t.join_queue = SCM_EOL;
516 t.dynamic_state = SCM_BOOL_F;
517 t.dynwinds = SCM_EOL;
518 t.active_asyncs = SCM_EOL;
519 t.block_asyncs = 1;
520 t.pending_asyncs = 1;
521 t.critical_section_level = 0;
522 t.base = base->mem_base;
346e4402 523#ifdef __ia64__
7f22442b 524 t.register_backing_store_base = base->reg_base;
346e4402 525#endif
7f22442b
AW
526 t.continuation_root = SCM_EOL;
527 t.continuation_base = t.base;
528 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
529 t.sleep_mutex = NULL;
530 t.sleep_object = SCM_BOOL_F;
531 t.sleep_fd = -1;
532
32f94bf2 533 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
634aa8de
LC
534 /* FIXME: Error conditions during the initialization phase are handled
535 gracelessly since public functions such as `scm_init_guile ()'
536 currently have type `void'. */
537 abort ();
538
7f22442b
AW
539 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
540 t.current_mark_stack_ptr = NULL;
541 t.current_mark_stack_limit = NULL;
542 t.canceled = 0;
543 t.exited = 0;
544 t.guile_mode = 0;
9de87eea 545
7f22442b
AW
546 /* The switcheroo. */
547 {
548 scm_i_thread *t_ptr = &t;
549
550 GC_disable ();
551 t_ptr = GC_malloc (sizeof (scm_i_thread));
552 memcpy (t_ptr, &t, sizeof t);
553
554 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
f60a7648
AW
555
556#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
7f22442b
AW
557 /* Cache the current thread in TLS for faster lookup. */
558 scm_i_current_thread = t_ptr;
f60a7648 559#endif
9de87eea 560
7f22442b
AW
561 scm_i_pthread_mutex_lock (&thread_admin_mutex);
562 t_ptr->next_thread = all_threads;
563 all_threads = t_ptr;
564 thread_count++;
565 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
566
567 GC_enable ();
568 }
d823b11b
MV
569}
570
9de87eea 571/* Perform second stage of thread initialisation, in guile mode.
d823b11b 572 */
9de87eea
MV
573static void
574guilify_self_2 (SCM parent)
d823b11b 575{
9de87eea
MV
576 scm_i_thread *t = SCM_I_CURRENT_THREAD;
577
72e6b608
LC
578 t->guile_mode = 1;
579
9de87eea 580 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
c812243b 581
9de87eea
MV
582 t->continuation_root = scm_cons (t->handle, SCM_EOL);
583 t->continuation_base = t->base;
2bbe1533 584 t->vm = SCM_BOOL_F;
9de87eea
MV
585
586 if (scm_is_true (parent))
587 t->dynamic_state = scm_make_dynamic_state (parent);
588 else
589 t->dynamic_state = scm_i_make_initial_dynamic_state ();
590
591 t->join_queue = make_queue ();
592 t->block_asyncs = 0;
d823b11b
MV
593}
594
6180e336
NJ
595\f
596/*** Fat mutexes */
597
598/* We implement our own mutex type since we want them to be 'fair', we
599 want to do fancy things while waiting for them (like running
600 asyncs) and we might want to add things that are nice for
601 debugging.
602*/
603
604typedef struct {
605 scm_i_pthread_mutex_t lock;
606 SCM owner;
adc085f1 607 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
6180e336 608
adc085f1 609 int recursive; /* allow recursive locking? */
6180e336
NJ
610 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
611 int allow_external_unlock; /* is it an error to unlock a mutex that is not
612 owned by the current thread? */
613
614 SCM waiting; /* the threads waiting for this mutex. */
615} fat_mutex;
616
617#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
618#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
619
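/* Editor's note (illustrative sketch, not part of the original source):
   the three behaviour flags correspond to the symbols accepted by
   make-mutex below.  For example, from C,

       SCM m = scm_make_mutex_with_flags
         (scm_list_2 (scm_from_locale_symbol ("recursive"),
                      scm_from_locale_symbol ("allow-external-unlock")));

   yields a fat_mutex with recursive == 1 and allow_external_unlock == 1,
   whereas plain scm_make_mutex () leaves all three flags at zero.  */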
2002f1f8
AW
620static SCM
621call_cleanup (void *data)
622{
623 SCM *proc_p = data;
624 return scm_call_0 (*proc_p);
625}
626
9de87eea 627/* Perform thread tear-down, in guile mode.
d823b11b 628 */
9de87eea
MV
629static void *
630do_thread_exit (void *v)
631{
2e77f720
LC
632 scm_i_thread *t = (scm_i_thread *) v;
633
91230883
LC
634 /* Ensure the signal handling thread has been launched, because we might be
635 shutting it down. This needs to be done in Guile mode. */
636 scm_i_ensure_signal_delivery_thread ();
637
2e77f720
LC
638 if (!scm_is_false (t->cleanup_handler))
639 {
640 SCM ptr = t->cleanup_handler;
641
642 t->cleanup_handler = SCM_BOOL_F;
643 t->result = scm_internal_catch (SCM_BOOL_T,
2002f1f8 644 call_cleanup, &ptr,
2e77f720
LC
645 scm_handle_by_message_noexit, NULL);
646 }
9de87eea 647
86a597f8 648 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
9de87eea
MV
649
650 t->exited = 1;
0c97d7dd
MV
651 close (t->sleep_pipe[0]);
652 close (t->sleep_pipe[1]);
9de87eea
MV
653 while (scm_is_true (unblock_from_queue (t->join_queue)))
654 ;
9de87eea 655
74926120 656 while (!scm_is_null (t->mutexes))
6180e336 657 {
a0faf7dd 658 SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);
74926120 659
a0faf7dd
LC
660 if (!SCM_UNBNDP (mutex))
661 {
662 fat_mutex *m = SCM_MUTEX_DATA (mutex);
663
664 scm_i_pthread_mutex_lock (&m->lock);
f57fdf07
LC
665
666 /* Since MUTEX is in `t->mutexes', T must be its owner. */
667 assert (scm_is_eq (m->owner, t->handle));
668
a0faf7dd 669 unblock_from_queue (m->waiting);
f57fdf07 670
a0faf7dd
LC
671 scm_i_pthread_mutex_unlock (&m->lock);
672 }
6180e336 673
a0faf7dd 674 t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
6180e336
NJ
675 }
676
86a597f8 677 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 678
9de87eea
MV
679 return NULL;
680}
681
f60a7648
AW
682static void *
683do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
684{
7f22442b 685 /* Won't hurt if we are already registered. */
8e76ce94 686#if SCM_USE_PTHREAD_THREADS
7f22442b 687 GC_register_my_thread (sb);
8e76ce94 688#endif
f60a7648 689
7f22442b 690 return scm_with_guile (do_thread_exit, v);
f60a7648
AW
691}
692
d823b11b 693static void
9de87eea 694on_thread_exit (void *v)
d823b11b 695{
29776e85 696 /* This handler is executed in non-guile mode. */
2e77f720 697 scm_i_thread *t = (scm_i_thread *) v, **tp;
0c97d7dd 698
b8441577
LC
699 /* If we were canceled, we were unable to clear `t->guile_mode', so do
700 it here. */
701 t->guile_mode = 0;
702
d2a51087
NJ
703 /* If this thread was cancelled while doing a cond wait, it will
704 still have a mutex locked, so we unlock it here. */
705 if (t->held_mutex)
706 {
707 scm_i_pthread_mutex_unlock (t->held_mutex);
708 t->held_mutex = NULL;
709 }
710
f60a7648
AW
711 /* Reinstate the current thread for purposes of scm_with_guile
712 guile-mode cleanup handlers. Only really needed in the non-TLS
713 case but it doesn't hurt to be consistent. */
714 scm_i_pthread_setspecific (scm_i_thread_key, t);
0c97d7dd 715
7f22442b
AW
716 /* Scheme-level thread finalizers and other cleanup needs to happen in
717 guile mode. */
718 GC_call_with_stack_base (do_thread_exit_trampoline, t);
0c97d7dd
MV
719
720 /* Removing ourself from the list of all threads needs to happen in
721 non-guile mode since all SCM values on our stack become
29776e85 722 unprotected once we are no longer in the list. */
0c97d7dd
MV
723 scm_i_pthread_mutex_lock (&thread_admin_mutex);
724 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
725 if (*tp == t)
726 {
727 *tp = t->next_thread;
c058db8a
LC
728
729 /* GC-robust */
730 t->next_thread = NULL;
731
0c97d7dd
MV
732 break;
733 }
734 thread_count--;
2e77f720
LC
735
736 /* If there's only one other thread, it could be the signal delivery
737 thread, so we need to notify it to shut down by closing its read pipe.
738 If it's not the signal delivery thread, then closing the read pipe isn't
739 going to hurt. */
740 if (thread_count <= 1)
741 scm_i_close_signal_pipe ();
742
0c97d7dd
MV
743 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
744
f60a7648 745 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
7f22442b 746
8e76ce94 747#if SCM_USE_PTHREAD_THREADS
7f22442b 748 GC_unregister_my_thread ();
653ccd78 749#endif
d823b11b
MV
750}
751
9de87eea 752static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
d823b11b 753
9de87eea
MV
754static void
755init_thread_key (void)
756{
f60a7648 757 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
9de87eea 758}
d823b11b 759
cde24ce1
AW
760/* Perform any initializations necessary to make the current thread
761 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
762 if necessary.
a54a94b3 763
9de87eea
MV
764 BASE is the stack base to use with GC.
765
766 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
767 which case the default dynamic state is used.
768
cde24ce1 769 Returns zero when the thread was known to guile already; otherwise
9de87eea 770 return 1.
cde24ce1
AW
771
772 Note that it could be the case that the thread was known
773 to Guile, but not in guile mode (because we are within a
774 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
775 be sure. New threads are put into guile mode implicitly. */
9de87eea
MV
776
777static int
12c1d861 778scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
d823b11b 779{
9de87eea
MV
780 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
781
cde24ce1
AW
782 if (SCM_I_CURRENT_THREAD)
783 {
784 /* Thread is already known to Guile.
785 */
786 return 0;
787 }
788 else
9de87eea
MV
789 {
790 /* This thread has not been guilified yet.
791 */
792
793 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
794 if (scm_initialized_p == 0)
795 {
796 /* First thread ever to enter Guile. Run the full
797 initialization.
798 */
799 scm_i_init_guile (base);
12c1d861 800
8e76ce94 801#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
12c1d861
AW
802 /* Allow other threads to come in later. */
803 GC_allow_register_threads ();
4000d064 804#endif
12c1d861 805
9de87eea
MV
806 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
807 }
808 else
809 {
810 /* Guile is already initialized, but this thread enters it for
811 the first time. Only initialize this thread.
812 */
813 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
12c1d861
AW
814
815 /* Register this thread with libgc. */
8e76ce94 816#if SCM_USE_PTHREAD_THREADS
12c1d861 817 GC_register_my_thread (base);
8e76ce94 818#endif
12c1d861 819
9de87eea
MV
820 guilify_self_1 (base);
821 guilify_self_2 (parent);
822 }
823 return 1;
824 }
d823b11b
MV
825}
826
12c1d861
AW
827void
828scm_init_guile ()
d823b11b 829{
12c1d861
AW
830 struct GC_stack_base stack_base;
831
832 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
833 scm_i_init_thread_for_guile (&stack_base,
834 scm_i_default_dynamic_state);
9de87eea
MV
835 else
836 {
12c1d861
AW
837 fprintf (stderr, "Failed to get stack base for current thread.\n");
838 exit (1);
9de87eea 839 }
a54a94b3
MV
840}
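/* Editor's note (illustrative sketch, not part of the original source):
   an embedding program whose main thread wants to call into Guile can use
   scm_init_guile directly:

       int
       main (int argc, char **argv)
       {
         scm_init_guile ();
         scm_c_eval_string ("(display \"hello from Guile\\n\")");
         return 0;
       }

   Code that may run on arbitrary threads, or on platforms where
   GC_get_stack_base can fail, should prefer scm_with_guile below, which
   does not rely on finding the stack base automatically.  */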
841
12c1d861 842struct with_guile_args
cde24ce1
AW
843{
844 GC_fn_type func;
845 void *data;
12c1d861 846 SCM parent;
cde24ce1
AW
847};
848
849static void *
850with_guile_trampoline (void *data)
851{
12c1d861 852 struct with_guile_args *args = data;
cde24ce1
AW
853
854 return scm_c_with_continuation_barrier (args->func, args->data);
855}
856
12c1d861
AW
857static void *
858with_guile_and_parent (struct GC_stack_base *base, void *data)
9de87eea
MV
859{
860 void *res;
cde24ce1
AW
861 int new_thread;
862 scm_i_thread *t;
12c1d861 863 struct with_guile_args *args = data;
2e77f720 864
12c1d861 865 new_thread = scm_i_init_thread_for_guile (base, args->parent);
cde24ce1
AW
866 t = SCM_I_CURRENT_THREAD;
867 if (new_thread)
2e77f720 868 {
cde24ce1
AW
869 /* We are in Guile mode. */
870 assert (t->guile_mode);
871
12c1d861 872 res = scm_c_with_continuation_barrier (args->func, args->data);
cde24ce1
AW
873
874 /* Leave Guile mode. */
875 t->guile_mode = 0;
876 }
877 else if (t->guile_mode)
878 {
879 /* Already in Guile mode. */
12c1d861 880 res = scm_c_with_continuation_barrier (args->func, args->data);
2e77f720 881 }
74926120 882 else
cde24ce1 883 {
cde24ce1
AW
884 /* We are not in Guile mode, either because we are not within a
885 scm_with_guile, or because we are within a scm_without_guile.
72e6b608 886
cde24ce1
AW
887 This call to scm_with_guile() could happen from anywhere on the
888 stack, and in particular lower on the stack than when it was
889 when this thread was first guilified. Thus, `base' must be
890 updated. */
891#if SCM_STACK_GROWS_UP
12c1d861
AW
892 if (SCM_STACK_PTR (base->mem_base) < t->base)
893 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 894#else
12c1d861
AW
895 if (SCM_STACK_PTR (base->mem_base) > t->base)
896 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 897#endif
72e6b608 898
cde24ce1 899 t->guile_mode = 1;
12c1d861 900 res = with_gc_active (with_guile_trampoline, args);
cde24ce1
AW
901 t->guile_mode = 0;
902 }
903 return res;
72e6b608 904}
3d1af79f 905
12c1d861
AW
906static void *
907scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
908{
909 struct with_guile_args args;
910
911 args.func = func;
912 args.data = data;
913 args.parent = parent;
914
915 return GC_call_with_stack_base (with_guile_and_parent, &args);
916}
917
918void *
919scm_with_guile (void *(*func)(void *), void *data)
920{
921 return scm_i_with_guile_and_parent (func, data,
922 scm_i_default_dynamic_state);
923}
924
9de87eea
MV
925void *
926scm_without_guile (void *(*func)(void *), void *data)
d823b11b 927{
72e6b608 928 void *result;
cde24ce1 929 scm_i_thread *t = SCM_I_CURRENT_THREAD;
72e6b608 930
cde24ce1 931 if (t->guile_mode)
72e6b608 932 {
cde24ce1
AW
933 SCM_I_CURRENT_THREAD->guile_mode = 0;
934 result = with_gc_inactive (func, data);
935 SCM_I_CURRENT_THREAD->guile_mode = 1;
72e6b608
LC
936 }
937 else
cde24ce1 938 /* Otherwise we're not in guile mode, so nothing to do. */
72e6b608
LC
939 result = func (data);
940
941 return result;
9de87eea
MV
942}
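/* Editor's note (illustrative sketch, not part of the original source):
   a thread created outside of Guile brackets its Scheme work with
   scm_with_guile, and drops back out of guile mode around long blocking
   operations with scm_without_guile so that the collector never has to
   wait for it.  do_scheme_work and do_blocking_io stand for user code:

       static void *
       do_scheme_work (void *data)
       {
         scm_without_guile (do_blocking_io, data);
         return NULL;
       }

       void *result = scm_with_guile (do_scheme_work, NULL);

   scm_with_guile registers the calling thread with libgc on first use and
   runs the function inside a continuation barrier, so non-local exits do
   not escape into foreign stack frames.  */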
943
72e6b608 944\f
9de87eea
MV
945/*** Thread creation */
946
947typedef struct {
948 SCM parent;
949 SCM thunk;
950 SCM handler;
76da80e7 951 SCM thread;
9de87eea
MV
952 scm_i_pthread_mutex_t mutex;
953 scm_i_pthread_cond_t cond;
954} launch_data;
d823b11b 955
9de87eea
MV
956static void *
957really_launch (void *d)
958{
959 launch_data *data = (launch_data *)d;
960 SCM thunk = data->thunk, handler = data->handler;
961 scm_i_thread *t;
d823b11b 962
9de87eea 963 t = SCM_I_CURRENT_THREAD;
a54a94b3 964
9de87eea
MV
965 scm_i_scm_pthread_mutex_lock (&data->mutex);
966 data->thread = scm_current_thread ();
967 scm_i_pthread_cond_signal (&data->cond);
968 scm_i_pthread_mutex_unlock (&data->mutex);
969
970 if (SCM_UNBNDP (handler))
971 t->result = scm_call_0 (thunk);
972 else
973 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
974
975 return 0;
d823b11b
MV
976}
977
9de87eea
MV
978static void *
979launch_thread (void *d)
980{
981 launch_data *data = (launch_data *)d;
982 scm_i_pthread_detach (scm_i_pthread_self ());
983 scm_i_with_guile_and_parent (really_launch, d, data->parent);
984 return NULL;
985}
986
987SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
d823b11b 988 (SCM thunk, SCM handler),
9de87eea
MV
989 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
990 "returning a new thread object representing the thread. The procedure\n"
991 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
992 "\n"
993 "When @var{handler} is specified, then @var{thunk} is called from\n"
994 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
995 "handler. This catch is established inside the continuation barrier.\n"
996 "\n"
997 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
998 "the @emph{exit value} of the thread and the thread is terminated.")
d823b11b
MV
999#define FUNC_NAME s_scm_call_with_new_thread
1000{
9de87eea
MV
1001 launch_data data;
1002 scm_i_pthread_t id;
1003 int err;
d823b11b 1004
9de87eea
MV
1005 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
1006 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
1007 handler, SCM_ARG2, FUNC_NAME);
1008
1009 data.parent = scm_current_dynamic_state ();
1010 data.thunk = thunk;
1011 data.handler = handler;
1012 data.thread = SCM_BOOL_F;
1013 scm_i_pthread_mutex_init (&data.mutex, NULL);
1014 scm_i_pthread_cond_init (&data.cond, NULL);
1015
1016 scm_i_scm_pthread_mutex_lock (&data.mutex);
1017 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
1018 if (err)
1019 {
1020 scm_i_pthread_mutex_unlock (&data.mutex);
1021 errno = err;
1022 scm_syserror (NULL);
1023 }
1024 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1025 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1026
9de87eea 1027 return data.thread;
d823b11b
MV
1028}
1029#undef FUNC_NAME
1030
9de87eea
MV
1031typedef struct {
1032 SCM parent;
1033 scm_t_catch_body body;
1034 void *body_data;
1035 scm_t_catch_handler handler;
1036 void *handler_data;
1037 SCM thread;
1038 scm_i_pthread_mutex_t mutex;
1039 scm_i_pthread_cond_t cond;
1040} spawn_data;
1041
1042static void *
1043really_spawn (void *d)
1044{
1045 spawn_data *data = (spawn_data *)d;
1046 scm_t_catch_body body = data->body;
1047 void *body_data = data->body_data;
1048 scm_t_catch_handler handler = data->handler;
1049 void *handler_data = data->handler_data;
1050 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1051
1052 scm_i_scm_pthread_mutex_lock (&data->mutex);
1053 data->thread = scm_current_thread ();
1054 scm_i_pthread_cond_signal (&data->cond);
1055 scm_i_pthread_mutex_unlock (&data->mutex);
1056
1057 if (handler == NULL)
1058 t->result = body (body_data);
1059 else
1060 t->result = scm_internal_catch (SCM_BOOL_T,
1061 body, body_data,
1062 handler, handler_data);
1063
1064 return 0;
1065}
1066
1067static void *
1068spawn_thread (void *d)
1069{
1070 spawn_data *data = (spawn_data *)d;
1071 scm_i_pthread_detach (scm_i_pthread_self ());
1072 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1073 return NULL;
1074}
1075
1076SCM
1077scm_spawn_thread (scm_t_catch_body body, void *body_data,
1078 scm_t_catch_handler handler, void *handler_data)
1079{
1080 spawn_data data;
1081 scm_i_pthread_t id;
1082 int err;
1083
1084 data.parent = scm_current_dynamic_state ();
1085 data.body = body;
1086 data.body_data = body_data;
1087 data.handler = handler;
1088 data.handler_data = handler_data;
1089 data.thread = SCM_BOOL_F;
1090 scm_i_pthread_mutex_init (&data.mutex, NULL);
1091 scm_i_pthread_cond_init (&data.cond, NULL);
1092
1093 scm_i_scm_pthread_mutex_lock (&data.mutex);
1094 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1095 if (err)
1096 {
1097 scm_i_pthread_mutex_unlock (&data.mutex);
1098 errno = err;
1099 scm_syserror (NULL);
1100 }
1101 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1102 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1103
9de87eea
MV
1104 return data.thread;
1105}
1106
29717c89
MD
1107SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1108 (),
1109"Move the calling thread to the end of the scheduling queue.")
1110#define FUNC_NAME s_scm_yield
1111{
9de87eea 1112 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1113}
1114#undef FUNC_NAME
1115
2e77f720
LC
1116SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1117 (SCM thread),
1118"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1119"cannot be the current thread, and if @var{thread} has already terminated or "
1120"been signaled to terminate, this function is a no-op.")
1121#define FUNC_NAME s_scm_cancel_thread
1122{
1123 scm_i_thread *t = NULL;
1124
1125 SCM_VALIDATE_THREAD (1, thread);
1126 t = SCM_I_THREAD_DATA (thread);
86a597f8 1127 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1128 if (!t->canceled)
1129 {
1130 t->canceled = 1;
86a597f8 1131 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1132 scm_i_pthread_cancel (t->pthread);
1133 }
1134 else
86a597f8 1135 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1136
1137 return SCM_UNSPECIFIED;
1138}
1139#undef FUNC_NAME
1140
1141SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1142 (SCM thread, SCM proc),
1143"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1144"This handler will be called when the thread exits.")
1145#define FUNC_NAME s_scm_set_thread_cleanup_x
1146{
1147 scm_i_thread *t;
1148
1149 SCM_VALIDATE_THREAD (1, thread);
1150 if (!scm_is_false (proc))
1151 SCM_VALIDATE_THUNK (2, proc);
1152
2e77f720 1153 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1154 scm_i_pthread_mutex_lock (&t->admin_mutex);
1155
2e77f720
LC
1156 if (!(t->exited || t->canceled))
1157 t->cleanup_handler = proc;
1158
86a597f8 1159 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1160
1161 return SCM_UNSPECIFIED;
1162}
1163#undef FUNC_NAME
1164
1165SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1166 (SCM thread),
1167"Return the cleanup handler installed for the thread @var{thread}.")
1168#define FUNC_NAME s_scm_thread_cleanup
1169{
1170 scm_i_thread *t;
1171 SCM ret;
1172
1173 SCM_VALIDATE_THREAD (1, thread);
1174
2e77f720 1175 t = SCM_I_THREAD_DATA (thread);
86a597f8 1176 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1177 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1178 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1179
1180 return ret;
1181}
1182#undef FUNC_NAME
1183
6180e336
NJ
1184SCM scm_join_thread (SCM thread)
1185{
1186 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1187}
1188
1189SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1190 (SCM thread, SCM timeout, SCM timeoutval),
d823b11b
MV
1191"Suspend execution of the calling thread until the target @var{thread} "
1192"terminates, unless the target @var{thread} has already terminated. ")
6180e336 1193#define FUNC_NAME s_scm_join_thread_timed
5f05c406 1194{
9de87eea 1195 scm_i_thread *t;
6180e336
NJ
1196 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1197 SCM res = SCM_BOOL_F;
1198
1199 if (! (SCM_UNBNDP (timeoutval)))
1200 res = timeoutval;
d823b11b
MV
1201
1202 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1203 if (scm_is_eq (scm_current_thread (), thread))
2e77f720 1204 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
d823b11b 1205
9de87eea 1206 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1207 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1208
6180e336
NJ
1209 if (! SCM_UNBNDP (timeout))
1210 {
1211 to_timespec (timeout, &ctimeout);
1212 timeout_ptr = &ctimeout;
1213 }
1214
1215 if (t->exited)
1216 res = t->result;
1217 else
d823b11b 1218 {
9de87eea
MV
1219 while (1)
1220 {
74926120 1221 int err = block_self (t->join_queue, thread, &t->admin_mutex,
6180e336
NJ
1222 timeout_ptr);
1223 if (err == 0)
1224 {
1225 if (t->exited)
1226 {
1227 res = t->result;
1228 break;
1229 }
1230 }
1231 else if (err == ETIMEDOUT)
9de87eea 1232 break;
6180e336 1233
86a597f8 1234 scm_i_pthread_mutex_unlock (&t->admin_mutex);
9de87eea 1235 SCM_TICK;
86a597f8 1236 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
21346c4f
NJ
1237
1238 /* Check for exit again, since we just released and
1239 reacquired the admin mutex, before the next block_self
1240 call (which would block forever if t has already
1241 exited). */
1242 if (t->exited)
1243 {
1244 res = t->result;
1245 break;
1246 }
9de87eea 1247 }
d823b11b 1248 }
9de87eea 1249
86a597f8 1250 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 1251
d823b11b 1252 return res;
5f05c406
MV
1253}
1254#undef FUNC_NAME
1255
6180e336
NJ
1256SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1257 (SCM obj),
1258 "Return @code{#t} if @var{obj} is a thread.")
1259#define FUNC_NAME s_scm_thread_p
1260{
1261 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1262}
1263#undef FUNC_NAME
5f05c406 1264
4079f87e 1265
9de87eea
MV
1266static size_t
1267fat_mutex_free (SCM mx)
76da80e7 1268{
9de87eea
MV
1269 fat_mutex *m = SCM_MUTEX_DATA (mx);
1270 scm_i_pthread_mutex_destroy (&m->lock);
76da80e7
MV
1271 return 0;
1272}
1273
1274static int
9de87eea 1275fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1276{
9de87eea
MV
1277 fat_mutex *m = SCM_MUTEX_DATA (mx);
1278 scm_puts ("#<mutex ", port);
1279 scm_uintprint ((scm_t_bits)m, 16, port);
1280 scm_puts (">", port);
1281 return 1;
76da80e7
MV
1282}
1283
76da80e7 1284static SCM
6180e336 1285make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1286{
9de87eea
MV
1287 fat_mutex *m;
1288 SCM mx;
1289
1290 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1291 scm_i_pthread_mutex_init (&m->lock, NULL);
1292 m->owner = SCM_BOOL_F;
adc085f1 1293 m->level = 0;
6180e336 1294
adc085f1 1295 m->recursive = recursive;
6180e336
NJ
1296 m->unchecked_unlock = unchecked_unlock;
1297 m->allow_external_unlock = external_unlock;
1298
9de87eea
MV
1299 m->waiting = SCM_EOL;
1300 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1301 m->waiting = make_queue ();
1302 return mx;
76da80e7
MV
1303}
1304
6180e336
NJ
1305SCM scm_make_mutex (void)
1306{
1307 return scm_make_mutex_with_flags (SCM_EOL);
1308}
1309
2a1d0688
NJ
1310SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1311SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1312SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1313
1314SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1315 (SCM flags),
9de87eea 1316 "Create a new mutex. ")
6180e336 1317#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1318{
6180e336
NJ
1319 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1320
1321 SCM ptr = flags;
1322 while (! scm_is_null (ptr))
1323 {
1324 SCM flag = SCM_CAR (ptr);
1325 if (scm_is_eq (flag, unchecked_unlock_sym))
1326 unchecked_unlock = 1;
1327 else if (scm_is_eq (flag, allow_external_unlock_sym))
1328 external_unlock = 1;
1329 else if (scm_is_eq (flag, recursive_sym))
1330 recursive = 1;
74926120 1331 else
2a1d0688 1332 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1333 ptr = SCM_CDR (ptr);
1334 }
1335 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
76da80e7
MV
1336}
1337#undef FUNC_NAME
1338
9de87eea 1339SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
9bc4701c 1340 (void),
9de87eea
MV
1341 "Create a new recursive mutex. ")
1342#define FUNC_NAME s_scm_make_recursive_mutex
9bc4701c 1343{
6180e336 1344 return make_fat_mutex (1, 0, 0);
9bc4701c
MD
1345}
1346#undef FUNC_NAME
1347
6180e336
NJ
1348SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1349
1350static SCM
adc085f1 1351fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
9de87eea
MV
1352{
1353 fat_mutex *m = SCM_MUTEX_DATA (mutex);
6180e336 1354
adc085f1 1355 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
6180e336
NJ
1356 SCM err = SCM_BOOL_F;
1357
1358 struct timeval current_time;
9de87eea
MV
1359
1360 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1361
1362 while (1)
9de87eea 1363 {
adc085f1 1364 if (m->level == 0)
6180e336 1365 {
adc085f1 1366 m->owner = new_owner;
6180e336 1367 m->level++;
74926120 1368
adc085f1 1369 if (SCM_I_IS_THREAD (new_owner))
6180e336 1370 {
adc085f1 1371 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
ccb80964 1372
4f39f31e
LC
1373 /* FIXME: The order in which `t->admin_mutex' and
1374 `m->lock' are taken differs from that in
1375 `on_thread_exit', potentially leading to deadlocks. */
6180e336 1376 scm_i_pthread_mutex_lock (&t->admin_mutex);
a0faf7dd
LC
1377
1378 /* Only keep a weak reference to MUTEX so that it's not
f57fdf07
LC
1379 retained when not referenced elsewhere (bug #27450).
1380 The weak pair itself is eventually removed when MUTEX
1381 is unlocked. Note that `t->mutexes' lists mutexes
1382 currently held by T, so it should be small. */
a0faf7dd
LC
1383 t->mutexes = scm_weak_car_pair (mutex, t->mutexes);
1384
6180e336 1385 scm_i_pthread_mutex_unlock (&t->admin_mutex);
6180e336 1386 }
adc085f1
JG
1387 *ret = 1;
1388 break;
1389 }
1390 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1391 {
1392 m->owner = new_owner;
1393 err = scm_cons (scm_abandoned_mutex_error_key,
1394 scm_from_locale_string ("lock obtained on abandoned "
1395 "mutex"));
1396 *ret = 1;
1397 break;
1398 }
1399 else if (scm_is_eq (m->owner, new_owner))
1400 {
1401 if (m->recursive)
1402 {
1403 m->level++;
74926120 1404 *ret = 1;
adc085f1
JG
1405 }
1406 else
6180e336 1407 {
adc085f1
JG
1408 err = scm_cons (scm_misc_error_key,
1409 scm_from_locale_string ("mutex already locked "
1410 "by thread"));
1411 *ret = 0;
1412 }
74926120 1413 break;
adc085f1 1414 }
9de87eea 1415 else
9de87eea 1416 {
74926120 1417 if (timeout != NULL)
adc085f1
JG
1418 {
1419 gettimeofday (&current_time, NULL);
1420 if (current_time.tv_sec > timeout->tv_sec ||
1421 (current_time.tv_sec == timeout->tv_sec &&
1422 current_time.tv_usec * 1000 > timeout->tv_nsec))
6180e336 1423 {
adc085f1
JG
1424 *ret = 0;
1425 break;
6180e336 1426 }
6180e336 1427 }
37a52039 1428 block_self (m->waiting, mutex, &m->lock, timeout);
9de87eea
MV
1429 scm_i_pthread_mutex_unlock (&m->lock);
1430 SCM_TICK;
1431 scm_i_scm_pthread_mutex_lock (&m->lock);
1432 }
1433 }
1434 scm_i_pthread_mutex_unlock (&m->lock);
6180e336 1435 return err;
9de87eea
MV
1436}
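/* Editor's note (not part of the original source): fat_mutex_lock returns
   either SCM_BOOL_F or, on an exceptional condition, a (key . message)
   pair that the callers below re-throw with scm_ithrow.  Independently of
   that, *ret is set to 1 when the lock was acquired (including the
   recursive and abandoned-owner cases) and to 0 when it was not, e.g. on
   timeout.  */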
1437
6180e336
NJ
1438SCM scm_lock_mutex (SCM mx)
1439{
adc085f1 1440 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
6180e336
NJ
1441}
1442
adc085f1
JG
1443SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1444 (SCM m, SCM timeout, SCM owner),
9bc4701c
MD
1445"Lock @var{mutex}. If the mutex is already locked, the calling thread "
1446"blocks until the mutex becomes available. The function returns when "
1447"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1448"a thread already owns will succeed right away and will not block the "
1449"thread. That is, Guile's mutexes are @emph{recursive}. ")
6180e336 1450#define FUNC_NAME s_scm_lock_mutex_timed
9bc4701c 1451{
6180e336
NJ
1452 SCM exception;
1453 int ret = 0;
1454 scm_t_timespec cwaittime, *waittime = NULL;
76da80e7 1455
6180e336
NJ
1456 SCM_VALIDATE_MUTEX (1, m);
1457
1458 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1459 {
1460 to_timespec (timeout, &cwaittime);
1461 waittime = &cwaittime;
1462 }
1463
97ec95b7
LC
1464 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1465 SCM_VALIDATE_THREAD (3, owner);
1466
adc085f1 1467 exception = fat_mutex_lock (m, waittime, owner, &ret);
6180e336
NJ
1468 if (!scm_is_false (exception))
1469 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1470 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c 1471}
76da80e7 1472#undef FUNC_NAME
9bc4701c 1473
2a3db25e
MW
1474static void
1475lock_mutex_return_void (SCM mx)
1476{
1477 (void) scm_lock_mutex (mx);
1478}
1479
1480static void
1481unlock_mutex_return_void (SCM mx)
1482{
1483 (void) scm_unlock_mutex (mx);
1484}
1485
a4d106c7 1486void
661ae7ab 1487scm_dynwind_lock_mutex (SCM mutex)
a4d106c7 1488{
2a3db25e 1489 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
661ae7ab 1490 SCM_F_WIND_EXPLICITLY);
2a3db25e 1491 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
661ae7ab 1492 SCM_F_WIND_EXPLICITLY);
a4d106c7
MV
1493}
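/* Editor's note (illustrative sketch, not part of the original source):
   the dynwind variant takes the lock for the extent of the current
   dynwind context, so it is released again even on a non-local exit.
   do_guarded_work stands for code that may throw:

       SCM mx = scm_make_mutex ();

       scm_dynwind_begin (0);
       scm_dynwind_lock_mutex (mx);
       do_guarded_work ();
       scm_dynwind_end ();
*/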
1494
9bc4701c 1495SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
ba1b7223 1496 (SCM mutex),
9bc4701c
MD
1497"Try to lock @var{mutex}. If the mutex is already locked by someone "
1498"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1499#define FUNC_NAME s_scm_try_mutex
1500{
6180e336
NJ
1501 SCM exception;
1502 int ret = 0;
1503 scm_t_timespec cwaittime, *waittime = NULL;
9de87eea 1504
ba1b7223 1505 SCM_VALIDATE_MUTEX (1, mutex);
6180e336
NJ
1506
1507 to_timespec (scm_from_int(0), &cwaittime);
1508 waittime = &cwaittime;
74926120 1509
adc085f1 1510 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
6180e336
NJ
1511 if (!scm_is_false (exception))
1512 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1513 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9de87eea
MV
1514}
1515#undef FUNC_NAME
76da80e7 1516
6180e336
NJ
1517/*** Fat condition variables */
1518
1519typedef struct {
1520 scm_i_pthread_mutex_t lock;
1521 SCM waiting; /* the threads waiting for this condition. */
1522} fat_cond;
1523
1524#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1525#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1526
1527static int
1528fat_mutex_unlock (SCM mutex, SCM cond,
1529 const scm_t_timespec *waittime, int relock)
9de87eea 1530{
7f991c7d 1531 SCM owner;
6180e336
NJ
1532 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1533 fat_cond *c = NULL;
1534 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1535 int err = 0, ret = 0;
9de87eea
MV
1536
1537 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1 1538
7f991c7d 1539 owner = m->owner;
adc085f1 1540
d31ae2c3 1541 if (!scm_is_eq (owner, t->handle))
9bc4701c 1542 {
adc085f1 1543 if (m->level == 0)
6180e336
NJ
1544 {
1545 if (!m->unchecked_unlock)
2a1d0688
NJ
1546 {
1547 scm_i_pthread_mutex_unlock (&m->lock);
1548 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1549 }
d31ae2c3 1550 owner = t->handle;
6180e336
NJ
1551 }
1552 else if (!m->allow_external_unlock)
2a1d0688
NJ
1553 {
1554 scm_i_pthread_mutex_unlock (&m->lock);
1555 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1556 }
6180e336
NJ
1557 }
1558
1559 if (! (SCM_UNBNDP (cond)))
1560 {
6180e336
NJ
1561 c = SCM_CONDVAR_DATA (cond);
1562 while (1)
1563 {
1564 int brk = 0;
1565
6180e336
NJ
1566 if (m->level > 0)
1567 m->level--;
adc085f1 1568 if (m->level == 0)
f57fdf07
LC
1569 {
1570 /* Change the owner of MUTEX. */
1571 t->mutexes = scm_delq_x (mutex, t->mutexes);
1572 m->owner = unblock_from_queue (m->waiting);
1573 }
adc085f1 1574
6180e336 1575 t->block_asyncs++;
74926120 1576
d2a51087
NJ
1577 err = block_self (c->waiting, cond, &m->lock, waittime);
1578 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
1579
1580 if (err == 0)
1581 {
1582 ret = 1;
1583 brk = 1;
1584 }
1585 else if (err == ETIMEDOUT)
1586 {
1587 ret = 0;
1588 brk = 1;
1589 }
1590 else if (err != EINTR)
74926120 1591 {
6180e336 1592 errno = err;
6180e336 1593 scm_syserror (NULL);
74926120 1594 }
6180e336
NJ
1595
1596 if (brk)
1597 {
1598 if (relock)
adc085f1 1599 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
4201062d 1600 t->block_asyncs--;
6180e336
NJ
1601 break;
1602 }
74926120 1603
6180e336
NJ
1604 t->block_asyncs--;
1605 scm_async_click ();
74926120 1606
6180e336
NJ
1607 scm_remember_upto_here_2 (cond, mutex);
1608
1609 scm_i_scm_pthread_mutex_lock (&m->lock);
1610 }
9bc4701c 1611 }
9de87eea 1612 else
6180e336
NJ
1613 {
1614 if (m->level > 0)
1615 m->level--;
74926120 1616 if (m->level == 0)
f57fdf07
LC
1617 {
1618 /* Change the owner of MUTEX. */
1619 t->mutexes = scm_delq_x (mutex, t->mutexes);
1620 m->owner = unblock_from_queue (m->waiting);
1621 }
74926120 1622
6180e336
NJ
1623 scm_i_pthread_mutex_unlock (&m->lock);
1624 ret = 1;
1625 }
9de87eea 1626
6180e336 1627 return ret;
9bc4701c 1628}
9bc4701c 1629
6180e336
NJ
1630SCM scm_unlock_mutex (SCM mx)
1631{
1632 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
9bc4701c 1633}
9bc4701c 1634
6180e336
NJ
1635SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1636 (SCM mx, SCM cond, SCM timeout),
9bc4701c
MD
1637"Unlocks @var{mutex} if the calling thread owns the lock on "
1638"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1639"thread results in undefined behaviour. Once a mutex has been unlocked, "
1640"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1641"lock. Every call to @code{lock-mutex} by this thread must be matched "
1642"with a call to @code{unlock-mutex}. Only the last call to "
1643"@code{unlock-mutex} will actually unlock the mutex. ")
6180e336 1644#define FUNC_NAME s_scm_unlock_mutex_timed
9bc4701c 1645{
6180e336
NJ
1646 scm_t_timespec cwaittime, *waittime = NULL;
1647
9bc4701c 1648 SCM_VALIDATE_MUTEX (1, mx);
6180e336
NJ
1649 if (! (SCM_UNBNDP (cond)))
1650 {
1651 SCM_VALIDATE_CONDVAR (2, cond);
1652
1653 if (! (SCM_UNBNDP (timeout)))
1654 {
1655 to_timespec (timeout, &cwaittime);
1656 waittime = &cwaittime;
1657 }
1658 }
1659
1660 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c
MD
1661}
1662#undef FUNC_NAME
1663
6180e336
NJ
1664SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1665 (SCM obj),
1666 "Return @code{#t} if @var{obj} is a mutex.")
1667#define FUNC_NAME s_scm_mutex_p
1668{
1669 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1670}
74926120 1671#undef FUNC_NAME
9de87eea
MV
1672
1673SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1674 (SCM mx),
1675 "Return the thread owning @var{mx}, or @code{#f}.")
1676#define FUNC_NAME s_scm_mutex_owner
1677{
adc085f1
JG
1678 SCM owner;
1679 fat_mutex *m = NULL;
1680
9de87eea 1681 SCM_VALIDATE_MUTEX (1, mx);
adc085f1
JG
1682 m = SCM_MUTEX_DATA (mx);
1683 scm_i_pthread_mutex_lock (&m->lock);
1684 owner = m->owner;
1685 scm_i_pthread_mutex_unlock (&m->lock);
1686
1687 return owner;
9de87eea
MV
1688}
1689#undef FUNC_NAME
1690
1691SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1692 (SCM mx),
adc085f1 1693 "Return the lock level of mutex @var{mx}.")
9de87eea
MV
1694#define FUNC_NAME s_scm_mutex_level
1695{
1696 SCM_VALIDATE_MUTEX (1, mx);
1697 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1698}
1699#undef FUNC_NAME
1700
adc085f1
JG
1701SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1702 (SCM mx),
1703 "Returns @code{#t} if the mutex @var{mx} is locked.")
1704#define FUNC_NAME s_scm_mutex_locked_p
1705{
1706 SCM_VALIDATE_MUTEX (1, mx);
1707 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1708}
1709#undef FUNC_NAME
9de87eea 1710
9de87eea
MV
1711static int
1712fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1713{
1714 fat_cond *c = SCM_CONDVAR_DATA (cv);
1715 scm_puts ("#<condition-variable ", port);
1716 scm_uintprint ((scm_t_bits)c, 16, port);
1717 scm_puts (">", port);
1718 return 1;
1719}
9bc4701c 1720
d823b11b
MV
1721SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1722 (void),
1723 "Make a new condition variable.")
1724#define FUNC_NAME s_scm_make_condition_variable
5f05c406 1725{
9de87eea
MV
1726 fat_cond *c;
1727 SCM cv;
1728
1729 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
9de87eea
MV
1730 c->waiting = SCM_EOL;
1731 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1732 c->waiting = make_queue ();
d823b11b 1733 return cv;
5f05c406 1734}
d823b11b 1735#undef FUNC_NAME
5f05c406 1736
d823b11b
MV
1737SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1738 (SCM cv, SCM mx, SCM t),
1739"Wait until @var{cond-var} has been signalled. While waiting, "
1740"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1741"is locked again when this function returns. When @var{time} is given, "
1742"it specifies a point in time where the waiting should be aborted. It "
1743"can be either a integer as returned by @code{current-time} or a pair "
1744"as returned by @code{gettimeofday}. When the waiting is aborted the "
1745"mutex is locked and @code{#f} is returned. When the condition "
1746"variable is in fact signalled, the mutex is also locked and @code{#t} "
1747"is returned. ")
1748#define FUNC_NAME s_scm_timed_wait_condition_variable
5f05c406 1749{
9de87eea 1750 scm_t_timespec waittime, *waitptr = NULL;
d823b11b
MV
1751
1752 SCM_VALIDATE_CONDVAR (1, cv);
1753 SCM_VALIDATE_MUTEX (2, mx);
74926120 1754
d823b11b
MV
1755 if (!SCM_UNBNDP (t))
1756 {
6180e336 1757 to_timespec (t, &waittime);
9de87eea 1758 waitptr = &waittime;
d823b11b
MV
1759 }
1760
2a1d0688 1761 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
5f05c406 1762}
d823b11b 1763#undef FUNC_NAME
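/* Editor's note (illustrative sketch, not part of the original source):
   from C the usual condition-variable protocol mirrors the classic
   pthread one, with data_ready standing for the caller's predicate over
   state protected by the mutex:

       SCM mx = scm_make_mutex ();
       SCM cv = scm_make_condition_variable ();

       scm_lock_mutex (mx);
       while (!data_ready ())
         scm_timed_wait_condition_variable (cv, mx, SCM_UNDEFINED);
       scm_unlock_mutex (mx);

   The producing side updates the shared state while holding mx and then
   calls scm_signal_condition_variable (cv), or the broadcast variant to
   wake every waiter.  Passing SCM_UNDEFINED as the timeout means wait
   indefinitely.  */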
5f05c406 1764
9de87eea
MV
1765static void
1766fat_cond_signal (fat_cond *c)
1767{
9de87eea 1768 unblock_from_queue (c->waiting);
9de87eea
MV
1769}
1770
d823b11b
MV
1771SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1772 (SCM cv),
1773 "Wake up one thread that is waiting for @var{cv}")
1774#define FUNC_NAME s_scm_signal_condition_variable
5f05c406 1775{
d823b11b 1776 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1777 fat_cond_signal (SCM_CONDVAR_DATA (cv));
d823b11b 1778 return SCM_BOOL_T;
5f05c406 1779}
d823b11b 1780#undef FUNC_NAME
5f05c406 1781
9de87eea
MV
1782static void
1783fat_cond_broadcast (fat_cond *c)
1784{
9de87eea
MV
1785 while (scm_is_true (unblock_from_queue (c->waiting)))
1786 ;
9de87eea
MV
1787}
1788
d823b11b
MV
1789SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1790 (SCM cv),
1791 "Wake up all threads that are waiting for @var{cv}. ")
1792#define FUNC_NAME s_scm_broadcast_condition_variable
5f05c406 1793{
d823b11b 1794 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1795 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
d823b11b 1796 return SCM_BOOL_T;
5f05c406 1797}
d823b11b 1798#undef FUNC_NAME
5f05c406 1799
6180e336
NJ
1800SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1801 (SCM obj),
1802 "Return @code{#t} if @var{obj} is a condition variable.")
1803#define FUNC_NAME s_scm_condition_variable_p
1804{
1805 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1806}
1807#undef FUNC_NAME
1808
\f
/*** Select */

struct select_args
{
  int nfds;
  SELECT_TYPE *read_fds;
  SELECT_TYPE *write_fds;
  SELECT_TYPE *except_fds;
  struct timeval *timeout;

  int result;
  int errno_value;
};

static void *
do_std_select (void *args)
{
  struct select_args *select_args;

  select_args = (struct select_args *) args;

  select_args->result =
    select (select_args->nfds,
            select_args->read_fds, select_args->write_fds,
            select_args->except_fds, select_args->timeout);
  select_args->errno_value = errno;

  return NULL;
}

int
scm_std_select (int nfds,
                SELECT_TYPE *readfds,
                SELECT_TYPE *writefds,
                SELECT_TYPE *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  /* Also watch the read end of this thread's sleep pipe, so that other
     threads can interrupt the select by writing to it.  */
  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd + 1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  t->sleep_fd = -1;
  scm_i_reset_sleep (t);

  /* If the sleep pipe was the only ready descriptor, report the early
     wakeup to the caller as EINTR.  */
  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}

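/* An illustrative sketch (hypothetical helper, not part of the original
   file): wait for FD to become readable for at most TIMEOUT_MS
   milliseconds using scm_std_select () above, so that the calling Guile
   thread remains interruptible while it blocks.  */
static int
example_wait_readable (int fd, long timeout_ms)
{
  fd_set readfds;
  struct timeval tv;

  FD_ZERO (&readfds);
  FD_SET (fd, &readfds);
  tv.tv_sec = timeout_ms / 1000;
  tv.tv_usec = (timeout_ms % 1000) * 1000;

  /* > 0 if FD is readable, 0 on timeout, -1 with errno set (EINTR when
     the thread was woken up early).  */
  return scm_std_select (fd + 1, &readfds, NULL, NULL, &tv);
}
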
/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

/* It seems reasonable to not run procedures related to mutex and condition
   variables within `GC_do_blocking ()' since (i) the GC can operate even
   without it, and (ii) the only potential gain would be GC latency.  See
   http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
   for a discussion of the pros and cons.  */

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  int res = scm_i_pthread_mutex_lock (mutex);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *) data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

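/* An illustrative sketch (hypothetical helper, not part of the original
   file): hold MUTEX over a region of C code that may throw, relying on
   scm_dynwind_pthread_mutex_lock () above to release it when the
   dynwind context is left, including on a non-local exit.  */
static void
example_with_lock (scm_i_pthread_mutex_t *mutex,
                   void (*body) (void *), void *data)
{
  scm_dynwind_begin (0);
  scm_dynwind_pthread_mutex_lock (mutex);
  body (data);
  scm_dynwind_end ();  /* Runs do_unlock via the unwind handler.  */
}
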
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}

#endif

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We cannot allocate while holding the thread_admin_mutex because of
     the way GC is done, so the result list is allocated up front and
     filled in below while the lock is held.  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
            (void),
            "Return the total number of processors of the machine, which\n"
            "is guaranteed to be at least 1.  A ``processor'' here is a\n"
            "thread execution unit, which can be either:\n\n"
            "@itemize\n"
            "@item an execution core in a (possibly multi-core) chip, in a\n"
            "  (possibly multi-chip) module, in a single computer, or\n"
            "@item a thread execution unit inside a core in the case of\n"
            "  @dfn{hyper-threaded} CPUs.\n"
            "@end itemize\n\n"
            "Which of the two definitions is used is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME

SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
            (void),
            "Like @code{total-processor-count}, but return the number of\n"
            "processors available to the current process.  See\n"
            "@code{setaffinity} and @code{getaffinity} for more\n"
            "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME

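/* An illustrative sketch (hypothetical helper, not part of the original
   file): a C caller sizing a worker pool from the procedure defined
   just above, converting the SCM result back to a C integer.  */
static unsigned long
example_worker_pool_size (void)
{
  unsigned long n = scm_to_ulong (scm_current_processor_count ());
  /* Leave one processor for the main thread when possible.  */
  return n > 1 ? n - 1 : 1;
}
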
\f

static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

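/* An illustrative sketch (hypothetical helper, not part of the original
   file): run BODY with asyncs blocked and a critical-section mutex held,
   using scm_dynwind_critical_section () above.  Passing SCM_BOOL_F
   selects the shared dynwind_critical_section_mutex.  */
static void
example_in_critical_section (void (*body) (void *), void *data)
{
  scm_dynwind_begin (0);
  scm_dynwind_critical_section (SCM_BOOL_F);
  body (data);
  scm_dynwind_end ();  /* Unblocks asyncs and unlocks the mutex.  */
}
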
/*** Initialization */

scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

\f
/* IA64-specific things.  */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/