/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
 *   2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013,
 *   2014 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */


#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "libguile/bdw-gc.h"
#include "libguile/_scm.h"

#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>

#ifdef HAVE_STRING_H
#include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
#endif

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#if HAVE_PTHREAD_NP_H
# include <pthread_np.h>
#endif

#include <sys/select.h>

#include <assert.h>
#include <fcntl.h>
#include <nproc.h>

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"
#include "libguile/weaks.h"

#include <full-read.h>



/* First some libgc shims. */

/* Make sure GC_fn_type is defined; it is missing from the public
   headers of GC 7.1 and earlier.  */
#ifndef HAVE_GC_FN_TYPE
typedef void * (* GC_fn_type) (void *);
#endif

#ifndef GC_SUCCESS
#define GC_SUCCESS 0
#endif

#ifndef GC_UNIMPLEMENTED
#define GC_UNIMPLEMENTED 3
#endif

/* Likewise struct GC_stack_base is missing before 7.1.  */
#ifndef HAVE_GC_STACK_BASE
struct GC_stack_base {
  void * mem_base; /* Base of memory stack.  */
#ifdef __ia64__
  void * reg_base; /* Base of separate register stack.  */
#endif
};

static int
GC_register_my_thread (struct GC_stack_base *stack_base)
{
  return GC_UNIMPLEMENTED;
}

static void
GC_unregister_my_thread ()
{
}

#if !SCM_USE_PTHREAD_THREADS
/* No threads; we can just use GC_stackbottom.  */
static void *
get_thread_stack_base ()
{
  return GC_stackbottom;
}

#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
  && defined PTHREAD_ATTR_GETSTACK_WORKS
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them.  */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com.  */
static void *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#elif HAVE_PTHREAD_ATTR_GET_NP
/* This one is for FreeBSD 9.  */
static void *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_attr_init (&attr);
  pthread_attr_get_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  pthread_attr_destroy (&attr);

  end = (char *)start + size;

#if SCM_STACK_GROWS_UP
  return start;
#else
  return end;
#endif
}

#else
#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl.  Please upgrade to libgc >= 7.1.
#endif

static int
GC_get_stack_base (struct GC_stack_base *stack_base)
{
  stack_base->mem_base = get_thread_stack_base ();
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum.  */
  stack_base->reg_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (stack_base->reg_base > bsp)
      stack_base->reg_base = bsp;
  }
#endif
  return GC_SUCCESS;
}

static void *
GC_call_with_stack_base (void * (*fn) (struct GC_stack_base*, void*), void *arg)
{
  struct GC_stack_base stack_base;

  stack_base.mem_base = (void*)&stack_base;
#ifdef __ia64__
  /* FIXME: Untested.  */
  {
    ucontext_t ctx;
    getcontext (&ctx);
    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
  }
#endif

  return fn (&stack_base, arg);
}
#endif /* HAVE_GC_STACK_BASE */


/* Now define with_gc_active and with_gc_inactive.  */

#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer).  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active.  */

static void*
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void*
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */



static void
to_timespec (SCM t, scm_t_timespec *waittime)
{
  if (scm_is_pair (t))
    {
      waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
      waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
    }
  else
    {
      double time = scm_to_double (t);
      double sec = scm_c_truncate (time);

      waittime->tv_sec = (long) sec;
      waittime->tv_nsec = (long) ((time - sec) * 1000000000);
    }
}

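/* Illustrative sketch, not part of the original file: how the two timeout
   representations accepted by to_timespec convert.  A (secs . usecs) pair,
   as returned by `gettimeofday', keeps its parts, while a real number of
   seconds is split into seconds and nanoseconds.  The function name below
   is hypothetical.  */
static void
example_to_timespec_usage (void)
{
  scm_t_timespec ts;

  /* (5 . 250000) => 5 s and 250000 us, i.e. 250000000 ns.  */
  to_timespec (scm_cons (scm_from_ulong (5), scm_from_ulong (250000)), &ts);

  /* 2.5 => 2 s and 500000000 ns.  */
  to_timespec (scm_from_double (2.5), &ts);
}
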

/*** Queues */

/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
   the risk of false references leading to unbounded retained space as
   described in "Bounding Space Usage of Conservative Garbage Collectors",
   H.J. Boehm, 2001.  */

/* Make an empty queue data structure.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}

/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, scm_is_eq (prev, q) ? SCM_EOL : prev);
          SCM_SETCDR (prev, SCM_CDR (c));

          /* GC-robust */
          SCM_SETCDR (c, SCM_EOL);

          SCM_CRITICAL_SECTION_END;
          return 1;
        }
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}

/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;

      /* GC-robust */
      SCM_SETCDR (c, SCM_EOL);

      return SCM_CAR (c);
    }
}

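/* Illustrative sketch, not part of the original file: typical use of the
   queue primitives above.  enqueue returns a handle that remqueue accepts,
   and dequeue yields elements in FIFO order, or SCM_BOOL_F when the queue
   is empty.  The function name is hypothetical.  */
static void
example_queue_usage (void)
{
  SCM q = make_queue ();
  SCM h1 = enqueue (q, scm_from_int (1));

  enqueue (q, scm_from_int (2));
  remqueue (q, h1);      /* drop the first element via its handle */
  dequeue (q);           /* => 2 */
  dequeue (q);           /* => SCM_BOOL_F; the queue is now empty */
}
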
/*** Thread smob routines */


static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int   ui;
    unsigned long  ul;
    scm_t_uintmax  um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}


/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}

/* Wake up the first thread on QUEUE, if any.  The awoken thread is
   returned, or #f if the queue was empty.
 */
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}

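/* Illustrative sketch, not part of the original file: the wait-loop pattern
   that the rest of this file builds on top of block_self and
   unblock_from_queue.  The caller holds LOCK, sleeps on WAITERS until a
   predicate holds or the optional WAITTIME expires, and rechecks after
   every wakeup; EINTR only means a system async interrupted the sleep.  A
   thread that makes the predicate true would call unblock_from_queue
   (WAITERS) while holding LOCK.  All names here are hypothetical.  */
static int
example_wait_for_flag (SCM waiters, SCM protector,
                       scm_i_pthread_mutex_t *lock, int *flag,
                       const scm_t_timespec *waittime)
{
  int err = 0;

  scm_i_scm_pthread_mutex_lock (lock);
  while (!*flag && err != ETIMEDOUT)
    err = block_self (waiters, protector, lock, waittime);
  scm_i_pthread_mutex_unlock (lock);

  return err;
}
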

/* Getting into and out of guile mode.
 */

/* Key used to attach a cleanup handler to a given thread.  Also, if
   thread-local storage is unavailable, this key is used to retrieve the
   current thread with `pthread_getspecific ()'.  */
scm_i_pthread_key_t scm_i_thread_key;


#ifdef SCM_HAVE_THREAD_STORAGE_CLASS

/* When thread-local storage (TLS) is available, a pointer to the
   current-thread object is kept in TLS.  Note that storing the thread-object
   itself in TLS (rather than a pointer to some malloc'd memory) is not
   possible since thread objects may live longer than the actual thread they
   represent.  */
SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;

#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */


static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static int thread_count;

static SCM scm_i_default_dynamic_state;

/* Run when a fluid is collected.  */
void
scm_i_reset_fluid (size_t n)
{
  scm_i_thread *t;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (t = all_threads; t; t = t->next_thread)
    if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
      {
        SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);

        if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
          SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
      }
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}

/* Perform first stage of thread initialisation, in non-guile mode.
 */
static void
guilify_self_1 (struct GC_stack_base *base)
{
  scm_i_thread t;

  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
     before allocating anything in this thread, because allocation could
     cause GC to run, and GC could cause finalizers, which could invoke
     Scheme functions, which need the current thread to be set.  */

  t.pthread = scm_i_pthread_self ();
  t.handle = SCM_BOOL_F;
  t.result = SCM_BOOL_F;
  t.cleanup_handler = SCM_BOOL_F;
  t.mutexes = SCM_EOL;
  t.held_mutex = NULL;
  t.join_queue = SCM_EOL;
  t.dynamic_state = SCM_BOOL_F;
  t.dynwinds = SCM_EOL;
  t.active_asyncs = SCM_EOL;
  t.block_asyncs = 1;
  t.pending_asyncs = 1;
  t.critical_section_level = 0;
  t.base = base->mem_base;
#ifdef __ia64__
  t.register_backing_store_base = base->reg_base;
#endif
  t.continuation_root = SCM_EOL;
  t.continuation_base = t.base;
  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
  t.sleep_mutex = NULL;
  t.sleep_object = SCM_BOOL_F;
  t.sleep_fd = -1;

  if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();

  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
  t.current_mark_stack_ptr = NULL;
  t.current_mark_stack_limit = NULL;
  t.canceled = 0;
  t.exited = 0;
  t.guile_mode = 0;

  /* The switcheroo.  */
  {
    scm_i_thread *t_ptr = &t;

    GC_disable ();
    t_ptr = GC_malloc (sizeof (scm_i_thread));
    memcpy (t_ptr, &t, sizeof t);

    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);

#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
    /* Cache the current thread in TLS for faster lookup.  */
    scm_i_current_thread = t_ptr;
#endif

    scm_i_pthread_mutex_lock (&thread_admin_mutex);
    t_ptr->next_thread = all_threads;
    all_threads = t_ptr;
    thread_count++;
    scm_i_pthread_mutex_unlock (&thread_admin_mutex);

    GC_enable ();
  }
}

/* Perform second stage of thread initialisation, in guile mode.
 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->guile_mode = 1;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;
  t->vm = SCM_BOOL_F;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;

  /* See note in finalizers.c:queue_finalizer_async().  */
  GC_invoke_finalizers ();
}


/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level;      /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive;  /* allow recursive locking? */
  int unchecked_unlock;      /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
                                owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))

static SCM
call_cleanup (void *data)
{
  SCM *proc_p = data;
  return scm_call_0 (*proc_p);
}

/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  This needs to be done in Guile mode.  */
  scm_i_ensure_signal_delivery_thread ();

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
                                      call_cleanup, &ptr,
                                      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);

      if (!SCM_UNBNDP (mutex))
        {
          fat_mutex *m = SCM_MUTEX_DATA (mutex);

          scm_i_pthread_mutex_lock (&m->lock);

          /* Check whether T owns MUTEX.  This is usually the case, unless
             T abandoned MUTEX; in that case, T is no longer its owner (see
             `fat_mutex_lock') but MUTEX is still in `t->mutexes'.  */
          if (scm_is_eq (m->owner, t->handle))
            unblock_from_queue (m->waiting);

          scm_i_pthread_mutex_unlock (&m->lock);
        }

      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}

static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
  /* Won't hurt if we are already registered.  */
#if SCM_USE_PTHREAD_THREADS
  GC_register_my_thread (sb);
#endif

  return scm_with_guile (do_thread_exit, v);
}

static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* If we were canceled, we were unable to clear `t->guile_mode', so do
     it here.  */
  t->guile_mode = 0;

  /* If this thread was cancelled while doing a cond wait, it will
     still have a mutex locked, so we unlock it here.  */
  if (t->held_mutex)
    {
      scm_i_pthread_mutex_unlock (t->held_mutex);
      t->held_mutex = NULL;
    }

  /* Reinstate the current thread for purposes of scm_with_guile
     guile-mode cleanup handlers.  Only really needed in the non-TLS
     case but it doesn't hurt to be consistent.  */
  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Scheme-level thread finalizers and other cleanup needs to happen in
     guile mode.  */
  GC_call_with_stack_base (do_thread_exit_trampoline, t);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;

        /* GC-robust */
        t->next_thread = NULL;

        break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);

#if SCM_USE_PTHREAD_THREADS
  GC_unregister_my_thread ();
#endif
}

static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}

d823b11b 793
cde24ce1
AW
794/* Perform any initializations necessary to make the current thread
795 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
796 if necessary.
a54a94b3 797
9de87eea
MV
798 BASE is the stack base to use with GC.
799
800 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
801 which case the default dynamic state is used.
802
cde24ce1 803 Returns zero when the thread was known to guile already; otherwise
9de87eea 804 return 1.
cde24ce1
AW
805
806 Note that it could be the case that the thread was known
807 to Guile, but not in guile mode (because we are within a
808 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
809 be sure. New threads are put into guile mode implicitly. */
9de87eea
MV
810
811static int
12c1d861 812scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
d823b11b 813{
9de87eea
MV
814 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
815
cde24ce1
AW
816 if (SCM_I_CURRENT_THREAD)
817 {
818 /* Thread is already known to Guile.
819 */
820 return 0;
821 }
822 else
9de87eea
MV
823 {
824 /* This thread has not been guilified yet.
825 */
826
827 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
828 if (scm_initialized_p == 0)
829 {
830 /* First thread ever to enter Guile. Run the full
831 initialization.
832 */
833 scm_i_init_guile (base);
12c1d861 834
8e76ce94 835#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
12c1d861
AW
836 /* Allow other threads to come in later. */
837 GC_allow_register_threads ();
4000d064 838#endif
12c1d861 839
9de87eea
MV
840 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
841 }
842 else
843 {
844 /* Guile is already initialized, but this thread enters it for
845 the first time. Only initialize this thread.
846 */
847 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
12c1d861
AW
848
849 /* Register this thread with libgc. */
8e76ce94 850#if SCM_USE_PTHREAD_THREADS
12c1d861 851 GC_register_my_thread (base);
8e76ce94 852#endif
12c1d861 853
9de87eea
MV
854 guilify_self_1 (base);
855 guilify_self_2 (parent);
856 }
857 return 1;
858 }
d823b11b
MV
859}
860
12c1d861
AW
861void
862scm_init_guile ()
d823b11b 863{
12c1d861
AW
864 struct GC_stack_base stack_base;
865
866 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
867 scm_i_init_thread_for_guile (&stack_base,
868 scm_i_default_dynamic_state);
9de87eea
MV
869 else
870 {
12c1d861 871 fprintf (stderr, "Failed to get stack base for current thread.\n");
b3da54d1 872 exit (EXIT_FAILURE);
9de87eea 873 }
a54a94b3
MV
874}
875
12c1d861 876struct with_guile_args
cde24ce1
AW
877{
878 GC_fn_type func;
879 void *data;
12c1d861 880 SCM parent;
cde24ce1
AW
881};
882
883static void *
884with_guile_trampoline (void *data)
885{
12c1d861 886 struct with_guile_args *args = data;
cde24ce1
AW
887
888 return scm_c_with_continuation_barrier (args->func, args->data);
889}
890
12c1d861
AW
891static void *
892with_guile_and_parent (struct GC_stack_base *base, void *data)
9de87eea
MV
893{
894 void *res;
cde24ce1
AW
895 int new_thread;
896 scm_i_thread *t;
12c1d861 897 struct with_guile_args *args = data;
2e77f720 898
12c1d861 899 new_thread = scm_i_init_thread_for_guile (base, args->parent);
cde24ce1
AW
900 t = SCM_I_CURRENT_THREAD;
901 if (new_thread)
2e77f720 902 {
cde24ce1
AW
903 /* We are in Guile mode. */
904 assert (t->guile_mode);
905
12c1d861 906 res = scm_c_with_continuation_barrier (args->func, args->data);
cde24ce1
AW
907
908 /* Leave Guile mode. */
909 t->guile_mode = 0;
910 }
911 else if (t->guile_mode)
912 {
913 /* Already in Guile mode. */
12c1d861 914 res = scm_c_with_continuation_barrier (args->func, args->data);
2e77f720 915 }
74926120 916 else
cde24ce1 917 {
cde24ce1
AW
918 /* We are not in Guile mode, either because we are not within a
919 scm_with_guile, or because we are within a scm_without_guile.
72e6b608 920
cde24ce1
AW
921 This call to scm_with_guile() could happen from anywhere on the
922 stack, and in particular lower on the stack than when it was
923 when this thread was first guilified. Thus, `base' must be
924 updated. */
925#if SCM_STACK_GROWS_UP
12c1d861
AW
926 if (SCM_STACK_PTR (base->mem_base) < t->base)
927 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 928#else
12c1d861
AW
929 if (SCM_STACK_PTR (base->mem_base) > t->base)
930 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 931#endif
72e6b608 932
cde24ce1 933 t->guile_mode = 1;
12c1d861 934 res = with_gc_active (with_guile_trampoline, args);
cde24ce1
AW
935 t->guile_mode = 0;
936 }
937 return res;
72e6b608 938}
3d1af79f 939
12c1d861
AW
940static void *
941scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
942{
943 struct with_guile_args args;
944
945 args.func = func;
946 args.data = data;
947 args.parent = parent;
948
949 return GC_call_with_stack_base (with_guile_and_parent, &args);
950}
951
952void *
953scm_with_guile (void *(*func)(void *), void *data)
954{
955 return scm_i_with_guile_and_parent (func, data,
956 scm_i_default_dynamic_state);
957}
958
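/* Illustrative sketch, not part of the original file: entering Guile mode
   from a thread that Guile did not create.  scm_with_guile registers the
   thread with Guile and the GC if necessary, runs the callback behind a
   continuation barrier, and leaves Guile mode again when it returns.  The
   function names are hypothetical.  */
static void *
example_in_guile (void *data)
{
  /* Allocating and calling Scheme is safe here.  */
  scm_c_eval_string ("(display \"hello from Guile\\n\")");
  return NULL;
}

static void
example_enter_guile_from_foreign_thread (void)
{
  scm_with_guile (example_in_guile, NULL);
}
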
void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *result;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  if (t->guile_mode)
    {
      SCM_I_CURRENT_THREAD->guile_mode = 0;
      result = with_gc_inactive (func, data);
      SCM_I_CURRENT_THREAD->guile_mode = 1;
    }
  else
    /* Otherwise we're not in guile mode, so nothing to do.  */
    result = func (data);

  return result;
}


/*** Thread creation */

typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;

static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}

static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}

SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
            (SCM thunk, SCM handler),
            "Call @code{thunk} in a new thread and with a new dynamic state,\n"
            "returning a new thread object representing the thread.  The procedure\n"
            "@var{thunk} is called via @code{with-continuation-barrier}.\n"
            "\n"
            "When @var{handler} is specified, then @var{thunk} is called from\n"
            "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
            "handler.  This catch is established inside the continuation barrier.\n"
            "\n"
            "Once @var{thunk} or @var{handler} returns, the return value is made\n"
            "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
              handler, SCM_ARG2, FUNC_NAME);

  GC_collect_a_little ();
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME

typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;

static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}

static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}

SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }

  while (scm_is_false (data.thread))
    scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);

  scm_i_pthread_mutex_unlock (&data.mutex);

  assert (SCM_I_IS_THREAD (data.thread));

  return data.thread;
}

MD
1150SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1151 (),
1152"Move the calling thread to the end of the scheduling queue.")
1153#define FUNC_NAME s_scm_yield
1154{
9de87eea 1155 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1156}
1157#undef FUNC_NAME
1158
2e77f720
LC
1159SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1160 (SCM thread),
1161"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1162"cannot be the current thread, and if @var{thread} has already terminated or "
1163"been signaled to terminate, this function is a no-op.")
1164#define FUNC_NAME s_scm_cancel_thread
1165{
1166 scm_i_thread *t = NULL;
1167
1168 SCM_VALIDATE_THREAD (1, thread);
1169 t = SCM_I_THREAD_DATA (thread);
86a597f8 1170 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1171 if (!t->canceled)
1172 {
1173 t->canceled = 1;
86a597f8 1174 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1175 scm_i_pthread_cancel (t->pthread);
1176 }
1177 else
86a597f8 1178 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1179
1180 return SCM_UNSPECIFIED;
1181}
1182#undef FUNC_NAME
1183
1184SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1185 (SCM thread, SCM proc),
1186"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1187"This handler will be called when the thread exits.")
1188#define FUNC_NAME s_scm_set_thread_cleanup_x
1189{
1190 scm_i_thread *t;
1191
1192 SCM_VALIDATE_THREAD (1, thread);
1193 if (!scm_is_false (proc))
1194 SCM_VALIDATE_THUNK (2, proc);
1195
2e77f720 1196 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1197 scm_i_pthread_mutex_lock (&t->admin_mutex);
1198
2e77f720
LC
1199 if (!(t->exited || t->canceled))
1200 t->cleanup_handler = proc;
1201
86a597f8 1202 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1203
1204 return SCM_UNSPECIFIED;
1205}
1206#undef FUNC_NAME
1207
1208SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1209 (SCM thread),
1210"Return the cleanup handler installed for the thread @var{thread}.")
1211#define FUNC_NAME s_scm_thread_cleanup
1212{
1213 scm_i_thread *t;
1214 SCM ret;
1215
1216 SCM_VALIDATE_THREAD (1, thread);
1217
2e77f720 1218 t = SCM_I_THREAD_DATA (thread);
86a597f8 1219 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1220 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1221 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1222
1223 return ret;
1224}
1225#undef FUNC_NAME
1226
6180e336
NJ
1227SCM scm_join_thread (SCM thread)
1228{
1229 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1230}
1231
1232SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1233 (SCM thread, SCM timeout, SCM timeoutval),
d823b11b
MV
1234"Suspend execution of the calling thread until the target @var{thread} "
1235"terminates, unless the target @var{thread} has already terminated. ")
6180e336 1236#define FUNC_NAME s_scm_join_thread_timed
5f05c406 1237{
9de87eea 1238 scm_i_thread *t;
6180e336
NJ
1239 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1240 SCM res = SCM_BOOL_F;
1241
1242 if (! (SCM_UNBNDP (timeoutval)))
1243 res = timeoutval;
d823b11b
MV
1244
1245 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1246 if (scm_is_eq (scm_current_thread (), thread))
2e77f720 1247 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
d823b11b 1248
9de87eea 1249 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1250 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1251
6180e336
NJ
1252 if (! SCM_UNBNDP (timeout))
1253 {
1254 to_timespec (timeout, &ctimeout);
1255 timeout_ptr = &ctimeout;
1256 }
1257
1258 if (t->exited)
1259 res = t->result;
1260 else
d823b11b 1261 {
9de87eea
MV
1262 while (1)
1263 {
74926120 1264 int err = block_self (t->join_queue, thread, &t->admin_mutex,
6180e336
NJ
1265 timeout_ptr);
1266 if (err == 0)
1267 {
1268 if (t->exited)
1269 {
1270 res = t->result;
1271 break;
1272 }
1273 }
1274 else if (err == ETIMEDOUT)
9de87eea 1275 break;
6180e336 1276
86a597f8 1277 scm_i_pthread_mutex_unlock (&t->admin_mutex);
9de87eea 1278 SCM_TICK;
86a597f8 1279 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
21346c4f
NJ
1280
1281 /* Check for exit again, since we just released and
1282 reacquired the admin mutex, before the next block_self
1283 call (which would block forever if t has already
1284 exited). */
1285 if (t->exited)
1286 {
1287 res = t->result;
1288 break;
1289 }
9de87eea 1290 }
d823b11b 1291 }
9de87eea 1292
86a597f8 1293 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 1294
d823b11b 1295 return res;
5f05c406
MV
1296}
1297#undef FUNC_NAME
1298
6180e336
NJ
1299SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1300 (SCM obj),
1301 "Return @code{#t} if @var{obj} is a thread.")
1302#define FUNC_NAME s_scm_thread_p
1303{
1304 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1305}
1306#undef FUNC_NAME
5f05c406 1307
4079f87e 1308
9de87eea
MV
1309static size_t
1310fat_mutex_free (SCM mx)
76da80e7 1311{
9de87eea
MV
1312 fat_mutex *m = SCM_MUTEX_DATA (mx);
1313 scm_i_pthread_mutex_destroy (&m->lock);
76da80e7
MV
1314 return 0;
1315}
1316
1317static int
9de87eea 1318fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1319{
9de87eea
MV
1320 fat_mutex *m = SCM_MUTEX_DATA (mx);
1321 scm_puts ("#<mutex ", port);
1322 scm_uintprint ((scm_t_bits)m, 16, port);
1323 scm_puts (">", port);
1324 return 1;
76da80e7
MV
1325}
1326
76da80e7 1327static SCM
6180e336 1328make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1329{
9de87eea
MV
1330 fat_mutex *m;
1331 SCM mx;
1332
1333 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1334 scm_i_pthread_mutex_init (&m->lock, NULL);
1335 m->owner = SCM_BOOL_F;
adc085f1 1336 m->level = 0;
6180e336 1337
adc085f1 1338 m->recursive = recursive;
6180e336
NJ
1339 m->unchecked_unlock = unchecked_unlock;
1340 m->allow_external_unlock = external_unlock;
1341
9de87eea
MV
1342 m->waiting = SCM_EOL;
1343 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1344 m->waiting = make_queue ();
1345 return mx;
76da80e7
MV
1346}
1347
6180e336
NJ
1348SCM scm_make_mutex (void)
1349{
1350 return scm_make_mutex_with_flags (SCM_EOL);
1351}
1352
2a1d0688
NJ
1353SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1354SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1355SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1356
1357SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1358 (SCM flags),
9de87eea 1359 "Create a new mutex. ")
6180e336 1360#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1361{
6180e336
NJ
1362 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1363
1364 SCM ptr = flags;
1365 while (! scm_is_null (ptr))
1366 {
1367 SCM flag = SCM_CAR (ptr);
1368 if (scm_is_eq (flag, unchecked_unlock_sym))
1369 unchecked_unlock = 1;
1370 else if (scm_is_eq (flag, allow_external_unlock_sym))
1371 external_unlock = 1;
1372 else if (scm_is_eq (flag, recursive_sym))
1373 recursive = 1;
74926120 1374 else
2a1d0688 1375 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1376 ptr = SCM_CDR (ptr);
1377 }
1378 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
76da80e7
MV
1379}
1380#undef FUNC_NAME
1381
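/* Illustrative sketch, not part of the original file: requesting mutex
   flavours from C by passing flag symbols to scm_make_mutex_with_flags,
   mirroring (make-mutex 'recursive 'allow-external-unlock) at the Scheme
   level.  The function name is hypothetical.  */
static SCM
example_make_recursive_external_mutex (void)
{
  return scm_make_mutex_with_flags
    (scm_list_2 (scm_from_utf8_symbol ("recursive"),
                 scm_from_utf8_symbol ("allow-external-unlock")));
}
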
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
            (void),
            "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME

SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");

static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
        {
          m->owner = new_owner;
          m->level++;

          if (SCM_I_IS_THREAD (new_owner))
            {
              scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);

              /* FIXME: The order in which `t->admin_mutex' and
                 `m->lock' are taken differs from that in
                 `on_thread_exit', potentially leading to deadlocks.  */
              scm_i_pthread_mutex_lock (&t->admin_mutex);

              /* Only keep a weak reference to MUTEX so that it's not
                 retained when not referenced elsewhere (bug #27450).
                 The weak pair itself is eventually removed when MUTEX
                 is unlocked.  Note that `t->mutexes' lists mutexes
                 currently held by T, so it should be small.  */
              t->mutexes = scm_weak_car_pair (mutex, t->mutexes);

              scm_i_pthread_mutex_unlock (&t->admin_mutex);
            }
          *ret = 1;
          break;
        }
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
        {
          m->owner = new_owner;
          err = scm_cons (scm_abandoned_mutex_error_key,
                          scm_from_locale_string ("lock obtained on abandoned "
                                                  "mutex"));
          *ret = 1;
          break;
        }
      else if (scm_is_eq (m->owner, new_owner))
        {
          if (m->recursive)
            {
              m->level++;
              *ret = 1;
            }
          else
            {
              err = scm_cons (scm_misc_error_key,
                              scm_from_locale_string ("mutex already locked "
                                                      "by thread"));
              *ret = 0;
            }
          break;
        }
      else
        {
          if (timeout != NULL)
            {
              gettimeofday (&current_time, NULL);
              if (current_time.tv_sec > timeout->tv_sec ||
                  (current_time.tv_sec == timeout->tv_sec &&
                   current_time.tv_usec * 1000 > timeout->tv_nsec))
                {
                  *ret = 0;
                  break;
                }
            }
          block_self (m->waiting, mutex, &m->lock, timeout);
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}

SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
            (SCM m, SCM timeout, SCM owner),
            "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
            "thread blocks until the mutex becomes available. The function\n"
            "returns when the calling thread owns the lock on @var{m}.\n"
            "Locking a mutex that a thread already owns will succeed right\n"
            "away and will not block the thread. That is, Guile's mutexes\n"
            "are @emph{recursive}.")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
    SCM_VALIDATE_THREAD (3, owner);

  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

static void
lock_mutex_return_void (SCM mx)
{
  (void) scm_lock_mutex (mx);
}

static void
unlock_mutex_return_void (SCM mx)
{
  (void) scm_unlock_mutex (mx);
}

void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}

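/* Illustrative sketch, not part of the original file: the usual way to use
   scm_dynwind_lock_mutex, so that the mutex is released even if the code
   in between throws or a continuation unwinds through it.  The function
   name is hypothetical.  */
static void
example_with_mutex_held (SCM mutex)
{
  scm_dynwind_begin (0);
  scm_dynwind_lock_mutex (mutex);

  /* ... code that may throw; the unwind handler unlocks MUTEX ... */

  scm_dynwind_end ();
}
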
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
            (SCM mutex),
            "Try to lock @var{mutex}. If the mutex is already locked by someone "
            "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

/*** Fat condition variables */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))

static int
fat_mutex_unlock (SCM mutex, SCM cond,
                  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
        {
          if (!m->unchecked_unlock)
            {
              scm_i_pthread_mutex_unlock (&m->lock);
              scm_misc_error (NULL, "mutex not locked", SCM_EOL);
            }
          owner = t->handle;
        }
      else if (!m->allow_external_unlock)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
        }
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
        {
          int brk = 0;

          if (m->level > 0)
            m->level--;
          if (m->level == 0)
            {
              /* Change the owner of MUTEX.  */
              t->mutexes = scm_delq_x (mutex, t->mutexes);
              m->owner = unblock_from_queue (m->waiting);
            }

          t->block_asyncs++;

          err = block_self (c->waiting, cond, &m->lock, waittime);
          scm_i_pthread_mutex_unlock (&m->lock);

          if (err == 0)
            {
              ret = 1;
              brk = 1;
            }
          else if (err == ETIMEDOUT)
            {
              ret = 0;
              brk = 1;
            }
          else if (err != EINTR)
            {
              errno = err;
              scm_syserror (NULL);
            }

          if (brk)
            {
              if (relock)
                scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
              t->block_asyncs--;
              break;
            }

          t->block_asyncs--;
          scm_async_click ();

          scm_remember_upto_here_2 (cond, mutex);

          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  else
    {
      if (m->level > 0)
        m->level--;
      if (m->level == 0)
        {
          /* Change the owner of MUTEX.  */
          t->mutexes = scm_delq_x (mutex, t->mutexes);
          m->owner = unblock_from_queue (m->waiting);
        }

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}

SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
            (SCM mx, SCM cond, SCM timeout),
            "Unlocks @var{mutex} if the calling thread owns the lock on "
            "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
            "thread results in undefined behaviour.  Once a mutex has been unlocked, "
            "one thread blocked on @var{mutex} is awakened and grabs the mutex "
            "lock.  Every call to @code{lock-mutex} by this thread must be matched "
            "with a call to @code{unlock-mutex}.  Only the last call to "
            "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
        {
          to_timespec (timeout, &cwaittime);
          waittime = &cwaittime;
        }
    }

  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a mutex.")
#define FUNC_NAME s_scm_mutex_p
{
  return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
            (SCM mx),
            "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM owner;
  fat_mutex *m = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_lock (&m->lock);
  owner = m->owner;
  scm_i_pthread_mutex_unlock (&m->lock);

  return owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
            (SCM mx),
            "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
            (SCM mx),
            "Return @code{#t} if the mutex @var{mx} is locked.")
#define FUNC_NAME s_scm_mutex_locked_p
{
  SCM_VALIDATE_MUTEX (1, mx);
  return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}

SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME

SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
            (SCM cv, SCM mx, SCM t),
            "Wait until condition variable @var{cv} has been signalled.  While waiting, "
            "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
            "is locked again when this function returns.  When @var{t} is given, "
            "it specifies a point in time where the waiting should be aborted.  It "
            "can be either an integer as returned by @code{current-time} or a pair "
            "as returned by @code{gettimeofday}.  When the waiting is aborted the "
            "mutex is locked and @code{#f} is returned.  When the condition "
            "variable is in fact signalled, the mutex is also locked and @code{#t} "
            "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

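/* Illustrative sketch, not part of the original file: the canonical
   wait-condition-variable loop, written from C.  The predicate is
   rechecked after every wakeup because being signalled does not guarantee
   that the condition still holds once the mutex has been reacquired.  The
   names are hypothetical.  */
static void
example_wait_until_ready (SCM mutex, SCM condvar, int *ready)
{
  scm_lock_mutex (mutex);
  while (!*ready)
    scm_timed_wait_condition_variable (condvar, mutex, SCM_UNDEFINED);
  scm_unlock_mutex (mutex);
}
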
9de87eea
MV
1809static void
1810fat_cond_signal (fat_cond *c)
1811{
9de87eea 1812 unblock_from_queue (c->waiting);
9de87eea
MV
1813}
1814
d823b11b
MV
1815SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1816 (SCM cv),
1817 "Wake up one thread that is waiting for @var{cv}")
1818#define FUNC_NAME s_scm_signal_condition_variable
5f05c406 1819{
d823b11b 1820 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1821 fat_cond_signal (SCM_CONDVAR_DATA (cv));
d823b11b 1822 return SCM_BOOL_T;
5f05c406 1823}
d823b11b 1824#undef FUNC_NAME
5f05c406 1825
9de87eea
MV
1826static void
1827fat_cond_broadcast (fat_cond *c)
1828{
9de87eea
MV
1829 while (scm_is_true (unblock_from_queue (c->waiting)))
1830 ;
9de87eea
MV
1831}
1832
d823b11b
MV
1833SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1834 (SCM cv),
1835 "Wake up all threads that are waiting for @var{cv}. ")
1836#define FUNC_NAME s_scm_broadcast_condition_variable
5f05c406 1837{
d823b11b 1838 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1839 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
d823b11b 1840 return SCM_BOOL_T;
5f05c406 1841}
d823b11b 1842#undef FUNC_NAME
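
/* Companion sketch to the wait loop above: the signalling side updates the
   shared state under the same mutex, then wakes every waiter.  SET_READY
   is a hypothetical mutator of that state.  */
static void
example_mark_ready (SCM cv, SCM mx, void (*set_ready) (void))
{
  scm_lock_mutex (mx);
  set_ready ();
  scm_unlock_mutex (mx);
  scm_broadcast_condition_variable (cv);
}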
5f05c406 1843
6180e336
NJ
1844SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1845 (SCM obj),
1846 "Return @code{#t} if @var{obj} is a condition variable.")
1847#define FUNC_NAME s_scm_condition_variable_p
1848{
 1849 return SCM_CONDVARP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1850}
1851#undef FUNC_NAME
1852
6087fad9 1853
8c2b3143 1854\f
d823b11b
MV
1855/*** Select */
1856
8c2b3143
LC
1857struct select_args
1858{
1859 int nfds;
6ab4de61
AW
1860 fd_set *read_fds;
1861 fd_set *write_fds;
1862 fd_set *except_fds;
8c2b3143
LC
1863 struct timeval *timeout;
1864
1865 int result;
1866 int errno_value;
1867};
1868
1869static void *
1870do_std_select (void *args)
1871{
1872 struct select_args *select_args;
1873
1874 select_args = (struct select_args *) args;
1875
1876 select_args->result =
1877 select (select_args->nfds,
1878 select_args->read_fds, select_args->write_fds,
1879 select_args->except_fds, select_args->timeout);
1880 select_args->errno_value = errno;
1881
1882 return NULL;
1883}
1884
911782b7 1885int
9de87eea 1886scm_std_select (int nfds,
6ab4de61
AW
1887 fd_set *readfds,
1888 fd_set *writefds,
1889 fd_set *exceptfds,
9de87eea
MV
1890 struct timeval *timeout)
1891{
1892 fd_set my_readfds;
1893 int res, eno, wakeup_fd;
1894 scm_i_thread *t = SCM_I_CURRENT_THREAD;
8c2b3143 1895 struct select_args args;
9de87eea
MV
1896
1897 if (readfds == NULL)
1898 {
1899 FD_ZERO (&my_readfds);
1900 readfds = &my_readfds;
1901 }
1902
1903 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1904 SCM_TICK;
1905
1906 wakeup_fd = t->sleep_pipe[0];
9de87eea
MV
1907 FD_SET (wakeup_fd, readfds);
1908 if (wakeup_fd >= nfds)
 1909 nfds = wakeup_fd + 1;
9de87eea 1910
8c2b3143
LC
1911 args.nfds = nfds;
1912 args.read_fds = readfds;
1913 args.write_fds = writefds;
1914 args.except_fds = exceptfds;
1915 args.timeout = timeout;
1916
1917 /* Explicitly cooperate with the GC. */
1918 scm_without_guile (do_std_select, &args);
1919
1920 res = args.result;
1921 eno = args.errno_value;
1922
1923 t->sleep_fd = -1;
9de87eea
MV
1924 scm_i_reset_sleep (t);
1925
1926 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1927 {
1928 char dummy;
634aa8de
LC
1929 full_read (wakeup_fd, &dummy, 1);
1930
9de87eea
MV
1931 FD_CLR (wakeup_fd, readfds);
1932 res -= 1;
1933 if (res == 0)
1934 {
1935 eno = EINTR;
1936 res = -1;
1937 }
1938 }
d823b11b
MV
1939 errno = eno;
1940 return res;
5f05c406
MV
1941}
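
/* Illustrative sketch: wait for FD to become readable for at most SECS
   seconds while remaining interruptible through the thread's sleep pipe,
   as scm_std_select arranges above.  Returns 1 if FD is readable, 0 on
   timeout, and -1 on error with errno set.  */
static int
example_wait_readable (int fd, long secs)
{
  fd_set readfds;
  struct timeval tv;
  int res;

  FD_ZERO (&readfds);
  FD_SET (fd, &readfds);
  tv.tv_sec = secs;
  tv.tv_usec = 0;

  res = scm_std_select (fd + 1, &readfds, NULL, NULL, &tv);
  if (res < 0)
    return -1;                  /* error or interruption; errno is set */
  return FD_ISSET (fd, &readfds) ? 1 : 0;
}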
1942
9de87eea 1943/* Convenience API for blocking while in guile mode. */
76da80e7 1944
9de87eea 1945#if SCM_USE_PTHREAD_THREADS
92e64b87 1946
2956b071
LC
1947/* It seems reasonable not to run procedures related to mutexes and condition
 1948 variables within `GC_do_blocking ()' since (i) the GC can operate even
 1949 without it, and (ii) the only potential gain would be reduced GC latency. See
1950 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1951 for a discussion of the pros and cons. */
1952
9bc4701c 1953int
9de87eea 1954scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
9bc4701c 1955{
9de87eea 1956 int res = scm_i_pthread_mutex_lock (mutex);
9bc4701c
MD
1957 return res;
1958}
1959
9de87eea 1960static void
2b829bbb 1961do_unlock (void *data)
28d52ebb 1962{
9de87eea 1963 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
28d52ebb
MD
1964}
1965
1966void
661ae7ab 1967scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
28d52ebb 1968{
9de87eea 1969 scm_i_scm_pthread_mutex_lock (mutex);
2b829bbb 1970 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
28d52ebb
MD
1971}
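
/* Illustrative sketch: with scm_dynwind_pthread_mutex_lock the mutex is
   released again when the dynwind context is left, even on a non-local
   exit such as an error.  LOCK is a hypothetical mutex owned and
   initialized by the caller; THUNK stands for arbitrary guarded work.  */
static void
example_with_lock_held (scm_i_pthread_mutex_t *lock, void (*thunk) (void))
{
  scm_dynwind_begin (0);
  scm_dynwind_pthread_mutex_lock (lock);
  thunk ();   /* may throw; the unwind handler still unlocks LOCK */
  scm_dynwind_end ();
}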
1972
9bc4701c 1973int
9de87eea 1974scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
9bc4701c 1975{
4cf72f0b
LC
1976 int res;
1977 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1978
1979 t->held_mutex = mutex;
1980 res = scm_i_pthread_cond_wait (cond, mutex);
1981 t->held_mutex = NULL;
1982
9bc4701c
MD
1983 return res;
1984}
9bc4701c 1985
76da80e7 1986int
9de87eea
MV
1987scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1988 scm_i_pthread_mutex_t *mutex,
1989 const scm_t_timespec *wt)
76da80e7 1990{
4cf72f0b
LC
1991 int res;
1992 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1993
1994 t->held_mutex = mutex;
1995 res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1996 t->held_mutex = NULL;
1997
9de87eea 1998 return res;
76da80e7
MV
1999}
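
/* Illustrative sketch: a C-level wait loop built on the wrappers above.
   Calling scm_pthread_cond_wait rather than pthread_cond_wait directly
   lets Guile record the held mutex while the thread sleeps.  LOCK, COND
   and FLAG are hypothetical and owned by the caller, with FLAG protected
   by LOCK.  */
static void
example_wait_for_flag (scm_i_pthread_mutex_t *lock,
                       scm_i_pthread_cond_t *cond,
                       int *flag)
{
  scm_pthread_mutex_lock (lock);
  while (!*flag)
    scm_pthread_cond_wait (cond, lock);
  scm_i_pthread_mutex_unlock (lock);
}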
2000
9de87eea 2001#endif
76da80e7 2002
e676a4c3
MW
2003static void
2004do_unlock_with_asyncs (void *data)
2005{
2006 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
2007 SCM_I_CURRENT_THREAD->block_asyncs--;
2008}
2009
2010void
2011scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t *mutex)
2012{
2013 SCM_I_CURRENT_THREAD->block_asyncs++;
2014 scm_i_scm_pthread_mutex_lock (mutex);
2015 scm_dynwind_unwind_handler (do_unlock_with_asyncs, mutex,
2016 SCM_F_WIND_EXPLICITLY);
2017}
2018
d823b11b 2019unsigned long
9de87eea 2020scm_std_usleep (unsigned long usecs)
5f05c406 2021{
d823b11b
MV
2022 struct timeval tv;
2023 tv.tv_usec = usecs % 1000000;
2024 tv.tv_sec = usecs / 1000000;
9de87eea
MV
2025 scm_std_select (0, NULL, NULL, NULL, &tv);
2026 return tv.tv_sec * 1000000 + tv.tv_usec;
5f05c406
MV
2027}
2028
9de87eea
MV
2029unsigned int
2030scm_std_sleep (unsigned int secs)
6c214b62 2031{
d823b11b
MV
2032 struct timeval tv;
2033 tv.tv_usec = 0;
2034 tv.tv_sec = secs;
9de87eea 2035 scm_std_select (0, NULL, NULL, NULL, &tv);
d823b11b 2036 return tv.tv_sec;
6c214b62
MD
2037}
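
/* Illustrative sketch: pause for roughly a quarter of a second.  Because
   the sleep goes through scm_std_select, the thread can still be woken
   up early through its sleep pipe.  */
static void
example_quarter_second_pause (void)
{
  scm_std_usleep (250000);
}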
2038
d823b11b
MV
2039/*** Misc */
2040
2041SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
2042 (void),
2043 "Return the thread that called this function.")
2044#define FUNC_NAME s_scm_current_thread
2045{
9de87eea 2046 return SCM_I_CURRENT_THREAD->handle;
d823b11b
MV
2047}
2048#undef FUNC_NAME
2049
9de87eea
MV
2050static SCM
2051scm_c_make_list (size_t n, SCM fill)
2052{
2053 SCM res = SCM_EOL;
2054 while (n-- > 0)
2055 res = scm_cons (fill, res);
2056 return res;
2057}
2058
d823b11b
MV
2059SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
2060 (void),
2061 "Return a list of all threads.")
9bc4701c 2062#define FUNC_NAME s_scm_all_threads
d823b11b 2063{
9de87eea
MV
 2064 /* We cannot allocate while holding the thread_admin_mutex because
2065 of the way GC is done.
2066 */
2067 int n = thread_count;
2068 scm_i_thread *t;
2069 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
d823b11b 2070
9de87eea
MV
2071 scm_i_pthread_mutex_lock (&thread_admin_mutex);
2072 l = &list;
2073 for (t = all_threads; t && n > 0; t = t->next_thread)
2074 {
2e77f720
LC
2075 if (t != scm_i_signal_delivery_thread)
2076 {
2077 SCM_SETCAR (*l, t->handle);
2078 l = SCM_CDRLOC (*l);
2079 }
9de87eea
MV
2080 n--;
2081 }
2082 *l = SCM_EOL;
2083 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
2084 return list;
d823b11b 2085}
9de87eea 2086#undef FUNC_NAME
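
/* Illustrative sketch: counting live threads from C by way of the list
   returned above.  */
static long
example_thread_count (void)
{
  return scm_to_long (scm_length (scm_all_threads ()));
}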
d823b11b
MV
2087
2088SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
2089 (SCM thread),
2090 "Return @code{#t} iff @var{thread} has exited.\n")
2091#define FUNC_NAME s_scm_thread_exited_p
2092{
7888309b 2093 return scm_from_bool (scm_c_thread_exited_p (thread));
d823b11b
MV
2094}
2095#undef FUNC_NAME
2096
911782b7 2097int
d823b11b
MV
2098scm_c_thread_exited_p (SCM thread)
2099#define FUNC_NAME s_scm_thread_exited_p
5f05c406 2100{
9de87eea 2101 scm_i_thread *t;
d823b11b 2102 SCM_VALIDATE_THREAD (1, thread);
9de87eea 2103 t = SCM_I_THREAD_DATA (thread);
d823b11b 2104 return t->exited;
5f05c406 2105}
d823b11b 2106#undef FUNC_NAME
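
/* Illustrative sketch: poll (politely) until THREAD has exited.  Real
   code would normally call scm_join_thread instead; this only
   illustrates the predicate above.  */
static void
example_spin_until_exited (SCM thread)
{
  while (!scm_c_thread_exited_p (thread))
    scm_std_usleep (1000);
}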
5f05c406 2107
d20912e6
LC
2108SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
2109 (void),
2110 "Return the total number of processors of the machine, which\n"
2111 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2112 "thread execution unit, which can be either:\n\n"
2113 "@itemize\n"
2114 "@item an execution core in a (possibly multi-core) chip, in a\n"
2115 " (possibly multi- chip) module, in a single computer, or\n"
2116 "@item a thread execution unit inside a core in the case of\n"
2117 " @dfn{hyper-threaded} CPUs.\n"
2118 "@end itemize\n\n"
2119 "Which of the two definitions is used, is unspecified.\n")
2120#define FUNC_NAME s_scm_total_processor_count
2121{
2122 return scm_from_ulong (num_processors (NPROC_ALL));
2123}
2124#undef FUNC_NAME
2125
2126SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
2127 (void),
2128 "Like @code{total-processor-count}, but return the number of\n"
2129 "processors available to the current process. See\n"
2130 "@code{setaffinity} and @code{getaffinity} for more\n"
2131 "information.\n")
2132#define FUNC_NAME s_scm_current_processor_count
2133{
2134 return scm_from_ulong (num_processors (NPROC_CURRENT));
2135}
2136#undef FUNC_NAME
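
/* Illustrative sketch: derive a default worker-pool size from the number
   of processors available to this process; the result is documented to
   be at least 1.  */
static unsigned long
example_default_pool_size (void)
{
  return scm_to_ulong (scm_current_processor_count ());
}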
2137
2138
2139\f
2140
9de87eea 2141static scm_i_pthread_cond_t wake_up_cond;
9bc4701c
MD
2142static int threads_initialized_p = 0;
2143
9bc4701c 2144
a4d106c7
MV
2145/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2146 */
d1138028 2147scm_i_pthread_mutex_t scm_i_critical_section_mutex;
a4d106c7 2148
661ae7ab 2149static SCM dynwind_critical_section_mutex;
a54a94b3 2150
9bc4701c 2151void
661ae7ab 2152scm_dynwind_critical_section (SCM mutex)
76da80e7 2153{
a4d106c7 2154 if (scm_is_false (mutex))
661ae7ab
MV
2155 mutex = dynwind_critical_section_mutex;
2156 scm_dynwind_lock_mutex (mutex);
2157 scm_dynwind_block_asyncs ();
9de87eea
MV
2158}
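
/* Illustrative sketch: a short critical section over the built-in
   recursive mutex (selected by passing #f), with asyncs blocked for its
   duration.  EXAMPLE_COUNTER is a hypothetical global.  */
static int example_counter;

static void
example_bump_counter (void)
{
  scm_dynwind_begin (0);
  scm_dynwind_critical_section (SCM_BOOL_F);
  example_counter++;
  scm_dynwind_end ();
}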
2159
2160/*** Initialization */
2161
9de87eea
MV
2162scm_i_pthread_mutex_t scm_i_misc_mutex;
2163
d1138028
MV
2164#if SCM_USE_PTHREAD_THREADS
2165pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2166#endif
2167
9de87eea 2168void
12c1d861 2169scm_threads_prehistory (void *base)
9de87eea 2170{
d1138028
MV
2171#if SCM_USE_PTHREAD_THREADS
2172 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
2173 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
2174 PTHREAD_MUTEX_RECURSIVE);
2175#endif
2176
2177 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
2178 scm_i_pthread_mutexattr_recursive);
9de87eea
MV
2179 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
2180 scm_i_pthread_cond_init (&wake_up_cond, NULL);
74926120 2181
12c1d861 2182 guilify_self_1 ((struct GC_stack_base *) base);
9bc4701c
MD
2183}
2184
d823b11b
MV
2185scm_t_bits scm_tc16_thread;
2186scm_t_bits scm_tc16_mutex;
2187scm_t_bits scm_tc16_condvar;
7bfd3b9e 2188
7bfd3b9e 2189void
9de87eea 2190scm_init_threads ()
7bfd3b9e 2191{
9de87eea 2192 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
d823b11b 2193 scm_set_smob_print (scm_tc16_thread, thread_print);
d823b11b 2194
9de87eea 2195 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
9de87eea
MV
2196 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
2197 scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
9bc4701c 2198
9de87eea
MV
2199 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
2200 sizeof (fat_cond));
9de87eea 2201 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
d823b11b 2202
9de87eea
MV
2203 scm_i_default_dynamic_state = SCM_BOOL_F;
2204 guilify_self_2 (SCM_BOOL_F);
9bc4701c 2205 threads_initialized_p = 1;
a4d106c7 2206
f39448c5 2207 dynwind_critical_section_mutex = scm_make_recursive_mutex ();
7bfd3b9e 2208}
89e00824 2209
5f05c406 2210void
9de87eea 2211scm_init_threads_default_dynamic_state ()
5f05c406 2212{
9de87eea 2213 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
f39448c5 2214 scm_i_default_dynamic_state = state;
5f05c406
MV
2215}
2216
d823b11b 2217void
9de87eea 2218scm_init_thread_procs ()
d823b11b 2219{
9de87eea 2220#include "libguile/threads.x"
d823b11b
MV
2221}
2222
3c13664e
LC
2223\f
2224/* IA64-specific things. */
2225
2226#ifdef __ia64__
2227# ifdef __hpux
2228# include <sys/param.h>
2229# include <sys/pstat.h>
2230void *
2231scm_ia64_register_backing_store_base (void)
2232{
2233 struct pst_vm_status vm_status;
2234 int i = 0;
2235 while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
2236 if (vm_status.pst_type == PS_RSESTACK)
2237 return (void *) vm_status.pst_vaddr;
2238 abort ();
2239}
2240void *
2241scm_ia64_ar_bsp (const void *ctx)
2242{
2243 uint64_t bsp;
2244 __uc_get_ar_bsp (ctx, &bsp);
2245 return (void *) bsp;
2246}
2247# endif /* hpux */
2248# ifdef linux
2249# include <ucontext.h>
2250void *
2251scm_ia64_register_backing_store_base (void)
2252{
2253 extern void *__libc_ia64_register_backing_store_base;
2254 return __libc_ia64_register_backing_store_base;
2255}
2256void *
2257scm_ia64_ar_bsp (const void *opaque)
2258{
2259 const ucontext_t *ctx = opaque;
2260 return (void *) ctx->uc_mcontext.sc_ar_bsp;
2261}
2262# endif /* linux */
ba20d262
AW
2263# ifdef __FreeBSD__
2264# include <ucontext.h>
2265void *
2266scm_ia64_register_backing_store_base (void)
2267{
2268 return (void *)0x8000000000000000;
2269}
2270void *
2271scm_ia64_ar_bsp (const void *opaque)
2272{
2273 const ucontext_t *ctx = opaque;
2274 return (void *)(ctx->uc_mcontext.mc_special.bspstore
2275 + ctx->uc_mcontext.mc_special.ndirty);
2276}
2277# endif /* __FreeBSD__ */
3c13664e
LC
2278#endif /* __ia64__ */
2279
2280
89e00824
ML
2281/*
2282 Local Variables:
2283 c-file-style: "gnu"
2284 End:
2285*/