libguile/threads.c
b3da54d1 1/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
bc8e6d7d
MW
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013,
3 * 2014 Free Software Foundation, Inc.
74926120 4 *
73be1d9e 5 * This library is free software; you can redistribute it and/or
53befeb7
NJ
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
7bfd3b9e 9 *
53befeb7
NJ
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
73be1d9e
MV
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
7bfd3b9e 14 *
73be1d9e
MV
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
53befeb7
NJ
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301 USA
73be1d9e 19 */
1bbd0b84 20
1bbd0b84 21
7bfd3b9e 22\f
dbb605f5
LC
23#ifdef HAVE_CONFIG_H
24# include <config.h>
25#endif
7bfd3b9e 26
1c44468d 27#include "libguile/bdw-gc.h"
7af0c3b3 28#include <gc/gc_mark.h>
1810dc4e
RB
29#include "libguile/_scm.h"
30
b3da54d1 31#include <stdlib.h>
d823b11b
MV
32#include <unistd.h>
33#include <stdio.h>
8ab3d8a0
KR
34
35#ifdef HAVE_STRING_H
36#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
37#endif
38
fcc5d734 39#if HAVE_SYS_TIME_H
d823b11b 40#include <sys/time.h>
fcc5d734 41#endif
5f05c406 42
c1d5d6d7
AW
43#if HAVE_PTHREAD_NP_H
44# include <pthread_np.h>
45#endif
46
6ab4de61
AW
47#include <sys/select.h>
48
f57fdf07 49#include <assert.h>
32f94bf2 50#include <fcntl.h>
d20912e6 51#include <nproc.h>
f57fdf07 52
d823b11b
MV
53#include "libguile/validate.h"
54#include "libguile/root.h"
55#include "libguile/eval.h"
56#include "libguile/async.h"
57#include "libguile/ports.h"
58#include "libguile/threads.h"
a0599745 59#include "libguile/dynwind.h"
d823b11b 60#include "libguile/iselect.h"
9de87eea
MV
61#include "libguile/fluids.h"
62#include "libguile/continuations.h"
2b829bbb 63#include "libguile/gc.h"
aef1fcf9 64#include "libguile/gc-inline.h"
9de87eea 65#include "libguile/init.h"
2e77f720 66#include "libguile/scmsigs.h"
6180e336 67#include "libguile/strings.h"
5f18bc84 68#include "libguile/vm.h"
7bfd3b9e 69
634aa8de
LC
70#include <full-read.h>
71
cde24ce1
AW
72
73\f
7af0c3b3
AW
74
75/* The GC "kind" for threads that allow them to mark their VM
76 stacks. */
77static int thread_gc_kind;
78
79static struct GC_ms_entry *
80thread_mark (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
81 struct GC_ms_entry *mark_stack_limit, GC_word env)
82{
83 int word;
84 const struct scm_i_thread *t = (struct scm_i_thread *) addr;
85
86 if (SCM_UNPACK (t->handle) == 0)
87 /* T must be on the free-list; ignore. (See warning in
88 gc_mark.h.) */
89 return mark_stack_ptr;
90
5f18bc84 91 /* Mark T. We could be more precise, but it doesn't matter. */
7af0c3b3
AW
92 for (word = 0; word * sizeof (*addr) < sizeof (*t); word++)
93 mark_stack_ptr = GC_MARK_AND_PUSH ((void *) addr[word],
94 mark_stack_ptr, mark_stack_limit,
95 NULL);
96
aef1fcf9
AW
97 /* The pointerless freelists are threaded through their first word,
98 but GC doesn't know to trace them (as they are pointerless), so we
99 need to do that here. See the comments at the top of libgc's
100 gc_inline.h. */
c65ea594
AW
101 if (t->pointerless_freelists)
102 {
103 size_t n;
104 for (n = 0; n < SCM_INLINE_GC_FREELIST_COUNT; n++)
105 {
106 void *chain = t->pointerless_freelists[n];
107 if (chain)
108 {
109 /* The first link is already marked by the freelist vector,
110 so we just have to mark the tail. */
111 while ((chain = *(void **)chain))
112 mark_stack_ptr = GC_mark_and_push (chain, mark_stack_ptr,
113 mark_stack_limit, NULL);
114 }
115 }
116 }
aef1fcf9 117
5f18bc84
AW
118 if (t->vp)
119 mark_stack_ptr = scm_i_vm_mark_stack (t->vp, mark_stack_ptr,
120 mark_stack_limit);
121
7af0c3b3
AW
122 return mark_stack_ptr;
123}
124
125
126\f
6180e336
NJ
127static void
128to_timespec (SCM t, scm_t_timespec *waittime)
129{
130 if (scm_is_pair (t))
131 {
132 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
133 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
134 }
135 else
136 {
137 double time = scm_to_double (t);
138 double sec = scm_c_truncate (time);
139
140 waittime->tv_sec = (long) sec;
2a1d0688 141 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
6180e336
NJ
142 }
143}
144
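/* Illustrative usage (editor's sketch, not part of the original
   source): convert a Scheme timeout into a timespec before waiting.

     scm_t_timespec waittime;
     to_timespec (scm_from_double (2.5), &waittime);
     -- waittime.tv_sec == 2, waittime.tv_nsec == 500000000

   A pair (seconds . microseconds), as returned by `gettimeofday',
   is handled by the first branch above: (5 . 250000) gives
   tv_sec == 5 and tv_nsec == 250000000.  */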
7af0c3b3 145
c058db8a 146\f
d823b11b 147/*** Queues */
7bfd3b9e 148
c058db8a
LC
149/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
150 the risk of false references leading to unbounded retained space as
151 described in "Bounding Space Usage of Conservative Garbage Collectors",
152 H.J. Boehm, 2001. */
153
9de87eea
MV
154/* Make an empty queue data structure.
155 */
d823b11b
MV
156static SCM
157make_queue ()
158{
159 return scm_cons (SCM_EOL, SCM_EOL);
160}
7bfd3b9e 161
9de87eea
MV
162/* Put T at the back of Q and return a handle that can be used with
163 remqueue to remove T from Q again.
164 */
d823b11b
MV
165static SCM
166enqueue (SCM q, SCM t)
167{
168 SCM c = scm_cons (t, SCM_EOL);
d2a51087 169 SCM_CRITICAL_SECTION_START;
d2e53ed6 170 if (scm_is_null (SCM_CDR (q)))
d823b11b
MV
171 SCM_SETCDR (q, c);
172 else
173 SCM_SETCDR (SCM_CAR (q), c);
174 SCM_SETCAR (q, c);
d2a51087 175 SCM_CRITICAL_SECTION_END;
d823b11b
MV
176 return c;
177}
7bfd3b9e 178
9de87eea
MV
179/* Remove the element that the handle C refers to from the queue Q. C
180 must have been returned from a call to enqueue. The return value
181 is zero when the element referred to by C has already been removed.
182 Otherwise, 1 is returned.
183*/
184static int
d823b11b
MV
185remqueue (SCM q, SCM c)
186{
187 SCM p, prev = q;
d2a51087 188 SCM_CRITICAL_SECTION_START;
d2e53ed6 189 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
d823b11b 190 {
bc36d050 191 if (scm_is_eq (p, c))
d823b11b 192 {
bc36d050 193 if (scm_is_eq (c, SCM_CAR (q)))
10454601 194 SCM_SETCAR (q, scm_is_eq (prev, q) ? SCM_EOL : prev);
d823b11b 195 SCM_SETCDR (prev, SCM_CDR (c));
c058db8a
LC
196
197 /* GC-robust */
198 SCM_SETCDR (c, SCM_EOL);
199
d2a51087 200 SCM_CRITICAL_SECTION_END;
9de87eea 201 return 1;
d823b11b
MV
202 }
203 prev = p;
204 }
d2a51087 205 SCM_CRITICAL_SECTION_END;
9de87eea 206 return 0;
d823b11b
MV
207}
208
9de87eea
MV
209/* Remove the front-most element from the queue Q and return it.
210 Return SCM_BOOL_F when Q is empty.
211*/
d823b11b
MV
212static SCM
213dequeue (SCM q)
214{
d2a51087
NJ
215 SCM c;
216 SCM_CRITICAL_SECTION_START;
217 c = SCM_CDR (q);
d2e53ed6 218 if (scm_is_null (c))
d2a51087
NJ
219 {
220 SCM_CRITICAL_SECTION_END;
221 return SCM_BOOL_F;
222 }
d823b11b
MV
223 else
224 {
225 SCM_SETCDR (q, SCM_CDR (c));
d2e53ed6 226 if (scm_is_null (SCM_CDR (q)))
d823b11b 227 SCM_SETCAR (q, SCM_EOL);
d2a51087 228 SCM_CRITICAL_SECTION_END;
c058db8a
LC
229
230 /* GC-robust */
231 SCM_SETCDR (c, SCM_EOL);
232
d823b11b
MV
233 return SCM_CAR (c);
234 }
235}
7bfd3b9e 236
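/* Illustrative note (editor's sketch, not part of the original
   source): a queue is a cons cell whose cdr holds the list of
   elements, oldest first, and whose car points at the last pair of
   that list, so that enqueue can append in O(1).  With hypothetical
   elements A and B:

     SCM q = make_queue ();   -- q is (() . ())
     enqueue (q, A);          -- cdr: (A),    car: the pair (A)
     enqueue (q, B);          -- cdr: (A B),  car: the pair (B)
     dequeue (q);             -- returns A;   cdr: (B)
*/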
9de87eea 237/*** Thread smob routines */
76da80e7 238
d823b11b
MV
239
240static int
241thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
242{
23d72566
KR
 243 /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
244 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
245 the struct case, hence we go via a union, and extract according to the
246 size of pthread_t. */
247 union {
248 scm_i_pthread_t p;
249 unsigned short us;
250 unsigned int ui;
251 unsigned long ul;
252 scm_t_uintmax um;
253 } u;
9de87eea 254 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
23d72566
KR
255 scm_i_pthread_t p = t->pthread;
256 scm_t_uintmax id;
257 u.p = p;
258 if (sizeof (p) == sizeof (unsigned short))
259 id = u.us;
260 else if (sizeof (p) == sizeof (unsigned int))
261 id = u.ui;
262 else if (sizeof (p) == sizeof (unsigned long))
263 id = u.ul;
264 else
265 id = u.um;
266
0607ebbf 267 scm_puts_unlocked ("#<thread ", port);
23d72566 268 scm_uintprint (id, 10, port);
0607ebbf 269 scm_puts_unlocked (" (", port);
0345e278 270 scm_uintprint ((scm_t_bits)t, 16, port);
0607ebbf 271 scm_puts_unlocked (")>", port);
d823b11b
MV
272 return 1;
273}
274
706846f6 275\f
9de87eea 276/*** Blocking on queues. */
f7eca35d 277
9de87eea
MV
278/* See also scm_i_queue_async_cell for how such a block is
 279 interrupted.
280*/
d823b11b 281
9de87eea
MV
282/* Put the current thread on QUEUE and go to sleep, waiting for it to
283 be woken up by a call to 'unblock_from_queue', or to be
284 interrupted. Upon return of this function, the current thread is
285 no longer on QUEUE, even when the sleep has been interrupted.
286
d2a51087 287 The caller of block_self must hold MUTEX. It will be atomically
9de87eea
MV
288 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
289
290 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
291 as MUTEX is needed.
292
293 When WAITTIME is not NULL, the sleep will be aborted at that time.
294
295 The return value of block_self is an errno value. It will be zero
296 when the sleep has been successfully completed by a call to
297 unblock_from_queue, EINTR when it has been interrupted by the
298 delivery of a system async, and ETIMEDOUT when the timeout has
299 expired.
300
301 The system asyncs themselves are not executed by block_self.
302*/
303static int
304block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
305 const scm_t_timespec *waittime)
76da80e7 306{
9de87eea
MV
307 scm_i_thread *t = SCM_I_CURRENT_THREAD;
308 SCM q_handle;
309 int err;
310
311 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
312 err = EINTR;
313 else
314 {
315 t->block_asyncs++;
316 q_handle = enqueue (queue, t->handle);
317 if (waittime == NULL)
318 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
319 else
320 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
321
322 /* When we are still on QUEUE, we have been interrupted. We
323 report this only when no other error (such as a timeout) has
324 happened above.
325 */
326 if (remqueue (queue, q_handle) && err == 0)
327 err = EINTR;
328 t->block_asyncs--;
329 scm_i_reset_sleep (t);
330 }
331
332 return err;
76da80e7 333}
9de87eea 334
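/* Illustrative call pattern (editor's sketch, not part of the
   original source; M, MUTEX and WAITTIME are placeholders): the
   caller holds the mutex, parks itself on a wait queue, and
   interprets the errno-style result, much as fat_mutex_lock does
   below.

     scm_i_scm_pthread_mutex_lock (&m->lock);
     err = block_self (m->waiting, mutex, &m->lock, waittime);
     if (err == 0)               -- woken by unblock_from_queue
       ...;
     else if (err == EINTR)      -- interrupted by a system async
       ...;
     else if (err == ETIMEDOUT)  -- the timeout expired
       ...;
     scm_i_pthread_mutex_unlock (&m->lock);
*/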
d2a51087
NJ
335/* Wake up the first thread on QUEUE, if any. The awoken thread is
336 returned, or #f if the queue was empty.
9de87eea
MV
337 */
338static SCM
339unblock_from_queue (SCM queue)
340{
341 SCM thread = dequeue (queue);
342 if (scm_is_true (thread))
343 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
344 return thread;
345}
346
45f15cac 347\f
9de87eea
MV
348/* Getting into and out of guile mode.
349 */
350
f60a7648
AW
351/* Key used to attach a cleanup handler to a given thread. Also, if
352 thread-local storage is unavailable, this key is used to retrieve the
353 current thread with `pthread_getspecific ()'. */
354scm_i_pthread_key_t scm_i_thread_key;
355
356
705edb95
LC
357#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
358
359/* When thread-local storage (TLS) is available, a pointer to the
360 current-thread object is kept in TLS. Note that storing the thread-object
361 itself in TLS (rather than a pointer to some malloc'd memory) is not
362 possible since thread objects may live longer than the actual thread they
363 represent. */
364SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
365
f60a7648 366#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
705edb95 367
d823b11b 368
9de87eea
MV
369static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
370static scm_i_thread *all_threads = NULL;
371static int thread_count;
372
373static SCM scm_i_default_dynamic_state;
374
0b77014f
AW
375/* Run when a fluid is collected. */
376void
aafb4ed7 377scm_i_reset_fluid (size_t n)
0b77014f
AW
378{
379 scm_i_thread *t;
380
381 scm_i_pthread_mutex_lock (&thread_admin_mutex);
382 for (t = all_threads; t; t = t->next_thread)
383 if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
384 {
385 SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
386
387 if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
aafb4ed7 388 SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
0b77014f
AW
389 }
390 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
391}
392
9de87eea 393/* Perform first stage of thread initialisation, in non-guile mode.
d823b11b 394 */
9de87eea 395static void
12c1d861 396guilify_self_1 (struct GC_stack_base *base)
d823b11b 397{
7f22442b
AW
398 scm_i_thread t;
399
400 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
401 before allocating anything in this thread, because allocation could
402 cause GC to run, and GC could cause finalizers, which could invoke
403 Scheme functions, which need the current thread to be set. */
404
405 t.pthread = scm_i_pthread_self ();
406 t.handle = SCM_BOOL_F;
407 t.result = SCM_BOOL_F;
408 t.cleanup_handler = SCM_BOOL_F;
409 t.mutexes = SCM_EOL;
410 t.held_mutex = NULL;
411 t.join_queue = SCM_EOL;
aef1fcf9
AW
412 t.freelists = NULL;
413 t.pointerless_freelists = NULL;
7f22442b 414 t.dynamic_state = SCM_BOOL_F;
9ede013f
AW
415 t.dynstack.base = NULL;
416 t.dynstack.top = NULL;
417 t.dynstack.limit = NULL;
7f22442b
AW
418 t.active_asyncs = SCM_EOL;
419 t.block_asyncs = 1;
420 t.pending_asyncs = 1;
421 t.critical_section_level = 0;
422 t.base = base->mem_base;
346e4402 423#ifdef __ia64__
bbd21051 424 t.register_backing_store_base = base->reg_base;
346e4402 425#endif
7f22442b
AW
426 t.continuation_root = SCM_EOL;
427 t.continuation_base = t.base;
428 scm_i_pthread_cond_init (&t.sleep_cond, NULL);
429 t.sleep_mutex = NULL;
430 t.sleep_object = SCM_BOOL_F;
431 t.sleep_fd = -1;
7af0c3b3 432 t.vp = NULL;
7f22442b 433
32f94bf2 434 if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
634aa8de
LC
435 /* FIXME: Error conditions during the initialization phase are handled
436 gracelessly since public functions such as `scm_init_guile ()'
437 currently have type `void'. */
438 abort ();
439
7f22442b 440 scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
7f22442b
AW
441 t.canceled = 0;
442 t.exited = 0;
443 t.guile_mode = 0;
9de87eea 444
7f22442b
AW
445 /* The switcheroo. */
446 {
447 scm_i_thread *t_ptr = &t;
448
449 GC_disable ();
7af0c3b3 450 t_ptr = GC_generic_malloc (sizeof (*t_ptr), thread_gc_kind);
7f22442b
AW
451 memcpy (t_ptr, &t, sizeof t);
452
453 scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
f60a7648
AW
454
455#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
7f22442b
AW
456 /* Cache the current thread in TLS for faster lookup. */
457 scm_i_current_thread = t_ptr;
f60a7648 458#endif
9de87eea 459
7f22442b
AW
460 scm_i_pthread_mutex_lock (&thread_admin_mutex);
461 t_ptr->next_thread = all_threads;
462 all_threads = t_ptr;
463 thread_count++;
464 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
465
466 GC_enable ();
467 }
d823b11b
MV
468}
469
9de87eea 470/* Perform second stage of thread initialisation, in guile mode.
d823b11b 471 */
9de87eea
MV
472static void
473guilify_self_2 (SCM parent)
d823b11b 474{
9de87eea
MV
475 scm_i_thread *t = SCM_I_CURRENT_THREAD;
476
72e6b608
LC
477 t->guile_mode = 1;
478
9de87eea 479 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
c812243b 480
9de87eea
MV
481 t->continuation_root = scm_cons (t->handle, SCM_EOL);
482 t->continuation_base = t->base;
483
aef1fcf9
AW
484 {
485 size_t size = SCM_INLINE_GC_FREELIST_COUNT * sizeof (void *);
486 t->freelists = scm_gc_malloc (size, "freelists");
487 t->pointerless_freelists = scm_gc_malloc (size, "atomic freelists");
488 }
489
9de87eea
MV
490 if (scm_is_true (parent))
491 t->dynamic_state = scm_make_dynamic_state (parent);
492 else
493 t->dynamic_state = scm_i_make_initial_dynamic_state ();
494
9ede013f
AW
495 t->dynstack.base = scm_gc_malloc (16 * sizeof (scm_t_bits), "dynstack");
496 t->dynstack.limit = t->dynstack.base + 16;
497 t->dynstack.top = t->dynstack.base + SCM_DYNSTACK_HEADER_LEN;
498
9de87eea
MV
499 t->join_queue = make_queue ();
500 t->block_asyncs = 0;
f740445a
AW
501
502 /* See note in finalizers.c:queue_finalizer_async(). */
503 GC_invoke_finalizers ();
d823b11b
MV
504}
505
6180e336
NJ
506\f
507/*** Fat mutexes */
508
509/* We implement our own mutex type since we want them to be 'fair', we
510 want to do fancy things while waiting for them (like running
511 asyncs) and we might want to add things that are nice for
512 debugging.
513*/
514
515typedef struct {
516 scm_i_pthread_mutex_t lock;
517 SCM owner;
adc085f1 518 int level; /* how many times the owner has locked us; <= 1 for non-recursive mutexes */
6180e336 519
adc085f1 520 int recursive; /* allow recursive locking? */
6180e336
NJ
521 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
522 int allow_external_unlock; /* is it an error to unlock a mutex that is not
523 owned by the current thread? */
524
525 SCM waiting; /* the threads waiting for this mutex. */
526} fat_mutex;
527
528#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
529#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
530
2002f1f8
AW
531static SCM
532call_cleanup (void *data)
533{
534 SCM *proc_p = data;
535 return scm_call_0 (*proc_p);
536}
537
9de87eea 538/* Perform thread tear-down, in guile mode.
d823b11b 539 */
9de87eea
MV
540static void *
541do_thread_exit (void *v)
542{
2e77f720
LC
543 scm_i_thread *t = (scm_i_thread *) v;
544
545 if (!scm_is_false (t->cleanup_handler))
546 {
547 SCM ptr = t->cleanup_handler;
548
549 t->cleanup_handler = SCM_BOOL_F;
550 t->result = scm_internal_catch (SCM_BOOL_T,
2002f1f8 551 call_cleanup, &ptr,
2e77f720
LC
552 scm_handle_by_message_noexit, NULL);
553 }
9de87eea 554
86a597f8 555 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
9de87eea
MV
556
557 t->exited = 1;
0c97d7dd
MV
558 close (t->sleep_pipe[0]);
559 close (t->sleep_pipe[1]);
9de87eea
MV
560 while (scm_is_true (unblock_from_queue (t->join_queue)))
561 ;
9de87eea 562
74926120 563 while (!scm_is_null (t->mutexes))
6180e336 564 {
a141db86 565 SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);
74926120 566
a141db86 567 if (scm_is_true (mutex))
a0faf7dd
LC
568 {
569 fat_mutex *m = SCM_MUTEX_DATA (mutex);
570
571 scm_i_pthread_mutex_lock (&m->lock);
f57fdf07 572
02a362a6
LC
573 /* Check whether T owns MUTEX. This is usually the case, unless
574 T abandoned MUTEX; in that case, T is no longer its owner (see
575 `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
576 if (scm_is_eq (m->owner, t->handle))
577 unblock_from_queue (m->waiting);
f57fdf07 578
a0faf7dd
LC
579 scm_i_pthread_mutex_unlock (&m->lock);
580 }
6180e336 581
a141db86 582 t->mutexes = scm_cdr (t->mutexes);
6180e336
NJ
583 }
584
86a597f8 585 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 586
9de87eea
MV
587 return NULL;
588}
589
f60a7648
AW
590static void *
591do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
592{
7f22442b 593 /* Won't hurt if we are already registered. */
8e76ce94 594#if SCM_USE_PTHREAD_THREADS
7f22442b 595 GC_register_my_thread (sb);
8e76ce94 596#endif
f60a7648 597
7f22442b 598 return scm_with_guile (do_thread_exit, v);
f60a7648
AW
599}
600
d823b11b 601static void
9de87eea 602on_thread_exit (void *v)
d823b11b 603{
29776e85 604 /* This handler is executed in non-guile mode. */
2e77f720 605 scm_i_thread *t = (scm_i_thread *) v, **tp;
0c97d7dd 606
b8441577
LC
607 /* If we were canceled, we were unable to clear `t->guile_mode', so do
608 it here. */
609 t->guile_mode = 0;
610
d2a51087
NJ
611 /* If this thread was cancelled while doing a cond wait, it will
612 still have a mutex locked, so we unlock it here. */
613 if (t->held_mutex)
614 {
615 scm_i_pthread_mutex_unlock (t->held_mutex);
616 t->held_mutex = NULL;
617 }
618
f60a7648
AW
619 /* Reinstate the current thread for purposes of scm_with_guile
620 guile-mode cleanup handlers. Only really needed in the non-TLS
621 case but it doesn't hurt to be consistent. */
622 scm_i_pthread_setspecific (scm_i_thread_key, t);
0c97d7dd 623
7f22442b
AW
624 /* Scheme-level thread finalizers and other cleanup needs to happen in
625 guile mode. */
626 GC_call_with_stack_base (do_thread_exit_trampoline, t);
0c97d7dd
MV
627
628 /* Removing ourself from the list of all threads needs to happen in
629 non-guile mode since all SCM values on our stack become
29776e85 630 unprotected once we are no longer in the list. */
0c97d7dd
MV
631 scm_i_pthread_mutex_lock (&thread_admin_mutex);
632 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
633 if (*tp == t)
634 {
635 *tp = t->next_thread;
c058db8a
LC
636
637 /* GC-robust */
638 t->next_thread = NULL;
639
0c97d7dd
MV
640 break;
641 }
642 thread_count--;
2e77f720
LC
643
644 /* If there's only one other thread, it could be the signal delivery
645 thread, so we need to notify it to shut down by closing its read pipe.
646 If it's not the signal delivery thread, then closing the read pipe isn't
647 going to hurt. */
648 if (thread_count <= 1)
649 scm_i_close_signal_pipe ();
650
0c97d7dd
MV
651 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
652
f60a7648 653 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
7f22442b 654
5f18bc84
AW
655 if (t->vp)
656 {
657 scm_i_vm_free_stack (t->vp);
658 t->vp = NULL;
659 }
660
8e76ce94 661#if SCM_USE_PTHREAD_THREADS
7f22442b 662 GC_unregister_my_thread ();
653ccd78 663#endif
d823b11b
MV
664}
665
9de87eea 666static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
d823b11b 667
9de87eea
MV
668static void
669init_thread_key (void)
670{
f60a7648 671 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
9de87eea 672}
d823b11b 673
cde24ce1
AW
674/* Perform any initializations necessary to make the current thread
675 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
676 if necessary.
a54a94b3 677
9de87eea
MV
678 BASE is the stack base to use with GC.
679
 680 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
681 which case the default dynamic state is used.
682
cde24ce1 683 Returns zero when the thread was known to guile already; otherwise
9de87eea 684 return 1.
cde24ce1
AW
685
686 Note that it could be the case that the thread was known
687 to Guile, but not in guile mode (because we are within a
688 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
689 be sure. New threads are put into guile mode implicitly. */
9de87eea
MV
690
691static int
12c1d861 692scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
d823b11b 693{
9de87eea
MV
694 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
695
cde24ce1
AW
696 if (SCM_I_CURRENT_THREAD)
697 {
698 /* Thread is already known to Guile.
699 */
700 return 0;
701 }
702 else
9de87eea
MV
703 {
704 /* This thread has not been guilified yet.
705 */
706
707 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
708 if (scm_initialized_p == 0)
709 {
710 /* First thread ever to enter Guile. Run the full
711 initialization.
712 */
713 scm_i_init_guile (base);
12c1d861 714
03d12949 715#if SCM_USE_PTHREAD_THREADS
12c1d861
AW
716 /* Allow other threads to come in later. */
717 GC_allow_register_threads ();
4000d064 718#endif
12c1d861 719
9de87eea
MV
720 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
721 }
722 else
723 {
724 /* Guile is already initialized, but this thread enters it for
725 the first time. Only initialize this thread.
726 */
727 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
12c1d861
AW
728
729 /* Register this thread with libgc. */
8e76ce94 730#if SCM_USE_PTHREAD_THREADS
12c1d861 731 GC_register_my_thread (base);
8e76ce94 732#endif
12c1d861 733
9de87eea
MV
734 guilify_self_1 (base);
735 guilify_self_2 (parent);
736 }
737 return 1;
738 }
d823b11b
MV
739}
740
12c1d861
AW
741void
742scm_init_guile ()
d823b11b 743{
12c1d861
AW
744 struct GC_stack_base stack_base;
745
746 if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
747 scm_i_init_thread_for_guile (&stack_base,
748 scm_i_default_dynamic_state);
9de87eea
MV
749 else
750 {
12c1d861 751 fprintf (stderr, "Failed to get stack base for current thread.\n");
4765b28f 752 exit (EXIT_FAILURE);
9de87eea 753 }
a54a94b3
MV
754}
755
12c1d861 756struct with_guile_args
cde24ce1
AW
757{
758 GC_fn_type func;
759 void *data;
12c1d861 760 SCM parent;
cde24ce1
AW
761};
762
763static void *
764with_guile_trampoline (void *data)
765{
12c1d861 766 struct with_guile_args *args = data;
cde24ce1
AW
767
768 return scm_c_with_continuation_barrier (args->func, args->data);
769}
770
12c1d861
AW
771static void *
772with_guile_and_parent (struct GC_stack_base *base, void *data)
9de87eea
MV
773{
774 void *res;
cde24ce1
AW
775 int new_thread;
776 scm_i_thread *t;
12c1d861 777 struct with_guile_args *args = data;
2e77f720 778
12c1d861 779 new_thread = scm_i_init_thread_for_guile (base, args->parent);
cde24ce1
AW
780 t = SCM_I_CURRENT_THREAD;
781 if (new_thread)
2e77f720 782 {
cde24ce1
AW
783 /* We are in Guile mode. */
784 assert (t->guile_mode);
785
12c1d861 786 res = scm_c_with_continuation_barrier (args->func, args->data);
cde24ce1
AW
787
788 /* Leave Guile mode. */
789 t->guile_mode = 0;
790 }
791 else if (t->guile_mode)
792 {
793 /* Already in Guile mode. */
12c1d861 794 res = scm_c_with_continuation_barrier (args->func, args->data);
2e77f720 795 }
74926120 796 else
cde24ce1 797 {
cde24ce1
AW
798 /* We are not in Guile mode, either because we are not within a
799 scm_with_guile, or because we are within a scm_without_guile.
72e6b608 800
cde24ce1
AW
801 This call to scm_with_guile() could happen from anywhere on the
 802 stack, and in particular lower on the stack than it was
803 when this thread was first guilified. Thus, `base' must be
804 updated. */
805#if SCM_STACK_GROWS_UP
12c1d861
AW
806 if (SCM_STACK_PTR (base->mem_base) < t->base)
807 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 808#else
12c1d861
AW
809 if (SCM_STACK_PTR (base->mem_base) > t->base)
810 t->base = SCM_STACK_PTR (base->mem_base);
cde24ce1 811#endif
72e6b608 812
cde24ce1 813 t->guile_mode = 1;
03d12949 814 res = GC_call_with_gc_active (with_guile_trampoline, args);
cde24ce1
AW
815 t->guile_mode = 0;
816 }
817 return res;
72e6b608 818}
3d1af79f 819
12c1d861
AW
820static void *
821scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
822{
823 struct with_guile_args args;
824
825 args.func = func;
826 args.data = data;
827 args.parent = parent;
828
829 return GC_call_with_stack_base (with_guile_and_parent, &args);
830}
831
832void *
833scm_with_guile (void *(*func)(void *), void *data)
834{
835 return scm_i_with_guile_and_parent (func, data,
836 scm_i_default_dynamic_state);
837}
838
9de87eea
MV
839void *
840scm_without_guile (void *(*func)(void *), void *data)
d823b11b 841{
72e6b608 842 void *result;
cde24ce1 843 scm_i_thread *t = SCM_I_CURRENT_THREAD;
72e6b608 844
cde24ce1 845 if (t->guile_mode)
72e6b608 846 {
cde24ce1 847 SCM_I_CURRENT_THREAD->guile_mode = 0;
03d12949 848 result = GC_do_blocking (func, data);
cde24ce1 849 SCM_I_CURRENT_THREAD->guile_mode = 1;
72e6b608
LC
850 }
851 else
cde24ce1 852 /* Otherwise we're not in guile mode, so nothing to do. */
72e6b608
LC
853 result = func (data);
854
855 return result;
9de87eea
MV
856}
857
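/* Illustrative usage (editor's sketch, not part of the original
   source): a foreign thread enters Guile mode for a computation and
   leaves it around a blocking call so that GC can proceed meanwhile.

     static void *compute (void *data)
     {
       return scm_c_eval_string ("(+ 1 2)");
     }

     static void *wait_for_input (void *data)
     {
       -- a long-running read (2), poll (2), etc.
       return NULL;
     }

     scm_with_guile (compute, NULL);
     scm_without_guile (wait_for_input, NULL);
*/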
72e6b608 858\f
9de87eea
MV
859/*** Thread creation */
860
861typedef struct {
862 SCM parent;
863 SCM thunk;
864 SCM handler;
76da80e7 865 SCM thread;
9de87eea
MV
866 scm_i_pthread_mutex_t mutex;
867 scm_i_pthread_cond_t cond;
868} launch_data;
d823b11b 869
9de87eea
MV
870static void *
871really_launch (void *d)
872{
873 launch_data *data = (launch_data *)d;
874 SCM thunk = data->thunk, handler = data->handler;
875 scm_i_thread *t;
d823b11b 876
9de87eea 877 t = SCM_I_CURRENT_THREAD;
a54a94b3 878
9de87eea
MV
879 scm_i_scm_pthread_mutex_lock (&data->mutex);
880 data->thread = scm_current_thread ();
881 scm_i_pthread_cond_signal (&data->cond);
882 scm_i_pthread_mutex_unlock (&data->mutex);
883
884 if (SCM_UNBNDP (handler))
885 t->result = scm_call_0 (thunk);
886 else
887 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
888
889 return 0;
d823b11b
MV
890}
891
9de87eea
MV
892static void *
893launch_thread (void *d)
894{
895 launch_data *data = (launch_data *)d;
896 scm_i_pthread_detach (scm_i_pthread_self ());
897 scm_i_with_guile_and_parent (really_launch, d, data->parent);
898 return NULL;
899}
900
901SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
d823b11b 902 (SCM thunk, SCM handler),
9de87eea
MV
903 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
904 "returning a new thread object representing the thread. The procedure\n"
905 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
906 "\n"
907 "When @var{handler} is specified, then @var{thunk} is called from\n"
908 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
909 "handler. This catch is established inside the continuation barrier.\n"
910 "\n"
911 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
912 "the @emph{exit value} of the thread and the thread is terminated.")
d823b11b
MV
913#define FUNC_NAME s_scm_call_with_new_thread
914{
9de87eea
MV
915 launch_data data;
916 scm_i_pthread_t id;
917 int err;
d823b11b 918
9de87eea
MV
919 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
920 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
921 handler, SCM_ARG2, FUNC_NAME);
922
9f7537dc 923 GC_collect_a_little ();
9de87eea
MV
924 data.parent = scm_current_dynamic_state ();
925 data.thunk = thunk;
926 data.handler = handler;
927 data.thread = SCM_BOOL_F;
928 scm_i_pthread_mutex_init (&data.mutex, NULL);
929 scm_i_pthread_cond_init (&data.cond, NULL);
930
931 scm_i_scm_pthread_mutex_lock (&data.mutex);
932 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
933 if (err)
934 {
935 scm_i_pthread_mutex_unlock (&data.mutex);
936 errno = err;
937 scm_syserror (NULL);
938 }
dbab8aaa
AW
939
940 while (scm_is_false (data.thread))
941 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
942
9de87eea 943 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 944
9de87eea 945 return data.thread;
d823b11b
MV
946}
947#undef FUNC_NAME
948
9de87eea
MV
949typedef struct {
950 SCM parent;
951 scm_t_catch_body body;
952 void *body_data;
953 scm_t_catch_handler handler;
954 void *handler_data;
955 SCM thread;
956 scm_i_pthread_mutex_t mutex;
957 scm_i_pthread_cond_t cond;
958} spawn_data;
959
960static void *
961really_spawn (void *d)
962{
963 spawn_data *data = (spawn_data *)d;
964 scm_t_catch_body body = data->body;
965 void *body_data = data->body_data;
966 scm_t_catch_handler handler = data->handler;
967 void *handler_data = data->handler_data;
968 scm_i_thread *t = SCM_I_CURRENT_THREAD;
969
970 scm_i_scm_pthread_mutex_lock (&data->mutex);
971 data->thread = scm_current_thread ();
972 scm_i_pthread_cond_signal (&data->cond);
973 scm_i_pthread_mutex_unlock (&data->mutex);
974
975 if (handler == NULL)
976 t->result = body (body_data);
977 else
978 t->result = scm_internal_catch (SCM_BOOL_T,
979 body, body_data,
980 handler, handler_data);
981
982 return 0;
983}
984
985static void *
986spawn_thread (void *d)
987{
988 spawn_data *data = (spawn_data *)d;
989 scm_i_pthread_detach (scm_i_pthread_self ());
990 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
991 return NULL;
992}
993
994SCM
995scm_spawn_thread (scm_t_catch_body body, void *body_data,
996 scm_t_catch_handler handler, void *handler_data)
997{
998 spawn_data data;
999 scm_i_pthread_t id;
1000 int err;
1001
1002 data.parent = scm_current_dynamic_state ();
1003 data.body = body;
1004 data.body_data = body_data;
1005 data.handler = handler;
1006 data.handler_data = handler_data;
1007 data.thread = SCM_BOOL_F;
1008 scm_i_pthread_mutex_init (&data.mutex, NULL);
1009 scm_i_pthread_cond_init (&data.cond, NULL);
1010
1011 scm_i_scm_pthread_mutex_lock (&data.mutex);
1012 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1013 if (err)
1014 {
1015 scm_i_pthread_mutex_unlock (&data.mutex);
1016 errno = err;
1017 scm_syserror (NULL);
1018 }
dbab8aaa
AW
1019
1020 while (scm_is_false (data.thread))
1021 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1022
9de87eea 1023 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1024
0f4f2d9a
LC
1025 assert (SCM_I_IS_THREAD (data.thread));
1026
9de87eea
MV
1027 return data.thread;
1028}
1029
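/* Illustrative usage from C (editor's sketch, not part of the
   original source; BODY and HANDLER are placeholders):

     static SCM body (void *data)
     {
       return scm_from_int (42);
     }

     static SCM handler (void *data, SCM key, SCM args)
     {
       return SCM_BOOL_F;
     }

     SCM t = scm_spawn_thread (body, NULL, handler, NULL);
     SCM v = scm_join_thread (t);   -- 42, or #f if BODY threw
*/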
29717c89
MD
1030SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1031 (),
1032"Move the calling thread to the end of the scheduling queue.")
1033#define FUNC_NAME s_scm_yield
1034{
9de87eea 1035 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1036}
1037#undef FUNC_NAME
1038
f184e887
LC
1039/* Some systems, notably Android, lack 'pthread_cancel'. Don't provide
1040 'cancel-thread' on these systems. */
1041
1042#if !SCM_USE_PTHREAD_THREADS || defined HAVE_PTHREAD_CANCEL
1043
2e77f720
LC
1044SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1045 (SCM thread),
1046"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1047"cannot be the current thread, and if @var{thread} has already terminated or "
1048"been signaled to terminate, this function is a no-op.")
1049#define FUNC_NAME s_scm_cancel_thread
1050{
1051 scm_i_thread *t = NULL;
1052
1053 SCM_VALIDATE_THREAD (1, thread);
1054 t = SCM_I_THREAD_DATA (thread);
86a597f8 1055 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1056 if (!t->canceled)
1057 {
1058 t->canceled = 1;
86a597f8 1059 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1060 scm_i_pthread_cancel (t->pthread);
1061 }
1062 else
86a597f8 1063 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1064
1065 return SCM_UNSPECIFIED;
1066}
1067#undef FUNC_NAME
1068
f184e887
LC
1069#endif
1070
2e77f720
LC
1071SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1072 (SCM thread, SCM proc),
1073"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1074"This handler will be called when the thread exits.")
1075#define FUNC_NAME s_scm_set_thread_cleanup_x
1076{
1077 scm_i_thread *t;
1078
1079 SCM_VALIDATE_THREAD (1, thread);
1080 if (!scm_is_false (proc))
1081 SCM_VALIDATE_THUNK (2, proc);
1082
2e77f720 1083 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1084 scm_i_pthread_mutex_lock (&t->admin_mutex);
1085
2e77f720
LC
1086 if (!(t->exited || t->canceled))
1087 t->cleanup_handler = proc;
1088
86a597f8 1089 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1090
1091 return SCM_UNSPECIFIED;
1092}
1093#undef FUNC_NAME
1094
1095SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1096 (SCM thread),
1097"Return the cleanup handler installed for the thread @var{thread}.")
1098#define FUNC_NAME s_scm_thread_cleanup
1099{
1100 scm_i_thread *t;
1101 SCM ret;
1102
1103 SCM_VALIDATE_THREAD (1, thread);
1104
2e77f720 1105 t = SCM_I_THREAD_DATA (thread);
86a597f8 1106 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1107 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1108 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1109
1110 return ret;
1111}
1112#undef FUNC_NAME
1113
6180e336
NJ
1114SCM scm_join_thread (SCM thread)
1115{
1116 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1117}
1118
1119SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1120 (SCM thread, SCM timeout, SCM timeoutval),
d823b11b
MV
1121"Suspend execution of the calling thread until the target @var{thread} "
1122"terminates, unless the target @var{thread} has already terminated. ")
6180e336 1123#define FUNC_NAME s_scm_join_thread_timed
5f05c406 1124{
9de87eea 1125 scm_i_thread *t;
6180e336
NJ
1126 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1127 SCM res = SCM_BOOL_F;
1128
1129 if (! (SCM_UNBNDP (timeoutval)))
1130 res = timeoutval;
d823b11b
MV
1131
1132 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1133 if (scm_is_eq (scm_current_thread (), thread))
2e77f720 1134 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
d823b11b 1135
9de87eea 1136 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1137 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1138
6180e336
NJ
1139 if (! SCM_UNBNDP (timeout))
1140 {
1141 to_timespec (timeout, &ctimeout);
1142 timeout_ptr = &ctimeout;
1143 }
1144
1145 if (t->exited)
1146 res = t->result;
1147 else
d823b11b 1148 {
9de87eea
MV
1149 while (1)
1150 {
74926120 1151 int err = block_self (t->join_queue, thread, &t->admin_mutex,
6180e336
NJ
1152 timeout_ptr);
1153 if (err == 0)
1154 {
1155 if (t->exited)
1156 {
1157 res = t->result;
1158 break;
1159 }
1160 }
1161 else if (err == ETIMEDOUT)
9de87eea 1162 break;
6180e336 1163
86a597f8 1164 scm_i_pthread_mutex_unlock (&t->admin_mutex);
9de87eea 1165 SCM_TICK;
86a597f8 1166 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
21346c4f
NJ
1167
1168 /* Check for exit again, since we just released and
1169 reacquired the admin mutex, before the next block_self
1170 call (which would block forever if t has already
1171 exited). */
1172 if (t->exited)
1173 {
1174 res = t->result;
1175 break;
1176 }
9de87eea 1177 }
d823b11b 1178 }
9de87eea 1179
86a597f8 1180 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 1181
d823b11b 1182 return res;
5f05c406
MV
1183}
1184#undef FUNC_NAME
1185
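/* Illustrative Scheme-level usage (editor's sketch, not part of the
   original source):

     (define t (call-with-new-thread (lambda () (* 6 7))))
     (join-thread t)               ; => 42
     (join-thread t 0 'timed-out)  ; => 42, or 'timed-out if T has
                                   ;    not exited by the given time
*/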
6180e336
NJ
1186SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1187 (SCM obj),
1188 "Return @code{#t} if @var{obj} is a thread.")
1189#define FUNC_NAME s_scm_thread_p
1190{
1191 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1192}
1193#undef FUNC_NAME
5f05c406 1194
4079f87e 1195
76da80e7 1196static int
9de87eea 1197fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1198{
9de87eea 1199 fat_mutex *m = SCM_MUTEX_DATA (mx);
0607ebbf 1200 scm_puts_unlocked ("#<mutex ", port);
9de87eea 1201 scm_uintprint ((scm_t_bits)m, 16, port);
0607ebbf 1202 scm_puts_unlocked (">", port);
9de87eea 1203 return 1;
76da80e7
MV
1204}
1205
76da80e7 1206static SCM
6180e336 1207make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1208{
9de87eea
MV
1209 fat_mutex *m;
1210 SCM mx;
fea65eb2 1211 scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
9de87eea
MV
1212
1213 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
fea65eb2
AW
1214 /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
1215 and so we can just copy it. */
1216 memcpy (&m->lock, &lock, sizeof (m->lock));
9de87eea 1217 m->owner = SCM_BOOL_F;
adc085f1 1218 m->level = 0;
6180e336 1219
adc085f1 1220 m->recursive = recursive;
6180e336
NJ
1221 m->unchecked_unlock = unchecked_unlock;
1222 m->allow_external_unlock = external_unlock;
1223
9de87eea
MV
1224 m->waiting = SCM_EOL;
1225 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1226 m->waiting = make_queue ();
1227 return mx;
76da80e7
MV
1228}
1229
6180e336
NJ
1230SCM scm_make_mutex (void)
1231{
1232 return scm_make_mutex_with_flags (SCM_EOL);
1233}
1234
2a1d0688
NJ
1235SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1236SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1237SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1238
1239SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1240 (SCM flags),
9de87eea 1241 "Create a new mutex. ")
6180e336 1242#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1243{
6180e336
NJ
1244 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1245
1246 SCM ptr = flags;
1247 while (! scm_is_null (ptr))
1248 {
1249 SCM flag = SCM_CAR (ptr);
1250 if (scm_is_eq (flag, unchecked_unlock_sym))
1251 unchecked_unlock = 1;
1252 else if (scm_is_eq (flag, allow_external_unlock_sym))
1253 external_unlock = 1;
1254 else if (scm_is_eq (flag, recursive_sym))
1255 recursive = 1;
74926120 1256 else
2a1d0688 1257 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1258 ptr = SCM_CDR (ptr);
1259 }
1260 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
76da80e7
MV
1261}
1262#undef FUNC_NAME
1263
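/* Illustrative Scheme-level usage of the flags above (editor's
   sketch, not part of the original source):

     (make-mutex)                            ; standard mutex
     (make-mutex 'recursive)                 ; like make-recursive-mutex
     (make-mutex 'unchecked-unlock
                 'allow-external-unlock)     ; relaxed unlock checking
*/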
9de87eea 1264SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
9bc4701c 1265 (void),
9de87eea
MV
1266 "Create a new recursive mutex. ")
1267#define FUNC_NAME s_scm_make_recursive_mutex
9bc4701c 1268{
6180e336 1269 return make_fat_mutex (1, 0, 0);
9bc4701c
MD
1270}
1271#undef FUNC_NAME
1272
6180e336
NJ
1273SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1274
1275static SCM
adc085f1 1276fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
9de87eea
MV
1277{
1278 fat_mutex *m = SCM_MUTEX_DATA (mutex);
6180e336 1279
adc085f1 1280 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
6180e336
NJ
1281 SCM err = SCM_BOOL_F;
1282
1283 struct timeval current_time;
9de87eea
MV
1284
1285 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1286
1287 while (1)
9de87eea 1288 {
adc085f1 1289 if (m->level == 0)
6180e336 1290 {
adc085f1 1291 m->owner = new_owner;
6180e336 1292 m->level++;
74926120 1293
adc085f1 1294 if (SCM_I_IS_THREAD (new_owner))
6180e336 1295 {
adc085f1 1296 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
ccb80964 1297
4f39f31e
LC
1298 /* FIXME: The order in which `t->admin_mutex' and
1299 `m->lock' are taken differs from that in
1300 `on_thread_exit', potentially leading to deadlocks. */
6180e336 1301 scm_i_pthread_mutex_lock (&t->admin_mutex);
a0faf7dd
LC
1302
1303 /* Only keep a weak reference to MUTEX so that it's not
f57fdf07
LC
1304 retained when not referenced elsewhere (bug #27450).
1305 The weak pair itself is eventually removed when MUTEX
1306 is unlocked. Note that `t->mutexes' lists mutexes
1307 currently held by T, so it should be small. */
a141db86
AW
1308 t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
1309 t->mutexes);
a0faf7dd 1310
6180e336 1311 scm_i_pthread_mutex_unlock (&t->admin_mutex);
6180e336 1312 }
adc085f1
JG
1313 *ret = 1;
1314 break;
1315 }
1316 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1317 {
1318 m->owner = new_owner;
1319 err = scm_cons (scm_abandoned_mutex_error_key,
1320 scm_from_locale_string ("lock obtained on abandoned "
1321 "mutex"));
1322 *ret = 1;
1323 break;
1324 }
1325 else if (scm_is_eq (m->owner, new_owner))
1326 {
1327 if (m->recursive)
1328 {
1329 m->level++;
74926120 1330 *ret = 1;
adc085f1
JG
1331 }
1332 else
6180e336 1333 {
adc085f1
JG
1334 err = scm_cons (scm_misc_error_key,
1335 scm_from_locale_string ("mutex already locked "
1336 "by thread"));
1337 *ret = 0;
1338 }
74926120 1339 break;
adc085f1 1340 }
9de87eea 1341 else
9de87eea 1342 {
74926120 1343 if (timeout != NULL)
adc085f1
JG
1344 {
1345 gettimeofday (&current_time, NULL);
1346 if (current_time.tv_sec > timeout->tv_sec ||
1347 (current_time.tv_sec == timeout->tv_sec &&
1348 current_time.tv_usec * 1000 > timeout->tv_nsec))
6180e336 1349 {
adc085f1
JG
1350 *ret = 0;
1351 break;
6180e336 1352 }
6180e336 1353 }
37a52039 1354 block_self (m->waiting, mutex, &m->lock, timeout);
9de87eea
MV
1355 scm_i_pthread_mutex_unlock (&m->lock);
1356 SCM_TICK;
1357 scm_i_scm_pthread_mutex_lock (&m->lock);
1358 }
1359 }
1360 scm_i_pthread_mutex_unlock (&m->lock);
6180e336 1361 return err;
9de87eea
MV
1362}
1363
6180e336
NJ
1364SCM scm_lock_mutex (SCM mx)
1365{
adc085f1 1366 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
6180e336
NJ
1367}
1368
adc085f1
JG
1369SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1370 (SCM m, SCM timeout, SCM owner),
b7e64f8b
BT
1371 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1372 "thread blocks until the mutex becomes available. The function\n"
1373 "returns when the calling thread owns the lock on @var{m}.\n"
1374 "Locking a mutex that a thread already owns will succeed right\n"
1375 "away and will not block the thread. That is, Guile's mutexes\n"
1376 "are @emph{recursive}.")
6180e336 1377#define FUNC_NAME s_scm_lock_mutex_timed
9bc4701c 1378{
6180e336
NJ
1379 SCM exception;
1380 int ret = 0;
1381 scm_t_timespec cwaittime, *waittime = NULL;
76da80e7 1382
6180e336
NJ
1383 SCM_VALIDATE_MUTEX (1, m);
1384
1385 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1386 {
1387 to_timespec (timeout, &cwaittime);
1388 waittime = &cwaittime;
1389 }
1390
97ec95b7
LC
1391 if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
1392 SCM_VALIDATE_THREAD (3, owner);
1393
adc085f1 1394 exception = fat_mutex_lock (m, waittime, owner, &ret);
6180e336
NJ
1395 if (!scm_is_false (exception))
1396 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1397 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c 1398}
76da80e7 1399#undef FUNC_NAME
9bc4701c 1400
9ec1573d
MW
1401static void
1402lock_mutex_return_void (SCM mx)
1403{
1404 (void) scm_lock_mutex (mx);
1405}
1406
1407static void
1408unlock_mutex_return_void (SCM mx)
1409{
1410 (void) scm_unlock_mutex (mx);
1411}
1412
a4d106c7 1413void
661ae7ab 1414scm_dynwind_lock_mutex (SCM mutex)
a4d106c7 1415{
9ec1573d 1416 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
661ae7ab 1417 SCM_F_WIND_EXPLICITLY);
9ec1573d 1418 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
661ae7ab 1419 SCM_F_WIND_EXPLICITLY);
a4d106c7
MV
1420}
1421
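/* Illustrative usage (editor's sketch, not part of the original
   source): the dynwind pair guarantees the mutex is released however
   the dynamic extent is left, including via a non-local exit.

     scm_dynwind_begin (0);
     scm_dynwind_lock_mutex (mutex);
     -- code that may throw
     scm_dynwind_end ();
*/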
9bc4701c 1422SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
ba1b7223 1423 (SCM mutex),
9bc4701c
MD
1424"Try to lock @var{mutex}. If the mutex is already locked by someone "
1425"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1426#define FUNC_NAME s_scm_try_mutex
1427{
6180e336
NJ
1428 SCM exception;
1429 int ret = 0;
1430 scm_t_timespec cwaittime, *waittime = NULL;
9de87eea 1431
ba1b7223 1432 SCM_VALIDATE_MUTEX (1, mutex);
6180e336
NJ
1433
1434 to_timespec (scm_from_int(0), &cwaittime);
1435 waittime = &cwaittime;
74926120 1436
adc085f1 1437 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
6180e336
NJ
1438 if (!scm_is_false (exception))
1439 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1440 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9de87eea
MV
1441}
1442#undef FUNC_NAME
76da80e7 1443
6180e336
NJ
1444/*** Fat condition variables */
1445
1446typedef struct {
1447 scm_i_pthread_mutex_t lock;
1448 SCM waiting; /* the threads waiting for this condition. */
1449} fat_cond;
1450
1451#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1452#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1453
a141db86
AW
1454static void
1455remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
1456{
1457 SCM walk, prev;
1458
1459 for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
1460 walk = SCM_CDR (walk))
1461 {
1462 if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
1463 {
1464 if (scm_is_pair (prev))
1465 SCM_SETCDR (prev, SCM_CDR (walk));
1466 else
1467 t->mutexes = SCM_CDR (walk);
1468 break;
1469 }
1470 }
1471}
1472
6180e336
NJ
1473static int
1474fat_mutex_unlock (SCM mutex, SCM cond,
1475 const scm_t_timespec *waittime, int relock)
9de87eea 1476{
7f991c7d 1477 SCM owner;
6180e336
NJ
1478 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1479 fat_cond *c = NULL;
1480 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1481 int err = 0, ret = 0;
9de87eea
MV
1482
1483 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1 1484
7f991c7d 1485 owner = m->owner;
adc085f1 1486
d31ae2c3 1487 if (!scm_is_eq (owner, t->handle))
9bc4701c 1488 {
adc085f1 1489 if (m->level == 0)
6180e336
NJ
1490 {
1491 if (!m->unchecked_unlock)
2a1d0688
NJ
1492 {
1493 scm_i_pthread_mutex_unlock (&m->lock);
1494 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1495 }
d31ae2c3 1496 owner = t->handle;
6180e336
NJ
1497 }
1498 else if (!m->allow_external_unlock)
2a1d0688
NJ
1499 {
1500 scm_i_pthread_mutex_unlock (&m->lock);
1501 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1502 }
6180e336
NJ
1503 }
1504
1505 if (! (SCM_UNBNDP (cond)))
1506 {
6180e336
NJ
1507 c = SCM_CONDVAR_DATA (cond);
1508 while (1)
1509 {
1510 int brk = 0;
1511
6180e336
NJ
1512 if (m->level > 0)
1513 m->level--;
adc085f1 1514 if (m->level == 0)
f57fdf07
LC
1515 {
1516 /* Change the owner of MUTEX. */
a141db86 1517 remove_mutex_from_thread (mutex, t);
f57fdf07
LC
1518 m->owner = unblock_from_queue (m->waiting);
1519 }
adc085f1 1520
6180e336 1521 t->block_asyncs++;
74926120 1522
d2a51087
NJ
1523 err = block_self (c->waiting, cond, &m->lock, waittime);
1524 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
1525
1526 if (err == 0)
1527 {
1528 ret = 1;
1529 brk = 1;
1530 }
1531 else if (err == ETIMEDOUT)
1532 {
1533 ret = 0;
1534 brk = 1;
1535 }
1536 else if (err != EINTR)
74926120 1537 {
6180e336 1538 errno = err;
6180e336 1539 scm_syserror (NULL);
74926120 1540 }
6180e336
NJ
1541
1542 if (brk)
1543 {
1544 if (relock)
adc085f1 1545 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
4201062d 1546 t->block_asyncs--;
6180e336
NJ
1547 break;
1548 }
74926120 1549
6180e336 1550 t->block_asyncs--;
27c6ebcb 1551 scm_async_tick ();
74926120 1552
6180e336
NJ
1553 scm_remember_upto_here_2 (cond, mutex);
1554
1555 scm_i_scm_pthread_mutex_lock (&m->lock);
1556 }
9bc4701c 1557 }
9de87eea 1558 else
6180e336
NJ
1559 {
1560 if (m->level > 0)
1561 m->level--;
74926120 1562 if (m->level == 0)
f57fdf07
LC
1563 {
1564 /* Change the owner of MUTEX. */
a141db86 1565 remove_mutex_from_thread (mutex, t);
f57fdf07
LC
1566 m->owner = unblock_from_queue (m->waiting);
1567 }
74926120 1568
6180e336
NJ
1569 scm_i_pthread_mutex_unlock (&m->lock);
1570 ret = 1;
1571 }
9de87eea 1572
6180e336 1573 return ret;
9bc4701c 1574}
9bc4701c 1575
6180e336
NJ
1576SCM scm_unlock_mutex (SCM mx)
1577{
1578 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
9bc4701c 1579}
9bc4701c 1580
6180e336
NJ
1581SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1582 (SCM mx, SCM cond, SCM timeout),
9bc4701c
MD
1583"Unlocks @var{mutex} if the calling thread owns the lock on "
1584"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1585"thread results in undefined behaviour. Once a mutex has been unlocked, "
1586"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1587"lock. Every call to @code{lock-mutex} by this thread must be matched "
1588"with a call to @code{unlock-mutex}. Only the last call to "
1589"@code{unlock-mutex} will actually unlock the mutex. ")
6180e336 1590#define FUNC_NAME s_scm_unlock_mutex_timed
9bc4701c 1591{
6180e336
NJ
1592 scm_t_timespec cwaittime, *waittime = NULL;
1593
9bc4701c 1594 SCM_VALIDATE_MUTEX (1, mx);
6180e336
NJ
1595 if (! (SCM_UNBNDP (cond)))
1596 {
1597 SCM_VALIDATE_CONDVAR (2, cond);
1598
2a1d8403 1599 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
6180e336
NJ
1600 {
1601 to_timespec (timeout, &cwaittime);
1602 waittime = &cwaittime;
1603 }
1604 }
1605
1606 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c
MD
1607}
1608#undef FUNC_NAME
1609
6180e336
NJ
1610SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1611 (SCM obj),
1612 "Return @code{#t} if @var{obj} is a mutex.")
1613#define FUNC_NAME s_scm_mutex_p
1614{
1615 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1616}
74926120 1617#undef FUNC_NAME
9de87eea
MV
1618
1619SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1620 (SCM mx),
1621 "Return the thread owning @var{mx}, or @code{#f}.")
1622#define FUNC_NAME s_scm_mutex_owner
1623{
adc085f1
JG
1624 SCM owner;
1625 fat_mutex *m = NULL;
1626
9de87eea 1627 SCM_VALIDATE_MUTEX (1, mx);
adc085f1
JG
1628 m = SCM_MUTEX_DATA (mx);
1629 scm_i_pthread_mutex_lock (&m->lock);
1630 owner = m->owner;
1631 scm_i_pthread_mutex_unlock (&m->lock);
1632
1633 return owner;
9de87eea
MV
1634}
1635#undef FUNC_NAME
1636
1637SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1638 (SCM mx),
adc085f1 1639 "Return the lock level of mutex @var{mx}.")
9de87eea
MV
1640#define FUNC_NAME s_scm_mutex_level
1641{
1642 SCM_VALIDATE_MUTEX (1, mx);
1643 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1644}
1645#undef FUNC_NAME
1646
adc085f1
JG
1647SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1648 (SCM mx),
1649 "Returns @code{#t} if the mutex @var{mx} is locked.")
1650#define FUNC_NAME s_scm_mutex_locked_p
1651{
1652 SCM_VALIDATE_MUTEX (1, mx);
1653 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1654}
1655#undef FUNC_NAME
9de87eea 1656
9de87eea
MV
1657static int
1658fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1659{
1660 fat_cond *c = SCM_CONDVAR_DATA (cv);
0607ebbf 1661 scm_puts_unlocked ("#<condition-variable ", port);
9de87eea 1662 scm_uintprint ((scm_t_bits)c, 16, port);
0607ebbf 1663 scm_puts_unlocked (">", port);
9de87eea
MV
1664 return 1;
1665}
9bc4701c 1666
d823b11b
MV
1667SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1668 (void),
1669 "Make a new condition variable.")
1670#define FUNC_NAME s_scm_make_condition_variable
5f05c406 1671{
9de87eea
MV
1672 fat_cond *c;
1673 SCM cv;
1674
1675 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
9de87eea
MV
1676 c->waiting = SCM_EOL;
1677 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1678 c->waiting = make_queue ();
d823b11b 1679 return cv;
5f05c406 1680}
d823b11b 1681#undef FUNC_NAME
5f05c406 1682
d823b11b
MV
1683SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1684 (SCM cv, SCM mx, SCM t),
b7e64f8b
BT
1685"Wait until condition variable @var{cv} has been signalled. While waiting, "
1686"mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1687"is locked again when this function returns. When @var{t} is given, "
d823b11b
MV
1688"it specifies a point in time where the waiting should be aborted. It "
1689"can be either a integer as returned by @code{current-time} or a pair "
1690"as returned by @code{gettimeofday}. When the waiting is aborted the "
1691"mutex is locked and @code{#f} is returned. When the condition "
1692"variable is in fact signalled, the mutex is also locked and @code{#t} "
1693"is returned. ")
1694#define FUNC_NAME s_scm_timed_wait_condition_variable
5f05c406 1695{
9de87eea 1696 scm_t_timespec waittime, *waitptr = NULL;
d823b11b
MV
1697
1698 SCM_VALIDATE_CONDVAR (1, cv);
1699 SCM_VALIDATE_MUTEX (2, mx);
74926120 1700
d823b11b
MV
1701 if (!SCM_UNBNDP (t))
1702 {
6180e336 1703 to_timespec (t, &waittime);
9de87eea 1704 waitptr = &waittime;
d823b11b
MV
1705 }
1706
2a1d0688 1707 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
5f05c406 1708}
d823b11b 1709#undef FUNC_NAME
5f05c406 1710
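/* Illustrative Scheme-level usage (editor's sketch, not part of the
   original source; DONE? is a hypothetical flag): the canonical
   pattern re-checks its condition in a loop, since the wait may also
   return on a broadcast or a timeout.

     (define m (make-mutex))
     (define c (make-condition-variable))
     (lock-mutex m)
     (let loop ()
       (unless done?
         (wait-condition-variable c m)
         (loop)))
     (unlock-mutex m)
*/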
9de87eea
MV
1711static void
1712fat_cond_signal (fat_cond *c)
1713{
9de87eea 1714 unblock_from_queue (c->waiting);
9de87eea
MV
1715}
1716
d823b11b
MV
1717SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1718 (SCM cv),
1719 "Wake up one thread that is waiting for @var{cv}")
1720#define FUNC_NAME s_scm_signal_condition_variable
5f05c406 1721{
d823b11b 1722 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1723 fat_cond_signal (SCM_CONDVAR_DATA (cv));
d823b11b 1724 return SCM_BOOL_T;
5f05c406 1725}
d823b11b 1726#undef FUNC_NAME
5f05c406 1727
9de87eea
MV
1728static void
1729fat_cond_broadcast (fat_cond *c)
1730{
9de87eea
MV
1731 while (scm_is_true (unblock_from_queue (c->waiting)))
1732 ;
9de87eea
MV
1733}
1734
d823b11b
MV
1735SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1736 (SCM cv),
1737 "Wake up all threads that are waiting for @var{cv}. ")
1738#define FUNC_NAME s_scm_broadcast_condition_variable
5f05c406 1739{
d823b11b 1740 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1741 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
d823b11b 1742 return SCM_BOOL_T;
5f05c406 1743}
d823b11b 1744#undef FUNC_NAME
5f05c406 1745
6180e336
NJ
1746SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1747 (SCM obj),
1748 "Return @code{#t} if @var{obj} is a condition variable.")
1749#define FUNC_NAME s_scm_condition_variable_p
1750{
1751 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1752}
1753#undef FUNC_NAME
1754
6087fad9 1755
8c2b3143 1756\f
d823b11b
MV
1757/*** Select */
1758
8c2b3143
LC
1759struct select_args
1760{
1761 int nfds;
6ab4de61
AW
1762 fd_set *read_fds;
1763 fd_set *write_fds;
1764 fd_set *except_fds;
8c2b3143
LC
1765 struct timeval *timeout;
1766
1767 int result;
1768 int errno_value;
1769};
1770
1771static void *
1772do_std_select (void *args)
1773{
1774 struct select_args *select_args;
1775
1776 select_args = (struct select_args *) args;
1777
1778 select_args->result =
1779 select (select_args->nfds,
1780 select_args->read_fds, select_args->write_fds,
1781 select_args->except_fds, select_args->timeout);
1782 select_args->errno_value = errno;
1783
1784 return NULL;
1785}
1786
911782b7 1787int
9de87eea 1788scm_std_select (int nfds,
6ab4de61
AW
1789 fd_set *readfds,
1790 fd_set *writefds,
1791 fd_set *exceptfds,
9de87eea
MV
1792 struct timeval *timeout)
1793{
1794 fd_set my_readfds;
1795 int res, eno, wakeup_fd;
1796 scm_i_thread *t = SCM_I_CURRENT_THREAD;
8c2b3143 1797 struct select_args args;
9de87eea
MV
1798
1799 if (readfds == NULL)
1800 {
1801 FD_ZERO (&my_readfds);
1802 readfds = &my_readfds;
1803 }
1804
1805 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1806 SCM_TICK;
1807
1808 wakeup_fd = t->sleep_pipe[0];
9de87eea
MV
1809 FD_SET (wakeup_fd, readfds);
1810 if (wakeup_fd >= nfds)
1811 nfds = wakeup_fd+1;
9de87eea 1812
8c2b3143
LC
1813 args.nfds = nfds;
1814 args.read_fds = readfds;
1815 args.write_fds = writefds;
1816 args.except_fds = exceptfds;
1817 args.timeout = timeout;
1818
1819 /* Leave guile mode while blocking in select () so that the GC can proceed. */
1820 scm_without_guile (do_std_select, &args);
1821
1822 res = args.result;
1823 eno = args.errno_value;
1824
1825 t->sleep_fd = -1;
9de87eea
MV
1826 scm_i_reset_sleep (t);
1827
1828 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1829 {
1830 char dummy;
634aa8de
LC
1831 full_read (wakeup_fd, &dummy, 1);
1832
9de87eea
MV
1833 FD_CLR (wakeup_fd, readfds);
1834 res -= 1;
1835 if (res == 0)
1836 {
1837 eno = EINTR;
1838 res = -1;
1839 }
1840 }
d823b11b
MV
1841 errno = eno;
1842 return res;
5f05c406
MV
1843}
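/* Illustrative sketch, not part of this file: waiting for input on an
   arbitrary file descriptor FD from guile mode.  Unlike a raw select (),
   scm_std_select () also watches the calling thread's sleep pipe, so another
   thread can wake it up (for example via scm_system_async_mark ()); when
   that wakeup is the only event, the call returns -1 with errno set to
   EINTR.  The function name and TIMEOUT_USECS parameter are hypothetical.  */
static int
example_wait_for_input (int fd, long timeout_usecs)
{
  fd_set readfds;
  struct timeval tv;

  FD_ZERO (&readfds);
  FD_SET (fd, &readfds);
  tv.tv_sec = timeout_usecs / 1000000;
  tv.tv_usec = timeout_usecs % 1000000;

  /* Returns the number of ready descriptors, 0 on timeout, or -1 with
     errno set.  */
  return scm_std_select (fd + 1, &readfds, NULL, NULL, &tv);
}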
1844
9de87eea 1845/* Convenience API for blocking while in guile mode. */
76da80e7 1846
9de87eea 1847#if SCM_USE_PTHREAD_THREADS
92e64b87 1848
2956b071
LC
1849/* It seems reasonable not to run procedures related to mutexes and condition
1850 variables within `GC_do_blocking ()' since (i) the GC can operate even
1851 without it, and (ii) the only potential gain would be reduced GC latency. See
1852 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1853 for a discussion of the pros and cons. */
1854
9bc4701c 1855int
9de87eea 1856scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
9bc4701c 1857{
9de87eea 1858 int res = scm_i_pthread_mutex_lock (mutex);
9bc4701c
MD
1859 return res;
1860}
1861
9de87eea 1862static void
2b829bbb 1863do_unlock (void *data)
28d52ebb 1864{
9de87eea 1865 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
28d52ebb
MD
1866}
1867
1868void
661ae7ab 1869scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
28d52ebb 1870{
9de87eea 1871 scm_i_scm_pthread_mutex_lock (mutex);
2b829bbb 1872 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
28d52ebb
MD
1873}
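/* Illustrative sketch, not part of this file: using the dynwind variant so
   that the mutex is released even if the code in between throws.  The
   EXAMPLE_STATE_LOCK mutex and the computed value are placeholders.  */
static scm_i_pthread_mutex_t example_state_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

static SCM
example_with_lock (void)
{
  SCM result;

  scm_dynwind_begin (0);
  scm_dynwind_pthread_mutex_lock (&example_state_lock);
  /* Anything here may throw; the unwind handler installed above unlocks
     the mutex either way.  */
  result = scm_from_int (42);
  scm_dynwind_end ();
  return result;
}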
1874
9bc4701c 1875int
9de87eea 1876scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
9bc4701c 1877{
4cf72f0b
LC
1878 int res;
1879 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1880
1881 t->held_mutex = mutex;
1882 res = scm_i_pthread_cond_wait (cond, mutex);
1883 t->held_mutex = NULL;
1884
9bc4701c
MD
1885 return res;
1886}
9bc4701c 1887
76da80e7 1888int
9de87eea
MV
1889scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1890 scm_i_pthread_mutex_t *mutex,
1891 const scm_t_timespec *wt)
76da80e7 1892{
4cf72f0b
LC
1893 int res;
1894 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1895
1896 t->held_mutex = mutex;
1897 res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1898 t->held_mutex = NULL;
1899
9de87eea 1900 return res;
76da80e7
MV
1901}
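/* Illustrative sketch, not part of this file: a C-level wait loop using the
   wrappers above.  Recording the mutex in the thread's `held_mutex' field
   (as the wrappers do) lets the thread machinery release it if the thread
   is woken for cancellation while blocked.  QUEUE_LOCK, QUEUE_NONEMPTY and
   QUEUE_EMPTY are hypothetical; the condition variable is assumed to have
   been set up elsewhere with scm_i_pthread_cond_init ().  */
static scm_i_pthread_mutex_t example_queue_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_pthread_cond_t example_queue_nonempty;
static int example_queue_empty = 1;

static void
example_wait_until_queue_nonempty (void)
{
  scm_i_pthread_mutex_lock (&example_queue_lock);
  while (example_queue_empty)
    scm_pthread_cond_wait (&example_queue_nonempty, &example_queue_lock);
  scm_i_pthread_mutex_unlock (&example_queue_lock);
}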
1902
9de87eea 1903#endif
76da80e7 1904
e676a4c3
MW
1905static void
1906do_unlock_with_asyncs (void *data)
1907{
1908 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1909 SCM_I_CURRENT_THREAD->block_asyncs--;
1910}
1911
1912void
1913scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t *mutex)
1914{
1915 SCM_I_CURRENT_THREAD->block_asyncs++;
1916 scm_i_scm_pthread_mutex_lock (mutex);
1917 scm_dynwind_unwind_handler (do_unlock_with_asyncs, mutex,
1918 SCM_F_WIND_EXPLICITLY);
1919}
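/* Illustrative sketch, not part of this file: the _block_asyncs variant is
   for code that must not run asyncs while the mutex is held, for example
   because an async handler could try to take the same mutex.  The lock and
   the async block are undone together by the unwind handler.  */
static void
example_with_lock_no_asyncs (scm_i_pthread_mutex_t *m)
{
  scm_dynwind_begin (0);
  scm_i_dynwind_pthread_mutex_lock_block_asyncs (m);
  /* ... critical section, asyncs deferred ... */
  scm_dynwind_end ();
}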
1920
d823b11b 1921unsigned long
9de87eea 1922scm_std_usleep (unsigned long usecs)
5f05c406 1923{
d823b11b
MV
1924 struct timeval tv;
1925 tv.tv_usec = usecs % 1000000;
1926 tv.tv_sec = usecs / 1000000;
9de87eea
MV
1927 scm_std_select (0, NULL, NULL, NULL, &tv);
1928 return tv.tv_sec * 1000000 + tv.tv_usec;
5f05c406
MV
1929}
1930
9de87eea
MV
1931unsigned int
1932scm_std_sleep (unsigned int secs)
6c214b62 1933{
d823b11b
MV
1934 struct timeval tv;
1935 tv.tv_usec = 0;
1936 tv.tv_sec = secs;
9de87eea 1937 scm_std_select (0, NULL, NULL, NULL, &tv);
d823b11b 1938 return tv.tv_sec;
6c214b62
MD
1939}
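/* Illustrative sketch, not part of this file: these helpers behave like
   sleep (3) and usleep (3) but, being built on scm_std_select () above,
   stay responsive to wakeups.  As with sleep (3), the unslept part of the
   interval is returned when the sleep is cut short.  */
static void
example_nap (void)
{
  unsigned int left = scm_std_sleep (5);
  if (left > 0)
    {
      /* Woken early, e.g. by scm_system_async_mark (); LEFT seconds of the
         requested interval had not yet elapsed.  */
    }
}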
1940
d823b11b
MV
1941/*** Misc */
1942
1943SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1944 (void),
1945 "Return the thread that called this function.")
1946#define FUNC_NAME s_scm_current_thread
1947{
9de87eea 1948 return SCM_I_CURRENT_THREAD->handle;
d823b11b
MV
1949}
1950#undef FUNC_NAME
1951
9de87eea
MV
1952static SCM
1953scm_c_make_list (size_t n, SCM fill)
1954{
1955 SCM res = SCM_EOL;
1956 while (n-- > 0)
1957 res = scm_cons (fill, res);
1958 return res;
1959}
1960
d823b11b
MV
1961SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
1962 (void),
1963 "Return a list of all threads.")
9bc4701c 1964#define FUNC_NAME s_scm_all_threads
d823b11b 1965{
9de87eea
MV
1966 /* We cannot allocate while holding the thread_admin_mutex because
1967 of the way GC is done.
1968 */
1969 int n = thread_count;
1970 scm_i_thread *t;
1971 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
d823b11b 1972
9de87eea
MV
1973 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1974 l = &list;
1975 for (t = all_threads; t && n > 0; t = t->next_thread)
1976 {
2e77f720
LC
1977 if (t != scm_i_signal_delivery_thread)
1978 {
1979 SCM_SETCAR (*l, t->handle);
1980 l = SCM_CDRLOC (*l);
1981 }
9de87eea
MV
1982 n--;
1983 }
1984 *l = SCM_EOL;
1985 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1986 return list;
d823b11b 1987}
9de87eea 1988#undef FUNC_NAME
d823b11b
MV
1989
1990SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1991 (SCM thread),
1992 "Return @code{#t} iff @var{thread} has exited.\n")
1993#define FUNC_NAME s_scm_thread_exited_p
1994{
7888309b 1995 return scm_from_bool (scm_c_thread_exited_p (thread));
d823b11b
MV
1996}
1997#undef FUNC_NAME
1998
911782b7 1999int
d823b11b
MV
2000scm_c_thread_exited_p (SCM thread)
2001#define FUNC_NAME s_scm_thread_exited_p
5f05c406 2002{
9de87eea 2003 scm_i_thread *t;
d823b11b 2004 SCM_VALIDATE_THREAD (1, thread);
9de87eea 2005 t = SCM_I_THREAD_DATA (thread);
d823b11b 2006 return t->exited;
5f05c406 2007}
d823b11b 2008#undef FUNC_NAME
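/* Illustrative sketch, not part of this file: walking the list returned by
   scm_all_threads () and counting the threads that have not yet exited,
   using scm_c_thread_exited_p () from above.  */
static long
example_count_live_threads (void)
{
  long live = 0;
  SCM threads;

  for (threads = scm_all_threads (); scm_is_pair (threads);
       threads = scm_cdr (threads))
    if (!scm_c_thread_exited_p (scm_car (threads)))
      live++;
  return live;
}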
5f05c406 2009
d20912e6
LC
2010SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
2011 (void),
2012 "Return the total number of processors of the machine, which\n"
2013 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2014 "thread execution unit, which can be either:\n\n"
2015 "@itemize\n"
2016 "@item an execution core in a (possibly multi-core) chip, in a\n"
2017 " (possibly multi- chip) module, in a single computer, or\n"
2018 "@item a thread execution unit inside a core in the case of\n"
2019 " @dfn{hyper-threaded} CPUs.\n"
2020 "@end itemize\n\n"
2021 "Which of the two definitions is used, is unspecified.\n")
2022#define FUNC_NAME s_scm_total_processor_count
2023{
2024 return scm_from_ulong (num_processors (NPROC_ALL));
2025}
2026#undef FUNC_NAME
2027
2028SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
2029 (void),
2030 "Like @code{total-processor-count}, but return the number of\n"
2031 "processors available to the current process. See\n"
2032 "@code{setaffinity} and @code{getaffinity} for more\n"
2033 "information.\n")
2034#define FUNC_NAME s_scm_current_processor_count
2035{
2036 return scm_from_ulong (num_processors (NPROC_CURRENT));
2037}
2038#undef FUNC_NAME
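/* Illustrative sketch, not part of this file: sizing a worker pool from C
   with the counts defined above.  The cap of 8 is an arbitrary choice for
   the example.  */
static unsigned long
example_default_worker_count (void)
{
  unsigned long n = scm_to_ulong (scm_current_processor_count ());
  return n > 8 ? 8 : n;
}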
2039
2040
2041\f
2042
9de87eea 2043static scm_i_pthread_cond_t wake_up_cond;
9bc4701c
MD
2044static int threads_initialized_p = 0;
2045
9bc4701c 2046
a4d106c7
MV
2047/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2048 */
d1138028 2049scm_i_pthread_mutex_t scm_i_critical_section_mutex;
a4d106c7 2050
661ae7ab 2051static SCM dynwind_critical_section_mutex;
a54a94b3 2052
9bc4701c 2053void
661ae7ab 2054scm_dynwind_critical_section (SCM mutex)
76da80e7 2055{
a4d106c7 2056 if (scm_is_false (mutex))
661ae7ab
MV
2057 mutex = dynwind_critical_section_mutex;
2058 scm_dynwind_lock_mutex (mutex);
2059 scm_dynwind_block_asyncs ();
9de87eea
MV
2060}
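/* Illustrative sketch, not part of this file: entering a critical section
   for the extent of the current dynwind context.  Passing SCM_BOOL_F selects
   the built-in recursive critical-section mutex; asyncs stay blocked until
   the dynwind context is left.  */
static void
example_critical_section (void)
{
  scm_dynwind_begin (0);
  scm_dynwind_critical_section (SCM_BOOL_F);
  /* ... code that must not be re-entered or interrupted by asyncs ... */
  scm_dynwind_end ();
}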
2061
2062/*** Initialization */
2063
9de87eea
MV
2064scm_i_pthread_mutex_t scm_i_misc_mutex;
2065
d1138028
MV
2066#if SCM_USE_PTHREAD_THREADS
2067pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2068#endif
2069
9de87eea 2070void
12c1d861 2071scm_threads_prehistory (void *base)
9de87eea 2072{
d1138028
MV
2073#if SCM_USE_PTHREAD_THREADS
2074 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
2075 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
2076 PTHREAD_MUTEX_RECURSIVE);
2077#endif
2078
2079 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
2080 scm_i_pthread_mutexattr_recursive);
9de87eea
MV
2081 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
2082 scm_i_pthread_cond_init (&wake_up_cond, NULL);
74926120 2083
7af0c3b3
AW
2084 thread_gc_kind =
2085 GC_new_kind (GC_new_free_list (),
2086 GC_MAKE_PROC (GC_new_proc (thread_mark), 0),
2087 0, 1);
2088
12c1d861 2089 guilify_self_1 ((struct GC_stack_base *) base);
9bc4701c
MD
2090}
2091
d823b11b
MV
2092scm_t_bits scm_tc16_thread;
2093scm_t_bits scm_tc16_mutex;
2094scm_t_bits scm_tc16_condvar;
7bfd3b9e 2095
7bfd3b9e 2096void
9de87eea 2097scm_init_threads ()
7bfd3b9e 2098{
9de87eea 2099 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
d823b11b 2100 scm_set_smob_print (scm_tc16_thread, thread_print);
d823b11b 2101
9de87eea 2102 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
9de87eea 2103 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
9bc4701c 2104
9de87eea
MV
2105 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
2106 sizeof (fat_cond));
9de87eea 2107 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
d823b11b 2108
9de87eea
MV
2109 scm_i_default_dynamic_state = SCM_BOOL_F;
2110 guilify_self_2 (SCM_BOOL_F);
9bc4701c 2111 threads_initialized_p = 1;
a4d106c7 2112
f39448c5 2113 dynwind_critical_section_mutex = scm_make_recursive_mutex ();
7bfd3b9e 2114}
89e00824 2115
5f05c406 2116void
9de87eea 2117scm_init_threads_default_dynamic_state ()
5f05c406 2118{
9de87eea 2119 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
f39448c5 2120 scm_i_default_dynamic_state = state;
5f05c406
MV
2121}
2122
d823b11b 2123void
9de87eea 2124scm_init_thread_procs ()
d823b11b 2125{
9de87eea 2126#include "libguile/threads.x"
d823b11b
MV
2127}
2128
3c13664e
LC
2129\f
2130/* IA64-specific things. */
2131
2132#ifdef __ia64__
2133# ifdef __hpux
2134# include <sys/param.h>
2135# include <sys/pstat.h>
2136void *
2137scm_ia64_register_backing_store_base (void)
2138{
2139 struct pst_vm_status vm_status;
2140 int i = 0;
2141 while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
2142 if (vm_status.pst_type == PS_RSESTACK)
2143 return (void *) vm_status.pst_vaddr;
2144 abort ();
2145}
2146void *
2147scm_ia64_ar_bsp (const void *ctx)
2148{
2149 uint64_t bsp;
2150 __uc_get_ar_bsp (ctx, &bsp);
2151 return (void *) bsp;
2152}
2153# endif /* hpux */
2154# ifdef linux
2155# include <ucontext.h>
2156void *
2157scm_ia64_register_backing_store_base (void)
2158{
2159 extern void *__libc_ia64_register_backing_store_base;
2160 return __libc_ia64_register_backing_store_base;
2161}
2162void *
2163scm_ia64_ar_bsp (const void *opaque)
2164{
2165 const ucontext_t *ctx = opaque;
2166 return (void *) ctx->uc_mcontext.sc_ar_bsp;
2167}
2168# endif /* linux */
ba20d262
AW
2169# ifdef __FreeBSD__
2170# include <ucontext.h>
2171void *
2172scm_ia64_register_backing_store_base (void)
2173{
2174 return (void *)0x8000000000000000;
2175}
2176void *
2177scm_ia64_ar_bsp (const void *opaque)
2178{
2179 const ucontext_t *ctx = opaque;
2180 return (void *)(ctx->uc_mcontext.mc_special.bspstore
2181 + ctx->uc_mcontext.mc_special.ndirty);
2182}
2183# endif /* __FreeBSD__ */
3c13664e
LC
2184#endif /* __ia64__ */
2185
2186
89e00824
ML
2187/*
2188 Local Variables:
2189 c-file-style: "gnu"
2190 End:
2191*/