Fix hang in srfi-18.test
[bpt/guile.git] / libguile / threads.c
86a597f8 1/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
74926120 2 *
73be1d9e
MV
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7bfd3b9e 7 *
73be1d9e
MV
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
7bfd3b9e 12 *
73be1d9e
MV
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
92205699 15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
73be1d9e 16 */
1bbd0b84 17
1bbd0b84 18
7bfd3b9e 19\f
dbb605f5
LC
20#ifdef HAVE_CONFIG_H
21# include <config.h>
22#endif
7bfd3b9e 23
1810dc4e
RB
24#include "libguile/_scm.h"
25
fcc5d734 26#if HAVE_UNISTD_H
d823b11b 27#include <unistd.h>
fcc5d734 28#endif
d823b11b
MV
29#include <stdio.h>
30#include <assert.h>
8ab3d8a0
KR
31
32#ifdef HAVE_STRING_H
33#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
34#endif
35
fcc5d734 36#if HAVE_SYS_TIME_H
d823b11b 37#include <sys/time.h>
fcc5d734 38#endif
5f05c406 39
d823b11b
MV
40#include "libguile/validate.h"
41#include "libguile/root.h"
42#include "libguile/eval.h"
43#include "libguile/async.h"
44#include "libguile/ports.h"
45#include "libguile/threads.h"
a0599745 46#include "libguile/dynwind.h"
d823b11b 47#include "libguile/iselect.h"
9de87eea
MV
48#include "libguile/fluids.h"
49#include "libguile/continuations.h"
2b829bbb 50#include "libguile/gc.h"
9de87eea 51#include "libguile/init.h"
2e77f720 52#include "libguile/scmsigs.h"
6180e336 53#include "libguile/strings.h"
7bfd3b9e 54
ecc9f40f
MV
55#ifdef __MINGW32__
56#ifndef ETIMEDOUT
57# define ETIMEDOUT WSAETIMEDOUT
58#endif
59# include <fcntl.h>
60# include <process.h>
61# define pipe(fd) _pipe (fd, 256, O_BINARY)
62#endif /* __MINGW32__ */
63
6180e336
NJ
64static void
65to_timespec (SCM t, scm_t_timespec *waittime)
66{
67 if (scm_is_pair (t))
68 {
69 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
70 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
71 }
72 else
73 {
74 double time = scm_to_double (t);
75 double sec = scm_c_truncate (time);
76
77 waittime->tv_sec = (long) sec;
2a1d0688 78 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
6180e336
NJ
79 }
80}
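/* Illustrative sketch (not part of the build): to_timespec accepts either
   a (seconds . microseconds) pair, as returned by gettimeofday, or a real
   number of seconds.  Both calls below fill WT identically:

     scm_t_timespec wt;
     to_timespec (scm_cons (scm_from_ulong (1), scm_from_ulong (500000)), &wt);
     to_timespec (scm_from_double (1.5), &wt);
*/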
81
d823b11b 82/*** Queues */
7bfd3b9e 83
9de87eea
MV
84/* Make an empty queue data structure.
85 */
d823b11b
MV
86static SCM
87make_queue ()
88{
89 return scm_cons (SCM_EOL, SCM_EOL);
90}
7bfd3b9e 91
9de87eea
MV
92/* Put T at the back of Q and return a handle that can be used with
93 remqueue to remove T from Q again.
94 */
d823b11b
MV
95static SCM
96enqueue (SCM q, SCM t)
97{
98 SCM c = scm_cons (t, SCM_EOL);
d2a51087 99 SCM_CRITICAL_SECTION_START;
d2e53ed6 100 if (scm_is_null (SCM_CDR (q)))
d823b11b
MV
101 SCM_SETCDR (q, c);
102 else
103 SCM_SETCDR (SCM_CAR (q), c);
104 SCM_SETCAR (q, c);
d2a51087 105 SCM_CRITICAL_SECTION_END;
d823b11b
MV
106 return c;
107}
7bfd3b9e 108
9de87eea
MV
109/* Remove the element that the handle C refers to from the queue Q. C
110 must have been returned from a call to enqueue. The return value
111 is zero when the element referred to by C has already been removed.
112 Otherwise, 1 is returned.
113*/
114static int
d823b11b
MV
115remqueue (SCM q, SCM c)
116{
117 SCM p, prev = q;
d2a51087 118 SCM_CRITICAL_SECTION_START;
d2e53ed6 119 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
d823b11b 120 {
bc36d050 121 if (scm_is_eq (p, c))
d823b11b 122 {
bc36d050 123 if (scm_is_eq (c, SCM_CAR (q)))
d823b11b
MV
124 SCM_SETCAR (q, SCM_CDR (c));
125 SCM_SETCDR (prev, SCM_CDR (c));
d2a51087 126 SCM_CRITICAL_SECTION_END;
9de87eea 127 return 1;
d823b11b
MV
128 }
129 prev = p;
130 }
d2a51087 131 SCM_CRITICAL_SECTION_END;
9de87eea 132 return 0;
d823b11b
MV
133}
134
9de87eea
MV
135/* Remove the front-most element from the queue Q and return it.
136 Return SCM_BOOL_F when Q is empty.
137*/
d823b11b
MV
138static SCM
139dequeue (SCM q)
140{
d2a51087
NJ
141 SCM c;
142 SCM_CRITICAL_SECTION_START;
143 c = SCM_CDR (q);
d2e53ed6 144 if (scm_is_null (c))
d2a51087
NJ
145 {
146 SCM_CRITICAL_SECTION_END;
147 return SCM_BOOL_F;
148 }
d823b11b
MV
149 else
150 {
151 SCM_SETCDR (q, SCM_CDR (c));
d2e53ed6 152 if (scm_is_null (SCM_CDR (q)))
d823b11b 153 SCM_SETCAR (q, SCM_EOL);
d2a51087 154 SCM_CRITICAL_SECTION_END;
d823b11b
MV
155 return SCM_CAR (c);
156 }
157}
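/* Illustrative sketch (not part of the build): a queue is a cons whose cdr
   holds the list of elements (oldest first) and whose car points at the
   last pair of that list, so enqueue can append in O(1).  Typical use:

     SCM q = make_queue ();
     SCM h = enqueue (q, elt);     // h is the handle expected by remqueue
     if (!remqueue (q, h))         // 0 means elt had already been removed
       ...;
     SCM first = dequeue (q);      // SCM_BOOL_F when the queue is empty
*/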
7bfd3b9e 158
9de87eea 159/*** Thread smob routines */
76da80e7 160
d823b11b
MV
161static SCM
162thread_mark (SCM obj)
163{
9de87eea 164 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
d823b11b 165 scm_gc_mark (t->result);
2e77f720 166 scm_gc_mark (t->cleanup_handler);
9de87eea 167 scm_gc_mark (t->join_queue);
6180e336 168 scm_gc_mark (t->mutexes);
9de87eea
MV
169 scm_gc_mark (t->dynwinds);
170 scm_gc_mark (t->active_asyncs);
9de87eea
MV
171 scm_gc_mark (t->continuation_root);
172 return t->dynamic_state;
d823b11b
MV
173}
174
175static int
176thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
177{
23d72566
KR
 178 /* On a GNU system pthread_t is an unsigned long, but on mingw it's a
179 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
180 the struct case, hence we go via a union, and extract according to the
181 size of pthread_t. */
182 union {
183 scm_i_pthread_t p;
184 unsigned short us;
185 unsigned int ui;
186 unsigned long ul;
187 scm_t_uintmax um;
188 } u;
9de87eea 189 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
23d72566
KR
190 scm_i_pthread_t p = t->pthread;
191 scm_t_uintmax id;
192 u.p = p;
193 if (sizeof (p) == sizeof (unsigned short))
194 id = u.us;
195 else if (sizeof (p) == sizeof (unsigned int))
196 id = u.ui;
197 else if (sizeof (p) == sizeof (unsigned long))
198 id = u.ul;
199 else
200 id = u.um;
201
d823b11b 202 scm_puts ("#<thread ", port);
23d72566 203 scm_uintprint (id, 10, port);
1b92fb6b 204 scm_puts (" (", port);
0345e278 205 scm_uintprint ((scm_t_bits)t, 16, port);
1b92fb6b 206 scm_puts (")>", port);
d823b11b
MV
207 return 1;
208}
209
210static size_t
211thread_free (SCM obj)
212{
9de87eea
MV
213 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
214 assert (t->exited);
d823b11b
MV
215 scm_gc_free (t, sizeof (*t), "thread");
216 return 0;
217}
218
9de87eea 219/*** Blocking on queues. */
f7eca35d 220
9de87eea
MV
221/* See also scm_i_queue_async_cell for how such a block is
 222 interrupted.
223*/
d823b11b 224
9de87eea
MV
225/* Put the current thread on QUEUE and go to sleep, waiting for it to
226 be woken up by a call to 'unblock_from_queue', or to be
227 interrupted. Upon return of this function, the current thread is
228 no longer on QUEUE, even when the sleep has been interrupted.
229
d2a51087 230 The caller of block_self must hold MUTEX. It will be atomically
9de87eea
MV
231 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
232
233 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
234 as MUTEX is needed.
235
236 When WAITTIME is not NULL, the sleep will be aborted at that time.
237
238 The return value of block_self is an errno value. It will be zero
239 when the sleep has been successfully completed by a call to
240 unblock_from_queue, EINTR when it has been interrupted by the
241 delivery of a system async, and ETIMEDOUT when the timeout has
242 expired.
243
244 The system asyncs themselves are not executed by block_self.
245*/
246static int
247block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
248 const scm_t_timespec *waittime)
76da80e7 249{
9de87eea
MV
250 scm_i_thread *t = SCM_I_CURRENT_THREAD;
251 SCM q_handle;
252 int err;
253
254 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
255 err = EINTR;
256 else
257 {
258 t->block_asyncs++;
259 q_handle = enqueue (queue, t->handle);
260 if (waittime == NULL)
261 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
262 else
263 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
264
265 /* When we are still on QUEUE, we have been interrupted. We
266 report this only when no other error (such as a timeout) has
267 happened above.
268 */
269 if (remqueue (queue, q_handle) && err == 0)
270 err = EINTR;
271 t->block_asyncs--;
272 scm_i_reset_sleep (t);
273 }
274
275 return err;
76da80e7 276}
9de87eea 277
d2a51087
NJ
278/* Wake up the first thread on QUEUE, if any. The awoken thread is
279 returned, or #f if the queue was empty.
9de87eea
MV
280 */
281static SCM
282unblock_from_queue (SCM queue)
283{
284 SCM thread = dequeue (queue);
285 if (scm_is_true (thread))
286 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
287 return thread;
288}
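/* Illustrative sketch (not part of the build): the typical "fair wait"
   pattern built on block_self and unblock_from_queue, as used by
   join-thread and the fat mutexes below.  MUTEX protects QUEUE and the
   condition being waited for:

     scm_i_scm_pthread_mutex_lock (&mutex);
     while (!condition_holds)
       {
         int err = block_self (queue, protector, &mutex, waittime);
         if (err == ETIMEDOUT)
           break;                          // give up at the timeout
         // err == EINTR: a system async is pending; run it off the lock
         scm_i_pthread_mutex_unlock (&mutex);
         SCM_TICK;
         scm_i_scm_pthread_mutex_lock (&mutex);
       }
     scm_i_pthread_mutex_unlock (&mutex);

   The waking side calls unblock_from_queue (queue) while holding MUTEX.
*/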
289
290/* Getting into and out of guile mode.
291 */
292
98648121
MV
293/* Ken Raeburn observes that the implementation of suspend and resume
294 (and the things that build on top of them) are very likely not
295 correct (see below). We will need fix this eventually, and that's
296 why scm_leave_guile/scm_enter_guile are not exported in the API.
297
298 Ken writes:
299
300 Consider this sequence:
301
302 Function foo, called in Guile mode, calls suspend (maybe indirectly
303 through scm_leave_guile), which does this:
304
305 // record top of stack for the GC
306 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
307 var 't'
308 // save registers.
309 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
310 setjmp (t->regs); // here's most of the magic
311
312 ... and returns.
313
314 Function foo has a SCM value X, a handle on a non-immediate object, in
315 a caller-saved register R, and it's the only reference to the object
316 currently.
317
318 The compiler wants to use R in suspend, so it pushes the current
319 value, X, into a stack slot which will be reloaded on exit from
320 suspend; then it loads stuff into R and goes about its business. The
321 setjmp call saves (some of) the current registers, including R, which
322 no longer contains X. (This isn't a problem for a normal
323 setjmp/longjmp situation, where longjmp would be called before
324 setjmp's caller returns; the old value for X would be loaded back from
325 the stack after the longjmp, before the function returned.)
326
327 So, suspend returns, loading X back into R (and invalidating the jump
328 buffer) in the process. The caller foo then goes off and calls a
329 bunch of other functions out of Guile mode, occasionally storing X on
330 the stack again, but, say, much deeper on the stack than suspend's
331 stack frame went, and the stack slot where suspend had written X has
332 long since been overwritten with other values.
333
334 Okay, nothing actively broken so far. Now, let garbage collection
335 run, triggered by another thread.
336
337 The thread calling foo is out of Guile mode at the time, so the
338 garbage collector just scans a range of stack addresses. Too bad that
339 X isn't stored there. So the pointed-to storage goes onto the free
340 list, and I think you can see where things go from there.
341
342 Is there anything I'm missing that'll prevent this scenario from
343 happening? I mean, aside from, "well, suspend and scm_leave_guile
344 don't have many local variables, so they probably won't need to save
345 any registers on most systems, so we hope everything will wind up in
346 the jump buffer and we'll just get away with it"?
347
348 (And, going the other direction, if scm_leave_guile and suspend push
349 the stack pointer over onto a new page, and foo doesn't make further
350 function calls and thus the stack pointer no longer includes that
351 page, are we guaranteed that the kernel cannot release the now-unused
352 stack page that contains the top-of-stack pointer we just saved? I
353 don't know if any OS actually does that. If it does, we could get
354 faults in garbage collection.)
355
356 I don't think scm_without_guile has to have this problem, as it gets
357 more control over the stack handling -- but it should call setjmp
358 itself. I'd probably try something like:
359
360 // record top of stack for the GC
361 t->top = SCM_STACK_PTR (&t);
362 // save registers.
363 SCM_FLUSH_REGISTER_WINDOWS;
364 setjmp (t->regs);
365 res = func(data);
366 scm_enter_guile (t);
367
368 ... though even that's making some assumptions about the stack
369 ordering of local variables versus caller-saved registers.
370
371 For something like scm_leave_guile to work, I don't think it can just
372 rely on invalidated jump buffers. A valid jump buffer, and a handle
373 on the stack state at the point when the jump buffer was initialized,
374 together, would work fine, but I think then we're talking about macros
375 invoking setjmp in the caller's stack frame, and requiring that the
376 caller of scm_leave_guile also call scm_enter_guile before returning,
377 kind of like pthread_cleanup_push/pop calls that have to be paired up
378 in a function. (In fact, the pthread ones have to be paired up
379 syntactically, as if they might expand to a compound statement
380 incorporating the user's code, and invoking a compiler's
381 exception-handling primitives. Which might be something to think
382 about for cases where Guile is used with C++ exceptions or
383 pthread_cancel.)
384*/
385
9de87eea
MV
386scm_i_pthread_key_t scm_i_thread_key;
387
d823b11b 388static void
9de87eea 389resume (scm_i_thread *t)
d823b11b 390{
d823b11b 391 t->top = NULL;
9bc4701c
MD
392 if (t->clear_freelists_p)
393 {
394 *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
395 *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
396 t->clear_freelists_p = 0;
397 }
d823b11b
MV
398}
399
98648121
MV
400typedef void* scm_t_guile_ticket;
401
402static void
9de87eea 403scm_enter_guile (scm_t_guile_ticket ticket)
d823b11b 404{
9de87eea
MV
405 scm_i_thread *t = (scm_i_thread *)ticket;
406 if (t)
407 {
408 scm_i_pthread_mutex_lock (&t->heap_mutex);
409 resume (t);
410 }
d823b11b
MV
411}
412
9de87eea
MV
413static scm_i_thread *
414suspend (void)
d823b11b 415{
9de87eea 416 scm_i_thread *t = SCM_I_CURRENT_THREAD;
d823b11b
MV
417
418 /* record top of stack for the GC */
9de87eea 419 t->top = SCM_STACK_PTR (&t);
d823b11b
MV
420 /* save registers. */
421 SCM_FLUSH_REGISTER_WINDOWS;
9de87eea
MV
422 setjmp (t->regs);
423 return t;
d823b11b
MV
424}
425
98648121 426static scm_t_guile_ticket
9de87eea 427scm_leave_guile ()
d823b11b 428{
9de87eea
MV
429 scm_i_thread *t = suspend ();
430 scm_i_pthread_mutex_unlock (&t->heap_mutex);
431 return (scm_t_guile_ticket) t;
d823b11b
MV
432}
433
9de87eea
MV
434static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
435static scm_i_thread *all_threads = NULL;
436static int thread_count;
437
438static SCM scm_i_default_dynamic_state;
439
440/* Perform first stage of thread initialisation, in non-guile mode.
d823b11b 441 */
9de87eea
MV
442static void
443guilify_self_1 (SCM_STACKITEM *base)
d823b11b 444{
9de87eea
MV
445 scm_i_thread *t = malloc (sizeof (scm_i_thread));
446
447 t->pthread = scm_i_pthread_self ();
448 t->handle = SCM_BOOL_F;
449 t->result = SCM_BOOL_F;
2e77f720 450 t->cleanup_handler = SCM_BOOL_F;
6180e336 451 t->mutexes = SCM_EOL;
d2a51087 452 t->held_mutex = NULL;
9de87eea
MV
453 t->join_queue = SCM_EOL;
454 t->dynamic_state = SCM_BOOL_F;
455 t->dynwinds = SCM_EOL;
456 t->active_asyncs = SCM_EOL;
9de87eea
MV
457 t->block_asyncs = 1;
458 t->pending_asyncs = 1;
459 t->last_debug_frame = NULL;
460 t->base = base;
346e4402
NJ
461#ifdef __ia64__
462 /* Calculate and store off the base of this thread's register
463 backing store (RBS). Unfortunately our implementation(s) of
464 scm_ia64_register_backing_store_base are only reliable for the
465 main thread. For other threads, therefore, find out the current
466 top of the RBS, and use that as a maximum. */
467 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
468 {
469 ucontext_t ctx;
470 void *bsp;
471 getcontext (&ctx);
472 bsp = scm_ia64_ar_bsp (&ctx);
473 if (t->register_backing_store_base > bsp)
474 t->register_backing_store_base = bsp;
475 }
476#endif
0c97d7dd 477 t->continuation_root = SCM_EOL;
9de87eea
MV
478 t->continuation_base = base;
479 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
480 t->sleep_mutex = NULL;
481 t->sleep_object = SCM_BOOL_F;
482 t->sleep_fd = -1;
0c97d7dd 483 /* XXX - check for errors. */
9de87eea
MV
484 pipe (t->sleep_pipe);
485 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
86a597f8 486 scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
9de87eea 487 t->clear_freelists_p = 0;
1a8fdd7e 488 t->gc_running_p = 0;
2e77f720 489 t->canceled = 0;
9de87eea
MV
490 t->exited = 0;
491
492 t->freelist = SCM_EOL;
493 t->freelist2 = SCM_EOL;
494 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
495 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
496
497 scm_i_pthread_setspecific (scm_i_thread_key, t);
498
499 scm_i_pthread_mutex_lock (&t->heap_mutex);
500
501 scm_i_pthread_mutex_lock (&thread_admin_mutex);
502 t->next_thread = all_threads;
503 all_threads = t;
504 thread_count++;
505 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
d823b11b
MV
506}
507
9de87eea 508/* Perform second stage of thread initialisation, in guile mode.
d823b11b 509 */
9de87eea
MV
510static void
511guilify_self_2 (SCM parent)
d823b11b 512{
9de87eea
MV
513 scm_i_thread *t = SCM_I_CURRENT_THREAD;
514
515 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
516 scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
517 t->continuation_root = scm_cons (t->handle, SCM_EOL);
518 t->continuation_base = t->base;
519
520 if (scm_is_true (parent))
521 t->dynamic_state = scm_make_dynamic_state (parent);
522 else
523 t->dynamic_state = scm_i_make_initial_dynamic_state ();
524
525 t->join_queue = make_queue ();
526 t->block_asyncs = 0;
d823b11b
MV
527}
528
6180e336
NJ
529\f
530/*** Fat mutexes */
531
532/* We implement our own mutex type since we want them to be 'fair', we
533 want to do fancy things while waiting for them (like running
534 asyncs) and we might want to add things that are nice for
535 debugging.
536*/
537
538typedef struct {
539 scm_i_pthread_mutex_t lock;
540 SCM owner;
adc085f1 541 int level; /* how much the owner owns us. <= 1 for non-recursive mutexes */
6180e336 542
adc085f1 543 int recursive; /* allow recursive locking? */
6180e336
NJ
544 int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
545 int allow_external_unlock; /* is it an error to unlock a mutex that is not
546 owned by the current thread? */
547
548 SCM waiting; /* the threads waiting for this mutex. */
549} fat_mutex;
550
551#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
552#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
553
9de87eea 554/* Perform thread tear-down, in guile mode.
d823b11b 555 */
9de87eea
MV
556static void *
557do_thread_exit (void *v)
558{
2e77f720
LC
559 scm_i_thread *t = (scm_i_thread *) v;
560
561 if (!scm_is_false (t->cleanup_handler))
562 {
563 SCM ptr = t->cleanup_handler;
564
565 t->cleanup_handler = SCM_BOOL_F;
566 t->result = scm_internal_catch (SCM_BOOL_T,
567 (scm_t_catch_body) scm_call_0, ptr,
568 scm_handle_by_message_noexit, NULL);
569 }
9de87eea 570
86a597f8 571 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
9de87eea
MV
572
573 t->exited = 1;
0c97d7dd
MV
574 close (t->sleep_pipe[0]);
575 close (t->sleep_pipe[1]);
9de87eea
MV
576 while (scm_is_true (unblock_from_queue (t->join_queue)))
577 ;
9de87eea 578
74926120 579 while (!scm_is_null (t->mutexes))
6180e336
NJ
580 {
581 SCM mutex = SCM_CAR (t->mutexes);
582 fat_mutex *m = SCM_MUTEX_DATA (mutex);
583 scm_i_pthread_mutex_lock (&m->lock);
74926120 584
6180e336
NJ
585 unblock_from_queue (m->waiting);
586
74926120 587 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
588 t->mutexes = SCM_CDR (t->mutexes);
589 }
590
86a597f8 591 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 592
9de87eea
MV
593 return NULL;
594}
595
d823b11b 596static void
9de87eea 597on_thread_exit (void *v)
d823b11b 598{
29776e85 599 /* This handler is executed in non-guile mode. */
2e77f720 600 scm_i_thread *t = (scm_i_thread *) v, **tp;
0c97d7dd 601
d2a51087
NJ
602 /* If this thread was cancelled while doing a cond wait, it will
603 still have a mutex locked, so we unlock it here. */
604 if (t->held_mutex)
605 {
606 scm_i_pthread_mutex_unlock (t->held_mutex);
607 t->held_mutex = NULL;
608 }
609
9de87eea 610 scm_i_pthread_setspecific (scm_i_thread_key, v);
0c97d7dd 611
2e77f720
LC
612 /* Ensure the signal handling thread has been launched, because we might be
613 shutting it down. */
614 scm_i_ensure_signal_delivery_thread ();
615
0c97d7dd 616 /* Unblocking the joining threads needs to happen in guile mode
29776e85 617 since the queue is a SCM data structure. */
9de87eea 618 scm_with_guile (do_thread_exit, v);
0c97d7dd
MV
619
620 /* Removing ourself from the list of all threads needs to happen in
621 non-guile mode since all SCM values on our stack become
29776e85 622 unprotected once we are no longer in the list. */
0c97d7dd
MV
623 scm_i_pthread_mutex_lock (&thread_admin_mutex);
624 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
625 if (*tp == t)
626 {
627 *tp = t->next_thread;
628 break;
629 }
630 thread_count--;
2e77f720
LC
631
632 /* If there's only one other thread, it could be the signal delivery
633 thread, so we need to notify it to shut down by closing its read pipe.
634 If it's not the signal delivery thread, then closing the read pipe isn't
635 going to hurt. */
636 if (thread_count <= 1)
637 scm_i_close_signal_pipe ();
638
0c97d7dd
MV
639 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
640
9de87eea 641 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
d823b11b
MV
642}
643
9de87eea 644static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
d823b11b 645
9de87eea
MV
646static void
647init_thread_key (void)
648{
649 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
650}
d823b11b 651
9de87eea
MV
652/* Perform any initializations necessary to bring the current thread
653 into guile mode, initializing Guile itself, if necessary.
a54a94b3 654
9de87eea
MV
655 BASE is the stack base to use with GC.
656
 657 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
658 which case the default dynamic state is used.
659
660 Return zero when the thread was in guile mode already; otherwise
661 return 1.
662*/
663
664static int
665scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
d823b11b 666{
9de87eea
MV
667 scm_i_thread *t;
668
669 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
670
671 if ((t = SCM_I_CURRENT_THREAD) == NULL)
672 {
673 /* This thread has not been guilified yet.
674 */
675
676 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
677 if (scm_initialized_p == 0)
678 {
679 /* First thread ever to enter Guile. Run the full
680 initialization.
681 */
682 scm_i_init_guile (base);
683 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
684 }
685 else
686 {
687 /* Guile is already initialized, but this thread enters it for
688 the first time. Only initialize this thread.
689 */
690 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
691 guilify_self_1 (base);
692 guilify_self_2 (parent);
693 }
694 return 1;
695 }
696 else if (t->top)
697 {
698 /* This thread is already guilified but not in guile mode, just
699 resume it.
74926120 700
9de87eea
MV
701 XXX - base might be lower than when this thread was first
702 guilified.
703 */
704 scm_enter_guile ((scm_t_guile_ticket) t);
705 return 1;
706 }
707 else
708 {
709 /* Thread is already in guile mode. Nothing to do.
710 */
711 return 0;
712 }
d823b11b
MV
713}
714
9de87eea 715#if SCM_USE_PTHREAD_THREADS
9de87eea 716
23d72566
KR
717#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
718/* This method for GNU/Linux and perhaps some other systems.
719 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
720 available on them. */
9de87eea
MV
721#define HAVE_GET_THREAD_STACK_BASE
722
723static SCM_STACKITEM *
724get_thread_stack_base ()
d823b11b 725{
9de87eea
MV
726 pthread_attr_t attr;
727 void *start, *end;
728 size_t size;
729
9de87eea
MV
730 pthread_getattr_np (pthread_self (), &attr);
731 pthread_attr_getstack (&attr, &start, &size);
732 end = (char *)start + size;
733
2b829bbb
KR
734 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
735 for the main thread, but we can use scm_get_stack_base in that
736 case.
737 */
738
739#ifndef PTHREAD_ATTR_GETSTACK_WORKS
9de87eea 740 if ((void *)&attr < start || (void *)&attr >= end)
2b829bbb 741 return scm_get_stack_base ();
9de87eea 742 else
2b829bbb 743#endif
9de87eea
MV
744 {
745#if SCM_STACK_GROWS_UP
746 return start;
747#else
748 return end;
749#endif
750 }
a54a94b3
MV
751}
752
23d72566
KR
753#elif HAVE_PTHREAD_GET_STACKADDR_NP
754/* This method for MacOS X.
755 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
756 but as of 2006 there's nothing obvious at apple.com. */
757#define HAVE_GET_THREAD_STACK_BASE
758static SCM_STACKITEM *
759get_thread_stack_base ()
760{
761 return pthread_get_stackaddr_np (pthread_self ());
762}
763
764#elif defined (__MINGW32__)
765/* This method for mingw. In mingw the basic scm_get_stack_base can be used
766 in any thread. We don't like hard-coding the name of a system, but there
767 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
768 work. */
769#define HAVE_GET_THREAD_STACK_BASE
770static SCM_STACKITEM *
771get_thread_stack_base ()
772{
773 return scm_get_stack_base ();
774}
775
776#endif /* pthread methods of get_thread_stack_base */
9de87eea
MV
777
778#else /* !SCM_USE_PTHREAD_THREADS */
779
780#define HAVE_GET_THREAD_STACK_BASE
781
782static SCM_STACKITEM *
783get_thread_stack_base ()
a54a94b3 784{
2b829bbb 785 return scm_get_stack_base ();
d823b11b
MV
786}
787
9de87eea 788#endif /* !SCM_USE_PTHREAD_THREADS */
9de87eea
MV
789
790#ifdef HAVE_GET_THREAD_STACK_BASE
791
792void
793scm_init_guile ()
d823b11b 794{
9de87eea
MV
795 scm_i_init_thread_for_guile (get_thread_stack_base (),
796 scm_i_default_dynamic_state);
d823b11b
MV
797}
798
9de87eea
MV
799#endif
800
801void *
802scm_with_guile (void *(*func)(void *), void *data)
803{
804 return scm_i_with_guile_and_parent (func, data,
805 scm_i_default_dynamic_state);
806}
807
70eca635 808SCM_UNUSED static void
2e77f720
LC
809scm_leave_guile_cleanup (void *x)
810{
811 scm_leave_guile ();
812}
813
9de87eea 814void *
2e77f720 815scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
9de87eea
MV
816{
817 void *res;
818 int really_entered;
819 SCM_STACKITEM base_item;
2e77f720 820
9de87eea 821 really_entered = scm_i_init_thread_for_guile (&base_item, parent);
9de87eea 822 if (really_entered)
2e77f720
LC
823 {
824 scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
825 res = scm_c_with_continuation_barrier (func, data);
826 scm_i_pthread_cleanup_pop (0);
827 scm_leave_guile ();
828 }
74926120 829 else
2e77f720
LC
830 res = scm_c_with_continuation_barrier (func, data);
831
9de87eea
MV
832 return res;
833}
834
835void *
836scm_without_guile (void *(*func)(void *), void *data)
d823b11b 837{
9de87eea
MV
838 void *res;
839 scm_t_guile_ticket t;
840 t = scm_leave_guile ();
841 res = func (data);
842 scm_enter_guile (t);
843 return res;
844}
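/* Illustrative sketch (not part of the build): scm_without_guile is the
   way to perform a blocking system call from guile mode so that GC in
   other threads is not held up.  DO_BLOCKING_READ and READ_ARGS are
   hypothetical:

     static void *
     do_blocking_read (void *data)
     {
       struct read_args *a = data;
       a->n = read (a->fd, a->buf, a->len);
       return NULL;
     }
     ...
     scm_without_guile (do_blocking_read, &args);
*/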
845
846/*** Thread creation */
847
848typedef struct {
849 SCM parent;
850 SCM thunk;
851 SCM handler;
76da80e7 852 SCM thread;
9de87eea
MV
853 scm_i_pthread_mutex_t mutex;
854 scm_i_pthread_cond_t cond;
855} launch_data;
d823b11b 856
9de87eea
MV
857static void *
858really_launch (void *d)
859{
860 launch_data *data = (launch_data *)d;
861 SCM thunk = data->thunk, handler = data->handler;
862 scm_i_thread *t;
d823b11b 863
9de87eea 864 t = SCM_I_CURRENT_THREAD;
a54a94b3 865
9de87eea
MV
866 scm_i_scm_pthread_mutex_lock (&data->mutex);
867 data->thread = scm_current_thread ();
868 scm_i_pthread_cond_signal (&data->cond);
869 scm_i_pthread_mutex_unlock (&data->mutex);
870
871 if (SCM_UNBNDP (handler))
872 t->result = scm_call_0 (thunk);
873 else
874 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
875
876 return 0;
d823b11b
MV
877}
878
9de87eea
MV
879static void *
880launch_thread (void *d)
881{
882 launch_data *data = (launch_data *)d;
883 scm_i_pthread_detach (scm_i_pthread_self ());
884 scm_i_with_guile_and_parent (really_launch, d, data->parent);
885 return NULL;
886}
887
888SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
d823b11b 889 (SCM thunk, SCM handler),
9de87eea
MV
890 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
891 "returning a new thread object representing the thread. The procedure\n"
892 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
893 "\n"
894 "When @var{handler} is specified, then @var{thunk} is called from\n"
895 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
896 "handler. This catch is established inside the continuation barrier.\n"
897 "\n"
898 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
899 "the @emph{exit value} of the thread and the thread is terminated.")
d823b11b
MV
900#define FUNC_NAME s_scm_call_with_new_thread
901{
9de87eea
MV
902 launch_data data;
903 scm_i_pthread_t id;
904 int err;
d823b11b 905
9de87eea
MV
906 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
907 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
908 handler, SCM_ARG2, FUNC_NAME);
909
910 data.parent = scm_current_dynamic_state ();
911 data.thunk = thunk;
912 data.handler = handler;
913 data.thread = SCM_BOOL_F;
914 scm_i_pthread_mutex_init (&data.mutex, NULL);
915 scm_i_pthread_cond_init (&data.cond, NULL);
916
917 scm_i_scm_pthread_mutex_lock (&data.mutex);
918 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
919 if (err)
920 {
921 scm_i_pthread_mutex_unlock (&data.mutex);
922 errno = err;
923 scm_syserror (NULL);
924 }
925 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
926 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 927
9de87eea 928 return data.thread;
d823b11b
MV
929}
930#undef FUNC_NAME
931
9de87eea
MV
932typedef struct {
933 SCM parent;
934 scm_t_catch_body body;
935 void *body_data;
936 scm_t_catch_handler handler;
937 void *handler_data;
938 SCM thread;
939 scm_i_pthread_mutex_t mutex;
940 scm_i_pthread_cond_t cond;
941} spawn_data;
942
943static void *
944really_spawn (void *d)
945{
946 spawn_data *data = (spawn_data *)d;
947 scm_t_catch_body body = data->body;
948 void *body_data = data->body_data;
949 scm_t_catch_handler handler = data->handler;
950 void *handler_data = data->handler_data;
951 scm_i_thread *t = SCM_I_CURRENT_THREAD;
952
953 scm_i_scm_pthread_mutex_lock (&data->mutex);
954 data->thread = scm_current_thread ();
955 scm_i_pthread_cond_signal (&data->cond);
956 scm_i_pthread_mutex_unlock (&data->mutex);
957
958 if (handler == NULL)
959 t->result = body (body_data);
960 else
961 t->result = scm_internal_catch (SCM_BOOL_T,
962 body, body_data,
963 handler, handler_data);
964
965 return 0;
966}
967
968static void *
969spawn_thread (void *d)
970{
971 spawn_data *data = (spawn_data *)d;
972 scm_i_pthread_detach (scm_i_pthread_self ());
973 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
974 return NULL;
975}
976
977SCM
978scm_spawn_thread (scm_t_catch_body body, void *body_data,
979 scm_t_catch_handler handler, void *handler_data)
980{
981 spawn_data data;
982 scm_i_pthread_t id;
983 int err;
984
985 data.parent = scm_current_dynamic_state ();
986 data.body = body;
987 data.body_data = body_data;
988 data.handler = handler;
989 data.handler_data = handler_data;
990 data.thread = SCM_BOOL_F;
991 scm_i_pthread_mutex_init (&data.mutex, NULL);
992 scm_i_pthread_cond_init (&data.cond, NULL);
993
994 scm_i_scm_pthread_mutex_lock (&data.mutex);
995 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
996 if (err)
997 {
998 scm_i_pthread_mutex_unlock (&data.mutex);
999 errno = err;
1000 scm_syserror (NULL);
1001 }
1002 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1003 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1004
9de87eea
MV
1005 return data.thread;
1006}
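/* Illustrative sketch (not part of the build): C code that wants a new
   Guile thread without building a Scheme thunk can use scm_spawn_thread.
   WORKER and ON_ERROR are hypothetical:

     static SCM
     worker (void *data)
     {
       return scm_from_int (42);
     }

     static SCM
     on_error (void *data, SCM tag, SCM throw_args)
     {
       return SCM_BOOL_F;
     }
     ...
     SCM t = scm_spawn_thread (worker, NULL, on_error, NULL);
     SCM result = scm_join_thread (t);    // 42, or #f if worker threw
*/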
1007
29717c89
MD
1008SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1009 (),
1010"Move the calling thread to the end of the scheduling queue.")
1011#define FUNC_NAME s_scm_yield
1012{
9de87eea 1013 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1014}
1015#undef FUNC_NAME
1016
2e77f720
LC
1017SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1018 (SCM thread),
1019"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1020"cannot be the current thread, and if @var{thread} has already terminated or "
1021"been signaled to terminate, this function is a no-op.")
1022#define FUNC_NAME s_scm_cancel_thread
1023{
1024 scm_i_thread *t = NULL;
1025
1026 SCM_VALIDATE_THREAD (1, thread);
1027 t = SCM_I_THREAD_DATA (thread);
86a597f8 1028 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1029 if (!t->canceled)
1030 {
1031 t->canceled = 1;
86a597f8 1032 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1033 scm_i_pthread_cancel (t->pthread);
1034 }
1035 else
86a597f8 1036 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1037
1038 return SCM_UNSPECIFIED;
1039}
1040#undef FUNC_NAME
1041
1042SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1043 (SCM thread, SCM proc),
1044"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1045"This handler will be called when the thread exits.")
1046#define FUNC_NAME s_scm_set_thread_cleanup_x
1047{
1048 scm_i_thread *t;
1049
1050 SCM_VALIDATE_THREAD (1, thread);
1051 if (!scm_is_false (proc))
1052 SCM_VALIDATE_THUNK (2, proc);
1053
2e77f720 1054 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1055 scm_i_pthread_mutex_lock (&t->admin_mutex);
1056
2e77f720
LC
1057 if (!(t->exited || t->canceled))
1058 t->cleanup_handler = proc;
1059
86a597f8 1060 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1061
1062 return SCM_UNSPECIFIED;
1063}
1064#undef FUNC_NAME
1065
1066SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1067 (SCM thread),
1068"Return the cleanup handler installed for the thread @var{thread}.")
1069#define FUNC_NAME s_scm_thread_cleanup
1070{
1071 scm_i_thread *t;
1072 SCM ret;
1073
1074 SCM_VALIDATE_THREAD (1, thread);
1075
2e77f720 1076 t = SCM_I_THREAD_DATA (thread);
86a597f8 1077 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1078 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1079 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1080
1081 return ret;
1082}
1083#undef FUNC_NAME
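/* Illustrative sketch (not part of the build): cancellation and cleanup
   handlers are meant to be used together; after a cancelled thread has
   run its handler, join-thread returns the handler's result.
   CLEANUP_THUNK is a hypothetical Scheme thunk:

     scm_set_thread_cleanup_x (thread, cleanup_thunk);
     scm_cancel_thread (thread);
     SCM res = scm_join_thread (thread);   // result of cleanup_thunk
*/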
1084
6180e336
NJ
1085SCM scm_join_thread (SCM thread)
1086{
1087 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1088}
1089
1090SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1091 (SCM thread, SCM timeout, SCM timeoutval),
d823b11b
MV
1092"Suspend execution of the calling thread until the target @var{thread} "
1093"terminates, unless the target @var{thread} has already terminated. ")
6180e336 1094#define FUNC_NAME s_scm_join_thread_timed
5f05c406 1095{
9de87eea 1096 scm_i_thread *t;
6180e336
NJ
1097 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1098 SCM res = SCM_BOOL_F;
1099
1100 if (! (SCM_UNBNDP (timeoutval)))
1101 res = timeoutval;
d823b11b
MV
1102
1103 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1104 if (scm_is_eq (scm_current_thread (), thread))
2e77f720 1105 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
d823b11b 1106
9de87eea 1107 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1108 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1109
6180e336
NJ
1110 if (! SCM_UNBNDP (timeout))
1111 {
1112 to_timespec (timeout, &ctimeout);
1113 timeout_ptr = &ctimeout;
1114 }
1115
1116 if (t->exited)
1117 res = t->result;
1118 else
d823b11b 1119 {
9de87eea
MV
1120 while (1)
1121 {
74926120 1122 int err = block_self (t->join_queue, thread, &t->admin_mutex,
6180e336
NJ
1123 timeout_ptr);
1124 if (err == 0)
1125 {
1126 if (t->exited)
1127 {
1128 res = t->result;
1129 break;
1130 }
1131 }
1132 else if (err == ETIMEDOUT)
9de87eea 1133 break;
6180e336 1134
86a597f8 1135 scm_i_pthread_mutex_unlock (&t->admin_mutex);
9de87eea 1136 SCM_TICK;
86a597f8 1137 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
9de87eea 1138 }
d823b11b 1139 }
9de87eea 1140
86a597f8 1141 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 1142
d823b11b 1143 return res;
5f05c406
MV
1144}
1145#undef FUNC_NAME
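/* Illustrative sketch (not part of the build): TIMEOUT is an absolute
   point in time, so a five-second bounded join from C looks like:

     SCM timeout = scm_sum (scm_current_time (), scm_from_int (5));
     SCM res = scm_join_thread_timed (thread, timeout,
                                      scm_from_locale_symbol ("timed-out"));
     // res is the thread's result, or the symbol timed-out on expiry
*/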
1146
6180e336
NJ
1147SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1148 (SCM obj),
1149 "Return @code{#t} if @var{obj} is a thread.")
1150#define FUNC_NAME s_scm_thread_p
1151{
1152 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1153}
1154#undef FUNC_NAME
5f05c406 1155
d823b11b 1156static SCM
9de87eea 1157fat_mutex_mark (SCM mx)
d823b11b 1158{
9de87eea 1159 fat_mutex *m = SCM_MUTEX_DATA (mx);
d823b11b
MV
1160 scm_gc_mark (m->owner);
1161 return m->waiting;
1162}
4079f87e 1163
9de87eea
MV
1164static size_t
1165fat_mutex_free (SCM mx)
76da80e7 1166{
9de87eea
MV
1167 fat_mutex *m = SCM_MUTEX_DATA (mx);
1168 scm_i_pthread_mutex_destroy (&m->lock);
1169 scm_gc_free (m, sizeof (fat_mutex), "mutex");
76da80e7
MV
1170 return 0;
1171}
1172
1173static int
9de87eea 1174fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1175{
9de87eea
MV
1176 fat_mutex *m = SCM_MUTEX_DATA (mx);
1177 scm_puts ("#<mutex ", port);
1178 scm_uintprint ((scm_t_bits)m, 16, port);
1179 scm_puts (">", port);
1180 return 1;
76da80e7
MV
1181}
1182
76da80e7 1183static SCM
6180e336 1184make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1185{
9de87eea
MV
1186 fat_mutex *m;
1187 SCM mx;
1188
1189 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1190 scm_i_pthread_mutex_init (&m->lock, NULL);
1191 m->owner = SCM_BOOL_F;
adc085f1 1192 m->level = 0;
6180e336 1193
adc085f1 1194 m->recursive = recursive;
6180e336
NJ
1195 m->unchecked_unlock = unchecked_unlock;
1196 m->allow_external_unlock = external_unlock;
1197
9de87eea
MV
1198 m->waiting = SCM_EOL;
1199 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1200 m->waiting = make_queue ();
1201 return mx;
76da80e7
MV
1202}
1203
6180e336
NJ
1204SCM scm_make_mutex (void)
1205{
1206 return scm_make_mutex_with_flags (SCM_EOL);
1207}
1208
2a1d0688
NJ
1209SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1210SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1211SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1212
1213SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1214 (SCM flags),
9de87eea 1215 "Create a new mutex. ")
6180e336 1216#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1217{
6180e336
NJ
1218 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1219
1220 SCM ptr = flags;
1221 while (! scm_is_null (ptr))
1222 {
1223 SCM flag = SCM_CAR (ptr);
1224 if (scm_is_eq (flag, unchecked_unlock_sym))
1225 unchecked_unlock = 1;
1226 else if (scm_is_eq (flag, allow_external_unlock_sym))
1227 external_unlock = 1;
1228 else if (scm_is_eq (flag, recursive_sym))
1229 recursive = 1;
74926120 1230 else
2a1d0688 1231 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1232 ptr = SCM_CDR (ptr);
1233 }
1234 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
76da80e7
MV
1235}
1236#undef FUNC_NAME
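/* Illustrative sketch (not part of the build): the flag symbols map onto
   the fat_mutex fields above; for instance, a mutex that SRFI-18 code may
   unlock from any thread could be created from C as:

     SCM m = scm_make_mutex_with_flags
       (scm_list_2 (scm_from_locale_symbol ("allow-external-unlock"),
                    scm_from_locale_symbol ("unchecked-unlock")));
*/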
1237
9de87eea 1238SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
9bc4701c 1239 (void),
9de87eea
MV
1240 "Create a new recursive mutex. ")
1241#define FUNC_NAME s_scm_make_recursive_mutex
9bc4701c 1242{
6180e336 1243 return make_fat_mutex (1, 0, 0);
9bc4701c
MD
1244}
1245#undef FUNC_NAME
1246
6180e336
NJ
1247SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1248
1249static SCM
adc085f1 1250fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
9de87eea
MV
1251{
1252 fat_mutex *m = SCM_MUTEX_DATA (mutex);
6180e336 1253
adc085f1 1254 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
6180e336
NJ
1255 SCM err = SCM_BOOL_F;
1256
1257 struct timeval current_time;
9de87eea
MV
1258
1259 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1260
1261 while (1)
9de87eea 1262 {
adc085f1 1263 if (m->level == 0)
6180e336 1264 {
adc085f1 1265 m->owner = new_owner;
6180e336 1266 m->level++;
74926120 1267
adc085f1 1268 if (SCM_I_IS_THREAD (new_owner))
6180e336 1269 {
adc085f1 1270 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
6180e336
NJ
1271 scm_i_pthread_mutex_lock (&t->admin_mutex);
1272 t->mutexes = scm_cons (mutex, t->mutexes);
1273 scm_i_pthread_mutex_unlock (&t->admin_mutex);
6180e336 1274 }
adc085f1
JG
1275 *ret = 1;
1276 break;
1277 }
1278 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1279 {
1280 m->owner = new_owner;
1281 err = scm_cons (scm_abandoned_mutex_error_key,
1282 scm_from_locale_string ("lock obtained on abandoned "
1283 "mutex"));
1284 *ret = 1;
1285 break;
1286 }
1287 else if (scm_is_eq (m->owner, new_owner))
1288 {
1289 if (m->recursive)
1290 {
1291 m->level++;
74926120 1292 *ret = 1;
adc085f1
JG
1293 }
1294 else
6180e336 1295 {
adc085f1
JG
1296 err = scm_cons (scm_misc_error_key,
1297 scm_from_locale_string ("mutex already locked "
1298 "by thread"));
1299 *ret = 0;
1300 }
74926120 1301 break;
adc085f1
JG
1302 }
1303 else
1304 {
74926120 1305 if (timeout != NULL)
adc085f1
JG
1306 {
1307 gettimeofday (&current_time, NULL);
1308 if (current_time.tv_sec > timeout->tv_sec ||
1309 (current_time.tv_sec == timeout->tv_sec &&
1310 current_time.tv_usec * 1000 > timeout->tv_nsec))
6180e336 1311 {
adc085f1
JG
1312 *ret = 0;
1313 break;
6180e336 1314 }
6180e336 1315 }
37a52039 1316 block_self (m->waiting, mutex, &m->lock, timeout);
adc085f1
JG
1317 scm_i_pthread_mutex_unlock (&m->lock);
1318 SCM_TICK;
1319 scm_i_scm_pthread_mutex_lock (&m->lock);
9de87eea
MV
1320 }
1321 }
1322 scm_i_pthread_mutex_unlock (&m->lock);
6180e336 1323 return err;
9de87eea
MV
1324}
1325
6180e336
NJ
1326SCM scm_lock_mutex (SCM mx)
1327{
adc085f1 1328 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
6180e336
NJ
1329}
1330
adc085f1
JG
1331SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1332 (SCM m, SCM timeout, SCM owner),
9bc4701c
MD
1333"Lock @var{mutex}. If the mutex is already locked, the calling thread "
1334"blocks until the mutex becomes available. The function returns when "
1335"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1336"a thread already owns will succeed right away and will not block the "
1337"thread. That is, Guile's mutexes are @emph{recursive}. ")
6180e336 1338#define FUNC_NAME s_scm_lock_mutex_timed
9bc4701c 1339{
6180e336
NJ
1340 SCM exception;
1341 int ret = 0;
1342 scm_t_timespec cwaittime, *waittime = NULL;
76da80e7 1343
6180e336
NJ
1344 SCM_VALIDATE_MUTEX (1, m);
1345
1346 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1347 {
1348 to_timespec (timeout, &cwaittime);
1349 waittime = &cwaittime;
1350 }
1351
adc085f1 1352 exception = fat_mutex_lock (m, waittime, owner, &ret);
6180e336
NJ
1353 if (!scm_is_false (exception))
1354 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1355 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c 1356}
76da80e7 1357#undef FUNC_NAME
9bc4701c 1358
a4d106c7 1359void
661ae7ab 1360scm_dynwind_lock_mutex (SCM mutex)
a4d106c7 1361{
661ae7ab
MV
1362 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
1363 SCM_F_WIND_EXPLICITLY);
1364 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
1365 SCM_F_WIND_EXPLICITLY);
a4d106c7
MV
1366}
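/* Illustrative sketch (not part of the build): scm_dynwind_lock_mutex is
   used inside a dynwind context, so the mutex is released on non-local
   exit and re-acquired on rewind:

     scm_dynwind_begin (0);
     scm_dynwind_lock_mutex (mx);
     ...                           // code that may throw
     scm_dynwind_end ();           // unlocks mx
*/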
1367
9bc4701c 1368SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
ba1b7223 1369 (SCM mutex),
9bc4701c
MD
1370"Try to lock @var{mutex}. If the mutex is already locked by someone "
1371"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1372#define FUNC_NAME s_scm_try_mutex
1373{
6180e336
NJ
1374 SCM exception;
1375 int ret = 0;
1376 scm_t_timespec cwaittime, *waittime = NULL;
9de87eea 1377
ba1b7223 1378 SCM_VALIDATE_MUTEX (1, mutex);
6180e336
NJ
1379
1380 to_timespec (scm_from_int(0), &cwaittime);
1381 waittime = &cwaittime;
74926120 1382
adc085f1 1383 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
6180e336
NJ
1384 if (!scm_is_false (exception))
1385 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1386 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9de87eea
MV
1387}
1388#undef FUNC_NAME
76da80e7 1389
6180e336
NJ
1390/*** Fat condition variables */
1391
1392typedef struct {
1393 scm_i_pthread_mutex_t lock;
1394 SCM waiting; /* the threads waiting for this condition. */
1395} fat_cond;
1396
1397#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1398#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1399
1400static int
1401fat_mutex_unlock (SCM mutex, SCM cond,
1402 const scm_t_timespec *waittime, int relock)
9de87eea 1403{
6180e336
NJ
1404 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1405 fat_cond *c = NULL;
1406 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1407 int err = 0, ret = 0;
9de87eea
MV
1408
1409 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1410
1411 SCM owner = m->owner;
1412
1413 if (!scm_is_eq (owner, scm_current_thread ()))
9bc4701c 1414 {
adc085f1 1415 if (m->level == 0)
6180e336
NJ
1416 {
1417 if (!m->unchecked_unlock)
2a1d0688
NJ
1418 {
1419 scm_i_pthread_mutex_unlock (&m->lock);
1420 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1421 }
adc085f1 1422 owner = scm_current_thread ();
6180e336
NJ
1423 }
1424 else if (!m->allow_external_unlock)
2a1d0688
NJ
1425 {
1426 scm_i_pthread_mutex_unlock (&m->lock);
1427 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1428 }
6180e336
NJ
1429 }
1430
1431 if (! (SCM_UNBNDP (cond)))
1432 {
6180e336
NJ
1433 c = SCM_CONDVAR_DATA (cond);
1434 while (1)
1435 {
1436 int brk = 0;
1437
6180e336
NJ
1438 if (m->level > 0)
1439 m->level--;
adc085f1 1440 if (m->level == 0)
6180e336 1441 m->owner = unblock_from_queue (m->waiting);
adc085f1 1442
6180e336 1443 t->block_asyncs++;
74926120 1444
d2a51087
NJ
1445 err = block_self (c->waiting, cond, &m->lock, waittime);
1446 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
1447
1448 if (err == 0)
1449 {
1450 ret = 1;
1451 brk = 1;
1452 }
1453 else if (err == ETIMEDOUT)
1454 {
1455 ret = 0;
1456 brk = 1;
1457 }
1458 else if (err != EINTR)
74926120 1459 {
6180e336 1460 errno = err;
6180e336 1461 scm_syserror (NULL);
74926120 1462 }
6180e336
NJ
1463
1464 if (brk)
1465 {
1466 if (relock)
adc085f1 1467 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
6180e336
NJ
1468 break;
1469 }
74926120 1470
6180e336
NJ
1471 t->block_asyncs--;
1472 scm_async_click ();
74926120 1473
6180e336
NJ
1474 scm_remember_upto_here_2 (cond, mutex);
1475
1476 scm_i_scm_pthread_mutex_lock (&m->lock);
1477 }
9bc4701c 1478 }
9de87eea 1479 else
6180e336
NJ
1480 {
1481 if (m->level > 0)
1482 m->level--;
74926120 1483 if (m->level == 0)
6180e336 1484 m->owner = unblock_from_queue (m->waiting);
74926120 1485
6180e336
NJ
1486 scm_i_pthread_mutex_unlock (&m->lock);
1487 ret = 1;
1488 }
74926120 1489
6180e336
NJ
1490 return ret;
1491}
9de87eea 1492
6180e336
NJ
1493SCM scm_unlock_mutex (SCM mx)
1494{
1495 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
9bc4701c 1496}
9bc4701c 1497
6180e336
NJ
1498SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1499 (SCM mx, SCM cond, SCM timeout),
9bc4701c
MD
1500"Unlocks @var{mutex} if the calling thread owns the lock on "
1501"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1502"thread results in undefined behaviour. Once a mutex has been unlocked, "
1503"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1504"lock. Every call to @code{lock-mutex} by this thread must be matched "
1505"with a call to @code{unlock-mutex}. Only the last call to "
1506"@code{unlock-mutex} will actually unlock the mutex. ")
6180e336 1507#define FUNC_NAME s_scm_unlock_mutex_timed
9bc4701c 1508{
6180e336
NJ
1509 scm_t_timespec cwaittime, *waittime = NULL;
1510
9bc4701c 1511 SCM_VALIDATE_MUTEX (1, mx);
6180e336
NJ
1512 if (! (SCM_UNBNDP (cond)))
1513 {
1514 SCM_VALIDATE_CONDVAR (2, cond);
1515
1516 if (! (SCM_UNBNDP (timeout)))
1517 {
1518 to_timespec (timeout, &cwaittime);
1519 waittime = &cwaittime;
1520 }
1521 }
1522
1523 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c
MD
1524}
1525#undef FUNC_NAME
1526
6180e336
NJ
1527SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1528 (SCM obj),
1529 "Return @code{#t} if @var{obj} is a mutex.")
1530#define FUNC_NAME s_scm_mutex_p
1531{
1532 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1533}
74926120 1534#undef FUNC_NAME
6180e336 1535
9de87eea
MV
1536SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1537 (SCM mx),
1538 "Return the thread owning @var{mx}, or @code{#f}.")
1539#define FUNC_NAME s_scm_mutex_owner
1540{
adc085f1
JG
1541 SCM owner;
1542 fat_mutex *m = NULL;
1543
9de87eea 1544 SCM_VALIDATE_MUTEX (1, mx);
adc085f1
JG
1545 m = SCM_MUTEX_DATA (mx);
1546 scm_i_pthread_mutex_lock (&m->lock);
1547 owner = m->owner;
1548 scm_i_pthread_mutex_unlock (&m->lock);
1549
1550 return owner;
9de87eea
MV
1551}
1552#undef FUNC_NAME
1553
1554SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1555 (SCM mx),
adc085f1 1556 "Return the lock level of mutex @var{mx}.")
9de87eea
MV
1557#define FUNC_NAME s_scm_mutex_level
1558{
1559 SCM_VALIDATE_MUTEX (1, mx);
1560 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1561}
1562#undef FUNC_NAME
1563
adc085f1
JG
1564SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1565 (SCM mx),
1566 "Returns @code{#t} if the mutex @var{mx} is locked.")
1567#define FUNC_NAME s_scm_mutex_locked_p
1568{
1569 SCM_VALIDATE_MUTEX (1, mx);
1570 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1571}
1572#undef FUNC_NAME
9de87eea 1573
9de87eea
MV
1574static SCM
1575fat_cond_mark (SCM cv)
1576{
1577 fat_cond *c = SCM_CONDVAR_DATA (cv);
1578 return c->waiting;
1579}
1580
1581static size_t
1582fat_cond_free (SCM mx)
1583{
1584 fat_cond *c = SCM_CONDVAR_DATA (mx);
9de87eea
MV
1585 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1586 return 0;
1587}
1588
1589static int
1590fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1591{
1592 fat_cond *c = SCM_CONDVAR_DATA (cv);
1593 scm_puts ("#<condition-variable ", port);
1594 scm_uintprint ((scm_t_bits)c, 16, port);
1595 scm_puts (">", port);
1596 return 1;
1597}
9bc4701c 1598
d823b11b
MV
1599SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1600 (void),
1601 "Make a new condition variable.")
1602#define FUNC_NAME s_scm_make_condition_variable
5f05c406 1603{
9de87eea
MV
1604 fat_cond *c;
1605 SCM cv;
1606
1607 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
9de87eea
MV
1608 c->waiting = SCM_EOL;
1609 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1610 c->waiting = make_queue ();
d823b11b 1611 return cv;
5f05c406 1612}
d823b11b 1613#undef FUNC_NAME
5f05c406 1614
d823b11b
MV
1615SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1616 (SCM cv, SCM mx, SCM t),
1617"Wait until @var{cond-var} has been signalled. While waiting, "
1618"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1619"is locked again when this function returns. When @var{time} is given, "
1620"it specifies a point in time where the waiting should be aborted. It "
1621"can be either a integer as returned by @code{current-time} or a pair "
1622"as returned by @code{gettimeofday}. When the waiting is aborted the "
1623"mutex is locked and @code{#f} is returned. When the condition "
1624"variable is in fact signalled, the mutex is also locked and @code{#t} "
1625"is returned. ")
1626#define FUNC_NAME s_scm_timed_wait_condition_variable
5f05c406 1627{
9de87eea 1628 scm_t_timespec waittime, *waitptr = NULL;
d823b11b
MV
1629
1630 SCM_VALIDATE_CONDVAR (1, cv);
1631 SCM_VALIDATE_MUTEX (2, mx);
74926120 1632
d823b11b
MV
1633 if (!SCM_UNBNDP (t))
1634 {
6180e336 1635 to_timespec (t, &waittime);
9de87eea 1636 waitptr = &waittime;
d823b11b
MV
1637 }
1638
2a1d0688 1639 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
5f05c406 1640}
d823b11b 1641#undef FUNC_NAME
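/* Illustrative sketch (not part of the build): the classic wait loop,
   written against the C entry points defined in this file; READY stands
   for whatever predicate guards the shared state:

     scm_lock_mutex (mx);
     while (scm_is_false (ready ()))
       scm_timed_wait_condition_variable (cv, mx, SCM_UNDEFINED);
     ...                           // use the state protected by mx
     scm_unlock_mutex (mx);

   The signalling thread changes the state while holding MX and then
   calls scm_signal_condition_variable (cv).
*/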
5f05c406 1642
9de87eea
MV
1643static void
1644fat_cond_signal (fat_cond *c)
1645{
9de87eea 1646 unblock_from_queue (c->waiting);
9de87eea
MV
1647}
1648
d823b11b
MV
1649SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1650 (SCM cv),
1651 "Wake up one thread that is waiting for @var{cv}")
1652#define FUNC_NAME s_scm_signal_condition_variable
5f05c406 1653{
d823b11b 1654 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1655 fat_cond_signal (SCM_CONDVAR_DATA (cv));
d823b11b 1656 return SCM_BOOL_T;
5f05c406 1657}
d823b11b 1658#undef FUNC_NAME
5f05c406 1659
9de87eea
MV
1660static void
1661fat_cond_broadcast (fat_cond *c)
1662{
9de87eea
MV
1663 while (scm_is_true (unblock_from_queue (c->waiting)))
1664 ;
9de87eea
MV
1665}
1666
d823b11b
MV
1667SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1668 (SCM cv),
1669 "Wake up all threads that are waiting for @var{cv}. ")
1670#define FUNC_NAME s_scm_broadcast_condition_variable
5f05c406 1671{
d823b11b 1672 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1673 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
d823b11b 1674 return SCM_BOOL_T;
5f05c406 1675}
d823b11b 1676#undef FUNC_NAME
5f05c406 1677
6180e336
NJ
1678SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1679 (SCM obj),
1680 "Return @code{#t} if @var{obj} is a condition variable.")
1681#define FUNC_NAME s_scm_condition_variable_p
1682{
1683 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1684}
1685#undef FUNC_NAME
1686
d823b11b
MV
1687/*** Marking stacks */
1688
1689/* XXX - what to do with this? Do we need to handle this for blocked
1690 threads as well?
1691*/
1692#ifdef __ia64__
1693# define SCM_MARK_BACKING_STORE() do { \
1694 ucontext_t ctx; \
1695 SCM_STACKITEM * top, * bot; \
1696 getcontext (&ctx); \
1697 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1698 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1699 / sizeof (SCM_STACKITEM))); \
346e4402 1700 bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
9a5fa6e9 1701 top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
d823b11b
MV
1702 scm_mark_locations (bot, top - bot); } while (0)
1703#else
1704# define SCM_MARK_BACKING_STORE()
1705#endif
1706
1707void
1708scm_threads_mark_stacks (void)
5f05c406 1709{
9de87eea
MV
1710 scm_i_thread *t;
1711 for (t = all_threads; t; t = t->next_thread)
d823b11b 1712 {
9de87eea
MV
1713 /* Check that thread has indeed been suspended.
1714 */
1715 assert (t->top);
6087fad9 1716
9de87eea 1717 scm_gc_mark (t->handle);
6087fad9 1718
d028af45 1719#if SCM_STACK_GROWS_UP
9de87eea 1720 scm_mark_locations (t->base, t->top - t->base);
d823b11b 1721#else
9de87eea 1722 scm_mark_locations (t->top, t->base - t->top);
d823b11b 1723#endif
88cefbc7 1724 scm_mark_locations ((void *) &t->regs,
6087fad9
MV
1725 ((size_t) sizeof(t->regs)
1726 / sizeof (SCM_STACKITEM)));
d823b11b 1727 }
59152722
MV
1728
1729 SCM_MARK_BACKING_STORE ();
5f05c406
MV
1730}
1731
d823b11b
MV
1732/*** Select */
1733
911782b7 1734int
9de87eea
MV
1735scm_std_select (int nfds,
1736 SELECT_TYPE *readfds,
1737 SELECT_TYPE *writefds,
1738 SELECT_TYPE *exceptfds,
1739 struct timeval *timeout)
1740{
1741 fd_set my_readfds;
1742 int res, eno, wakeup_fd;
1743 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1744 scm_t_guile_ticket ticket;
1745
1746 if (readfds == NULL)
1747 {
1748 FD_ZERO (&my_readfds);
1749 readfds = &my_readfds;
1750 }
1751
1752 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1753 SCM_TICK;
1754
1755 wakeup_fd = t->sleep_pipe[0];
1756 ticket = scm_leave_guile ();
1757 FD_SET (wakeup_fd, readfds);
1758 if (wakeup_fd >= nfds)
1759 nfds = wakeup_fd+1;
1760 res = select (nfds, readfds, writefds, exceptfds, timeout);
1761 t->sleep_fd = -1;
d823b11b 1762 eno = errno;
9de87eea
MV
1763 scm_enter_guile (ticket);
1764
1765 scm_i_reset_sleep (t);
1766
1767 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1768 {
1769 char dummy;
1770 read (wakeup_fd, &dummy, 1);
1771 FD_CLR (wakeup_fd, readfds);
1772 res -= 1;
1773 if (res == 0)
1774 {
1775 eno = EINTR;
1776 res = -1;
1777 }
1778 }
d823b11b
MV
1779 errno = eno;
1780 return res;
5f05c406
MV
1781}
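/* Illustrative sketch (not part of the build): scm_std_select is a
   drop-in replacement for select(2) for code running in guile mode; a
   pending system async wakes it up early and makes it fail with EINTR:

     struct timeval tv = { 1, 0 };
     fd_set fds;
     FD_ZERO (&fds);
     FD_SET (sock, &fds);          // sock: some file descriptor
     if (scm_std_select (sock + 1, &fds, NULL, NULL, &tv) < 0
         && errno == EINTR)
       ...                         // interrupted by an async
*/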
1782
/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_mutex_lock (mutex);
  scm_enter_guile (t);
  return res;
}

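/* Illustrative sketch (not part of threads.c): taking a low-level pthread
   mutex from code that runs in guile mode.  `some_lock' and the function
   name are made up for this example; scm_pthread_mutex_lock leaves guile
   mode around the potentially blocking call so GC is not held up.  */
static scm_i_pthread_mutex_t some_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

static void
locked_region_example (void)
{
  scm_pthread_mutex_lock (&some_lock);
  /* ... manipulate shared state ... */
  scm_i_pthread_mutex_unlock (&some_lock);
}
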
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *) data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

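/* Illustrative sketch (not part of threads.c): the dynwind variant above
   keeps the mutex released even if the body throws.  Reuses the made-up
   `some_lock' from the previous sketch.  */
static void
dynwind_locked_region_example (void)
{
  scm_dynwind_begin (0);
  scm_dynwind_pthread_mutex_lock (&some_lock);
  /* ... code that may throw; the unwind handler unlocks for us ... */
  scm_dynwind_end ();
}
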
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  ((scm_i_thread *)t)->held_mutex = mutex;
  int res = scm_i_pthread_cond_wait (cond, mutex);
  ((scm_i_thread *)t)->held_mutex = NULL;
  scm_enter_guile (t);
  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  ((scm_i_thread *)t)->held_mutex = mutex;
  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  ((scm_i_thread *)t)->held_mutex = NULL;
  scm_enter_guile (t);
  return res;
}

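/* Illustrative sketch (not part of threads.c): a bounded wait on a plain
   pthread condition variable from guile mode.  `some_cond' is made up and
   assumed to be initialized with scm_i_pthread_cond_init elsewhere; the
   deadline is absolute wall-clock time, one second from now (assumes
   <time.h> for time()).  */
static scm_i_pthread_cond_t some_cond;

static int
timed_wait_example (void)
{
  scm_t_timespec deadline;
  int res;

  deadline.tv_sec = time (NULL) + 1;
  deadline.tv_nsec = 0;

  scm_pthread_mutex_lock (&some_lock);
  res = scm_pthread_cond_timedwait (&some_cond, &some_lock, &deadline);
  scm_i_pthread_mutex_unlock (&some_lock);
  return res;   /* 0 on wakeup, ETIMEDOUT if the deadline passed */
}
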
#endif

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

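/* Illustrative sketch (not part of threads.c): like sleep(3), scm_std_sleep
   returns the seconds left unslept when the wait is cut short, so a loop
   can resume it.  The function name is made up.  */
static void
sleep_fully_example (unsigned int secs)
{
  while (secs > 0)
    secs = scm_std_sleep (secs);
}
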
/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

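/* Illustrative sketch (not part of threads.c): the calling thread's handle
   always appears in the list returned by scm_all_threads, while the
   internal signal delivery thread is filtered out above.  The function
   name is made up.  */
static int
current_thread_is_listed_example (void)
{
  return scm_is_true (scm_memq (scm_current_thread (),
                                scm_all_threads ()));
}
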
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

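/* Illustrative sketch (not part of threads.c): polling a thread handle from
   C until it has exited.  `th' is an assumed SCM thread object; in real
   code scm_join_thread is usually the better tool.  */
static void
wait_until_exited_example (SCM th)
{
  while (!scm_c_thread_exited_p (th))
    scm_std_usleep (10000);   /* 10 ms between polls */
}
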
static scm_i_pthread_cond_t wake_up_cond;
int scm_i_thread_go_to_sleep;
static int threads_initialized_p = 0;

void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}

void
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}

void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}

void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  t->held_mutex = &t->heap_mutex;
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  t->held_mutex = NULL;
  resume (t);
}

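/* Illustrative sketch (not part of threads.c): how the collector drives the
   stop-the-world protocol above.  Threads in guile mode notice
   scm_i_thread_go_to_sleep at their next SCM_TICK and park in
   scm_i_thread_sleep_for_gc; threads outside guile mode have already
   released their heap_mutex, so it can be taken directly.  */
static void
collect_example (void)
{
  scm_i_thread_put_to_sleep ();   /* suspend every other thread */
  /* ... mark and sweep the heaps ... */
  scm_i_thread_wake_up ();        /* broadcast wake_up_cond, release mutexes */
}
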
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
int scm_i_critical_section_level = 0;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

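/* Illustrative sketch (not part of threads.c): a critical section over a
   dynwind extent.  Passing SCM_BOOL_F selects the shared recursive mutex
   created in scm_init_threads below; the function name is made up.  */
static void
critical_section_example (void)
{
  scm_dynwind_begin (0);
  scm_dynwind_critical_section (SCM_BOOL_F);
  /* ... asyncs are blocked and the section mutex is held here ... */
  scm_dynwind_end ();
}
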
/*** Initialization */

scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

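/* Illustrative sketch (not part of threads.c): the rough order in which the
   initialization entry points above are called during startup.  The real
   sequence lives in init.c and interleaves many other modules; this is only
   a guide.  */
static void
init_order_example (SCM_STACKITEM *stack_base)
{
  scm_threads_prehistory (stack_base);        /* mutexes, keys, first thread */
  /* ... the rest of libguile is initialized ... */
  scm_init_threads ();                        /* smob types, second guilify step */
  scm_init_thread_procs ();                   /* Scheme-visible procedures */
  scm_init_threads_default_dynamic_state ();  /* shared default dynamic state */
}
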
/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/