/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */



#define _GNU_SOURCE

#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>
#include <assert.h>

#ifdef HAVE_STRING_H
#include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
#endif

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"

#ifdef __MINGW32__
#ifndef ETIMEDOUT
# define ETIMEDOUT   WSAETIMEDOUT
#endif
# include <fcntl.h>
# include <process.h>
# define pipe(fd) _pipe (fd, 256, O_BINARY)
#endif /* __MINGW32__ */

static void
to_timespec (SCM t, scm_t_timespec *waittime)
{
  if (scm_is_pair (t))
    {
      waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
      waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
    }
  else
    {
      double time = scm_to_double (t);
      double sec = scm_c_truncate (time);

      waittime->tv_sec = (long) sec;
      waittime->tv_nsec = (long) ((time - sec) * 1000000000);
    }
}
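
/* Illustrative sketch, not part of the original file: the two timeout
   representations accepted by to_timespec above -- a (secs . usecs)
   pair as returned by gettimeofday, or a real number of seconds.  */
#if 0
{
  scm_t_timespec ts;

  to_timespec (scm_cons (scm_from_ulong (5), scm_from_ulong (250000)), &ts);
  /* ts.tv_sec == 5, ts.tv_nsec == 250000000  (pair form) */

  to_timespec (scm_from_double (2.5), &ts);
  /* ts.tv_sec == 2, ts.tv_nsec == 500000000  (real-number form) */
}
#endif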

/*** Queues */

/* Make an empty queue data structure.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  return c;
}

/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, SCM_CDR (c));
          SCM_SETCDR (prev, SCM_CDR (c));
          return 1;
        }
      prev = p;
    }
  return 0;
}

/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c = SCM_CDR (q);
  if (scm_is_null (c))
    return SCM_BOOL_F;
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      return SCM_CAR (c);
    }
}
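
/* Illustrative sketch, not part of the original file: how the queue
   primitives above fit together.  ITEM stands for some SCM value and
   is only an assumption for the example.  */
#if 0
{
  SCM q = make_queue ();            /* a cons of two empty lists */
  SCM handle = enqueue (q, item);   /* ITEM is now at the back of Q */

  if (remqueue (q, handle))
    ;                               /* ITEM was still queued and is now gone */

  enqueue (q, item);
  dequeue (q);                      /* returns ITEM; returns #f once Q is empty */
}
#endif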

/*** Thread smob routines */

static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->cleanup_handler);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->mutexes);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  return t->dynamic_state;
}

static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}

static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}

/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The QUEUE data structure is assumed to be protected by MUTEX and
   the caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}

/* Wake up the first thread on QUEUE, if any.  The caller must hold
   the mutex that protects QUEUE.  The awoken thread is returned, or
   #f when the queue was empty.
 */
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}
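
/* Illustrative sketch, not part of the original file: the typical
   pattern for using block_self and unblock_from_queue, as the fat
   mutex code below does.  WAITER and WAKER are hypothetical helpers;
   Q is a queue protected by LOCK.  */
#if 0
static scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static SCM q;   /* initialized elsewhere with make_queue () */

static int
waiter (SCM obj, const scm_t_timespec *waittime)
{
  int err;
  scm_i_scm_pthread_mutex_lock (&lock);
  /* Sleeps with LOCK released; returns 0, EINTR or ETIMEDOUT.  */
  err = block_self (q, obj, &lock, waittime);
  scm_i_pthread_mutex_unlock (&lock);
  return err;
}

static void
waker (void)
{
  scm_i_pthread_mutex_lock (&lock);
  unblock_from_queue (q);   /* wake the first waiter, if any */
  scm_i_pthread_mutex_unlock (&lock);
}
#endif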

/* Getting into and out of guile mode.
 */

/* Ken Raeburn observes that the implementation of suspend and resume
   (and the things that build on top of them) are very likely not
   correct (see below).  We will need to fix this eventually, and
   that's why scm_leave_guile/scm_enter_guile are not exported in the
   API.

   Ken writes:

   Consider this sequence:

   Function foo, called in Guile mode, calls suspend (maybe indirectly
   through scm_leave_guile), which does this:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t); // just takes address of automatic
      var 't'
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS; // sparc only
      setjmp (t->regs); // here's most of the magic

   ... and returns.

   Function foo has a SCM value X, a handle on a non-immediate object, in
   a caller-saved register R, and it's the only reference to the object
   currently.

   The compiler wants to use R in suspend, so it pushes the current
   value, X, into a stack slot which will be reloaded on exit from
   suspend; then it loads stuff into R and goes about its business.  The
   setjmp call saves (some of) the current registers, including R, which
   no longer contains X.  (This isn't a problem for a normal
   setjmp/longjmp situation, where longjmp would be called before
   setjmp's caller returns; the old value for X would be loaded back from
   the stack after the longjmp, before the function returned.)

   So, suspend returns, loading X back into R (and invalidating the jump
   buffer) in the process.  The caller foo then goes off and calls a
   bunch of other functions out of Guile mode, occasionally storing X on
   the stack again, but, say, much deeper on the stack than suspend's
   stack frame went, and the stack slot where suspend had written X has
   long since been overwritten with other values.

   Okay, nothing actively broken so far.  Now, let garbage collection
   run, triggered by another thread.

   The thread calling foo is out of Guile mode at the time, so the
   garbage collector just scans a range of stack addresses.  Too bad that
   X isn't stored there.  So the pointed-to storage goes onto the free
   list, and I think you can see where things go from there.

   Is there anything I'm missing that'll prevent this scenario from
   happening?  I mean, aside from, "well, suspend and scm_leave_guile
   don't have many local variables, so they probably won't need to save
   any registers on most systems, so we hope everything will wind up in
   the jump buffer and we'll just get away with it"?

   (And, going the other direction, if scm_leave_guile and suspend push
   the stack pointer over onto a new page, and foo doesn't make further
   function calls and thus the stack pointer no longer includes that
   page, are we guaranteed that the kernel cannot release the now-unused
   stack page that contains the top-of-stack pointer we just saved?  I
   don't know if any OS actually does that.  If it does, we could get
   faults in garbage collection.)

   I don't think scm_without_guile has to have this problem, as it gets
   more control over the stack handling -- but it should call setjmp
   itself.  I'd probably try something like:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t);
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS;
      setjmp (t->regs);
      res = func(data);
      scm_enter_guile (t);

   ... though even that's making some assumptions about the stack
   ordering of local variables versus caller-saved registers.

   For something like scm_leave_guile to work, I don't think it can just
   rely on invalidated jump buffers.  A valid jump buffer, and a handle
   on the stack state at the point when the jump buffer was initialized,
   together, would work fine, but I think then we're talking about macros
   invoking setjmp in the caller's stack frame, and requiring that the
   caller of scm_leave_guile also call scm_enter_guile before returning,
   kind of like pthread_cleanup_push/pop calls that have to be paired up
   in a function.  (In fact, the pthread ones have to be paired up
   syntactically, as if they might expand to a compound statement
   incorporating the user's code, and invoking a compiler's
   exception-handling primitives.  Which might be something to think
   about for cases where Guile is used with C++ exceptions or
   pthread_cancel.)
*/

scm_i_pthread_key_t scm_i_thread_key;

static void
resume (scm_i_thread *t)
{
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}

typedef void* scm_t_guile_ticket;

static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      scm_i_pthread_mutex_lock (&t->heap_mutex);
      resume (t);
    }
}

static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}

static scm_t_guile_ticket
scm_leave_guile ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_mutex_unlock (&t->heap_mutex);
  return (scm_t_guile_ticket) t;
}

static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static int thread_count;

static SCM scm_i_default_dynamic_state;

/* Perform first stage of thread initialisation, in non-guile mode.
 */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = malloc (sizeof (scm_i_thread));

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->cleanup_handler = SCM_BOOL_F;
  t->mutexes = SCM_EOL;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->last_debug_frame = NULL;
  t->base = base;
#ifdef __ia64__
  /* Calculate and store off the base of this thread's register
     backing store (RBS).  Unfortunately our implementation(s) of
     scm_ia64_register_backing_store_base are only reliable for the
     main thread.  For other threads, therefore, find out the current
     top of the RBS, and use that as a maximum. */
  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
  {
    ucontext_t ctx;
    void *bsp;
    getcontext (&ctx);
    bsp = scm_ia64_ar_bsp (&ctx);
    if (t->register_backing_store_base > bsp)
      t->register_backing_store_base = bsp;
  }
#endif
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;
  /* XXX - check for errors. */
  pipe (t->sleep_pipe);
  scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
  scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
  t->clear_freelists_p = 0;
  t->gc_running_p = 0;
  t->canceled = 0;
  t->exited = 0;

  t->freelist = SCM_EOL;
  t->freelist2 = SCM_EOL;
  SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
  SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);

  scm_i_pthread_setspecific (scm_i_thread_key, t);

  scm_i_pthread_mutex_lock (&t->heap_mutex);

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}

/* Perform second stage of thread initialisation, in guile mode.
 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;
}


/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level;      /* how much the owner owns us.
                     < 0 for non-recursive mutexes */

  int unchecked_unlock;       /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock;  /* is it an error to unlock a mutex that is not
                                 owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))

/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
                                      (scm_t_catch_body) scm_call_0, ptr,
                                      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_CAR (t->mutexes);
      fat_mutex *m = SCM_MUTEX_DATA (mutex);
      scm_i_pthread_mutex_lock (&m->lock);

      unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      t->mutexes = SCM_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}

static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;
        break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}

static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}

/* Perform any initializations necessary to bring the current thread
   into guile mode, initializing Guile itself, if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Return zero when the thread was in guile mode already; otherwise
   return 1.
*/

static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
        {
          /* First thread ever to enter Guile.  Run the full
             initialization.
          */
          scm_i_init_guile (base);
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
        }
      else
        {
          /* Guile is already initialized, but this thread enters it for
             the first time.  Only initialize this thread.
          */
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
          guilify_self_1 (base);
          guilify_self_2 (parent);
        }
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
         resume it.

         XXX - base might be lower than when this thread was first
         guilified.
       */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
       */
      return 0;
    }
}

#if SCM_USE_PTHREAD_THREADS

#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
/* This method for GNU/Linux and perhaps some other systems.
   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
   available on them.  */
#define HAVE_GET_THREAD_STACK_BASE

static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  if ((void *)&attr < start || (void *)&attr >= end)
    return scm_get_stack_base ();
  else
#endif
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}

#elif HAVE_PTHREAD_GET_STACKADDR_NP
/* This method for MacOS X.
   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
   but as of 2006 there's nothing obvious at apple.com.  */
#define HAVE_GET_THREAD_STACK_BASE
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}

#elif defined (__MINGW32__)
/* This method for mingw.  In mingw the basic scm_get_stack_base can be used
   in any thread.  We don't like hard-coding the name of a system, but there
   doesn't seem to be a cleaner way of knowing scm_get_stack_base can
   work.  */
#define HAVE_GET_THREAD_STACK_BASE
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}

#endif /* pthread methods of get_thread_stack_base */

#else /* !SCM_USE_PTHREAD_THREADS */

#define HAVE_GET_THREAD_STACK_BASE

static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}

#endif /* !SCM_USE_PTHREAD_THREADS */

#ifdef HAVE_GET_THREAD_STACK_BASE

void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
                               scm_i_default_dynamic_state);
}

#endif

void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
                                      scm_i_default_dynamic_state);
}

static void
scm_leave_guile_cleanup (void *x)
{
  scm_leave_guile ();
}

void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  SCM_STACKITEM base_item;

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
      scm_leave_guile ();
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}

void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *res;
  scm_t_guile_ticket t;
  t = scm_leave_guile ();
  res = func (data);
  scm_enter_guile (t);
  return res;
}
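
/* Illustrative sketch, not part of the original file: wrapping a
   blocking system call with scm_without_guile above so that GC in
   other threads is not held up while we wait.  READ_THUNK and its
   caller are made up for the example.  */
#if 0
static void *
read_thunk (void *data)
{
  char *buf = data;
  return (void *) (long) read (0, buf, 1);   /* may block */
}

static long
read_one_byte_without_guile (char *buf)
{
  return (long) scm_without_guile (read_thunk, buf);
}
#endif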

/*** Thread creation */

typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;

static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}

static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}

SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
            (SCM thunk, SCM handler),
            "Call @code{thunk} in a new thread and with a new dynamic state,\n"
            "returning a new thread object representing the thread.  The procedure\n"
            "@var{thunk} is called via @code{with-continuation-barrier}.\n"
            "\n"
            "When @var{handler} is specified, then @var{thunk} is called from\n"
            "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
            "handler.  This catch is established inside the continuation barrier.\n"
            "\n"
            "Once @var{thunk} or @var{handler} returns, the return value is made\n"
            "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
              handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME

typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;

static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}

static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}

SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
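
/* Illustrative sketch, not part of the original file: starting a
   thread from C with scm_spawn_thread above.  WORKER and OOPS are
   hypothetical body and handler functions.  */
#if 0
static SCM
worker (void *data)
{
  return scm_from_int (42);        /* becomes the thread's exit value */
}

static SCM
oops (void *data, SCM key, SCM args)
{
  return SCM_BOOL_F;               /* exit value if WORKER throws */
}

static SCM
start_worker (void)
{
  /* Returns the new thread object; join-thread yields its result.  */
  return scm_spawn_thread (worker, NULL, oops, NULL);
}
#endif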

SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
            (),
            "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME

SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
            (SCM thread),
            "Asynchronously force the target @var{thread} to terminate. @var{thread} "
            "cannot be the current thread, and if @var{thread} has already terminated or "
            "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
            (SCM thread, SCM proc),
            "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
            "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
            (SCM thread),
            "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME

SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
            (SCM thread, SCM timeout, SCM timeoutval),
            "Suspend execution of the calling thread until the target @var{thread} "
            "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      while (1)
        {
          int err = block_self (t->join_queue, thread, &t->admin_mutex,
                                timeout_ptr);
          if (err == 0)
            {
              if (t->exited)
                {
                  res = t->result;
                  break;
                }
            }
          else if (err == ETIMEDOUT)
            break;

          scm_i_pthread_mutex_unlock (&t->admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
        }
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a thread.")
#define FUNC_NAME s_scm_thread_p
{
  return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

static SCM
fat_mutex_mark (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}

static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}

static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}

static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = recursive? 0 : -1;

  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}

SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}

SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");

SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
            (SCM flags),
            "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex_with_flags
{
  int unchecked_unlock = 0, external_unlock = 0, recursive = 0;

  SCM ptr = flags;
  while (! scm_is_null (ptr))
    {
      SCM flag = SCM_CAR (ptr);
      if (scm_is_eq (flag, unchecked_unlock_sym))
        unchecked_unlock = 1;
      else if (scm_is_eq (flag, allow_external_unlock_sym))
        external_unlock = 1;
      else if (scm_is_eq (flag, recursive_sym))
        recursive = 1;
      else
        SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
      ptr = SCM_CDR (ptr);
    }
  return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
}
#undef FUNC_NAME

SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
            (void),
            "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME

SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");

static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM thread = scm_current_thread ();
  scm_i_thread *t = SCM_I_THREAD_DATA (thread);

  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    {
      m->owner = thread;
      scm_i_pthread_mutex_lock (&t->admin_mutex);
      t->mutexes = scm_cons (mutex, t->mutexes);
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      *ret = 1;
    }
  else if (scm_is_eq (m->owner, thread))
    {
      if (m->level >= 0)
        {
          m->level++;
          *ret = 1;
        }
      else
        err = scm_cons (scm_misc_error_key,
                        scm_from_locale_string ("mutex already locked by "
                                                "current thread"));
    }
  else
    {
      int first_iteration = 1;
      while (1)
        {
          if (scm_is_eq (m->owner, thread) || scm_c_thread_exited_p (m->owner))
            {
              scm_i_pthread_mutex_lock (&t->admin_mutex);
              t->mutexes = scm_cons (mutex, t->mutexes);
              scm_i_pthread_mutex_unlock (&t->admin_mutex);
              *ret = 1;
              if (scm_c_thread_exited_p (m->owner))
                {
                  m->owner = thread;
                  err = scm_cons (scm_abandoned_mutex_error_key,
                                  scm_from_locale_string ("lock obtained on "
                                                          "abandoned mutex"));
                }
              break;
            }
          else if (!first_iteration)
            {
              if (timeout != NULL)
                {
                  gettimeofday (&current_time, NULL);
                  if (current_time.tv_sec > timeout->tv_sec ||
                      (current_time.tv_sec == timeout->tv_sec &&
                       current_time.tv_usec * 1000 > timeout->tv_nsec))
                    {
                      *ret = 0;
                      break;
                    }
                }
              scm_i_pthread_mutex_unlock (&m->lock);
              SCM_TICK;
              scm_i_scm_pthread_mutex_lock (&m->lock);
            }
          else
            first_iteration = 0;
          block_self (m->waiting, mutex, &m->lock, timeout);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}

SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED);
}

SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 1, 0,
            (SCM m, SCM timeout),
            "Lock @var{mutex}. If the mutex is already locked, the calling thread "
            "blocks until the mutex becomes available. The function returns when "
            "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
            "a thread already owns will succeed right away and will not block the "
            "thread. That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  exception = fat_mutex_lock (m, waittime, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
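
/* Illustrative sketch, not part of the original file: using
   scm_dynwind_lock_mutex above so that MUTEX is released even if the
   code inside the dynwind context exits non-locally.  */
#if 0
{
  SCM mutex = scm_make_mutex ();

  scm_dynwind_begin (0);
  scm_dynwind_lock_mutex (mutex);
  /* ... code that may throw or invoke a continuation ... */
  scm_dynwind_end ();   /* unlocks MUTEX via the unwind handler */
}
#endif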

SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
            (SCM mutex),
            "Try to lock @var{mutex}. If the mutex is already locked by someone "
            "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

/*** Fat condition variables */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))

static int
fat_mutex_unlock (SCM mutex, SCM cond,
                  const scm_t_timespec *waittime, int relock)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (!scm_is_eq (m->owner, scm_current_thread ()))
    {
      if (scm_is_false (m->owner))
        {
          if (!m->unchecked_unlock)
            {
              scm_i_pthread_mutex_unlock (&m->lock);
              scm_misc_error (NULL, "mutex not locked", SCM_EOL);
            }
        }
      else if (!m->allow_external_unlock)
        {
          scm_i_pthread_mutex_unlock (&m->lock);
          scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
        }
    }

  if (! (SCM_UNBNDP (cond)))
    {
      int lock_ret = 0;

      c = SCM_CONDVAR_DATA (cond);
      while (1)
        {
          int brk = 0;

          scm_i_scm_pthread_mutex_lock (&c->lock);
          if (m->level > 0)
            m->level--;
          else
            m->owner = unblock_from_queue (m->waiting);
          scm_i_pthread_mutex_unlock (&m->lock);

          t->block_asyncs++;

          err = block_self (c->waiting, cond, &c->lock, waittime);

          if (err == 0)
            {
              ret = 1;
              brk = 1;
            }
          else if (err == ETIMEDOUT)
            {
              ret = 0;
              brk = 1;
            }
          else if (err != EINTR)
            {
              errno = err;
              scm_i_pthread_mutex_unlock (&c->lock);
              scm_syserror (NULL);
            }

          if (brk)
            {
              if (relock)
                fat_mutex_lock (mutex, NULL, &lock_ret);
              scm_i_pthread_mutex_unlock (&c->lock);
              break;
            }

          scm_i_pthread_mutex_unlock (&c->lock);

          t->block_asyncs--;
          scm_async_click ();

          scm_remember_upto_here_2 (cond, mutex);

          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  else
    {
      if (m->level > 0)
        m->level--;
      else
        m->owner = unblock_from_queue (m->waiting);
      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}

SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}

SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
            (SCM mx, SCM cond, SCM timeout),
            "Unlocks @var{mutex} if the calling thread owns the lock on "
            "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
            "thread results in undefined behaviour. Once a mutex has been unlocked, "
            "one thread blocked on @var{mutex} is awakened and grabs the mutex "
            "lock.  Every call to @code{lock-mutex} by this thread must be matched "
            "with a call to @code{unlock-mutex}.  Only the last call to "
            "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! (SCM_UNBNDP (timeout)))
        {
          to_timespec (timeout, &cwaittime);
          waittime = &cwaittime;
        }
    }

  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a mutex.")
#define FUNC_NAME s_scm_mutex_p
{
  return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

#if 0

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
            (SCM mx),
            "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM_VALIDATE_MUTEX (1, mx);
  return (SCM_MUTEX_DATA(mx))->owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
            (SCM mx),
            "Return the lock level of a recursive mutex, or -1\n"
            "for a standard mutex.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

#endif

static SCM
fat_cond_mark (SCM cv)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  return c->waiting;
}

static size_t
fat_cond_free (SCM mx)
{
  fat_cond *c = SCM_CONDVAR_DATA (mx);
  scm_i_pthread_mutex_destroy (&c->lock);
  scm_gc_free (c, sizeof (fat_cond), "condition-variable");
  return 0;
}

static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}

SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME

SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
            (SCM cv, SCM mx, SCM t),
            "Wait until @var{cond-var} has been signalled.  While waiting, "
            "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
            "is locked again when this function returns.  When @var{time} is given, "
            "it specifies a point in time where the waiting should be aborted.  It "
            "can be either an integer as returned by @code{current-time} or a pair "
            "as returned by @code{gettimeofday}.  When the waiting is aborted the "
            "mutex is locked and @code{#f} is returned.  When the condition "
            "variable is in fact signalled, the mutex is also locked and @code{#t} "
            "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
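
/* Illustrative sketch, not part of the original file: the usual
   lock / wait-in-a-loop / unlock pattern at the C level, using the
   public mutex and condition-variable API defined in this file.
   PRED is a hypothetical predicate over some shared state.  */
#if 0
{
  SCM m = scm_make_mutex ();
  SCM c = scm_make_condition_variable ();

  /* Waiting side: */
  scm_lock_mutex (m);
  while (scm_is_false (pred ()))
    /* Atomically unlocks M, waits on C, and relocks M on return.  */
    scm_timed_wait_condition_variable (c, m, SCM_UNDEFINED);
  /* ... use the shared state ... */
  scm_unlock_mutex (m);

  /* Signalling side, after changing the state under M: */
  scm_signal_condition_variable (c);
}
#endif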

static void
fat_cond_signal (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  unblock_from_queue (c->waiting);
  scm_i_pthread_mutex_unlock (&c->lock);
}

SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

static void
fat_cond_broadcast (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
  scm_i_pthread_mutex_unlock (&c->lock);
}

SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
            (SCM obj),
            "Return @code{#t} if @var{obj} is a condition variable.")
#define FUNC_NAME s_scm_condition_variable_p
{
  return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME

/*** Marking stacks */

/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?
*/
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                \
    ucontext_t ctx;                                                   \
    SCM_STACKITEM * top, * bot;                                       \
    getcontext (&ctx);                                                \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,           \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM)));                                    \
    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx);                   \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif

void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      scm_gc_mark (t->handle);

#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      scm_mark_locations ((SCM_STACKITEM *) &t->regs,
                          ((size_t) sizeof(t->regs)
                           / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}

/*** Select */

int
scm_std_select (int nfds,
                SELECT_TYPE *readfds,
                SELECT_TYPE *writefds,
                SELECT_TYPE *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  eno = errno;
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}
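
/* Illustrative sketch, not part of the original file: waiting for
   input with scm_std_select above instead of plain select, so the
   wait can be interrupted by system asyncs.  FD is a hypothetical
   descriptor.  */
#if 0
static int
wait_for_input (int fd, long usecs)
{
  fd_set readfds;
  struct timeval tv;

  FD_ZERO (&readfds);
  FD_SET (fd, &readfds);
  tv.tv_sec = usecs / 1000000;
  tv.tv_usec = usecs % 1000000;

  /* Returns like select; -1 with errno == EINTR when interrupted.  */
  return scm_std_select (fd + 1, &readfds, NULL, NULL, &tv);
}
#endif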
1759
9de87eea 1760/* Convenience API for blocking while in guile mode. */
76da80e7 1761
9de87eea 1762#if SCM_USE_PTHREAD_THREADS
92e64b87 1763
9bc4701c 1764int
9de87eea 1765scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
9bc4701c 1766{
9de87eea
MV
1767 scm_t_guile_ticket t = scm_leave_guile ();
1768 int res = scm_i_pthread_mutex_lock (mutex);
1769 scm_enter_guile (t);
9bc4701c
MD
1770 return res;
1771}
1772
9de87eea 1773static void
2b829bbb 1774do_unlock (void *data)
28d52ebb 1775{
9de87eea 1776 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
28d52ebb
MD
1777}
1778
1779void
661ae7ab 1780scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
28d52ebb 1781{
9de87eea 1782 scm_i_scm_pthread_mutex_lock (mutex);
2b829bbb 1783 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
28d52ebb
MD
1784}

int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_wait (cond, mutex);
  scm_enter_guile (t);
  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  scm_enter_guile (t);
  return res;
}

#endif
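/* Rough usage sketch (not part of this file): the wrappers follow the
   normal pthread condition-variable protocol, so a guile-mode waiter
   still loops on its predicate and rechecks after every wakeup.  FLAG,
   ITS_MUTEX and ITS_COND are hypothetical:

     scm_pthread_mutex_lock (&its_mutex);
     while (!flag)
       scm_pthread_cond_wait (&its_cond, &its_mutex);  // drops and reacquires its_mutex
     scm_i_pthread_mutex_unlock (&its_mutex);

   The timed variant returns ETIMEDOUT once the absolute time WT passes
   without the condition being signalled.  */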

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}
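/* Rough usage sketch (not part of this file): these behave like
   sleep(3) and usleep(3) but are built on scm_std_select, so a
   sleeping guile thread can still be woken by asyncs.  On systems
   where select(2) updates its timeout argument, the return value is
   the time left unslept:

     unsigned int left = scm_std_sleep (5);
     if (left > 0)
       ;   // woken before the five seconds were up
*/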

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We cannot allocate while holding the thread_admin_mutex because of
     the way GC is done, so the result list is pre-allocated here and
     only filled in while the mutex is held.  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
        {
          SCM_SETCAR (*l, t->handle);
          l = SCM_CDRLOC (*l);
        }
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
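/* Rough usage sketch (not part of this file): the result is an
   ordinary Scheme list, so from C one can, for example, count the
   currently known threads with

     long n = scm_to_long (scm_length (scm_all_threads ()));

   and from Scheme the same information is just (length (all-threads)).  */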

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
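/* Rough usage sketch (not part of this file): after joining a thread
   it has exited, which can be checked from C with the predicate above.
   SOME_BODY is a hypothetical scm_t_catch_body:

     SCM t = scm_spawn_thread (some_body, NULL, NULL, NULL);
     scm_join_thread (t);
     assert (scm_c_thread_exited_p (t));
*/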

static scm_i_pthread_cond_t wake_up_cond;
int scm_i_thread_go_to_sleep;
static int threads_initialized_p = 0;

void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}

void
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}

void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}

void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}

/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
int scm_i_critical_section_level = 0;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
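/* Rough usage sketch (not part of this file): a critical section that
   must not be interrupted by asyncs and must be released on non-local
   exit is typically written as

     scm_dynwind_begin (0);
     scm_dynwind_critical_section (SCM_BOOL_F);   // #f selects the shared recursive mutex
     ...code that may allocate or throw...
     scm_dynwind_end ();

   Passing a specific mutex object instead of SCM_BOOL_F serializes
   only against other users of that same mutex.  */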

/*** Initialization */

scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
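/* Illustrative note (not part of this file), describing the assumed
   boot order rather than guaranteeing it: scm_threads_prehistory runs
   very early in Guile's initialization, with the stack base of the
   first thread and before the heap is usable, whereas scm_init_threads
   and scm_init_thread_procs below run later, once smob types and the
   evaluator exist:

     scm_threads_prehistory (base);   // mutexes, keys, guilify_self_1
     ...storage and GC initialization...
     scm_init_threads ();             // smob types, guilify_self_2
     scm_init_thread_procs ();        // Scheme-visible thread procedures
*/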

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/