/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */



#define _GNU_SOURCE

#include "libguile/boehm-gc.h"
#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>
#include <assert.h>
#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"

#ifdef __MINGW32__
#ifndef ETIMEDOUT
# define ETIMEDOUT WSAETIMEDOUT
#endif
# include <fcntl.h>
# include <process.h>
# define pipe(fd) _pipe (fd, 256, O_BINARY)
#endif /* __MINGW32__ */

/*** Queues */

/* Make an empty queue data structure.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  return c;
}

/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, SCM_CDR (c));
          SCM_SETCDR (prev, SCM_CDR (c));
          return 1;
        }
      prev = p;
    }
  return 0;
}

/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c = SCM_CDR (q);
  if (scm_is_null (c))
    return SCM_BOOL_F;
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      return SCM_CAR (c);
    }
}

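/* The queue representation above is a cons cell whose cdr holds the
   list of queued elements (oldest first) and whose car points at the
   last pair of that list, so enqueue is O(1).  An illustrative round
   trip (a sketch only, not part of the original code; plain numbers
   stand in for thread handles):

     SCM q = make_queue ();
     SCM h = enqueue (q, scm_from_int (1));    // handle for later removal
     enqueue (q, scm_from_int (2));
     remqueue (q, h);                          // => 1, element 1 removed
     dequeue (q);                              // => 2
     dequeue (q);                              // => SCM_BOOL_F, queue empty
*/
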
/*** Thread smob routines */


static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_puts ("#<thread ", port);
  scm_uintprint ((size_t)t->pthread, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}

static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}

/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The QUEUE data structure is assumed to be protected by MUTEX and
   the caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}

/* Wake up the first thread on QUEUE, if any.  The caller must hold
   the mutex that protects QUEUE.  The awoken thread is returned, or
   #f when the queue was empty.
 */
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}

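/* Typical calling pattern for the pair above (an illustrative sketch
   only; the real callers are scm_join_thread, fat_mutex_lock and
   fat_cond_timedwait below):

     // waiter, with MUTEX (which protects QUEUE and the condition) held:
     while (!condition_holds)
       {
         int err = block_self (queue, obj, &mutex, NULL);
         if (err == EINTR)
           {
             // deliver pending asyncs outside the lock, then retry
             scm_i_pthread_mutex_unlock (&mutex);
             SCM_TICK;
             scm_i_scm_pthread_mutex_lock (&mutex);
           }
       }

     // waker, with MUTEX held:
     condition_holds = 1;
     unblock_from_queue (queue);
*/
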
/* Getting into and out of guile mode.
 */

/* Ken Raeburn observes that the implementation of suspend and resume
   (and the things that build on top of them) are very likely not
   correct (see below).  We will need to fix this eventually, and
   that's why scm_leave_guile/scm_enter_guile are not exported in the
   API.

   Ken writes:

   Consider this sequence:

   Function foo, called in Guile mode, calls suspend (maybe indirectly
   through scm_leave_guile), which does this:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t); // just takes address of automatic
      var 't'
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS; // sparc only
      setjmp (t->regs); // here's most of the magic

   ... and returns.

   Function foo has a SCM value X, a handle on a non-immediate object, in
   a caller-saved register R, and it's the only reference to the object
   currently.

   The compiler wants to use R in suspend, so it pushes the current
   value, X, into a stack slot which will be reloaded on exit from
   suspend; then it loads stuff into R and goes about its business.  The
   setjmp call saves (some of) the current registers, including R, which
   no longer contains X.  (This isn't a problem for a normal
   setjmp/longjmp situation, where longjmp would be called before
   setjmp's caller returns; the old value for X would be loaded back from
   the stack after the longjmp, before the function returned.)

   So, suspend returns, loading X back into R (and invalidating the jump
   buffer) in the process.  The caller foo then goes off and calls a
   bunch of other functions out of Guile mode, occasionally storing X on
   the stack again, but, say, much deeper on the stack than suspend's
   stack frame went, and the stack slot where suspend had written X has
   long since been overwritten with other values.

   Okay, nothing actively broken so far.  Now, let garbage collection
   run, triggered by another thread.

   The thread calling foo is out of Guile mode at the time, so the
   garbage collector just scans a range of stack addresses.  Too bad that
   X isn't stored there.  So the pointed-to storage goes onto the free
   list, and I think you can see where things go from there.

   Is there anything I'm missing that'll prevent this scenario from
   happening?  I mean, aside from, "well, suspend and scm_leave_guile
   don't have many local variables, so they probably won't need to save
   any registers on most systems, so we hope everything will wind up in
   the jump buffer and we'll just get away with it"?

   (And, going the other direction, if scm_leave_guile and suspend push
   the stack pointer over onto a new page, and foo doesn't make further
   function calls and thus the stack pointer no longer includes that
   page, are we guaranteed that the kernel cannot release the now-unused
   stack page that contains the top-of-stack pointer we just saved?  I
   don't know if any OS actually does that.  If it does, we could get
   faults in garbage collection.)

   I don't think scm_without_guile has to have this problem, as it gets
   more control over the stack handling -- but it should call setjmp
   itself.  I'd probably try something like:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t);
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS;
      setjmp (t->regs);
      res = func(data);
      scm_enter_guile (t);

   ... though even that's making some assumptions about the stack
   ordering of local variables versus caller-saved registers.

   For something like scm_leave_guile to work, I don't think it can just
   rely on invalidated jump buffers.  A valid jump buffer, and a handle
   on the stack state at the point when the jump buffer was initialized,
   together, would work fine, but I think then we're talking about macros
   invoking setjmp in the caller's stack frame, and requiring that the
   caller of scm_leave_guile also call scm_enter_guile before returning,
   kind of like pthread_cleanup_push/pop calls that have to be paired up
   in a function.  (In fact, the pthread ones have to be paired up
   syntactically, as if they might expand to a compound statement
   incorporating the user's code, and invoking a compiler's
   exception-handling primitives.  Which might be something to think
   about for cases where Guile is used with C++ exceptions or
   pthread_cancel.)
*/

scm_i_pthread_key_t scm_i_thread_key;

static void
resume (scm_i_thread *t)
{
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}

typedef void* scm_t_guile_ticket;

static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      scm_i_pthread_mutex_lock (&t->heap_mutex);
      resume (t);
    }
}

static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}

static scm_t_guile_ticket
scm_leave_guile ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_mutex_unlock (&t->heap_mutex);
  return (scm_t_guile_ticket) t;
}

static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static int thread_count;

static SCM scm_i_default_dynamic_state;

/* Perform first stage of thread initialisation, in non-guile mode.
 */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->last_debug_frame = NULL;
  t->base = base;
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;
  /* XXX - check for errors. */
  pipe (t->sleep_pipe);
  scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
  t->clear_freelists_p = 0;
  t->gc_running_p = 0;
  t->current_mark_stack_ptr = NULL;
  t->current_mark_stack_limit = NULL;
  t->exited = 0;

  t->freelist = SCM_EOL;
  t->freelist2 = SCM_EOL;
  SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
  SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);

  scm_i_pthread_setspecific (scm_i_thread_key, t);

  scm_i_pthread_mutex_lock (&t->heap_mutex);

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}

/* Perform second stage of thread initialisation, in guile mode.
 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;
}

/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v;

  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return NULL;
}

static void
on_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v, **tp;

  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.
  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourselves from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.
  */
  scm_leave_guile ();
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;
        break;
      }
  thread_count--;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}

static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}

/* Perform any initializations necessary to bring the current thread
   into guile mode, initializing Guile itself, if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Return zero when the thread was in guile mode already; otherwise
   return 1.
*/

static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
        {
          /* First thread ever to enter Guile.  Run the full
             initialization.
          */
          scm_i_init_guile (base);
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
        }
      else
        {
          /* Guile is already initialized, but this thread enters it for
             the first time.  Only initialize this thread.
          */
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
          guilify_self_1 (base);
          guilify_self_2 (parent);
        }
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
         resume it.

         XXX - base might be lower than when this thread was first
         guilified.
      */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
       */
      return 0;
    }
}

#if SCM_USE_PTHREAD_THREADS
#ifdef HAVE_PTHREAD_ATTR_GETSTACK

#define HAVE_GET_THREAD_STACK_BASE

static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  if ((void *)&attr < start || (void *)&attr >= end)
    return scm_get_stack_base ();
  else
#endif
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}

#endif /* HAVE_PTHREAD_ATTR_GETSTACK */

#else /* !SCM_USE_PTHREAD_THREADS */

#define HAVE_GET_THREAD_STACK_BASE

static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}

#endif /* !SCM_USE_PTHREAD_THREADS */

#ifdef HAVE_GET_THREAD_STACK_BASE

void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
                               scm_i_default_dynamic_state);
}

#endif

void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
                                      scm_i_default_dynamic_state);
}

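/* A minimal sketch of how an application would use the entry point
   above (illustrative only; error handling omitted):

     static void *
     do_work (void *data)
     {
       scm_c_eval_string ("(display \"hello from guile mode\\n\")");
       return NULL;
     }

     int
     main (int argc, char **argv)
     {
       scm_with_guile (do_work, NULL);   // enter guile mode, run FUNC, leave again
       return 0;
     }
*/
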
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data,
                             SCM parent)
{
  void *res;
  int really_entered;
  SCM_STACKITEM base_item;
  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  res = scm_c_with_continuation_barrier (func, data);
  if (really_entered)
    scm_leave_guile ();
  return res;
}

void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *res;
  scm_t_guile_ticket t;
  t = scm_leave_guile ();
  res = func (data);
  scm_enter_guile (t);
  return res;
}

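/* Conversely, code that is already in guile mode and is about to make
   a blocking system call should drop out of guile mode around that
   call so that it does not hold up the garbage collector.  A sketch
   (the helper names below are illustrative, not part of this file):

     static void *
     do_blocking_read (void *data)
     {
       struct read_request *req = data;       // hypothetical struct
       req->nread = read (req->fd, req->buf, req->len);
       return NULL;
     }

     // from guile mode:
     scm_without_guile (do_blocking_read, &req);
*/
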
/*** Thread creation */

typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;

static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}

static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}

SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
            (SCM thunk, SCM handler),
            "Call @code{thunk} in a new thread and with a new dynamic state,\n"
            "returning a new thread object representing the thread.  The procedure\n"
            "@var{thunk} is called via @code{with-continuation-barrier}.\n"
            "\n"
            "When @var{handler} is specified, then @var{thunk} is called from\n"
            "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
            "handler.  This catch is established inside the continuation barrier.\n"
            "\n"
            "Once @var{thunk} or @var{handler} returns, the return value is made\n"
            "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
              handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME

typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;

static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}

static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}

SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}

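/* scm_spawn_thread is the C-level counterpart of call-with-new-thread:
   BODY runs in a fresh guile-mode thread, and HANDLER (if non-NULL) is
   installed around it with scm_internal_catch.  A minimal sketch of a
   caller (the function names here are illustrative):

     static SCM
     worker_body (void *data)
     {
       return scm_from_int (42);
     }

     static SCM
     worker_handler (void *data, SCM key, SCM args)
     {
       return SCM_BOOL_F;   // exit value when BODY throws
     }

     SCM thread = scm_spawn_thread (worker_body, NULL,
                                    worker_handler, NULL);
     SCM result = scm_join_thread (thread);   // => 42 on normal exit
*/
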
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
            (),
            "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME

SCM_DEFINE (scm_join_thread, "join-thread", 1, 0, 0,
            (SCM thread),
            "Suspend execution of the calling thread until the target @var{thread} "
            "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread
{
  scm_i_thread *t;
  SCM res;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("can not join the current thread", SCM_EOL);

  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t = SCM_I_THREAD_DATA (thread);
  if (!t->exited)
    {
      while (1)
        {
          block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
          if (t->exited)
            break;
          scm_i_pthread_mutex_unlock (&thread_admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
        }
    }
  res = t->result;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return res;
}
#undef FUNC_NAME

/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level;      /* how much the owner owns us.
                     < 0 for non-recursive mutexes */
  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))

static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}

static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}

static SCM
make_fat_mutex (int recursive)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = recursive? 0 : -1;
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}

SCM_DEFINE (scm_make_mutex, "make-mutex", 0, 0, 0,
            (void),
            "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex
{
  return make_fat_mutex (0);
}
#undef FUNC_NAME

SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
            (void),
            "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1);
}
#undef FUNC_NAME

static char *
fat_mutex_lock (SCM mutex)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  SCM thread = scm_current_thread ();
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      if (m->level >= 0)
        m->level++;
      else
        msg = "mutex already locked by current thread";
    }
  else
    {
      while (1)
        {
          block_self (m->waiting, mutex, &m->lock, NULL);
          if (scm_is_eq (m->owner, thread))
            break;
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}

SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
            (SCM mx),
            "Lock @var{mutex}.  If the mutex is already locked, the calling thread "
            "blocks until the mutex becomes available.  The function returns when "
            "the calling thread owns the lock on @var{mutex}.  Locking a mutex that "
            "a thread already owns will succeed right away and will not block the "
            "thread.  That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex
{
  char *msg;

  SCM_VALIDATE_MUTEX (1, mx);
  msg = fat_mutex_lock (mx);
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME

void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}

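/* scm_dynwind_lock_mutex arranges for MUTEX to be held exactly for the
   duration of the current dynwind context, including non-local exits
   through it.  A sketch of the usage pattern (illustrative only):

     scm_dynwind_begin (0);
     scm_dynwind_lock_mutex (mx);     // locked on entry, and re-locked
                                      // if the context is re-entered
     ... code that may throw ...      // the mutex is released on unwind
     scm_dynwind_end ();              // releases the mutex
*/
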
static char *
fat_mutex_trylock (fat_mutex *m, int *resp)
{
  char *msg = NULL;
  SCM thread = scm_current_thread ();

  *resp = 1;
  scm_i_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      if (m->level >= 0)
        m->level++;
      else
        msg = "mutex already locked by current thread";
    }
  else
    *resp = 0;
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}

SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
            (SCM mutex),
            "Try to lock @var{mutex}.  If the mutex is already locked by someone "
            "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  char *msg;
  int res;

  SCM_VALIDATE_MUTEX (1, mutex);

  msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return scm_from_bool (res);
}
#undef FUNC_NAME

static char *
fat_mutex_unlock (fat_mutex *m)
{
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (!scm_is_eq (m->owner, scm_current_thread ()))
    {
      if (scm_is_false (m->owner))
        msg = "mutex not locked";
      else
        msg = "mutex not locked by current thread";
    }
  else if (m->level > 0)
    m->level--;
  else
    m->owner = unblock_from_queue (m->waiting);
  scm_i_pthread_mutex_unlock (&m->lock);

  return msg;
}

SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
            (SCM mx),
            "Unlocks @var{mutex} if the calling thread owns the lock on "
            "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
            "thread results in undefined behaviour.  Once a mutex has been unlocked, "
            "one thread blocked on @var{mutex} is awakened and grabs the mutex "
            "lock.  Every call to @code{lock-mutex} by this thread must be matched "
            "with a call to @code{unlock-mutex}.  Only the last call to "
            "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex
{
  char *msg;
  SCM_VALIDATE_MUTEX (1, mx);

  msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME

#if 0

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
            (SCM mx),
            "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM_VALIDATE_MUTEX (1, mx);
  return (SCM_MUTEX_DATA(mx))->owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
            (SCM mx),
            "Return the lock level of a recursive mutex, or -1\n"
            "for a standard mutex.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

#endif

/*** Fat condition variables */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;   /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))

static size_t
fat_cond_free (SCM mx)
{
  fat_cond *c = SCM_CONDVAR_DATA (mx);
  scm_i_pthread_mutex_destroy (&c->lock);
  scm_gc_free (c, sizeof (fat_cond), "condition-variable");
  return 0;
}

static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}

SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME

static int
fat_cond_timedwait (SCM cond, SCM mutex,
                    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  fat_cond *c = SCM_CONDVAR_DATA (cond);
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  const char *msg;
  int err = 0;

  while (1)
    {
      scm_i_scm_pthread_mutex_lock (&c->lock);
      msg = fat_mutex_unlock (m);
      t->block_asyncs++;
      if (msg == NULL)
        {
          err = block_self (c->waiting, cond, &c->lock, waittime);
          scm_i_pthread_mutex_unlock (&c->lock);
          fat_mutex_lock (mutex);
        }
      else
        scm_i_pthread_mutex_unlock (&c->lock);
      t->block_asyncs--;
      scm_async_click ();

      if (msg)
        scm_misc_error (NULL, msg, SCM_EOL);

      scm_remember_upto_here_2 (cond, mutex);

      if (err == 0)
        return 1;
      if (err == ETIMEDOUT)
        return 0;
      if (err != EINTR)
        {
          errno = err;
          scm_syserror (NULL);
        }
    }
}

SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
            (SCM cv, SCM mx, SCM t),
            "Wait until @var{cond-var} has been signalled.  While waiting, "
            "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
            "is locked again when this function returns.  When @var{time} is given, "
            "it specifies a point in time where the waiting should be aborted.  It "
            "can be either an integer as returned by @code{current-time} or a pair "
            "as returned by @code{gettimeofday}.  When the waiting is aborted the "
            "mutex is locked and @code{#f} is returned.  When the condition "
            "variable is in fact signalled, the mutex is also locked and @code{#t} "
            "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      if (scm_is_pair (t))
        {
          waittime.tv_sec = scm_to_ulong (SCM_CAR (t));
          waittime.tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
        }
      else
        {
          waittime.tv_sec = scm_to_ulong (t);
          waittime.tv_nsec = 0;
        }
      waitptr = &waittime;
    }

  return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
}
#undef FUNC_NAME

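/* The optional timeout accepted above mirrors the two return types of
   current-time and gettimeofday.  For example, to build an absolute
   timeout roughly five seconds from now from C (an illustrative sketch
   only):

     SCM now = scm_current_time ();                    // integer seconds
     SCM abs_timeout = scm_sum (now, scm_from_int (5));
     scm_timed_wait_condition_variable (cv, mx, abs_timeout);

   or, for sub-second resolution, pass a (seconds . microseconds) pair
   such as the one returned by scm_gettimeofday ().
*/
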
static void
fat_cond_signal (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  unblock_from_queue (c->waiting);
  scm_i_pthread_mutex_unlock (&c->lock);
}

SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

static void
fat_cond_broadcast (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
  scm_i_pthread_mutex_unlock (&c->lock);
}

SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

/*** Marking stacks */

/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?
*/
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                  \
    ucontext_t ctx;                                                     \
    SCM_STACKITEM * top, * bot;                                         \
    getcontext (&ctx);                                                  \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,             \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext)   \
       / sizeof (SCM_STACKITEM)));                                      \
    bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base;    \
    top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp;                  \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif



/*** Select */

int
scm_std_select (int nfds,
                SELECT_TYPE *readfds,
                SELECT_TYPE *writefds,
                SELECT_TYPE *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  eno = errno;
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}

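/* scm_std_select is a drop-in replacement for select(2) for code that
   runs in guile mode: it leaves guile mode around the actual select
   call and adds the thread's wakeup pipe to READFDS so that a system
   async can interrupt the wait, in which case it fails with EINTR.
   A sketch of a caller (illustrative only):

     fd_set readfds;
     struct timeval tv = { 1, 0 };   // one second
     FD_ZERO (&readfds);
     FD_SET (fd, &readfds);
     if (scm_std_select (fd + 1, &readfds, NULL, NULL, &tv) < 0
         && errno == EINTR)
       scm_async_click ();           // run the pending asyncs, then retry
*/
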
/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_mutex_lock (mutex);
  scm_enter_guile (t);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_wait (cond, mutex);
  scm_enter_guile (t);
  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  scm_enter_guile (t);
  return res;
}

#endif

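/* These wrappers let guile-mode code block on raw pthread primitives
   without stalling the rest of the system.  A sketch of how the
   dynwind variant is typically used (illustrative only; the mutex
   name is hypothetical):

     scm_dynwind_begin (0);
     scm_dynwind_pthread_mutex_lock (&some_static_mutex);
     ... code that may throw or return non-locally ...
     scm_dynwind_end ();    // the unwind handler unlocks the mutex
*/
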
unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      SCM_SETCAR (*l, t->handle);
      l = SCM_CDRLOC (*l);
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

static scm_i_pthread_cond_t wake_up_cond;
int scm_i_thread_go_to_sleep;
static int threads_initialized_p = 0;

void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}

void
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}

void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}

void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}

/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
int scm_i_critical_section_level = 0;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

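/* A dynwind critical section combines mutual exclusion with blocked
   asyncs for the rest of the enclosing dynwind context.  A sketch of
   typical use (illustrative only; passing SCM_BOOL_F selects the
   global critical-section mutex):

     scm_dynwind_begin (0);
     scm_dynwind_critical_section (SCM_BOOL_F);
     ... mutate shared state that asyncs must not observe half-done ...
     scm_dynwind_end ();   // unlocks the mutex and unblocks asyncs
*/
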
/*** Initialization */

scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);  /* XXX: Could be removed */

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/