/* libguile/threads.c */
/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */



#define _GNU_SOURCE

#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>
#include <assert.h>
#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"

#ifdef __MINGW32__
#ifndef ETIMEDOUT
# define ETIMEDOUT WSAETIMEDOUT
#endif
# include <fcntl.h>
# include <process.h>
# define pipe(fd) _pipe (fd, 256, O_BINARY)
#endif /* __MINGW32__ */

/*** Queues */

/* Make an empty queue data structure.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  return c;
}

/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, SCM_CDR (c));
          SCM_SETCDR (prev, SCM_CDR (c));
          return 1;
        }
      prev = p;
    }
  return 0;
}

/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c = SCM_CDR (q);
  if (scm_is_null (c))
    return SCM_BOOL_F;
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      return SCM_CAR (c);
    }
}

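/* Illustrative sketch (not part of the original file, hypothetical helper
   name): how the queue primitives above are meant to be combined.  The
   handle returned by enqueue is exactly what remqueue expects, which is
   how block_self below can take itself off a wait queue even when it was
   never woken up.  */
#if 0
static void
queue_example (void)
{
  SCM q = make_queue ();
  SCM h = enqueue (q, SCM_BOOL_T);  /* keep the handle */
  if (remqueue (q, h))              /* returns 1: still on the queue */
    ;
  /* dequeue (q) would now return SCM_BOOL_F: the queue is empty again. */
}
#endif
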
/*** Thread smob routines */

static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  return t->dynamic_state;
}

static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_puts ("#<thread ", port);
  scm_uintprint ((size_t)t->pthread, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}

static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}

/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The QUEUE data structure is assumed to be protected by MUTEX and
   the caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}

/* Wake up the first thread on QUEUE, if any.  The caller must hold
   the mutex that protects QUEUE.  The awoken thread is returned, or
   #f when the queue was empty.
 */
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}

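/* Illustrative sketch (not part of the original file, hypothetical helper
   names): the canonical way block_self and unblock_from_queue are paired
   elsewhere in this file (see scm_join_thread and fat_mutex_lock below).
   The waiter re-checks its condition in a loop and runs pending asyncs
   via SCM_TICK after a wakeup that turns out to be an interruption.  */
#if 0
static void
wait_for_flag_example (SCM wait_queue, SCM protecting_obj,
                       scm_i_pthread_mutex_t *lock, int *flag)
{
  scm_i_scm_pthread_mutex_lock (lock);
  while (!*flag)
    {
      block_self (wait_queue, protecting_obj, lock, NULL);
      if (*flag)
        break;
      scm_i_pthread_mutex_unlock (lock);
      SCM_TICK;                     /* run any pending system asyncs */
      scm_i_scm_pthread_mutex_lock (lock);
    }
  scm_i_pthread_mutex_unlock (lock);
}

/* The signalling side sets *flag while holding the same lock and calls
   unblock_from_queue (wait_queue) to wake one waiter.  */
#endif
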
/* Getting into and out of guile mode.
 */

/* Ken Raeburn observes that the implementation of suspend and resume
   (and the things that build on top of them) is very likely not
   correct (see below).  We will need to fix this eventually, and
   that's why scm_leave_guile/scm_enter_guile are not exported in the
   API.

   Ken writes:

   Consider this sequence:

   Function foo, called in Guile mode, calls suspend (maybe indirectly
   through scm_leave_guile), which does this:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t);  // just takes address of automatic var 't'
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS;   // sparc only
      setjmp (t->regs);             // here's most of the magic

   ... and returns.

   Function foo has a SCM value X, a handle on a non-immediate object, in
   a caller-saved register R, and it's the only reference to the object
   currently.

   The compiler wants to use R in suspend, so it pushes the current
   value, X, into a stack slot which will be reloaded on exit from
   suspend; then it loads stuff into R and goes about its business.  The
   setjmp call saves (some of) the current registers, including R, which
   no longer contains X.  (This isn't a problem for a normal
   setjmp/longjmp situation, where longjmp would be called before
   setjmp's caller returns; the old value for X would be loaded back from
   the stack after the longjmp, before the function returned.)

   So, suspend returns, loading X back into R (and invalidating the jump
   buffer) in the process.  The caller foo then goes off and calls a
   bunch of other functions out of Guile mode, occasionally storing X on
   the stack again, but, say, much deeper on the stack than suspend's
   stack frame went, and the stack slot where suspend had written X has
   long since been overwritten with other values.

   Okay, nothing actively broken so far.  Now, let garbage collection
   run, triggered by another thread.

   The thread calling foo is out of Guile mode at the time, so the
   garbage collector just scans a range of stack addresses.  Too bad that
   X isn't stored there.  So the pointed-to storage goes onto the free
   list, and I think you can see where things go from there.

   Is there anything I'm missing that'll prevent this scenario from
   happening?  I mean, aside from, "well, suspend and scm_leave_guile
   don't have many local variables, so they probably won't need to save
   any registers on most systems, so we hope everything will wind up in
   the jump buffer and we'll just get away with it"?

   (And, going the other direction, if scm_leave_guile and suspend push
   the stack pointer over onto a new page, and foo doesn't make further
   function calls and thus the stack pointer no longer includes that
   page, are we guaranteed that the kernel cannot release the now-unused
   stack page that contains the top-of-stack pointer we just saved?  I
   don't know if any OS actually does that.  If it does, we could get
   faults in garbage collection.)

   I don't think scm_without_guile has to have this problem, as it gets
   more control over the stack handling -- but it should call setjmp
   itself.  I'd probably try something like:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t);
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS;
      setjmp (t->regs);
      res = func(data);
      scm_enter_guile (t);

   ... though even that's making some assumptions about the stack
   ordering of local variables versus caller-saved registers.

   For something like scm_leave_guile to work, I don't think it can just
   rely on invalidated jump buffers.  A valid jump buffer, and a handle
   on the stack state at the point when the jump buffer was initialized,
   together, would work fine, but I think then we're talking about macros
   invoking setjmp in the caller's stack frame, and requiring that the
   caller of scm_leave_guile also call scm_enter_guile before returning,
   kind of like pthread_cleanup_push/pop calls that have to be paired up
   in a function.  (In fact, the pthread ones have to be paired up
   syntactically, as if they might expand to a compound statement
   incorporating the user's code, and invoking a compiler's
   exception-handling primitives.  Which might be something to think
   about for cases where Guile is used with C++ exceptions or
   pthread_cancel.)
*/

scm_i_pthread_key_t scm_i_thread_key;

static void
resume (scm_i_thread *t)
{
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}

typedef void* scm_t_guile_ticket;

static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      scm_i_pthread_mutex_lock (&t->heap_mutex);
      resume (t);
    }
}

static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}

static scm_t_guile_ticket
scm_leave_guile ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_mutex_unlock (&t->heap_mutex);
  return (scm_t_guile_ticket) t;
}

static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static int thread_count;

static SCM scm_i_default_dynamic_state;

/* Perform first stage of thread initialisation, in non-guile mode.
 */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = malloc (sizeof (scm_i_thread));

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->last_debug_frame = NULL;
  t->base = base;
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;
  /* XXX - check for errors. */
  pipe (t->sleep_pipe);
  scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
  t->clear_freelists_p = 0;
  t->gc_running_p = 0;
  t->exited = 0;

  t->freelist = SCM_EOL;
  t->freelist2 = SCM_EOL;
  SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
  SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);

  scm_i_pthread_setspecific (scm_i_thread_key, t);

  scm_i_pthread_mutex_lock (&t->heap_mutex);

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}

/* Perform second stage of thread initialisation, in guile mode.
 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;
}

/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v;

  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return NULL;
}

static void
on_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v, **tp;

  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.
  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.
  */
  scm_leave_guile ();
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;
        break;
      }
  thread_count--;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}

static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}

/* Perform any initializations necessary to bring the current thread
   into guile mode, initializing Guile itself, if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Return zero when the thread was in guile mode already; otherwise
   return 1.
*/

static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
        {
          /* First thread ever to enter Guile.  Run the full
             initialization.
          */
          scm_i_init_guile (base);
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
        }
      else
        {
          /* Guile is already initialized, but this thread enters it for
             the first time.  Only initialize this thread.
          */
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
          guilify_self_1 (base);
          guilify_self_2 (parent);
        }
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
         resume it.

         XXX - base might be lower than when this thread was first
         guilified.
      */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
       */
      return 0;
    }
}

#if SCM_USE_PTHREAD_THREADS
#ifdef HAVE_PTHREAD_ATTR_GETSTACK

#define HAVE_GET_THREAD_STACK_BASE

static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  if ((void *)&attr < start || (void *)&attr >= end)
    return scm_get_stack_base ();
  else
#endif
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}

#endif /* HAVE_PTHREAD_ATTR_GETSTACK */

#else /* !SCM_USE_PTHREAD_THREADS */

#define HAVE_GET_THREAD_STACK_BASE

static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}

#endif /* !SCM_USE_PTHREAD_THREADS */

#ifdef HAVE_GET_THREAD_STACK_BASE

void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
                               scm_i_default_dynamic_state);
}

#endif

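/* Illustrative sketch (not part of the original file, hypothetical user
   code): how an embedding program is expected to call scm_init_guile.
   It puts the calling thread permanently into guile mode, using the
   stack base found by get_thread_stack_base above; after that any
   libguile API can be used from this thread.  */
#if 0
#include <libguile.h>

int
main (int argc, char **argv)
{
  scm_init_guile ();                /* this thread is now in guile mode */
  scm_c_eval_string ("(display \"hello from Guile\\n\")");
  return 0;
}
#endif
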
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
                                      scm_i_default_dynamic_state);
}

void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data,
                             SCM parent)
{
  void *res;
  int really_entered;
  SCM_STACKITEM base_item;
  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  res = scm_c_with_continuation_barrier (func, data);
  if (really_entered)
    scm_leave_guile ();
  return res;
}

void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *res;
  scm_t_guile_ticket t;
  t = scm_leave_guile ();
  res = func (data);
  scm_enter_guile (t);
  return res;
}

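/* Illustrative sketch (not part of the original file, hypothetical helper
   names): the intended pairing of scm_with_guile and scm_without_guile.
   Code that may block for a long time (here a plain sleep) is wrapped in
   scm_without_guile so that the GC can scan this thread's stack while it
   sleeps.  */
#if 0
#include <unistd.h>

static void *
blocking_part (void *data)
{
  sleep (1);                        /* safe: we are outside guile mode */
  return NULL;
}

static void *
in_guile (void *data)
{
  SCM sym = scm_from_locale_symbol ("example");  /* protected in guile mode */
  scm_without_guile (blocking_part, NULL);
  scm_remember_upto_here_1 (sym);
  return NULL;
}

static void
with_guile_example (void)
{
  scm_with_guile (in_guile, NULL);
}
#endif
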
/*** Thread creation */

typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;

static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}

static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}

SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
            (SCM thunk, SCM handler),
            "Call @code{thunk} in a new thread and with a new dynamic state,\n"
            "returning a new thread object representing the thread.  The procedure\n"
            "@var{thunk} is called via @code{with-continuation-barrier}.\n"
            "\n"
            "When @var{handler} is specified, then @var{thunk} is called from\n"
            "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
            "handler.  This catch is established inside the continuation barrier.\n"
            "\n"
            "Once @var{thunk} or @var{handler} returns, the return value is made\n"
            "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
              handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME

typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;

static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}

static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}

SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}

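/* Illustrative sketch (not part of the original file, hypothetical helper
   names): creating a thread from C with scm_spawn_thread and later
   joining it with scm_join_thread (defined further down).  The body and
   handler follow the scm_t_catch_body and scm_t_catch_handler
   signatures.  */
#if 0
static SCM
example_body (void *data)
{
  return scm_from_int (*(int *) data);
}

static SCM
example_handler (void *data, SCM key, SCM args)
{
  return SCM_BOOL_F;                /* exit value when the body throws */
}

static SCM
spawn_and_join_example (void)
{
  static int arg = 7;
  SCM thread = scm_spawn_thread (example_body, &arg,
                                 example_handler, NULL);
  return scm_join_thread (thread);  /* => 7, or #f after a throw */
}
#endif
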
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
            (),
            "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME

SCM_DEFINE (scm_join_thread, "join-thread", 1, 0, 0,
            (SCM thread),
            "Suspend execution of the calling thread until the target @var{thread} "
            "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread
{
  scm_i_thread *t;
  SCM res;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("can not join the current thread", SCM_EOL);

  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t = SCM_I_THREAD_DATA (thread);
  if (!t->exited)
    {
      while (1)
        {
          block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
          if (t->exited)
            break;
          scm_i_pthread_mutex_unlock (&thread_admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
        }
    }
  res = t->result;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return res;
}
#undef FUNC_NAME

/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level;      /* how much the owner owns us.
                     < 0 for non-recursive mutexes */
  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))

static SCM
fat_mutex_mark (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}

static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}

static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}

static SCM
make_fat_mutex (int recursive)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = recursive? 0 : -1;
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}

SCM_DEFINE (scm_make_mutex, "make-mutex", 0, 0, 0,
            (void),
            "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex
{
  return make_fat_mutex (0);
}
#undef FUNC_NAME

SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
            (void),
            "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1);
}
#undef FUNC_NAME

static char *
fat_mutex_lock (SCM mutex)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  SCM thread = scm_current_thread ();
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      if (m->level >= 0)
        m->level++;
      else
        msg = "mutex already locked by current thread";
    }
  else
    {
      while (1)
        {
          block_self (m->waiting, mutex, &m->lock, NULL);
          if (scm_is_eq (m->owner, thread))
            break;
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}

SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
            (SCM mx),
            "Lock @var{mutex}. If the mutex is already locked, the calling thread "
            "blocks until the mutex becomes available. The function returns when "
            "the calling thread owns the lock on @var{mutex}.  Locking a mutex that "
            "a thread already owns will succeed right away and will not block the "
            "thread.  That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex
{
  char *msg;

  SCM_VALIDATE_MUTEX (1, mx);
  msg = fat_mutex_lock (mx);
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME

void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}

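/* Illustrative sketch (not part of the original file, hypothetical helper
   name): using scm_dynwind_lock_mutex inside a dynwind context so the
   mutex is released even if the protected code throws or is left through
   a continuation.  */
#if 0
static void
with_mutex_example (SCM mutex)
{
  scm_dynwind_begin (0);
  scm_dynwind_lock_mutex (mutex);   /* unlocked again on any exit */

  /* ... code that may throw ... */

  scm_dynwind_end ();
}
#endif
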
static char *
fat_mutex_trylock (fat_mutex *m, int *resp)
{
  char *msg = NULL;
  SCM thread = scm_current_thread ();

  *resp = 1;
  scm_i_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      if (m->level >= 0)
        m->level++;
      else
        msg = "mutex already locked by current thread";
    }
  else
    *resp = 0;
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}

SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
            (SCM mutex),
            "Try to lock @var{mutex}. If the mutex is already locked by someone "
            "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  char *msg;
  int res;

  SCM_VALIDATE_MUTEX (1, mutex);

  msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return scm_from_bool (res);
}
#undef FUNC_NAME

static char *
fat_mutex_unlock (fat_mutex *m)
{
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (!scm_is_eq (m->owner, scm_current_thread ()))
    {
      if (scm_is_false (m->owner))
        msg = "mutex not locked";
      else
        msg = "mutex not locked by current thread";
    }
  else if (m->level > 0)
    m->level--;
  else
    m->owner = unblock_from_queue (m->waiting);
  scm_i_pthread_mutex_unlock (&m->lock);

  return msg;
}

SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
            (SCM mx),
            "Unlocks @var{mutex} if the calling thread owns the lock on "
            "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
            "thread results in undefined behaviour.  Once a mutex has been unlocked, "
            "one thread blocked on @var{mutex} is awakened and grabs the mutex "
            "lock.  Every call to @code{lock-mutex} by this thread must be matched "
            "with a call to @code{unlock-mutex}.  Only the last call to "
            "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex
{
  char *msg;
  SCM_VALIDATE_MUTEX (1, mx);

  msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME

#if 0

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
            (SCM mx),
            "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM_VALIDATE_MUTEX (1, mx);
  return (SCM_MUTEX_DATA(mx))->owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
            (SCM mx),
            "Return the lock level of a recursive mutex, or -1\n"
            "for a standard mutex.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

#endif

/*** Fat condition variables */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))

static SCM
fat_cond_mark (SCM cv)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  return c->waiting;
}

static size_t
fat_cond_free (SCM mx)
{
  fat_cond *c = SCM_CONDVAR_DATA (mx);
  scm_i_pthread_mutex_destroy (&c->lock);
  scm_gc_free (c, sizeof (fat_cond), "condition-variable");
  return 0;
}

static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}

SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
            (void),
            "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME

static int
fat_cond_timedwait (SCM cond, SCM mutex,
                    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  fat_cond *c = SCM_CONDVAR_DATA (cond);
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  const char *msg;
  int err = 0;

  while (1)
    {
      scm_i_scm_pthread_mutex_lock (&c->lock);
      msg = fat_mutex_unlock (m);
      t->block_asyncs++;
      if (msg == NULL)
        {
          err = block_self (c->waiting, cond, &c->lock, waittime);
          scm_i_pthread_mutex_unlock (&c->lock);
          fat_mutex_lock (mutex);
        }
      else
        scm_i_pthread_mutex_unlock (&c->lock);
      t->block_asyncs--;
      scm_async_click ();

      if (msg)
        scm_misc_error (NULL, msg, SCM_EOL);

      scm_remember_upto_here_2 (cond, mutex);

      if (err == 0)
        return 1;
      if (err == ETIMEDOUT)
        return 0;
      if (err != EINTR)
        {
          errno = err;
          scm_syserror (NULL);
        }
    }
}

SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
            (SCM cv, SCM mx, SCM t),
            "Wait until @var{cond-var} has been signalled.  While waiting, "
            "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
            "is locked again when this function returns.  When @var{time} is given, "
            "it specifies a point in time where the waiting should be aborted.  It "
            "can be either an integer as returned by @code{current-time} or a pair "
            "as returned by @code{gettimeofday}.  When the waiting is aborted the "
            "mutex is locked and @code{#f} is returned.  When the condition "
            "variable is in fact signalled, the mutex is also locked and @code{#t} "
            "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      if (scm_is_pair (t))
        {
          waittime.tv_sec = scm_to_ulong (SCM_CAR (t));
          waittime.tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
        }
      else
        {
          waittime.tv_sec = scm_to_ulong (t);
          waittime.tv_nsec = 0;
        }
      waitptr = &waittime;
    }

  return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
}
#undef FUNC_NAME

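/* Illustrative sketch (not part of the original file, hypothetical helper
   name): calling the timed wait from C.  The optional timeout is an
   absolute time, either an integer (seconds since the epoch) or a
   (seconds . microseconds) pair as produced by gettimeofday; the pair
   form is converted above with tv_nsec = usecs * 1000.  */
#if 0
static SCM
timed_wait_example (SCM cv, SCM mx)
{
  /* Wait until 5 seconds from now, expressed as (secs . usecs). */
  SCM abstime = scm_cons (scm_sum (scm_current_time (), scm_from_int (5)),
                          scm_from_int (0));
  return scm_timed_wait_condition_variable (cv, mx, abstime);
}
#endif
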
static void
fat_cond_signal (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  unblock_from_queue (c->waiting);
  scm_i_pthread_mutex_unlock (&c->lock);
}

SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

static void
fat_cond_broadcast (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
  scm_i_pthread_mutex_unlock (&c->lock);
}

SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
            (SCM cv),
            "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

/*** Marking stacks */

/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?
*/
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                \
    ucontext_t ctx;                                                   \
    SCM_STACKITEM * top, * bot;                                       \
    getcontext (&ctx);                                                \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,           \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM)));                                    \
    bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base;  \
    top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp;                \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif

void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      scm_gc_mark (t->handle);

#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      scm_mark_locations ((SCM_STACKITEM *) t->regs,
                          ((size_t) sizeof(t->regs)
                           / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}

/*** Select */

int
scm_std_select (int nfds,
                SELECT_TYPE *readfds,
                SELECT_TYPE *writefds,
                SELECT_TYPE *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  eno = errno;
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}

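/* Illustrative sketch (not part of the original file, hypothetical helper
   name): scm_std_select is the guile-mode replacement for select(2).  It
   leaves guile mode around the actual system call and adds the thread's
   wakeup pipe to the read set, so the wait can be interrupted by system
   asyncs and reports EINTR via errno just like plain select.  */
#if 0
#include <errno.h>

static int
wait_readable_example (int fd, long seconds)
{
  fd_set readfds;
  struct timeval tv;

  FD_ZERO (&readfds);
  FD_SET (fd, &readfds);
  tv.tv_sec = seconds;
  tv.tv_usec = 0;

  if (scm_std_select (fd + 1, &readfds, NULL, NULL, &tv) < 0
      && errno == EINTR)
    return -1;                      /* interrupted by an async */
  return FD_ISSET (fd, &readfds);   /* 0 on timeout, non-zero if readable */
}
#endif
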
/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_mutex_lock (mutex);
  scm_enter_guile (t);
  return res;
}

static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}

void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_wait (cond, mutex);
  scm_enter_guile (t);
  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  scm_enter_guile (t);
  return res;
}

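/* Illustrative sketch (not part of the original file, hypothetical helper
   names): the wrappers above let C code block on a raw pthread mutex and
   condition variable without stalling the garbage collector, because
   they leave guile mode around the blocking call.  */
#if 0
static scm_i_pthread_mutex_t ex_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_pthread_cond_t ex_cond;  /* set up with scm_i_pthread_cond_init */
static int ex_ready = 0;

static void
wait_ready_example (void)
{
  scm_pthread_mutex_lock (&ex_lock);      /* GC-friendly lock */
  while (!ex_ready)
    scm_pthread_cond_wait (&ex_cond, &ex_lock);
  scm_i_pthread_mutex_unlock (&ex_lock);
}
#endif
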
#endif

unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
            (void),
            "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
            (void),
            "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      SCM_SETCAR (*l, t->handle);
      l = SCM_CDRLOC (*l);
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
            (SCM thread),
            "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

static scm_i_pthread_cond_t wake_up_cond;
int scm_i_thread_go_to_sleep;
static int threads_initialized_p = 0;

void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}

void
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}

void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}

void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}

/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
int scm_i_critical_section_level = 0;

static SCM dynwind_critical_section_mutex;

void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}

/*** Initialization */

scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif

void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
                             PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
                            scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}

scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/