/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */


#define _GNU_SOURCE

#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>
#include <assert.h>
#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/init.h"

#ifdef __MINGW32__
#ifndef ETIMEDOUT
# define ETIMEDOUT WSAETIMEDOUT
#endif
# include <fcntl.h>
# include <process.h>
# define pipe(fd) _pipe (fd, 256, O_BINARY)
#endif /* __MINGW32__ */

/*** Queues */

/* Make an empty queue data structure.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  return c;
}

/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, SCM_CDR (c));
          SCM_SETCDR (prev, SCM_CDR (c));
          return 1;
        }
      prev = p;
    }
  return 0;
}

/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c = SCM_CDR (q);
  if (scm_is_null (c))
    return SCM_BOOL_F;
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      return SCM_CAR (c);
    }
}

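
/* Editorial note: a hedged usage sketch of the queue helpers above,
   added for illustration and kept out of the build.  The car of the
   queue pair points at the tail cell and the cdr at the head, so
   enqueue appends in O(1) and dequeue pops in FIFO order.  The local
   names below are hypothetical.  */
#if 0
static void
queue_example (void)
{
  SCM q = make_queue ();
  SCM h1 = enqueue (q, scm_from_int (1));
  enqueue (q, scm_from_int (2));

  remqueue (q, h1);              /* drop the first element again      */
  dequeue (q);                   /* => the SCM for 2                  */
  dequeue (q);                   /* => SCM_BOOL_F, the queue is empty */
}
#endif
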
/*** Thread smob routines */

static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  return t->dynamic_state;
}

static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_puts ("#<thread ", port);
  scm_uintprint ((size_t)t->pthread, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}

static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}

/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The QUEUE data structure is assumed to be protected by MUTEX and
   the caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}

/* Wake up the first thread on QUEUE, if any.  The caller must hold
   the mutex that protects QUEUE.  The awoken thread is returned, or
   #f when the queue was empty.
 */
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}

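
/* Editorial note: a hedged sketch (not compiled) of the calling
   convention that block_self and unblock_from_queue expect; compare
   the real uses in scm_join_thread and fat_mutex_lock below.  The
   caller holds MUTEX, re-checks its predicate after every wakeup, and
   drops the lock around SCM_TICK so that pending asyncs can run.  The
   function and the FLAG parameter are hypothetical.  */
#if 0
static void
wait_until_flag (SCM queue, SCM obj, scm_i_pthread_mutex_t *mutex, int *flag)
{
  while (!*flag)
    {
      block_self (queue, obj, mutex, NULL);
      if (*flag)
        break;
      scm_i_pthread_mutex_unlock (mutex);
      SCM_TICK;
      scm_i_scm_pthread_mutex_lock (mutex);
    }
}
#endif
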
/* Getting into and out of guile mode.
 */

/* Ken Raeburn observes that the implementation of suspend and resume
   (and the things that build on top of them) is very likely not
   correct (see below).  We will need to fix this eventually, and
   that's why scm_leave_guile/scm_enter_guile are not exported in the
   API.

   Ken writes:

   Consider this sequence:

   Function foo, called in Guile mode, calls suspend (maybe indirectly
   through scm_leave_guile), which does this:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t); // just takes address of automatic
      var 't'
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS;  // sparc only
      setjmp (t->regs);            // here's most of the magic

   ... and returns.

   Function foo has a SCM value X, a handle on a non-immediate object, in
   a caller-saved register R, and it's the only reference to the object
   currently.

   The compiler wants to use R in suspend, so it pushes the current
   value, X, into a stack slot which will be reloaded on exit from
   suspend; then it loads stuff into R and goes about its business.  The
   setjmp call saves (some of) the current registers, including R, which
   no longer contains X.  (This isn't a problem for a normal
   setjmp/longjmp situation, where longjmp would be called before
   setjmp's caller returns; the old value for X would be loaded back from
   the stack after the longjmp, before the function returned.)

   So, suspend returns, loading X back into R (and invalidating the jump
   buffer) in the process.  The caller foo then goes off and calls a
   bunch of other functions out of Guile mode, occasionally storing X on
   the stack again, but, say, much deeper on the stack than suspend's
   stack frame went, and the stack slot where suspend had written X has
   long since been overwritten with other values.

   Okay, nothing actively broken so far.  Now, let garbage collection
   run, triggered by another thread.

   The thread calling foo is out of Guile mode at the time, so the
   garbage collector just scans a range of stack addresses.  Too bad that
   X isn't stored there.  So the pointed-to storage goes onto the free
   list, and I think you can see where things go from there.

   Is there anything I'm missing that'll prevent this scenario from
   happening?  I mean, aside from, "well, suspend and scm_leave_guile
   don't have many local variables, so they probably won't need to save
   any registers on most systems, so we hope everything will wind up in
   the jump buffer and we'll just get away with it"?

   (And, going the other direction, if scm_leave_guile and suspend push
   the stack pointer over onto a new page, and foo doesn't make further
   function calls and thus the stack pointer no longer includes that
   page, are we guaranteed that the kernel cannot release the now-unused
   stack page that contains the top-of-stack pointer we just saved?  I
   don't know if any OS actually does that.  If it does, we could get
   faults in garbage collection.)

   I don't think scm_without_guile has to have this problem, as it gets
   more control over the stack handling -- but it should call setjmp
   itself.  I'd probably try something like:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t);
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS;
      setjmp (t->regs);
      res = func(data);
      scm_enter_guile (t);

   ... though even that's making some assumptions about the stack
   ordering of local variables versus caller-saved registers.

   For something like scm_leave_guile to work, I don't think it can just
   rely on invalidated jump buffers.  A valid jump buffer, and a handle
   on the stack state at the point when the jump buffer was initialized,
   together, would work fine, but I think then we're talking about macros
   invoking setjmp in the caller's stack frame, and requiring that the
   caller of scm_leave_guile also call scm_enter_guile before returning,
   kind of like pthread_cleanup_push/pop calls that have to be paired up
   in a function.  (In fact, the pthread ones have to be paired up
   syntactically, as if they might expand to a compound statement
   incorporating the user's code, and invoking a compiler's
   exception-handling primitives.  Which might be something to think
   about for cases where Guile is used with C++ exceptions or
   pthread_cancel.)
*/

scm_i_pthread_key_t scm_i_thread_key;

static void
resume (scm_i_thread *t)
{
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}

typedef void* scm_t_guile_ticket;

static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      scm_i_pthread_mutex_lock (&t->heap_mutex);
      resume (t);
    }
}

static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}

static scm_t_guile_ticket
scm_leave_guile ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_mutex_unlock (&t->heap_mutex);
  return (scm_t_guile_ticket) t;
}

static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static int thread_count;

static SCM scm_i_default_dynamic_state;

/* Perform first stage of thread initialisation, in non-guile mode.
 */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = malloc (sizeof (scm_i_thread));

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->last_debug_frame = NULL;
  t->base = base;
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;
  /* XXX - check for errors. */
  pipe (t->sleep_pipe);
  scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
  t->clear_freelists_p = 0;
  t->gc_running_p = 0;
  t->exited = 0;

  t->freelist = SCM_EOL;
  t->freelist2 = SCM_EOL;
  SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
  SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);

  scm_i_pthread_setspecific (scm_i_thread_key, t);

  scm_i_pthread_mutex_lock (&t->heap_mutex);

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}

/* Perform second stage of thread initialisation, in guile mode.
 */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;
}

/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v;

  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return NULL;
}

static void
on_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v, **tp;

  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.
  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.
  */
  scm_leave_guile ();
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;
        break;
      }
  thread_count--;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}

static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}

/* Perform any initializations necessary to bring the current thread
   into guile mode, initializing Guile itself, if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Return zero when the thread was in guile mode already; otherwise
   return 1.
*/

static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
        {
          /* First thread ever to enter Guile.  Run the full
             initialization.
          */
          scm_i_init_guile (base);
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
        }
      else
        {
          /* Guile is already initialized, but this thread enters it for
             the first time.  Only initialize this thread.
          */
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
          guilify_self_1 (base);
          guilify_self_2 (parent);
        }
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
         resume it.

         XXX - base might be lower than when this thread was first
         guilified.
       */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}

#ifdef HAVE_LIBC_STACK_END

extern void *__libc_stack_end;

#if SCM_USE_PTHREAD_THREADS
#ifdef HAVE_PTHREAD_ATTR_GETSTACK

#define HAVE_GET_THREAD_STACK_BASE

static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use __libc_stack_end in that
     case.
  */

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  if ((void *)&attr < start || (void *)&attr >= end)
    return __libc_stack_end;
  else
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}

#endif /* HAVE_PTHREAD_ATTR_GETSTACK */

#else /* !SCM_USE_PTHREAD_THREADS */

#define HAVE_GET_THREAD_STACK_BASE

static SCM_STACKITEM *
get_thread_stack_base ()
{
  return __libc_stack_end;
}

#endif /* !SCM_USE_PTHREAD_THREADS */
#endif /* HAVE_LIBC_STACK_END */

#ifdef HAVE_GET_THREAD_STACK_BASE

void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
                               scm_i_default_dynamic_state);
}

#endif

void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
                                      scm_i_default_dynamic_state);
}

void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data,
                             SCM parent)
{
  void *res;
  int really_entered;
  SCM_STACKITEM base_item;
  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  res = scm_c_with_continuation_barrier (func, data);
  if (really_entered)
    scm_leave_guile ();
  return res;
}

void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *res;
  scm_t_guile_ticket t;
  t = scm_leave_guile ();
  res = func (data);
  scm_enter_guile (t);
  return res;
}

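
/* Editorial note: a hedged sketch (not compiled) of how
   scm_without_guile is meant to be used: wrap a call that may block
   for a long time so that other threads can enter Guile and run GC in
   the meantime.  The helper names are hypothetical.  */
#if 0
static void *
wait_for_byte (void *data)
{
  int *fd = data;
  char c;
  read (*fd, &c, 1);     /* may block; we are out of guile mode here */
  return NULL;
}

static void
example_blocking_read (int fd)
{
  scm_without_guile (wait_for_byte, &fd);
}
#endif
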
/*** Thread creation */

typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;

static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}

static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}

SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME

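
/* Editorial note: a hedged sketch (not compiled) of using the
   primitive above from C.  Passing SCM_UNDEFINED for the optional
   handler gives the no-handler behaviour; join-thread then delivers
   the thread's exit value.  The helper name is hypothetical.  */
#if 0
static SCM
run_thunk_in_thread (SCM thunk)
{
  SCM thread = scm_call_with_new_thread (thunk, SCM_UNDEFINED);
  return scm_join_thread (thread);
}
#endif
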
typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;

static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}

static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}

SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}

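
/* Editorial note: a hedged sketch (not compiled) of scm_spawn_thread.
   The body and handler match the scm_t_catch_body and
   scm_t_catch_handler signatures; all three helper functions are
   hypothetical.  */
#if 0
static SCM
worker_body (void *data)
{
  return scm_from_int (*(int *) data);
}

static SCM
worker_handler (void *data, SCM key, SCM args)
{
  return SCM_BOOL_F;      /* becomes the exit value if the body throws */
}

static SCM
spawn_worker (int *arg)
{
  return scm_spawn_thread (worker_body, arg, worker_handler, arg);
}
#endif
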
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME

SCM_DEFINE (scm_join_thread, "join-thread", 1, 0, 0,
	    (SCM thread),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread
{
  scm_i_thread *t;
  SCM res;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("can not join the current thread", SCM_EOL);

  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t = SCM_I_THREAD_DATA (thread);
  if (!t->exited)
    {
      while (1)
        {
          block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
          if (t->exited)
            break;
          scm_i_pthread_mutex_unlock (&thread_admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
        }
    }
  res = t->result;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return res;
}
#undef FUNC_NAME

/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level;      /* how much the owner owns us.
                     < 0 for non-recursive mutexes */
  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))

static SCM
fat_mutex_mark (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}

static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}

static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}

static SCM
make_fat_mutex (int recursive)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = recursive? 0 : -1;
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}

SCM_DEFINE (scm_make_mutex, "make-mutex", 0, 0, 0,
	    (void),
	    "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex
{
  return make_fat_mutex (0);
}
#undef FUNC_NAME

SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1);
}
#undef FUNC_NAME

static char *
fat_mutex_lock (SCM mutex)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  SCM thread = scm_current_thread ();
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      if (m->level >= 0)
        m->level++;
      else
        msg = "mutex already locked by current thread";
    }
  else
    {
      while (1)
        {
          block_self (m->waiting, mutex, &m->lock, NULL);
          if (scm_is_eq (m->owner, thread))
            break;
          scm_i_pthread_mutex_unlock (&m->lock);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&m->lock);
        }
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}

SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
	    (SCM mx),
	    "Lock @var{mutex}. If the mutex is already locked, the calling thread "
	    "blocks until the mutex becomes available. The function returns when "
	    "the calling thread owns the lock on @var{mutex}.  Locking a mutex that "
	    "a thread already owns will succeed right away and will not block the "
	    "thread.  That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex
{
  char *msg;

  SCM_VALIDATE_MUTEX (1, mx);
  msg = fat_mutex_lock (mx);
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME

void
scm_frame_lock_mutex (SCM mutex)
{
  scm_frame_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
                                     SCM_F_WIND_EXPLICITLY);
  scm_frame_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
                                     SCM_F_WIND_EXPLICITLY);
}

static char *
fat_mutex_trylock (fat_mutex *m, int *resp)
{
  char *msg = NULL;
  SCM thread = scm_current_thread ();

  *resp = 1;
  scm_i_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      if (m->level >= 0)
        m->level++;
      else
        msg = "mutex already locked by current thread";
    }
  else
    *resp = 0;
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}

SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
	    (SCM mutex),
	    "Try to lock @var{mutex}. If the mutex is already locked by someone "
	    "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  char *msg;
  int res;

  SCM_VALIDATE_MUTEX (1, mutex);

  msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return scm_from_bool (res);
}
#undef FUNC_NAME

static char *
fat_mutex_unlock (fat_mutex *m)
{
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (!scm_is_eq (m->owner, scm_current_thread ()))
    {
      if (scm_is_false (m->owner))
        msg = "mutex not locked";
      else
        msg = "mutex not locked by current thread";
    }
  else if (m->level > 0)
    m->level--;
  else
    m->owner = unblock_from_queue (m->waiting);
  scm_i_pthread_mutex_unlock (&m->lock);

  return msg;
}

SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
	    (SCM mx),
	    "Unlocks @var{mutex} if the calling thread owns the lock on "
	    "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
	    "thread results in undefined behaviour.  Once a mutex has been unlocked, "
	    "one thread blocked on @var{mutex} is awakened and grabs the mutex "
	    "lock.  Every call to @code{lock-mutex} by this thread must be matched "
	    "with a call to @code{unlock-mutex}.  Only the last call to "
	    "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex
{
  char *msg;
  SCM_VALIDATE_MUTEX (1, mx);

  msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME

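
/* Editorial note: a hedged sketch (not compiled) of the C entry
   points for the fat mutexes above.  scm_frame_lock_mutex (defined
   earlier in this file) unlocks again when the enclosing frame is
   left, even on a non-local exit; the scm_frame_begin/scm_frame_end
   pair is assumed from dynwind.h of the same vintage, and the helper
   is hypothetical.  */
#if 0
static void
with_mutex (SCM mutex, void (*proc) (void *), void *data)
{
  scm_frame_begin (0);
  scm_frame_lock_mutex (mutex);
  proc (data);
  scm_frame_end ();
}
#endif
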
#if 0

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
	    (SCM mx),
	    "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM_VALIDATE_MUTEX (1, mx);
  return (SCM_MUTEX_DATA(mx))->owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of a recursive mutex, or -1\n"
	    "for a standard mutex.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

#endif

/*** Fat condition variables */

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))

static SCM
fat_cond_mark (SCM cv)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  return c->waiting;
}

static size_t
fat_cond_free (SCM mx)
{
  fat_cond *c = SCM_CONDVAR_DATA (mx);
  scm_i_pthread_mutex_destroy (&c->lock);
  scm_gc_free (c, sizeof (fat_cond), "condition-variable");
  return 0;
}

static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}

SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME

static int
fat_cond_timedwait (SCM cond, SCM mutex,
                    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  fat_cond *c = SCM_CONDVAR_DATA (cond);
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  const char *msg;
  int err = 0;

  while (1)
    {
      scm_i_scm_pthread_mutex_lock (&c->lock);
      msg = fat_mutex_unlock (m);
      t->block_asyncs++;
      if (msg == NULL)
        {
          err = block_self (c->waiting, cond, &c->lock, waittime);
          scm_i_pthread_mutex_unlock (&c->lock);
          fat_mutex_lock (mutex);
        }
      else
        scm_i_pthread_mutex_unlock (&c->lock);
      t->block_asyncs--;
      scm_async_click ();

      if (msg)
        scm_misc_error (NULL, msg, SCM_EOL);

      scm_remember_upto_here_2 (cond, mutex);

      if (err == 0)
        return 1;
      if (err == ETIMEDOUT)
        return 0;
      if (err != EINTR)
        {
          errno = err;
          scm_syserror (NULL);
        }
    }
}

SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
	    (SCM cv, SCM mx, SCM t),
	    "Wait until @var{cond-var} has been signalled.  While waiting, "
	    "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
	    "is locked again when this function returns.  When @var{time} is given, "
	    "it specifies a point in time where the waiting should be aborted.  It "
	    "can be either an integer as returned by @code{current-time} or a pair "
	    "as returned by @code{gettimeofday}.  When the waiting is aborted the "
	    "mutex is locked and @code{#f} is returned.  When the condition "
	    "variable is in fact signalled, the mutex is also locked and @code{#t} "
	    "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      if (scm_is_pair (t))
        {
          /* A (seconds . microseconds) pair, as from gettimeofday. */
          waittime.tv_sec = scm_to_ulong (SCM_CAR (t));
          waittime.tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
        }
      else
        {
          waittime.tv_sec = scm_to_ulong (t);
          waittime.tv_nsec = 0;
        }
      waitptr = &waittime;
    }

  return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
}
#undef FUNC_NAME

static void
fat_cond_signal (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  unblock_from_queue (c->waiting);
  scm_i_pthread_mutex_unlock (&c->lock);
}

SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

static void
fat_cond_broadcast (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
  scm_i_pthread_mutex_unlock (&c->lock);
}

SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME

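
/* Editorial note: a hedged sketch (not compiled) of the usual
   condition-variable protocol with the primitives above: test the
   predicate in a loop with the mutex held and let
   wait-condition-variable unlock and relock it atomically.  The flag
   and the two helpers are hypothetical; the mutex and condition
   variable are assumed to be protected from GC elsewhere.  */
#if 0
static SCM flag_mutex, flag_cond;
static int flag;

static void
wait_for_flag (void)
{
  scm_lock_mutex (flag_mutex);
  while (!flag)
    scm_timed_wait_condition_variable (flag_cond, flag_mutex, SCM_UNDEFINED);
  scm_unlock_mutex (flag_mutex);
}

static void
announce_flag (void)
{
  scm_lock_mutex (flag_mutex);
  flag = 1;
  scm_signal_condition_variable (flag_cond);
  scm_unlock_mutex (flag_mutex);
}
#endif
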
/*** Marking stacks */

/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?
*/
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                \
    ucontext_t ctx;                                                   \
    SCM_STACKITEM * top, * bot;                                       \
    getcontext (&ctx);                                                \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,           \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM)));                                    \
    bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base;  \
    top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp;                \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif

void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      scm_gc_mark (t->handle);

#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      scm_mark_locations ((SCM_STACKITEM *) t->regs,
                          ((size_t) sizeof(t->regs)
                           / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}

/*** Select */

int
scm_std_select (int nfds,
                SELECT_TYPE *readfds,
                SELECT_TYPE *writefds,
                SELECT_TYPE *exceptfds,
                struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  eno = errno;
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
        {
          eno = EINTR;
          res = -1;
        }
    }
  errno = eno;
  return res;
}

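
/* Editorial note: a hedged sketch (not compiled): scm_std_select is
   the select to call from guile mode.  It leaves guile mode around the
   real select and adds the thread's wakeup pipe to the read set, so a
   system async delivery shows up as EINTR.  The helper is
   hypothetical.  */
#if 0
static int
wait_until_readable (int fd)
{
  fd_set fds;
  FD_ZERO (&fds);
  FD_SET (fd, &fds);
  return scm_std_select (fd + 1, &fds, NULL, NULL, NULL);
}
#endif
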
/* Convenience API for blocking while in guile mode. */

#if SCM_USE_PTHREAD_THREADS

int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_mutex_lock (mutex);
  scm_enter_guile (t);
  return res;
}

static void
unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}

void
scm_frame_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_frame_unwind_handler (unlock, mutex, SCM_F_WIND_EXPLICITLY);
}

int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_wait (cond, mutex);
  scm_enter_guile (t);
  return res;
}

int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
                            scm_i_pthread_mutex_t *mutex,
                            const scm_t_timespec *wt)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  scm_enter_guile (t);
  return res;
}

#endif

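
/* Editorial note: a hedged sketch (not compiled) of the convenience
   API above for plain pthread mutexes: the scm_pthread_* wrappers
   leave guile mode around the blocking call, and
   scm_frame_pthread_mutex_lock also unlocks when the frame is left.
   scm_frame_begin/scm_frame_end are assumed from dynwind.h of the
   same vintage; the mutex and helper are hypothetical.  */
#if 0
static scm_i_pthread_mutex_t state_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

static void
with_state_lock (void (*proc) (void))
{
  scm_frame_begin (0);
  scm_frame_pthread_mutex_lock (&state_lock);
  proc ();
  scm_frame_end ();
}
#endif
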
unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}

unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}

/*** Misc */

SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME

static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}

SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      SCM_SETCAR (*l, t->handle);
      l = SCM_CDRLOC (*l);
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME

SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME

int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME

static scm_i_pthread_cond_t wake_up_cond;
int scm_i_thread_go_to_sleep;
static int threads_initialized_p = 0;

void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}

void
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}

void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
        scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}

void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}

/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
int scm_i_critical_section_level = 0;

static SCM framed_critical_section_mutex;

a4d106c7 1585scm_frame_critical_section (SCM mutex)
76da80e7 1586{
a4d106c7
MV
1587 if (scm_is_false (mutex))
1588 mutex = framed_critical_section_mutex;
1589 scm_frame_lock_mutex (mutex);
9de87eea
MV
1590 scm_frame_block_asyncs ();
1591}
1592
1593/*** Initialization */
1594
1595scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
1596scm_i_pthread_mutex_t scm_i_misc_mutex;
1597
d1138028
MV
1598#if SCM_USE_PTHREAD_THREADS
1599pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
1600#endif
1601
9de87eea
MV
1602void
1603scm_threads_prehistory (SCM_STACKITEM *base)
1604{
d1138028
MV
1605#if SCM_USE_PTHREAD_THREADS
1606 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
1607 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
1608 PTHREAD_MUTEX_RECURSIVE);
1609#endif
1610
1611 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
1612 scm_i_pthread_mutexattr_recursive);
9de87eea
MV
1613 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
1614 scm_i_pthread_cond_init (&wake_up_cond, NULL);
9de87eea
MV
1615 scm_i_pthread_key_create (&scm_i_freelist, NULL);
1616 scm_i_pthread_key_create (&scm_i_freelist2, NULL);
1617
1618 guilify_self_1 (base);
9bc4701c
MD
1619}
1620
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;

void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
                                         sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  framed_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}

void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}

void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/