Don't leave and reenter guile mode if mutex is available
libguile/threads.c
/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */


\f
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>
#include <assert.h>

#ifdef HAVE_STRING_H
#include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
#endif

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"

#ifdef __MINGW32__
#ifndef ETIMEDOUT
# define ETIMEDOUT WSAETIMEDOUT
#endif
# include <fcntl.h>
# include <process.h>
# define pipe(fd) _pipe (fd, 256, O_BINARY)
#endif /* __MINGW32__ */

#include <full-read.h>

\f
static void
to_timespec (SCM t, scm_t_timespec *waittime)
{
  if (scm_is_pair (t))
    {
      waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
      waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
    }
  else
    {
      double time = scm_to_double (t);
      double sec = scm_c_truncate (time);

      waittime->tv_sec = (long) sec;
      waittime->tv_nsec = (long) ((time - sec) * 1000000000);
    }
}

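/* Illustrative note (not part of the original source): a timeout may be
   given either as a (seconds . microseconds) pair, as returned by
   gettimeofday, or as a real number of seconds.  For example, both
   (1234567890 . 500000) and 1234567890.5 end up as
   tv_sec = 1234567890 and tv_nsec = 500000000.  */
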
/*** Queues */

/* Make an empty queue data structure.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}

/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  if (scm_is_null (SCM_CDR (q)))
    SCM_SETCDR (q, c);
  else
    SCM_SETCDR (SCM_CAR (q), c);
  SCM_SETCAR (q, c);
  SCM_CRITICAL_SECTION_END;
  return c;
}

/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  SCM p, prev = q;
  SCM_CRITICAL_SECTION_START;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
        {
          if (scm_is_eq (c, SCM_CAR (q)))
            SCM_SETCAR (q, SCM_CDR (c));
          SCM_SETCDR (prev, SCM_CDR (c));
          SCM_CRITICAL_SECTION_END;
          return 1;
        }
      prev = p;
    }
  SCM_CRITICAL_SECTION_END;
  return 0;
}

/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c;
  SCM_CRITICAL_SECTION_START;
  c = SCM_CDR (q);
  if (scm_is_null (c))
    {
      SCM_CRITICAL_SECTION_END;
      return SCM_BOOL_F;
    }
  else
    {
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      SCM_CRITICAL_SECTION_END;
      return SCM_CAR (c);
    }
}

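/* Illustrative sketch (an assumption drawn from the code above, not part of
   the original source): a queue Q holding the elements a, b and c looks like

     Q = ( <last-pair> . (a b c) )

   i.e. the cdr of Q is the list of elements in order, and the car of Q
   points at the final pair of that list, so enqueue can append in O(1)
   while dequeue pops from the front.  */
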
9de87eea 163/*** Thread smob routines */
76da80e7 164
d823b11b
MV
165static SCM
166thread_mark (SCM obj)
167{
9de87eea 168 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
d823b11b 169 scm_gc_mark (t->result);
2e77f720 170 scm_gc_mark (t->cleanup_handler);
9de87eea 171 scm_gc_mark (t->join_queue);
6180e336 172 scm_gc_mark (t->mutexes);
9de87eea
MV
173 scm_gc_mark (t->dynwinds);
174 scm_gc_mark (t->active_asyncs);
9de87eea 175 scm_gc_mark (t->continuation_root);
2bbe1533 176 scm_gc_mark (t->vm);
9de87eea 177 return t->dynamic_state;
d823b11b
MV
178}
179
180static int
181thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
182{
23d72566
KR
183 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
184 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
185 the struct case, hence we go via a union, and extract according to the
186 size of pthread_t. */
187 union {
188 scm_i_pthread_t p;
189 unsigned short us;
190 unsigned int ui;
191 unsigned long ul;
192 scm_t_uintmax um;
193 } u;
9de87eea 194 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
23d72566
KR
195 scm_i_pthread_t p = t->pthread;
196 scm_t_uintmax id;
197 u.p = p;
198 if (sizeof (p) == sizeof (unsigned short))
199 id = u.us;
200 else if (sizeof (p) == sizeof (unsigned int))
201 id = u.ui;
202 else if (sizeof (p) == sizeof (unsigned long))
203 id = u.ul;
204 else
205 id = u.um;
206
d823b11b 207 scm_puts ("#<thread ", port);
23d72566 208 scm_uintprint (id, 10, port);
1b92fb6b 209 scm_puts (" (", port);
0345e278 210 scm_uintprint ((scm_t_bits)t, 16, port);
1b92fb6b 211 scm_puts (")>", port);
d823b11b
MV
212 return 1;
213}
214
215static size_t
216thread_free (SCM obj)
217{
9de87eea
MV
218 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
219 assert (t->exited);
d823b11b
MV
220 scm_gc_free (t, sizeof (*t), "thread");
221 return 0;
222}
223
/*** Blocking on queues. */

/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/

/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}

/* Wake up the first thread on QUEUE, if any.  The awoken thread is
   returned, or #f if the queue was empty.
 */
static SCM
unblock_from_queue (SCM queue)
{
  SCM thread = dequeue (queue);
  if (scm_is_true (thread))
    scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
  return thread;
}

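/* Minimal usage sketch for block_self/unblock_from_queue (an illustration,
   not code from this file; the queue and mutex names are made up):

     scm_i_scm_pthread_mutex_lock (&some_mutex);
     int err = block_self (some_wait_queue, some_scm_object,
                           &some_mutex, NULL);
     if (err == EINTR)
       ;  // woken by a system async; the caller decides whether to retry
     else if (err == ETIMEDOUT)
       ;  // only possible when a waittime was supplied
     scm_i_pthread_mutex_unlock (&some_mutex);
*/
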
/* Getting into and out of guile mode.
 */

/* Ken Raeburn observes that the implementation of suspend and resume
   (and the things that build on top of them) is very likely not
   correct (see below).  We will need to fix this eventually, and
   that's why scm_leave_guile/scm_enter_guile are not exported in the
   API.

   Ken writes:

   Consider this sequence:

   Function foo, called in Guile mode, calls suspend (maybe indirectly
   through scm_leave_guile), which does this:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t);  // just takes address of automatic
      var 't'
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS;   // sparc only
      SCM_I_SETJMP (t->regs);       // here's most of the magic

   ... and returns.

   Function foo has a SCM value X, a handle on a non-immediate object, in
   a caller-saved register R, and it's the only reference to the object
   currently.

   The compiler wants to use R in suspend, so it pushes the current
   value, X, into a stack slot which will be reloaded on exit from
   suspend; then it loads stuff into R and goes about its business.  The
   setjmp call saves (some of) the current registers, including R, which
   no longer contains X.  (This isn't a problem for a normal
   setjmp/longjmp situation, where longjmp would be called before
   setjmp's caller returns; the old value for X would be loaded back from
   the stack after the longjmp, before the function returned.)

   So, suspend returns, loading X back into R (and invalidating the jump
   buffer) in the process.  The caller foo then goes off and calls a
   bunch of other functions out of Guile mode, occasionally storing X on
   the stack again, but, say, much deeper on the stack than suspend's
   stack frame went, and the stack slot where suspend had written X has
   long since been overwritten with other values.

   Okay, nothing actively broken so far.  Now, let garbage collection
   run, triggered by another thread.

   The thread calling foo is out of Guile mode at the time, so the
   garbage collector just scans a range of stack addresses.  Too bad that
   X isn't stored there.  So the pointed-to storage goes onto the free
   list, and I think you can see where things go from there.

   Is there anything I'm missing that'll prevent this scenario from
   happening?  I mean, aside from, "well, suspend and scm_leave_guile
   don't have many local variables, so they probably won't need to save
   any registers on most systems, so we hope everything will wind up in
   the jump buffer and we'll just get away with it"?

   (And, going the other direction, if scm_leave_guile and suspend push
   the stack pointer over onto a new page, and foo doesn't make further
   function calls and thus the stack pointer no longer includes that
   page, are we guaranteed that the kernel cannot release the now-unused
   stack page that contains the top-of-stack pointer we just saved?  I
   don't know if any OS actually does that.  If it does, we could get
   faults in garbage collection.)

   I don't think scm_without_guile has to have this problem, as it gets
   more control over the stack handling -- but it should call setjmp
   itself.  I'd probably try something like:

      // record top of stack for the GC
      t->top = SCM_STACK_PTR (&t);
      // save registers.
      SCM_FLUSH_REGISTER_WINDOWS;
      SCM_I_SETJMP (t->regs);
      res = func(data);
      scm_enter_guile (t);

   ... though even that's making some assumptions about the stack
   ordering of local variables versus caller-saved registers.

   For something like scm_leave_guile to work, I don't think it can just
   rely on invalidated jump buffers.  A valid jump buffer, and a handle
   on the stack state at the point when the jump buffer was initialized,
   together, would work fine, but I think then we're talking about macros
   invoking setjmp in the caller's stack frame, and requiring that the
   caller of scm_leave_guile also call scm_enter_guile before returning,
   kind of like pthread_cleanup_push/pop calls that have to be paired up
   in a function.  (In fact, the pthread ones have to be paired up
   syntactically, as if they might expand to a compound statement
   incorporating the user's code, and invoking a compiler's
   exception-handling primitives.  Which might be something to think
   about for cases where Guile is used with C++ exceptions or
   pthread_cancel.)
*/

scm_i_pthread_key_t scm_i_thread_key;

static void
resume (scm_i_thread *t)
{
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}

typedef void* scm_t_guile_ticket;

static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      scm_i_pthread_mutex_lock (&t->heap_mutex);
      t->heap_mutex_locked_by_self = 1;
      resume (t);
    }
}

static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  SCM_I_SETJMP (t->regs);
  return t;
}

static scm_t_guile_ticket
scm_leave_guile ()
{
  scm_i_thread *t = suspend ();
  if (t->heap_mutex_locked_by_self)
    {
      t->heap_mutex_locked_by_self = 0;
      scm_i_pthread_mutex_unlock (&t->heap_mutex);
    }
  return (scm_t_guile_ticket) t;
}

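/* Illustrative pairing of the ticket API above (this mirrors what
   scm_without_guile and scm_std_select do further down; it is a sketch,
   not additional code from the original file):

     scm_t_guile_ticket ticket = scm_leave_guile ();
     ... blocking system call; no SCM objects may be touched here ...
     scm_enter_guile (ticket);
*/
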
9de87eea
MV
444static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
445static scm_i_thread *all_threads = NULL;
446static int thread_count;
447
448static SCM scm_i_default_dynamic_state;
449
450/* Perform first stage of thread initialisation, in non-guile mode.
d823b11b 451 */
9de87eea
MV
452static void
453guilify_self_1 (SCM_STACKITEM *base)
d823b11b 454{
9de87eea
MV
455 scm_i_thread *t = malloc (sizeof (scm_i_thread));
456
457 t->pthread = scm_i_pthread_self ();
458 t->handle = SCM_BOOL_F;
459 t->result = SCM_BOOL_F;
2e77f720 460 t->cleanup_handler = SCM_BOOL_F;
6180e336 461 t->mutexes = SCM_EOL;
f9d44e84 462 t->held_mutex = NULL;
9de87eea
MV
463 t->join_queue = SCM_EOL;
464 t->dynamic_state = SCM_BOOL_F;
465 t->dynwinds = SCM_EOL;
466 t->active_asyncs = SCM_EOL;
9de87eea
MV
467 t->block_asyncs = 1;
468 t->pending_asyncs = 1;
469 t->last_debug_frame = NULL;
470 t->base = base;
346e4402
NJ
471#ifdef __ia64__
472 /* Calculate and store off the base of this thread's register
473 backing store (RBS). Unfortunately our implementation(s) of
474 scm_ia64_register_backing_store_base are only reliable for the
475 main thread. For other threads, therefore, find out the current
476 top of the RBS, and use that as a maximum. */
477 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
478 {
479 ucontext_t ctx;
480 void *bsp;
481 getcontext (&ctx);
482 bsp = scm_ia64_ar_bsp (&ctx);
483 if (t->register_backing_store_base > bsp)
484 t->register_backing_store_base = bsp;
485 }
486#endif
0c97d7dd 487 t->continuation_root = SCM_EOL;
9de87eea
MV
488 t->continuation_base = base;
489 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
490 t->sleep_mutex = NULL;
491 t->sleep_object = SCM_BOOL_F;
492 t->sleep_fd = -1;
634aa8de
LC
493
494 if (pipe (t->sleep_pipe) != 0)
495 /* FIXME: Error conditions during the initialization phase are handled
496 gracelessly since public functions such as `scm_init_guile ()'
497 currently have type `void'. */
498 abort ();
499
9de87eea 500 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
8b0174c8 501 t->heap_mutex_locked_by_self = 0;
86a597f8 502 scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
9de87eea 503 t->clear_freelists_p = 0;
1a8fdd7e 504 t->gc_running_p = 0;
2e77f720 505 t->canceled = 0;
9de87eea
MV
506 t->exited = 0;
507
508 t->freelist = SCM_EOL;
509 t->freelist2 = SCM_EOL;
510 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
511 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
512
513 scm_i_pthread_setspecific (scm_i_thread_key, t);
514
515 scm_i_pthread_mutex_lock (&t->heap_mutex);
8b0174c8 516 t->heap_mutex_locked_by_self = 1;
9de87eea
MV
517
518 scm_i_pthread_mutex_lock (&thread_admin_mutex);
519 t->next_thread = all_threads;
520 all_threads = t;
521 thread_count++;
522 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
d823b11b
MV
523}
524
9de87eea 525/* Perform second stage of thread initialisation, in guile mode.
d823b11b 526 */
9de87eea
MV
527static void
528guilify_self_2 (SCM parent)
d823b11b 529{
9de87eea
MV
530 scm_i_thread *t = SCM_I_CURRENT_THREAD;
531
532 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
533 scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
534 t->continuation_root = scm_cons (t->handle, SCM_EOL);
535 t->continuation_base = t->base;
2bbe1533 536 t->vm = SCM_BOOL_F;
9de87eea
MV
537
538 if (scm_is_true (parent))
539 t->dynamic_state = scm_make_dynamic_state (parent);
540 else
541 t->dynamic_state = scm_i_make_initial_dynamic_state ();
542
543 t->join_queue = make_queue ();
544 t->block_asyncs = 0;
d823b11b
MV
545}
546
\f
/*** Fat mutexes */

/* We implement our own mutex type since we want them to be 'fair', we
   want to do fancy things while waiting for them (like running
   asyncs) and we might want to add things that are nice for
   debugging.
*/

typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level;                  /* how many times the owner has locked us;
                                 <= 1 for non-recursive mutexes */

  int recursive;              /* allow recursive locking? */
  int unchecked_unlock;       /* if set, unlocking an unlocked mutex is not
                                 an error */
  int allow_external_unlock;  /* if set, a thread other than the owner may
                                 unlock this mutex */

  SCM waiting;                /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))

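/* Illustrative Scheme-level view of these flags (a sketch based on the flag
   symbols accepted by make-mutex below, not code from this file):

     (make-mutex)                          ; plain mutex, strict unlock checks
     (make-mutex 'recursive)               ; recursive = 1
     (make-mutex 'unchecked-unlock
                 'allow-external-unlock)   ; relaxed unlock checking
*/
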
9de87eea 572/* Perform thread tear-down, in guile mode.
d823b11b 573 */
9de87eea
MV
574static void *
575do_thread_exit (void *v)
576{
2e77f720
LC
577 scm_i_thread *t = (scm_i_thread *) v;
578
579 if (!scm_is_false (t->cleanup_handler))
580 {
581 SCM ptr = t->cleanup_handler;
582
583 t->cleanup_handler = SCM_BOOL_F;
584 t->result = scm_internal_catch (SCM_BOOL_T,
585 (scm_t_catch_body) scm_call_0, ptr,
586 scm_handle_by_message_noexit, NULL);
587 }
9de87eea 588
86a597f8 589 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
9de87eea
MV
590
591 t->exited = 1;
0c97d7dd
MV
592 close (t->sleep_pipe[0]);
593 close (t->sleep_pipe[1]);
9de87eea
MV
594 while (scm_is_true (unblock_from_queue (t->join_queue)))
595 ;
9de87eea 596
74926120 597 while (!scm_is_null (t->mutexes))
6180e336
NJ
598 {
599 SCM mutex = SCM_CAR (t->mutexes);
600 fat_mutex *m = SCM_MUTEX_DATA (mutex);
601 scm_i_pthread_mutex_lock (&m->lock);
74926120 602
6180e336
NJ
603 unblock_from_queue (m->waiting);
604
74926120 605 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
606 t->mutexes = SCM_CDR (t->mutexes);
607 }
608
86a597f8 609 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 610
9de87eea
MV
611 return NULL;
612}
613
d823b11b 614static void
9de87eea 615on_thread_exit (void *v)
d823b11b 616{
29776e85 617 /* This handler is executed in non-guile mode. */
2e77f720 618 scm_i_thread *t = (scm_i_thread *) v, **tp;
0c97d7dd 619
f9d44e84
NJ
620 /* If this thread was cancelled while doing a cond wait, it will
621 still have a mutex locked, so we unlock it here. */
622 if (t->held_mutex)
623 {
624 scm_i_pthread_mutex_unlock (t->held_mutex);
625 t->held_mutex = NULL;
626 }
627
9de87eea 628 scm_i_pthread_setspecific (scm_i_thread_key, v);
0c97d7dd 629
2e77f720
LC
630 /* Ensure the signal handling thread has been launched, because we might be
631 shutting it down. */
632 scm_i_ensure_signal_delivery_thread ();
633
0c97d7dd 634 /* Unblocking the joining threads needs to happen in guile mode
29776e85 635 since the queue is a SCM data structure. */
9de87eea 636 scm_with_guile (do_thread_exit, v);
0c97d7dd
MV
637
638 /* Removing ourself from the list of all threads needs to happen in
639 non-guile mode since all SCM values on our stack become
29776e85 640 unprotected once we are no longer in the list. */
0c97d7dd
MV
641 scm_i_pthread_mutex_lock (&thread_admin_mutex);
642 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
643 if (*tp == t)
644 {
645 *tp = t->next_thread;
646 break;
647 }
648 thread_count--;
2e77f720
LC
649
650 /* If there's only one other thread, it could be the signal delivery
651 thread, so we need to notify it to shut down by closing its read pipe.
652 If it's not the signal delivery thread, then closing the read pipe isn't
653 going to hurt. */
654 if (thread_count <= 1)
655 scm_i_close_signal_pipe ();
656
0c97d7dd
MV
657 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
658
9de87eea 659 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
d823b11b
MV
660}
661
9de87eea 662static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
d823b11b 663
9de87eea
MV
664static void
665init_thread_key (void)
666{
667 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
668}
d823b11b 669
9de87eea
MV
670/* Perform any initializations necessary to bring the current thread
671 into guile mode, initializing Guile itself, if necessary.
a54a94b3 672
9de87eea
MV
673 BASE is the stack base to use with GC.
674
   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
676 which case the default dynamic state is used.
677
678 Return zero when the thread was in guile mode already; otherwise
679 return 1.
680*/
681
682static int
683scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
d823b11b 684{
9de87eea
MV
685 scm_i_thread *t;
686
687 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
688
689 if ((t = SCM_I_CURRENT_THREAD) == NULL)
690 {
691 /* This thread has not been guilified yet.
692 */
693
694 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
695 if (scm_initialized_p == 0)
696 {
697 /* First thread ever to enter Guile. Run the full
698 initialization.
699 */
700 scm_i_init_guile (base);
701 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
702 }
703 else
704 {
705 /* Guile is already initialized, but this thread enters it for
706 the first time. Only initialize this thread.
707 */
708 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
709 guilify_self_1 (base);
710 guilify_self_2 (parent);
711 }
712 return 1;
713 }
714 else if (t->top)
715 {
716 /* This thread is already guilified but not in guile mode, just
717 resume it.
74926120 718
ccf1ca4a
LC
719 A user call to scm_with_guile() will lead us to here. This could
720 happen from anywhere on the stack, and in particular lower on the
721 stack than when it was when this thread was first guilified. Thus,
722 `base' must be updated. */
723#if SCM_STACK_GROWS_UP
724 if (base < t->base)
725 t->base = base;
726#else
727 if (base > t->base)
728 t->base = base;
729#endif
730
9de87eea
MV
731 scm_enter_guile ((scm_t_guile_ticket) t);
732 return 1;
733 }
734 else
735 {
736 /* Thread is already in guile mode. Nothing to do.
737 */
738 return 0;
739 }
d823b11b
MV
740}
741
9de87eea 742#if SCM_USE_PTHREAD_THREADS
9de87eea 743
23d72566
KR
744#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
745/* This method for GNU/Linux and perhaps some other systems.
746 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
747 available on them. */
9de87eea
MV
748#define HAVE_GET_THREAD_STACK_BASE
749
750static SCM_STACKITEM *
751get_thread_stack_base ()
d823b11b 752{
9de87eea
MV
753 pthread_attr_t attr;
754 void *start, *end;
755 size_t size;
756
9de87eea
MV
757 pthread_getattr_np (pthread_self (), &attr);
758 pthread_attr_getstack (&attr, &start, &size);
759 end = (char *)start + size;
760
2b829bbb
KR
761 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
762 for the main thread, but we can use scm_get_stack_base in that
763 case.
764 */
765
766#ifndef PTHREAD_ATTR_GETSTACK_WORKS
9de87eea 767 if ((void *)&attr < start || (void *)&attr >= end)
2b829bbb 768 return scm_get_stack_base ();
9de87eea 769 else
2b829bbb 770#endif
9de87eea
MV
771 {
772#if SCM_STACK_GROWS_UP
773 return start;
774#else
775 return end;
776#endif
777 }
a54a94b3
MV
778}
779
23d72566
KR
780#elif HAVE_PTHREAD_GET_STACKADDR_NP
781/* This method for MacOS X.
782 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
783 but as of 2006 there's nothing obvious at apple.com. */
784#define HAVE_GET_THREAD_STACK_BASE
785static SCM_STACKITEM *
786get_thread_stack_base ()
787{
788 return pthread_get_stackaddr_np (pthread_self ());
789}
790
791#elif defined (__MINGW32__)
792/* This method for mingw. In mingw the basic scm_get_stack_base can be used
793 in any thread. We don't like hard-coding the name of a system, but there
794 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
795 work. */
796#define HAVE_GET_THREAD_STACK_BASE
797static SCM_STACKITEM *
798get_thread_stack_base ()
799{
800 return scm_get_stack_base ();
801}
802
803#endif /* pthread methods of get_thread_stack_base */
9de87eea
MV
804
805#else /* !SCM_USE_PTHREAD_THREADS */
806
807#define HAVE_GET_THREAD_STACK_BASE
808
809static SCM_STACKITEM *
810get_thread_stack_base ()
a54a94b3 811{
2b829bbb 812 return scm_get_stack_base ();
d823b11b
MV
813}
814
9de87eea 815#endif /* !SCM_USE_PTHREAD_THREADS */
9de87eea
MV
816
817#ifdef HAVE_GET_THREAD_STACK_BASE
818
819void
820scm_init_guile ()
d823b11b 821{
9de87eea
MV
822 scm_i_init_thread_for_guile (get_thread_stack_base (),
823 scm_i_default_dynamic_state);
d823b11b
MV
824}
825
9de87eea
MV
826#endif
827
828void *
829scm_with_guile (void *(*func)(void *), void *data)
830{
831 return scm_i_with_guile_and_parent (func, data,
832 scm_i_default_dynamic_state);
833}
834
70eca635 835SCM_UNUSED static void
2e77f720
LC
836scm_leave_guile_cleanup (void *x)
837{
838 scm_leave_guile ();
839}
840
9de87eea 841void *
2e77f720 842scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
9de87eea
MV
843{
844 void *res;
845 int really_entered;
846 SCM_STACKITEM base_item;
2e77f720 847
9de87eea 848 really_entered = scm_i_init_thread_for_guile (&base_item, parent);
9de87eea 849 if (really_entered)
2e77f720
LC
850 {
851 scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
852 res = scm_c_with_continuation_barrier (func, data);
853 scm_i_pthread_cleanup_pop (0);
854 scm_leave_guile ();
855 }
74926120 856 else
2e77f720
LC
857 res = scm_c_with_continuation_barrier (func, data);
858
9de87eea
MV
859 return res;
860}
861
862void *
863scm_without_guile (void *(*func)(void *), void *data)
d823b11b 864{
9de87eea
MV
865 void *res;
866 scm_t_guile_ticket t;
867 t = scm_leave_guile ();
868 res = func (data);
869 scm_enter_guile (t);
870 return res;
871}
872
873/*** Thread creation */
874
875typedef struct {
876 SCM parent;
877 SCM thunk;
878 SCM handler;
76da80e7 879 SCM thread;
9de87eea
MV
880 scm_i_pthread_mutex_t mutex;
881 scm_i_pthread_cond_t cond;
882} launch_data;
d823b11b 883
9de87eea
MV
884static void *
885really_launch (void *d)
886{
887 launch_data *data = (launch_data *)d;
888 SCM thunk = data->thunk, handler = data->handler;
889 scm_i_thread *t;
d823b11b 890
9de87eea 891 t = SCM_I_CURRENT_THREAD;
a54a94b3 892
9de87eea
MV
893 scm_i_scm_pthread_mutex_lock (&data->mutex);
894 data->thread = scm_current_thread ();
895 scm_i_pthread_cond_signal (&data->cond);
896 scm_i_pthread_mutex_unlock (&data->mutex);
897
898 if (SCM_UNBNDP (handler))
899 t->result = scm_call_0 (thunk);
900 else
901 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
902
903 return 0;
d823b11b
MV
904}
905
9de87eea
MV
906static void *
907launch_thread (void *d)
908{
909 launch_data *data = (launch_data *)d;
910 scm_i_pthread_detach (scm_i_pthread_self ());
911 scm_i_with_guile_and_parent (really_launch, d, data->parent);
912 return NULL;
913}
914
915SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
d823b11b 916 (SCM thunk, SCM handler),
9de87eea
MV
917 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
918 "returning a new thread object representing the thread. The procedure\n"
919 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
920 "\n"
921 "When @var{handler} is specified, then @var{thunk} is called from\n"
922 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
923 "handler. This catch is established inside the continuation barrier.\n"
924 "\n"
925 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
926 "the @emph{exit value} of the thread and the thread is terminated.")
d823b11b
MV
927#define FUNC_NAME s_scm_call_with_new_thread
928{
9de87eea
MV
929 launch_data data;
930 scm_i_pthread_t id;
931 int err;
d823b11b 932
9de87eea
MV
933 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
934 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
935 handler, SCM_ARG2, FUNC_NAME);
936
937 data.parent = scm_current_dynamic_state ();
938 data.thunk = thunk;
939 data.handler = handler;
940 data.thread = SCM_BOOL_F;
941 scm_i_pthread_mutex_init (&data.mutex, NULL);
942 scm_i_pthread_cond_init (&data.cond, NULL);
943
944 scm_i_scm_pthread_mutex_lock (&data.mutex);
945 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
946 if (err)
947 {
948 scm_i_pthread_mutex_unlock (&data.mutex);
949 errno = err;
950 scm_syserror (NULL);
951 }
952 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
953 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 954
9de87eea 955 return data.thread;
d823b11b
MV
956}
957#undef FUNC_NAME
958
9de87eea
MV
959typedef struct {
960 SCM parent;
961 scm_t_catch_body body;
962 void *body_data;
963 scm_t_catch_handler handler;
964 void *handler_data;
965 SCM thread;
966 scm_i_pthread_mutex_t mutex;
967 scm_i_pthread_cond_t cond;
968} spawn_data;
969
970static void *
971really_spawn (void *d)
972{
973 spawn_data *data = (spawn_data *)d;
974 scm_t_catch_body body = data->body;
975 void *body_data = data->body_data;
976 scm_t_catch_handler handler = data->handler;
977 void *handler_data = data->handler_data;
978 scm_i_thread *t = SCM_I_CURRENT_THREAD;
979
980 scm_i_scm_pthread_mutex_lock (&data->mutex);
981 data->thread = scm_current_thread ();
982 scm_i_pthread_cond_signal (&data->cond);
983 scm_i_pthread_mutex_unlock (&data->mutex);
984
985 if (handler == NULL)
986 t->result = body (body_data);
987 else
988 t->result = scm_internal_catch (SCM_BOOL_T,
989 body, body_data,
990 handler, handler_data);
991
992 return 0;
993}
994
995static void *
996spawn_thread (void *d)
997{
998 spawn_data *data = (spawn_data *)d;
999 scm_i_pthread_detach (scm_i_pthread_self ());
1000 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
1001 return NULL;
1002}
1003
1004SCM
1005scm_spawn_thread (scm_t_catch_body body, void *body_data,
1006 scm_t_catch_handler handler, void *handler_data)
1007{
1008 spawn_data data;
1009 scm_i_pthread_t id;
1010 int err;
1011
1012 data.parent = scm_current_dynamic_state ();
1013 data.body = body;
1014 data.body_data = body_data;
1015 data.handler = handler;
1016 data.handler_data = handler_data;
1017 data.thread = SCM_BOOL_F;
1018 scm_i_pthread_mutex_init (&data.mutex, NULL);
1019 scm_i_pthread_cond_init (&data.cond, NULL);
1020
1021 scm_i_scm_pthread_mutex_lock (&data.mutex);
1022 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
1023 if (err)
1024 {
1025 scm_i_pthread_mutex_unlock (&data.mutex);
1026 errno = err;
1027 scm_syserror (NULL);
1028 }
1029 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
1030 scm_i_pthread_mutex_unlock (&data.mutex);
74926120 1031
9de87eea
MV
1032 return data.thread;
1033}
1034
29717c89
MD
1035SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
1036 (),
1037"Move the calling thread to the end of the scheduling queue.")
1038#define FUNC_NAME s_scm_yield
1039{
9de87eea 1040 return scm_from_bool (scm_i_sched_yield ());
29717c89
MD
1041}
1042#undef FUNC_NAME
1043
2e77f720
LC
1044SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
1045 (SCM thread),
1046"Asynchronously force the target @var{thread} to terminate. @var{thread} "
1047"cannot be the current thread, and if @var{thread} has already terminated or "
1048"been signaled to terminate, this function is a no-op.")
1049#define FUNC_NAME s_scm_cancel_thread
1050{
1051 scm_i_thread *t = NULL;
1052
1053 SCM_VALIDATE_THREAD (1, thread);
1054 t = SCM_I_THREAD_DATA (thread);
86a597f8 1055 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
2e77f720
LC
1056 if (!t->canceled)
1057 {
1058 t->canceled = 1;
86a597f8 1059 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1060 scm_i_pthread_cancel (t->pthread);
1061 }
1062 else
86a597f8 1063 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1064
1065 return SCM_UNSPECIFIED;
1066}
1067#undef FUNC_NAME
1068
1069SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
1070 (SCM thread, SCM proc),
1071"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1072"This handler will be called when the thread exits.")
1073#define FUNC_NAME s_scm_set_thread_cleanup_x
1074{
1075 scm_i_thread *t;
1076
1077 SCM_VALIDATE_THREAD (1, thread);
1078 if (!scm_is_false (proc))
1079 SCM_VALIDATE_THUNK (2, proc);
1080
2e77f720 1081 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1082 scm_i_pthread_mutex_lock (&t->admin_mutex);
1083
2e77f720
LC
1084 if (!(t->exited || t->canceled))
1085 t->cleanup_handler = proc;
1086
86a597f8 1087 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1088
1089 return SCM_UNSPECIFIED;
1090}
1091#undef FUNC_NAME
1092
1093SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
1094 (SCM thread),
1095"Return the cleanup handler installed for the thread @var{thread}.")
1096#define FUNC_NAME s_scm_thread_cleanup
1097{
1098 scm_i_thread *t;
1099 SCM ret;
1100
1101 SCM_VALIDATE_THREAD (1, thread);
1102
2e77f720 1103 t = SCM_I_THREAD_DATA (thread);
86a597f8 1104 scm_i_pthread_mutex_lock (&t->admin_mutex);
2e77f720 1105 ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
86a597f8 1106 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720
LC
1107
1108 return ret;
1109}
1110#undef FUNC_NAME
1111
6180e336
NJ
1112SCM scm_join_thread (SCM thread)
1113{
1114 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1115}
1116
1117SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
1118 (SCM thread, SCM timeout, SCM timeoutval),
d823b11b
MV
1119"Suspend execution of the calling thread until the target @var{thread} "
1120"terminates, unless the target @var{thread} has already terminated. ")
6180e336 1121#define FUNC_NAME s_scm_join_thread_timed
5f05c406 1122{
9de87eea 1123 scm_i_thread *t;
6180e336
NJ
1124 scm_t_timespec ctimeout, *timeout_ptr = NULL;
1125 SCM res = SCM_BOOL_F;
1126
1127 if (! (SCM_UNBNDP (timeoutval)))
1128 res = timeoutval;
d823b11b
MV
1129
1130 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1131 if (scm_is_eq (scm_current_thread (), thread))
2e77f720 1132 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
d823b11b 1133
9de87eea 1134 t = SCM_I_THREAD_DATA (thread);
86a597f8
NJ
1135 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
1136
6180e336
NJ
1137 if (! SCM_UNBNDP (timeout))
1138 {
1139 to_timespec (timeout, &ctimeout);
1140 timeout_ptr = &ctimeout;
1141 }
1142
1143 if (t->exited)
1144 res = t->result;
1145 else
d823b11b 1146 {
9de87eea
MV
1147 while (1)
1148 {
74926120 1149 int err = block_self (t->join_queue, thread, &t->admin_mutex,
6180e336
NJ
1150 timeout_ptr);
1151 if (err == 0)
1152 {
1153 if (t->exited)
1154 {
1155 res = t->result;
1156 break;
1157 }
1158 }
1159 else if (err == ETIMEDOUT)
9de87eea 1160 break;
6180e336 1161
86a597f8 1162 scm_i_pthread_mutex_unlock (&t->admin_mutex);
9de87eea 1163 SCM_TICK;
86a597f8 1164 scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
21346c4f
NJ
1165
1166 /* Check for exit again, since we just released and
1167 reacquired the admin mutex, before the next block_self
1168 call (which would block forever if t has already
1169 exited). */
1170 if (t->exited)
1171 {
1172 res = t->result;
1173 break;
1174 }
9de87eea 1175 }
d823b11b 1176 }
9de87eea 1177
86a597f8 1178 scm_i_pthread_mutex_unlock (&t->admin_mutex);
2e77f720 1179
d823b11b 1180 return res;
5f05c406
MV
1181}
1182#undef FUNC_NAME
1183
6180e336
NJ
1184SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1185 (SCM obj),
1186 "Return @code{#t} if @var{obj} is a thread.")
1187#define FUNC_NAME s_scm_thread_p
1188{
1189 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1190}
1191#undef FUNC_NAME
5f05c406 1192
d823b11b 1193static SCM
9de87eea 1194fat_mutex_mark (SCM mx)
d823b11b 1195{
9de87eea 1196 fat_mutex *m = SCM_MUTEX_DATA (mx);
d823b11b
MV
1197 scm_gc_mark (m->owner);
1198 return m->waiting;
1199}
4079f87e 1200
9de87eea
MV
1201static size_t
1202fat_mutex_free (SCM mx)
76da80e7 1203{
9de87eea
MV
1204 fat_mutex *m = SCM_MUTEX_DATA (mx);
1205 scm_i_pthread_mutex_destroy (&m->lock);
1206 scm_gc_free (m, sizeof (fat_mutex), "mutex");
76da80e7
MV
1207 return 0;
1208}
1209
1210static int
9de87eea 1211fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
76da80e7 1212{
9de87eea
MV
1213 fat_mutex *m = SCM_MUTEX_DATA (mx);
1214 scm_puts ("#<mutex ", port);
1215 scm_uintprint ((scm_t_bits)m, 16, port);
1216 scm_puts (">", port);
1217 return 1;
76da80e7
MV
1218}
1219
76da80e7 1220static SCM
6180e336 1221make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
76da80e7 1222{
9de87eea
MV
1223 fat_mutex *m;
1224 SCM mx;
1225
1226 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
1227 scm_i_pthread_mutex_init (&m->lock, NULL);
1228 m->owner = SCM_BOOL_F;
adc085f1 1229 m->level = 0;
6180e336 1230
adc085f1 1231 m->recursive = recursive;
6180e336
NJ
1232 m->unchecked_unlock = unchecked_unlock;
1233 m->allow_external_unlock = external_unlock;
1234
9de87eea
MV
1235 m->waiting = SCM_EOL;
1236 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
1237 m->waiting = make_queue ();
1238 return mx;
76da80e7
MV
1239}
1240
6180e336
NJ
1241SCM scm_make_mutex (void)
1242{
1243 return scm_make_mutex_with_flags (SCM_EOL);
1244}
1245
2a1d0688
NJ
1246SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
1247SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
1248SCM_SYMBOL (recursive_sym, "recursive");
6180e336
NJ
1249
1250SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1251 (SCM flags),
9de87eea 1252 "Create a new mutex. ")
6180e336 1253#define FUNC_NAME s_scm_make_mutex_with_flags
76da80e7 1254{
6180e336
NJ
1255 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1256
1257 SCM ptr = flags;
1258 while (! scm_is_null (ptr))
1259 {
1260 SCM flag = SCM_CAR (ptr);
1261 if (scm_is_eq (flag, unchecked_unlock_sym))
1262 unchecked_unlock = 1;
1263 else if (scm_is_eq (flag, allow_external_unlock_sym))
1264 external_unlock = 1;
1265 else if (scm_is_eq (flag, recursive_sym))
1266 recursive = 1;
74926120 1267 else
2a1d0688 1268 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
6180e336
NJ
1269 ptr = SCM_CDR (ptr);
1270 }
1271 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
76da80e7
MV
1272}
1273#undef FUNC_NAME
1274
9de87eea 1275SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
9bc4701c 1276 (void),
9de87eea
MV
1277 "Create a new recursive mutex. ")
1278#define FUNC_NAME s_scm_make_recursive_mutex
9bc4701c 1279{
6180e336 1280 return make_fat_mutex (1, 0, 0);
9bc4701c
MD
1281}
1282#undef FUNC_NAME
1283
6180e336
NJ
1284SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1285
1286static SCM
adc085f1 1287fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
9de87eea
MV
1288{
1289 fat_mutex *m = SCM_MUTEX_DATA (mutex);
6180e336 1290
adc085f1 1291 SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
6180e336
NJ
1292 SCM err = SCM_BOOL_F;
1293
1294 struct timeval current_time;
9de87eea
MV
1295
1296 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1297
1298 while (1)
9de87eea 1299 {
adc085f1 1300 if (m->level == 0)
6180e336 1301 {
adc085f1 1302 m->owner = new_owner;
6180e336 1303 m->level++;
74926120 1304
adc085f1 1305 if (SCM_I_IS_THREAD (new_owner))
6180e336 1306 {
adc085f1 1307 scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
6180e336
NJ
1308 scm_i_pthread_mutex_lock (&t->admin_mutex);
1309 t->mutexes = scm_cons (mutex, t->mutexes);
1310 scm_i_pthread_mutex_unlock (&t->admin_mutex);
6180e336 1311 }
adc085f1
JG
1312 *ret = 1;
1313 break;
1314 }
1315 else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
1316 {
1317 m->owner = new_owner;
1318 err = scm_cons (scm_abandoned_mutex_error_key,
1319 scm_from_locale_string ("lock obtained on abandoned "
1320 "mutex"));
1321 *ret = 1;
1322 break;
1323 }
1324 else if (scm_is_eq (m->owner, new_owner))
1325 {
1326 if (m->recursive)
1327 {
1328 m->level++;
74926120 1329 *ret = 1;
adc085f1
JG
1330 }
1331 else
6180e336 1332 {
adc085f1
JG
1333 err = scm_cons (scm_misc_error_key,
1334 scm_from_locale_string ("mutex already locked "
1335 "by thread"));
1336 *ret = 0;
1337 }
74926120 1338 break;
adc085f1
JG
1339 }
1340 else
1341 {
74926120 1342 if (timeout != NULL)
adc085f1
JG
1343 {
1344 gettimeofday (&current_time, NULL);
1345 if (current_time.tv_sec > timeout->tv_sec ||
1346 (current_time.tv_sec == timeout->tv_sec &&
1347 current_time.tv_usec * 1000 > timeout->tv_nsec))
6180e336 1348 {
adc085f1
JG
1349 *ret = 0;
1350 break;
6180e336 1351 }
6180e336 1352 }
37a52039 1353 block_self (m->waiting, mutex, &m->lock, timeout);
adc085f1
JG
1354 scm_i_pthread_mutex_unlock (&m->lock);
1355 SCM_TICK;
1356 scm_i_scm_pthread_mutex_lock (&m->lock);
9de87eea
MV
1357 }
1358 }
1359 scm_i_pthread_mutex_unlock (&m->lock);
6180e336 1360 return err;
9de87eea
MV
1361}
1362
6180e336
NJ
1363SCM scm_lock_mutex (SCM mx)
1364{
adc085f1 1365 return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
6180e336
NJ
1366}
1367
adc085f1
JG
1368SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
1369 (SCM m, SCM timeout, SCM owner),
9bc4701c
MD
1370"Lock @var{mutex}. If the mutex is already locked, the calling thread "
1371"blocks until the mutex becomes available. The function returns when "
1372"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1373"a thread already owns will succeed right away and will not block the "
1374"thread. That is, Guile's mutexes are @emph{recursive}. ")
6180e336 1375#define FUNC_NAME s_scm_lock_mutex_timed
9bc4701c 1376{
6180e336
NJ
1377 SCM exception;
1378 int ret = 0;
1379 scm_t_timespec cwaittime, *waittime = NULL;
76da80e7 1380
6180e336
NJ
1381 SCM_VALIDATE_MUTEX (1, m);
1382
1383 if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
1384 {
1385 to_timespec (timeout, &cwaittime);
1386 waittime = &cwaittime;
1387 }
1388
adc085f1 1389 exception = fat_mutex_lock (m, waittime, owner, &ret);
6180e336
NJ
1390 if (!scm_is_false (exception))
1391 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1392 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c 1393}
76da80e7 1394#undef FUNC_NAME
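
/* Illustrative Scheme-level usage of the timed variant above (a sketch; the
   mutex m and the timeout are made up, and the timeout is an absolute point
   in time as with wait-condition-variable):

     (lock-mutex m)                         ; block until the mutex is ours => #t
     (lock-mutex m (+ (current-time) 5))    ; give up at that time          => #f
     (unlock-mutex m)
*/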
9bc4701c 1395
a4d106c7 1396void
661ae7ab 1397scm_dynwind_lock_mutex (SCM mutex)
a4d106c7 1398{
661ae7ab
MV
1399 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
1400 SCM_F_WIND_EXPLICITLY);
1401 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
1402 SCM_F_WIND_EXPLICITLY);
a4d106c7
MV
1403}
1404
9bc4701c 1405SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
ba1b7223 1406 (SCM mutex),
9bc4701c
MD
1407"Try to lock @var{mutex}. If the mutex is already locked by someone "
1408"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1409#define FUNC_NAME s_scm_try_mutex
1410{
6180e336
NJ
1411 SCM exception;
1412 int ret = 0;
1413 scm_t_timespec cwaittime, *waittime = NULL;
9de87eea 1414
ba1b7223 1415 SCM_VALIDATE_MUTEX (1, mutex);
6180e336
NJ
1416
1417 to_timespec (scm_from_int(0), &cwaittime);
1418 waittime = &cwaittime;
74926120 1419
adc085f1 1420 exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
6180e336
NJ
1421 if (!scm_is_false (exception))
1422 scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
1423 return ret ? SCM_BOOL_T : SCM_BOOL_F;
9de87eea
MV
1424}
1425#undef FUNC_NAME
76da80e7 1426
6180e336
NJ
1427/*** Fat condition variables */
1428
1429typedef struct {
1430 scm_i_pthread_mutex_t lock;
1431 SCM waiting; /* the threads waiting for this condition. */
1432} fat_cond;
1433
1434#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1435#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1436
1437static int
1438fat_mutex_unlock (SCM mutex, SCM cond,
1439 const scm_t_timespec *waittime, int relock)
9de87eea 1440{
6180e336
NJ
1441 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1442 fat_cond *c = NULL;
1443 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1444 int err = 0, ret = 0;
9de87eea
MV
1445
1446 scm_i_scm_pthread_mutex_lock (&m->lock);
adc085f1
JG
1447
1448 SCM owner = m->owner;
1449
1450 if (!scm_is_eq (owner, scm_current_thread ()))
9bc4701c 1451 {
adc085f1 1452 if (m->level == 0)
6180e336
NJ
1453 {
1454 if (!m->unchecked_unlock)
2a1d0688
NJ
1455 {
1456 scm_i_pthread_mutex_unlock (&m->lock);
1457 scm_misc_error (NULL, "mutex not locked", SCM_EOL);
1458 }
adc085f1 1459 owner = scm_current_thread ();
6180e336
NJ
1460 }
1461 else if (!m->allow_external_unlock)
2a1d0688
NJ
1462 {
1463 scm_i_pthread_mutex_unlock (&m->lock);
1464 scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
1465 }
6180e336
NJ
1466 }
1467
1468 if (! (SCM_UNBNDP (cond)))
1469 {
6180e336
NJ
1470 c = SCM_CONDVAR_DATA (cond);
1471 while (1)
1472 {
1473 int brk = 0;
1474
6180e336
NJ
1475 if (m->level > 0)
1476 m->level--;
adc085f1 1477 if (m->level == 0)
6180e336 1478 m->owner = unblock_from_queue (m->waiting);
adc085f1 1479
6180e336 1480 t->block_asyncs++;
74926120 1481
f9d44e84
NJ
1482 err = block_self (c->waiting, cond, &m->lock, waittime);
1483 scm_i_pthread_mutex_unlock (&m->lock);
6180e336
NJ
1484
1485 if (err == 0)
1486 {
1487 ret = 1;
1488 brk = 1;
1489 }
1490 else if (err == ETIMEDOUT)
1491 {
1492 ret = 0;
1493 brk = 1;
1494 }
1495 else if (err != EINTR)
74926120 1496 {
6180e336 1497 errno = err;
6180e336 1498 scm_syserror (NULL);
74926120 1499 }
6180e336
NJ
1500
1501 if (brk)
1502 {
1503 if (relock)
adc085f1 1504 scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
4201062d 1505 t->block_asyncs--;
6180e336
NJ
1506 break;
1507 }
74926120 1508
6180e336
NJ
1509 t->block_asyncs--;
1510 scm_async_click ();
74926120 1511
6180e336
NJ
1512 scm_remember_upto_here_2 (cond, mutex);
1513
1514 scm_i_scm_pthread_mutex_lock (&m->lock);
1515 }
9bc4701c 1516 }
9de87eea 1517 else
6180e336
NJ
1518 {
1519 if (m->level > 0)
1520 m->level--;
74926120 1521 if (m->level == 0)
6180e336 1522 m->owner = unblock_from_queue (m->waiting);
74926120 1523
6180e336
NJ
1524 scm_i_pthread_mutex_unlock (&m->lock);
1525 ret = 1;
1526 }
74926120 1527
6180e336
NJ
1528 return ret;
1529}
9de87eea 1530
6180e336
NJ
1531SCM scm_unlock_mutex (SCM mx)
1532{
1533 return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
9bc4701c 1534}
9bc4701c 1535
6180e336
NJ
1536SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
1537 (SCM mx, SCM cond, SCM timeout),
9bc4701c
MD
1538"Unlocks @var{mutex} if the calling thread owns the lock on "
1539"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1540"thread results in undefined behaviour. Once a mutex has been unlocked, "
1541"one thread blocked on @var{mutex} is awakened and grabs the mutex "
1542"lock. Every call to @code{lock-mutex} by this thread must be matched "
1543"with a call to @code{unlock-mutex}. Only the last call to "
1544"@code{unlock-mutex} will actually unlock the mutex. ")
6180e336 1545#define FUNC_NAME s_scm_unlock_mutex_timed
9bc4701c 1546{
6180e336
NJ
1547 scm_t_timespec cwaittime, *waittime = NULL;
1548
9bc4701c 1549 SCM_VALIDATE_MUTEX (1, mx);
6180e336
NJ
1550 if (! (SCM_UNBNDP (cond)))
1551 {
1552 SCM_VALIDATE_CONDVAR (2, cond);
1553
1554 if (! (SCM_UNBNDP (timeout)))
1555 {
1556 to_timespec (timeout, &cwaittime);
1557 waittime = &cwaittime;
1558 }
1559 }
1560
1561 return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
9bc4701c
MD
1562}
1563#undef FUNC_NAME
1564
6180e336
NJ
1565SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1566 (SCM obj),
1567 "Return @code{#t} if @var{obj} is a mutex.")
1568#define FUNC_NAME s_scm_mutex_p
1569{
1570 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1571}
74926120 1572#undef FUNC_NAME
6180e336 1573
9de87eea
MV
1574SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
1575 (SCM mx),
1576 "Return the thread owning @var{mx}, or @code{#f}.")
1577#define FUNC_NAME s_scm_mutex_owner
1578{
adc085f1
JG
1579 SCM owner;
1580 fat_mutex *m = NULL;
1581
9de87eea 1582 SCM_VALIDATE_MUTEX (1, mx);
adc085f1
JG
1583 m = SCM_MUTEX_DATA (mx);
1584 scm_i_pthread_mutex_lock (&m->lock);
1585 owner = m->owner;
1586 scm_i_pthread_mutex_unlock (&m->lock);
1587
1588 return owner;
9de87eea
MV
1589}
1590#undef FUNC_NAME
1591
1592SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1593 (SCM mx),
adc085f1 1594 "Return the lock level of mutex @var{mx}.")
9de87eea
MV
1595#define FUNC_NAME s_scm_mutex_level
1596{
1597 SCM_VALIDATE_MUTEX (1, mx);
1598 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1599}
1600#undef FUNC_NAME
1601
adc085f1
JG
1602SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1603 (SCM mx),
1604 "Returns @code{#t} if the mutex @var{mx} is locked.")
1605#define FUNC_NAME s_scm_mutex_locked_p
1606{
1607 SCM_VALIDATE_MUTEX (1, mx);
1608 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1609}
1610#undef FUNC_NAME
9de87eea 1611
9de87eea
MV
1612static SCM
1613fat_cond_mark (SCM cv)
1614{
1615 fat_cond *c = SCM_CONDVAR_DATA (cv);
1616 return c->waiting;
1617}
1618
1619static size_t
1620fat_cond_free (SCM mx)
1621{
1622 fat_cond *c = SCM_CONDVAR_DATA (mx);
9de87eea
MV
1623 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1624 return 0;
1625}
1626
1627static int
1628fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1629{
1630 fat_cond *c = SCM_CONDVAR_DATA (cv);
1631 scm_puts ("#<condition-variable ", port);
1632 scm_uintprint ((scm_t_bits)c, 16, port);
1633 scm_puts (">", port);
1634 return 1;
1635}
9bc4701c 1636
d823b11b
MV
1637SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1638 (void),
1639 "Make a new condition variable.")
1640#define FUNC_NAME s_scm_make_condition_variable
5f05c406 1641{
9de87eea
MV
1642 fat_cond *c;
1643 SCM cv;
1644
1645 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
9de87eea
MV
1646 c->waiting = SCM_EOL;
1647 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1648 c->waiting = make_queue ();
d823b11b 1649 return cv;
5f05c406 1650}
d823b11b 1651#undef FUNC_NAME
5f05c406 1652
d823b11b
MV
1653SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1654 (SCM cv, SCM mx, SCM t),
1655"Wait until @var{cond-var} has been signalled. While waiting, "
1656"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1657"is locked again when this function returns. When @var{time} is given, "
1658"it specifies a point in time where the waiting should be aborted. It "
1659"can be either a integer as returned by @code{current-time} or a pair "
1660"as returned by @code{gettimeofday}. When the waiting is aborted the "
1661"mutex is locked and @code{#f} is returned. When the condition "
1662"variable is in fact signalled, the mutex is also locked and @code{#t} "
1663"is returned. ")
1664#define FUNC_NAME s_scm_timed_wait_condition_variable
5f05c406 1665{
9de87eea 1666 scm_t_timespec waittime, *waitptr = NULL;
d823b11b
MV
1667
1668 SCM_VALIDATE_CONDVAR (1, cv);
1669 SCM_VALIDATE_MUTEX (2, mx);
74926120 1670
d823b11b
MV
1671 if (!SCM_UNBNDP (t))
1672 {
6180e336 1673 to_timespec (t, &waittime);
9de87eea 1674 waitptr = &waittime;
d823b11b
MV
1675 }
1676
2a1d0688 1677 return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
5f05c406 1678}
d823b11b 1679#undef FUNC_NAME
5f05c406 1680
9de87eea
MV
1681static void
1682fat_cond_signal (fat_cond *c)
1683{
9de87eea 1684 unblock_from_queue (c->waiting);
9de87eea
MV
1685}
1686
d823b11b
MV
1687SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1688 (SCM cv),
1689 "Wake up one thread that is waiting for @var{cv}")
1690#define FUNC_NAME s_scm_signal_condition_variable
5f05c406 1691{
d823b11b 1692 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1693 fat_cond_signal (SCM_CONDVAR_DATA (cv));
d823b11b 1694 return SCM_BOOL_T;
5f05c406 1695}
d823b11b 1696#undef FUNC_NAME
5f05c406 1697
9de87eea
MV
1698static void
1699fat_cond_broadcast (fat_cond *c)
1700{
9de87eea
MV
1701 while (scm_is_true (unblock_from_queue (c->waiting)))
1702 ;
9de87eea
MV
1703}
1704
d823b11b
MV
1705SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1706 (SCM cv),
1707 "Wake up all threads that are waiting for @var{cv}. ")
1708#define FUNC_NAME s_scm_broadcast_condition_variable
5f05c406 1709{
d823b11b 1710 SCM_VALIDATE_CONDVAR (1, cv);
9de87eea 1711 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
d823b11b 1712 return SCM_BOOL_T;
5f05c406 1713}
d823b11b 1714#undef FUNC_NAME
5f05c406 1715
6180e336
NJ
1716SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1717 (SCM obj),
1718 "Return @code{#t} if @var{obj} is a condition variable.")
1719#define FUNC_NAME s_scm_condition_variable_p
1720{
1721 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1722}
1723#undef FUNC_NAME
1724
d823b11b
MV
1725/*** Marking stacks */
1726
1727/* XXX - what to do with this? Do we need to handle this for blocked
1728 threads as well?
1729*/
1730#ifdef __ia64__
1731# define SCM_MARK_BACKING_STORE() do { \
1732 ucontext_t ctx; \
1733 SCM_STACKITEM * top, * bot; \
1734 getcontext (&ctx); \
1735 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1736 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1737 / sizeof (SCM_STACKITEM))); \
346e4402 1738 bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
9a5fa6e9 1739 top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
d823b11b
MV
1740 scm_mark_locations (bot, top - bot); } while (0)
1741#else
1742# define SCM_MARK_BACKING_STORE()
1743#endif
1744
1745void
1746scm_threads_mark_stacks (void)
5f05c406 1747{
9de87eea
MV
1748 scm_i_thread *t;
1749 for (t = all_threads; t; t = t->next_thread)
d823b11b 1750 {
9de87eea
MV
1751 /* Check that thread has indeed been suspended.
1752 */
1753 assert (t->top);
6087fad9 1754
9de87eea 1755 scm_gc_mark (t->handle);
6087fad9 1756
d028af45 1757#if SCM_STACK_GROWS_UP
9de87eea 1758 scm_mark_locations (t->base, t->top - t->base);
d823b11b 1759#else
9de87eea 1760 scm_mark_locations (t->top, t->base - t->top);
d823b11b 1761#endif
88cefbc7 1762 scm_mark_locations ((void *) &t->regs,
6087fad9
MV
1763 ((size_t) sizeof(t->regs)
1764 / sizeof (SCM_STACKITEM)));
d823b11b 1765 }
59152722
MV
1766
1767 SCM_MARK_BACKING_STORE ();
5f05c406
MV
1768}
1769
d823b11b
MV
1770/*** Select */
1771
911782b7 1772int
9de87eea
MV
1773scm_std_select (int nfds,
1774 SELECT_TYPE *readfds,
1775 SELECT_TYPE *writefds,
1776 SELECT_TYPE *exceptfds,
1777 struct timeval *timeout)
1778{
1779 fd_set my_readfds;
1780 int res, eno, wakeup_fd;
1781 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1782 scm_t_guile_ticket ticket;
1783
1784 if (readfds == NULL)
1785 {
1786 FD_ZERO (&my_readfds);
1787 readfds = &my_readfds;
1788 }
1789
1790 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1791 SCM_TICK;
1792
1793 wakeup_fd = t->sleep_pipe[0];
1794 ticket = scm_leave_guile ();
1795 FD_SET (wakeup_fd, readfds);
1796 if (wakeup_fd >= nfds)
1797 nfds = wakeup_fd+1;
1798 res = select (nfds, readfds, writefds, exceptfds, timeout);
1799 t->sleep_fd = -1;
d823b11b 1800 eno = errno;
9de87eea
MV
1801 scm_enter_guile (ticket);
1802
1803 scm_i_reset_sleep (t);
1804
1805 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1806 {
1807 char dummy;
634aa8de
LC
1808 full_read (wakeup_fd, &dummy, 1);
1809
9de87eea
MV
1810 FD_CLR (wakeup_fd, readfds);
1811 res -= 1;
1812 if (res == 0)
1813 {
1814 eno = EINTR;
1815 res = -1;
1816 }
1817 }
d823b11b
MV
1818 errno = eno;
1819 return res;
5f05c406
MV
1820}
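
/* Example (sketch, with a hypothetical descriptor and timeout):
   scm_std_select is a drop-in for select(2) for code running in guile
   mode; the calling thread leaves guile mode around the system call and
   can be woken early through its sleep pipe, in which case EINTR is
   reported.

     fd_set fds;
     struct timeval tv = { 5, 0 };
     FD_ZERO (&fds);
     FD_SET (sock_fd, &fds);
     if (scm_std_select (sock_fd + 1, &fds, NULL, NULL, &tv) < 0
         && errno == EINTR)
       ;   -- interrupted, e.g. by an async; handle and retry
*/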
1821
9de87eea 1822/* Convenience API for blocking while in guile mode. */
76da80e7 1823
9de87eea 1824#if SCM_USE_PTHREAD_THREADS
92e64b87 1825
9bc4701c 1826int
9de87eea 1827scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
9bc4701c 1828{
71a5964c
KR
1829 if (scm_i_pthread_mutex_trylock (mutex) == 0)
1830 return 0;
1831 else
1832 {
1833 scm_t_guile_ticket t = scm_leave_guile ();
1834 int res = scm_i_pthread_mutex_lock (mutex);
1835 scm_enter_guile (t);
1836 return res;
1837 }
9bc4701c
MD
1838}
1839
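/* Example (sketch): locking a plain pthread mutex from guile mode.  The
   trylock fast path above means guile mode is only left when the mutex
   is actually contended.  my_lock is a hypothetical statically
   initialized mutex.

     static scm_i_pthread_mutex_t my_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

     scm_pthread_mutex_lock (&my_lock);
     ...   -- touch the shared state
     scm_i_pthread_mutex_unlock (&my_lock);
*/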
9de87eea 1840static void
2b829bbb 1841do_unlock (void *data)
28d52ebb 1842{
9de87eea 1843 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
28d52ebb
MD
1844}
1845
1846void
661ae7ab 1847scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
28d52ebb 1848{
9de87eea 1849 scm_i_scm_pthread_mutex_lock (mutex);
2b829bbb 1850 scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
28d52ebb
MD
1851}
1852
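/* Example (sketch): holding a mutex for the extent of a dynwind
   context.  The unwind handler installed above releases it even on a
   non-local exit (error, throw, reentered continuation).  my_lock is
   hypothetical.

     scm_dynwind_begin (0);
     scm_dynwind_pthread_mutex_lock (&my_lock);
     ...   -- code that may throw
     scm_dynwind_end ();   -- unlocks my_lock
*/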
9bc4701c 1853int
9de87eea 1854scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
9bc4701c 1855{
9de87eea 1856 scm_t_guile_ticket t = scm_leave_guile ();
f9d44e84 1857 ((scm_i_thread *)t)->held_mutex = mutex;
9de87eea 1858 int res = scm_i_pthread_cond_wait (cond, mutex);
f9d44e84 1859 ((scm_i_thread *)t)->held_mutex = NULL;
9de87eea 1860 scm_enter_guile (t);
9bc4701c
MD
1861 return res;
1862}
9bc4701c 1863
76da80e7 1864int
9de87eea
MV
1865scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1866 scm_i_pthread_mutex_t *mutex,
1867 const scm_t_timespec *wt)
76da80e7 1868{
9de87eea 1869 scm_t_guile_ticket t = scm_leave_guile ();
f9d44e84 1870 ((scm_i_thread *)t)->held_mutex = mutex;
9de87eea 1871 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
f9d44e84 1872 ((scm_i_thread *)t)->held_mutex = NULL;
9de87eea
MV
1873 scm_enter_guile (t);
1874 return res;
76da80e7
MV
1875}
1876
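/* Example (sketch): the usual condition-variable idiom, written with
   the guile-mode aware wrappers above.  queue_lock, queue_cond and
   queue_empty are hypothetical.

     scm_pthread_mutex_lock (&queue_lock);
     while (queue_empty ())
       scm_pthread_cond_wait (&queue_cond, &queue_lock);
     ...   -- consume an item
     scm_i_pthread_mutex_unlock (&queue_lock);
*/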
9de87eea 1877#endif
76da80e7 1878
d823b11b 1879unsigned long
9de87eea 1880scm_std_usleep (unsigned long usecs)
5f05c406 1881{
d823b11b
MV
1882 struct timeval tv;
1883 tv.tv_usec = usecs % 1000000;
1884 tv.tv_sec = usecs / 1000000;
9de87eea
MV
1885 scm_std_select (0, NULL, NULL, NULL, &tv);
1886 return tv.tv_sec * 1000000 + tv.tv_usec;
5f05c406
MV
1887}
1888
9de87eea
MV
1889unsigned int
1890scm_std_sleep (unsigned int secs)
6c214b62 1891{
d823b11b
MV
1892 struct timeval tv;
1893 tv.tv_usec = 0;
1894 tv.tv_sec = secs;
9de87eea 1895 scm_std_select (0, NULL, NULL, NULL, &tv);
d823b11b 1896 return tv.tv_sec;
6c214b62
MD
1897}
1898
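/* Example (sketch): sleeping from guile mode while staying responsive
   to asyncs.  A nonzero return means the sleep ended early and gives
   (approximately) the unslept time.

     unsigned int left = scm_std_sleep (10);
     if (left > 0)
       ;   -- woken before the ten seconds were up
*/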
d823b11b
MV
1899/*** Misc */
1900
1901SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1902 (void),
1903 "Return the thread that called this function.")
1904#define FUNC_NAME s_scm_current_thread
1905{
9de87eea 1906 return SCM_I_CURRENT_THREAD->handle;
d823b11b
MV
1907}
1908#undef FUNC_NAME
1909
9de87eea
MV
1910static SCM
1911scm_c_make_list (size_t n, SCM fill)
1912{
1913 SCM res = SCM_EOL;
1914 while (n-- > 0)
1915 res = scm_cons (fill, res);
1916 return res;
1917}
1918
d823b11b
MV
1919SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
1920 (void),
1921 "Return a list of all threads.")
9bc4701c 1922#define FUNC_NAME s_scm_all_threads
d823b11b 1923{
9de87eea
MV
 1924 /* We cannot allocate while holding the thread_admin_mutex because
1925 of the way GC is done.
1926 */
1927 int n = thread_count;
1928 scm_i_thread *t;
1929 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
d823b11b 1930
9de87eea
MV
1931 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1932 l = &list;
1933 for (t = all_threads; t && n > 0; t = t->next_thread)
1934 {
2e77f720
LC
1935 if (t != scm_i_signal_delivery_thread)
1936 {
1937 SCM_SETCAR (*l, t->handle);
1938 l = SCM_CDRLOC (*l);
1939 }
9de87eea
MV
1940 n--;
1941 }
1942 *l = SCM_EOL;
1943 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1944 return list;
d823b11b 1945}
9de87eea 1946#undef FUNC_NAME
d823b11b
MV
1947
1948SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1949 (SCM thread),
1950 "Return @code{#t} iff @var{thread} has exited.\n")
1951#define FUNC_NAME s_scm_thread_exited_p
1952{
7888309b 1953 return scm_from_bool (scm_c_thread_exited_p (thread));
d823b11b
MV
1954}
1955#undef FUNC_NAME
1956
911782b7 1957int
d823b11b
MV
1958scm_c_thread_exited_p (SCM thread)
1959#define FUNC_NAME s_scm_thread_exited_p
5f05c406 1960{
9de87eea 1961 scm_i_thread *t;
d823b11b 1962 SCM_VALIDATE_THREAD (1, thread);
9de87eea 1963 t = SCM_I_THREAD_DATA (thread);
d823b11b 1964 return t->exited;
5f05c406 1965}
d823b11b 1966#undef FUNC_NAME
5f05c406 1967
9de87eea 1968static scm_i_pthread_cond_t wake_up_cond;
9bc4701c 1969int scm_i_thread_go_to_sleep;
9bc4701c
MD
1970static int threads_initialized_p = 0;
1971
1972void
1973scm_i_thread_put_to_sleep ()
1974{
6087fad9 1975 if (threads_initialized_p)
9bc4701c 1976 {
9de87eea 1977 scm_i_thread *t;
6087fad9 1978
9de87eea
MV
1979 scm_leave_guile ();
1980 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1981
74926120 1982 /* Signal all threads to go to sleep
0c97d7dd
MV
1983 */
1984 scm_i_thread_go_to_sleep = 1;
1985 for (t = all_threads; t; t = t->next_thread)
1986 scm_i_pthread_mutex_lock (&t->heap_mutex);
1987 scm_i_thread_go_to_sleep = 0;
9bc4701c
MD
1988 }
1989}
1990
b0dc3d71
MD
1991void
1992scm_i_thread_invalidate_freelists ()
1993{
9de87eea
MV
1994 /* thread_admin_mutex is already locked. */
1995
1996 scm_i_thread *t;
1997 for (t = all_threads; t; t = t->next_thread)
1998 if (t != SCM_I_CURRENT_THREAD)
1999 t->clear_freelists_p = 1;
b0dc3d71
MD
2000}
2001
9bc4701c
MD
2002void
2003scm_i_thread_wake_up ()
2004{
6087fad9 2005 if (threads_initialized_p)
9bc4701c 2006 {
9de87eea 2007 scm_i_thread *t;
9de87eea 2008
0c97d7dd
MV
2009 scm_i_pthread_cond_broadcast (&wake_up_cond);
2010 for (t = all_threads; t; t = t->next_thread)
2011 scm_i_pthread_mutex_unlock (&t->heap_mutex);
9de87eea
MV
2012 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
2013 scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
9bc4701c 2014 }
9bc4701c
MD
2015}
2016
2017void
2018scm_i_thread_sleep_for_gc ()
2019{
9de87eea 2020 scm_i_thread *t = suspend ();
8b0174c8
NJ
2021
2022 /* Don't put t->heap_mutex in t->held_mutex here, because if the
2023 thread is cancelled during the cond wait, the thread's cleanup
2024 function (scm_leave_guile_cleanup) will handle unlocking the
2025 heap_mutex, so we don't need to do that again in on_thread_exit.
2026 */
9de87eea 2027 scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
8b0174c8 2028
b0dc3d71 2029 resume (t);
9bc4701c
MD
2030}
2031
a4d106c7
MV
2032/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2033 */
d1138028 2034scm_i_pthread_mutex_t scm_i_critical_section_mutex;
a4d106c7
MV
2035int scm_i_critical_section_level = 0;
2036
661ae7ab 2037static SCM dynwind_critical_section_mutex;
a54a94b3 2038
9bc4701c 2039void
661ae7ab 2040scm_dynwind_critical_section (SCM mutex)
76da80e7 2041{
a4d106c7 2042 if (scm_is_false (mutex))
661ae7ab
MV
2043 mutex = dynwind_critical_section_mutex;
2044 scm_dynwind_lock_mutex (mutex);
2045 scm_dynwind_block_asyncs ();
9de87eea
MV
2046}
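
/* Example (sketch): entering a critical section for the rest of the
   current dynwind context.  Passing SCM_BOOL_F uses the global
   recursive dynwind_critical_section_mutex; asyncs stay blocked until
   the context is left.

     scm_dynwind_begin (0);
     scm_dynwind_critical_section (SCM_BOOL_F);
     ...   -- code that must not run concurrently with other critical sections
     scm_dynwind_end ();
*/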
2047
2048/*** Initialization */
2049
2050scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
2051scm_i_pthread_mutex_t scm_i_misc_mutex;
2052
d1138028
MV
2053#if SCM_USE_PTHREAD_THREADS
2054pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
2055#endif
2056
9de87eea
MV
2057void
2058scm_threads_prehistory (SCM_STACKITEM *base)
2059{
d1138028
MV
2060#if SCM_USE_PTHREAD_THREADS
2061 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
2062 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
2063 PTHREAD_MUTEX_RECURSIVE);
2064#endif
2065
2066 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
2067 scm_i_pthread_mutexattr_recursive);
9de87eea
MV
2068 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
2069 scm_i_pthread_cond_init (&wake_up_cond, NULL);
9de87eea
MV
2070 scm_i_pthread_key_create (&scm_i_freelist, NULL);
2071 scm_i_pthread_key_create (&scm_i_freelist2, NULL);
74926120 2072
9de87eea 2073 guilify_self_1 (base);
9bc4701c
MD
2074}
2075
d823b11b
MV
2076scm_t_bits scm_tc16_thread;
2077scm_t_bits scm_tc16_mutex;
2078scm_t_bits scm_tc16_condvar;
7bfd3b9e 2079
7bfd3b9e 2080void
9de87eea 2081scm_init_threads ()
7bfd3b9e 2082{
9de87eea 2083 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
d823b11b
MV
2084 scm_set_smob_mark (scm_tc16_thread, thread_mark);
2085 scm_set_smob_print (scm_tc16_thread, thread_print);
2086 scm_set_smob_free (scm_tc16_thread, thread_free);
2087
9de87eea
MV
2088 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
2089 scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
2090 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
2091 scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
9bc4701c 2092
9de87eea
MV
2093 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
2094 sizeof (fat_cond));
2095 scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
2096 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
2097 scm_set_smob_free (scm_tc16_condvar, fat_cond_free);
d823b11b 2098
9de87eea
MV
2099 scm_i_default_dynamic_state = SCM_BOOL_F;
2100 guilify_self_2 (SCM_BOOL_F);
9bc4701c 2101 threads_initialized_p = 1;
a4d106c7 2102
661ae7ab 2103 dynwind_critical_section_mutex =
a4d106c7 2104 scm_permanent_object (scm_make_recursive_mutex ());
7bfd3b9e 2105}
89e00824 2106
5f05c406 2107void
9de87eea 2108scm_init_threads_default_dynamic_state ()
5f05c406 2109{
9de87eea
MV
2110 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
2111 scm_i_default_dynamic_state = scm_permanent_object (state);
5f05c406
MV
2112}
2113
d823b11b 2114void
9de87eea 2115scm_init_thread_procs ()
d823b11b 2116{
9de87eea 2117#include "libguile/threads.x"
d823b11b
MV
2118}
2119
89e00824
ML
2120/*
2121 Local Variables:
2122 c-file-style: "gnu"
2123 End:
2124*/