/* libguile/threads.c (from bpt/guile.git; web view residue: "Expand
   DEFFROM and DEFTO macros in discouraged.c" / "CommitLineData") */
1/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19\f
20
21#define _GNU_SOURCE
22
23#include "libguile/_scm.h"
24
25#if HAVE_UNISTD_H
26#include <unistd.h>
27#endif
#include <stdio.h>
#include <stdlib.h>  /* for abort, malloc */
#include <assert.h>
30
31#ifdef HAVE_STRING_H
32#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
33#endif
34
35#if HAVE_SYS_TIME_H
36#include <sys/time.h>
37#endif
38
39#include "libguile/validate.h"
40#include "libguile/root.h"
41#include "libguile/eval.h"
42#include "libguile/async.h"
43#include "libguile/ports.h"
44#include "libguile/threads.h"
45#include "libguile/dynwind.h"
46#include "libguile/iselect.h"
47#include "libguile/fluids.h"
48#include "libguile/continuations.h"
49#include "libguile/gc.h"
50#include "libguile/init.h"
51#include "libguile/scmsigs.h"
52#include "libguile/strings.h"
53
54#ifdef __MINGW32__
55#ifndef ETIMEDOUT
56# define ETIMEDOUT WSAETIMEDOUT
57#endif
58# include <fcntl.h>
59# include <process.h>
60# define pipe(fd) _pipe (fd, 256, O_BINARY)
61#endif /* __MINGW32__ */
62
63static void
64to_timespec (SCM t, scm_t_timespec *waittime)
65{
66 if (scm_is_pair (t))
67 {
68 waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
69 waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
70 }
71 else
72 {
73 double time = scm_to_double (t);
74 double sec = scm_c_truncate (time);
75
76 waittime->tv_sec = (long) sec;
77 waittime->tv_nsec = (long) ((time - sec) * 1000000000);
78 }
79}
80
81/*** Queues */
82
/* Make an empty queue data structure.  A queue is a cons cell: the
   cdr holds the list of elements (front-most first) and the car
   points at the last cell of that list, or SCM_EOL when the queue is
   empty.
 */
static SCM
make_queue ()
{
  return scm_cons (SCM_EOL, SCM_EOL);
}
90
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
 */
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell becomes the whole list.  */
    SCM_SETCDR (q, c);
  else
    /* Append after the current last cell (kept in the car of Q).  */
    SCM_SETCDR (SCM_CAR (q), c);
  /* The new cell is now the last cell.  */
  SCM_SETCAR (q, c);
  return c;
}
105
106/* Remove the element that the handle C refers to from the queue Q. C
107 must have been returned from a call to enqueue. The return value
108 is zero when the element referred to by C has already been removed.
109 Otherwise, 1 is returned.
110*/
111static int
112remqueue (SCM q, SCM c)
113{
114 SCM p, prev = q;
115 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
116 {
117 if (scm_is_eq (p, c))
118 {
119 if (scm_is_eq (c, SCM_CAR (q)))
120 SCM_SETCAR (q, SCM_CDR (c));
121 SCM_SETCDR (prev, SCM_CDR (c));
122 return 1;
123 }
124 prev = p;
125 }
126 return 0;
127}
128
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c = SCM_CDR (q);
  if (scm_is_null (c))
    return SCM_BOOL_F;
  else
    {
      /* Pop the front cell off the list.  */
      SCM_SETCDR (q, SCM_CDR (c));
      /* If the list became empty, clear the tail pointer too.  */
      if (scm_is_null (SCM_CDR (q)))
        SCM_SETCAR (q, SCM_EOL);
      return SCM_CAR (c);
    }
}
146
147/*** Thread smob routines */
148
/* GC mark hook for the thread smob: mark every SCM object reachable
   from the thread structure.  Returns t->dynamic_state so the GC
   marks it via tail call, per the smob mark protocol.  */
static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->cleanup_handler);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->mutexes);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  return t->dynamic_state;
}
162
163static int
164thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
165{
166 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
167 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
168 the struct case, hence we go via a union, and extract according to the
169 size of pthread_t. */
170 union {
171 scm_i_pthread_t p;
172 unsigned short us;
173 unsigned int ui;
174 unsigned long ul;
175 scm_t_uintmax um;
176 } u;
177 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
178 scm_i_pthread_t p = t->pthread;
179 scm_t_uintmax id;
180 u.p = p;
181 if (sizeof (p) == sizeof (unsigned short))
182 id = u.us;
183 else if (sizeof (p) == sizeof (unsigned int))
184 id = u.ui;
185 else if (sizeof (p) == sizeof (unsigned long))
186 id = u.ul;
187 else
188 id = u.um;
189
190 scm_puts ("#<thread ", port);
191 scm_uintprint (id, 10, port);
192 scm_puts (" (", port);
193 scm_uintprint ((scm_t_bits)t, 16, port);
194 scm_puts (")>", port);
195 return 1;
196}
197
/* Smob free hook: release the thread structure.  Only ever reached
   after the thread has exited (the assert enforces this invariant).  */
static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}
206
207/*** Blocking on queues. */
208
209/* See also scm_i_queue_async_cell for how such a block is
210 interrputed.
211*/
212
213/* Put the current thread on QUEUE and go to sleep, waiting for it to
214 be woken up by a call to 'unblock_from_queue', or to be
215 interrupted. Upon return of this function, the current thread is
216 no longer on QUEUE, even when the sleep has been interrupted.
217
218 The QUEUE data structure is assumed to be protected by MUTEX and
219 the caller of block_self must hold MUTEX. It will be atomically
220 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
221
222 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
223 as MUTEX is needed.
224
225 When WAITTIME is not NULL, the sleep will be aborted at that time.
226
227 The return value of block_self is an errno value. It will be zero
228 when the sleep has been successfully completed by a call to
229 unblock_from_queue, EINTR when it has been interrupted by the
230 delivery of a system async, and ETIMEDOUT when the timeout has
231 expired.
232
233 The system asyncs themselves are not executed by block_self.
234*/
/* See the long comment above for the full contract of block_self.  */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
            const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* scm_i_setup_sleep registers us as sleeping so that asyncs can
     interrupt the wait; a non-zero return apparently means an
     interruption is already pending, so we do not sleep at all.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Asyncs must not run while we are on QUEUE; see the contract
         above ("asyncs themselves are not executed by block_self").  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
        err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
        err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
         report this only when no other error (such as a timeout) has
         happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
        err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
266
267/* Wake up the first thread on QUEUE, if any. The caller must hold
268 the mutex that protects QUEUE. The awoken thread is returned, or
269 #f when the queue was empty.
270 */
271static SCM
272unblock_from_queue (SCM queue)
273{
274 SCM thread = dequeue (queue);
275 if (scm_is_true (thread))
276 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
277 return thread;
278}
279
280/* Getting into and out of guile mode.
281 */
282
283/* Ken Raeburn observes that the implementation of suspend and resume
284 (and the things that build on top of them) are very likely not
285 correct (see below). We will need fix this eventually, and that's
286 why scm_leave_guile/scm_enter_guile are not exported in the API.
287
288 Ken writes:
289
290 Consider this sequence:
291
292 Function foo, called in Guile mode, calls suspend (maybe indirectly
293 through scm_leave_guile), which does this:
294
295 // record top of stack for the GC
296 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
297 var 't'
298 // save registers.
299 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
300 setjmp (t->regs); // here's most of the magic
301
302 ... and returns.
303
304 Function foo has a SCM value X, a handle on a non-immediate object, in
305 a caller-saved register R, and it's the only reference to the object
306 currently.
307
308 The compiler wants to use R in suspend, so it pushes the current
309 value, X, into a stack slot which will be reloaded on exit from
310 suspend; then it loads stuff into R and goes about its business. The
311 setjmp call saves (some of) the current registers, including R, which
312 no longer contains X. (This isn't a problem for a normal
313 setjmp/longjmp situation, where longjmp would be called before
314 setjmp's caller returns; the old value for X would be loaded back from
315 the stack after the longjmp, before the function returned.)
316
317 So, suspend returns, loading X back into R (and invalidating the jump
318 buffer) in the process. The caller foo then goes off and calls a
319 bunch of other functions out of Guile mode, occasionally storing X on
320 the stack again, but, say, much deeper on the stack than suspend's
321 stack frame went, and the stack slot where suspend had written X has
322 long since been overwritten with other values.
323
324 Okay, nothing actively broken so far. Now, let garbage collection
325 run, triggered by another thread.
326
327 The thread calling foo is out of Guile mode at the time, so the
328 garbage collector just scans a range of stack addresses. Too bad that
329 X isn't stored there. So the pointed-to storage goes onto the free
330 list, and I think you can see where things go from there.
331
332 Is there anything I'm missing that'll prevent this scenario from
333 happening? I mean, aside from, "well, suspend and scm_leave_guile
334 don't have many local variables, so they probably won't need to save
335 any registers on most systems, so we hope everything will wind up in
336 the jump buffer and we'll just get away with it"?
337
338 (And, going the other direction, if scm_leave_guile and suspend push
339 the stack pointer over onto a new page, and foo doesn't make further
340 function calls and thus the stack pointer no longer includes that
341 page, are we guaranteed that the kernel cannot release the now-unused
342 stack page that contains the top-of-stack pointer we just saved? I
343 don't know if any OS actually does that. If it does, we could get
344 faults in garbage collection.)
345
346 I don't think scm_without_guile has to have this problem, as it gets
347 more control over the stack handling -- but it should call setjmp
348 itself. I'd probably try something like:
349
350 // record top of stack for the GC
351 t->top = SCM_STACK_PTR (&t);
352 // save registers.
353 SCM_FLUSH_REGISTER_WINDOWS;
354 setjmp (t->regs);
355 res = func(data);
356 scm_enter_guile (t);
357
358 ... though even that's making some assumptions about the stack
359 ordering of local variables versus caller-saved registers.
360
361 For something like scm_leave_guile to work, I don't think it can just
362 rely on invalidated jump buffers. A valid jump buffer, and a handle
363 on the stack state at the point when the jump buffer was initialized,
364 together, would work fine, but I think then we're talking about macros
365 invoking setjmp in the caller's stack frame, and requiring that the
366 caller of scm_leave_guile also call scm_enter_guile before returning,
367 kind of like pthread_cleanup_push/pop calls that have to be paired up
368 in a function. (In fact, the pthread ones have to be paired up
369 syntactically, as if they might expand to a compound statement
370 incorporating the user's code, and invoking a compiler's
371 exception-handling primitives. Which might be something to think
372 about for cases where Guile is used with C++ exceptions or
373 pthread_cancel.)
374*/
375
/* Thread-specific-storage key holding the current thread's
   scm_i_thread pointer; its destructor is on_thread_exit (see
   init_thread_key below).  */
scm_i_pthread_key_t scm_i_thread_key;

/* Finish re-entering guile mode for thread T: clear the recorded
   stack top (meaning "in guile mode") and, if requested while we were
   suspended, drop the thread-local freelists -- presumably at the
   GC's behest; confirm against the GC code.  */
static void
resume (scm_i_thread *t)
{
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}
389
/* Opaque handle returned by scm_leave_guile and consumed by
   scm_enter_guile; in reality a scm_i_thread pointer, or NULL for "do
   nothing".  */
typedef void* scm_t_guile_ticket;

/* Re-enter guile mode using TICKET from a previous scm_leave_guile:
   retake the heap mutex and resume the thread.  */
static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
  scm_i_thread *t = (scm_i_thread *)ticket;
  if (t)
    {
      scm_i_pthread_mutex_lock (&t->heap_mutex);
      resume (t);
    }
}
402
/* Leave guile mode: record the stack top and register contents so the
   GC can scan this thread conservatively while it runs outside
   guile.  NOTE: see the long comment above for why this scheme is
   likely not fully correct.  Do not reorder these statements.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}
415
/* Leave guile mode and release the heap mutex so the GC can run while
   we are away.  Returns a ticket for scm_enter_guile.  */
static scm_t_guile_ticket
scm_leave_guile ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_mutex_unlock (&t->heap_mutex);
  return (scm_t_guile_ticket) t;
}
423
/* Protects all_threads and thread_count below.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Singly-linked list of every live thread, chained through the
   next_thread field.  */
static scm_i_thread *all_threads = NULL;
/* Number of entries on all_threads.  */
static int thread_count;

/* Dynamic state used as parent when entering Guile without an
   explicit one (see scm_with_guile).  */
static SCM scm_i_default_dynamic_state;
429
430/* Perform first stage of thread initialisation, in non-guile mode.
431 */
432static void
433guilify_self_1 (SCM_STACKITEM *base)
434{
435 scm_i_thread *t = malloc (sizeof (scm_i_thread));
436
437 t->pthread = scm_i_pthread_self ();
438 t->handle = SCM_BOOL_F;
439 t->result = SCM_BOOL_F;
440 t->cleanup_handler = SCM_BOOL_F;
441 t->mutexes = SCM_EOL;
442 t->join_queue = SCM_EOL;
443 t->dynamic_state = SCM_BOOL_F;
444 t->dynwinds = SCM_EOL;
445 t->active_asyncs = SCM_EOL;
446 t->block_asyncs = 1;
447 t->pending_asyncs = 1;
448 t->last_debug_frame = NULL;
449 t->base = base;
450#ifdef __ia64__
451 /* Calculate and store off the base of this thread's register
452 backing store (RBS). Unfortunately our implementation(s) of
453 scm_ia64_register_backing_store_base are only reliable for the
454 main thread. For other threads, therefore, find out the current
455 top of the RBS, and use that as a maximum. */
456 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
457 {
458 ucontext_t ctx;
459 void *bsp;
460 getcontext (&ctx);
461 bsp = scm_ia64_ar_bsp (&ctx);
462 if (t->register_backing_store_base > bsp)
463 t->register_backing_store_base = bsp;
464 }
465#endif
466 t->continuation_root = SCM_EOL;
467 t->continuation_base = base;
468 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
469 t->sleep_mutex = NULL;
470 t->sleep_object = SCM_BOOL_F;
471 t->sleep_fd = -1;
472 /* XXX - check for errors. */
473 pipe (t->sleep_pipe);
474 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
475 scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
476 t->clear_freelists_p = 0;
477 t->gc_running_p = 0;
478 t->canceled = 0;
479 t->exited = 0;
480
481 t->freelist = SCM_EOL;
482 t->freelist2 = SCM_EOL;
483 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
484 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
485
486 scm_i_pthread_setspecific (scm_i_thread_key, t);
487
488 scm_i_pthread_mutex_lock (&t->heap_mutex);
489
490 scm_i_pthread_mutex_lock (&thread_admin_mutex);
491 t->next_thread = all_threads;
492 all_threads = t;
493 thread_count++;
494 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
495}
496
/* Perform second stage of thread initialisation, in guile mode:
   create the thread's Scheme handle (smob), its dynamic state
   (inheriting from PARENT when given, else the initial one), and its
   join queue; finally allow asyncs to run.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Setup is complete: lift the async block from guilify_self_1.  */
  t->block_asyncs = 0;
}
517
518\f
519/*** Fat mutexes */
520
521/* We implement our own mutex type since we want them to be 'fair', we
522 want to do fancy things while waiting for them (like running
523 asyncs) and we might want to add things that are nice for
524 debugging.
525*/
526
/* A "fat" mutex: a pthread mutex plus bookkeeping for fairness,
   recursion, and configurable unlock policies.  All fields except
   `lock' are protected by `lock' itself.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;                   /* thread handle of the owner, or #f */
  int level;                   /* how much the owner owns us.
                                  < 0 for non-recursive mutexes */

  int unchecked_unlock;        /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock;   /* is it an error to unlock a mutex that is not
                                  owned by the current thread? */

  SCM waiting;                 /* the threads waiting for this mutex. */
} fat_mutex;

/* Accessors for the mutex smob.  */
#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
542
/* Perform thread tear-down, in guile mode: run the cleanup handler
   (if any), mark the thread exited, wake all joiners, and wake any
   thread waiting on a mutex this thread still holds.  V is the
   scm_i_thread pointer; called via scm_with_guile from
   on_thread_exit.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler first so it cannot run twice; catch all
         errors so tear-down always completes.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
                                      (scm_t_catch_body) scm_call_0, ptr,
                                      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake every thread blocked in join-thread on us.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* Give each mutex we still hold a chance to hand itself to a
     waiter.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_CAR (t->mutexes);
      fat_mutex *m  = SCM_MUTEX_DATA (mutex);
      scm_i_pthread_mutex_lock (&m->lock);

      unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      t->mutexes = SCM_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
584
/* Destructor for scm_i_thread_key: runs when a guilified thread
   terminates.  V is the scm_i_thread pointer.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode. */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* Re-install the key value, which pthreads cleared before calling
     us, so that scm_with_guile below finds the current thread.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down. */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure. */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list. */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
        *tp = t->next_thread;
        break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt. */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  /* Clear the key again so the destructor is not re-run.  */
  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
624
/* One-time initializer guard for the thread key below.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

/* Create scm_i_thread_key, registering on_thread_exit as its
   destructor; run exactly once via scm_i_pthread_once.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
632
633/* Perform any initializations necessary to bring the current thread
634 into guile mode, initializing Guile itself, if necessary.
635
636 BASE is the stack base to use with GC.
637
638 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
639 which case the default dynamic state is used.
640
641 Return zero when the thread was in guile mode already; otherwise
642 return 1.
643*/
644
/* See the contract in the comment above: bring the calling thread
   into guile mode, initializing Guile itself if this is the very
   first thread.  Returns 1 when the thread actually entered guile
   mode, 0 when it was already there.  */
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
        {
          /* First thread ever to enter Guile.  Run the full
             initialization.
          */
          scm_i_init_guile (base);
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
        }
      else
        {
          /* Guile is already initialized, but this thread enters it for
             the first time.  Only initialize this thread.
          */
          scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
          guilify_self_1 (base);
          guilify_self_2 (parent);
        }
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
         resume it.

         XXX - base might be lower than when this thread was first
         guilified.
       */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
695
696#if SCM_USE_PTHREAD_THREADS
697
698#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
699/* This method for GNU/Linux and perhaps some other systems.
700 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
701 available on them. */
702#define HAVE_GET_THREAD_STACK_BASE
703
/* Return the base of the calling thread's stack, for conservative GC
   scanning, using pthread_getattr_np/pthread_attr_getstack (the
   GNU/Linux method; see the #if comment above).  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  /* Sanity check: if our own stack variable does not lie within the
     reported range, the report is bogus -- fall back.  */
  if ((void *)&attr < start || (void *)&attr >= end)
    return scm_get_stack_base ();
  else
#endif
    {
      /* The "base" is whichever end the stack grows away from.  */
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}
733
734#elif HAVE_PTHREAD_GET_STACKADDR_NP
735/* This method for MacOS X.
736 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
737 but as of 2006 there's nothing obvious at apple.com. */
738#define HAVE_GET_THREAD_STACK_BASE
/* MacOS X variant: the system directly reports the stack address.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}
744
745#elif defined (__MINGW32__)
746/* This method for mingw. In mingw the basic scm_get_stack_base can be used
747 in any thread. We don't like hard-coding the name of a system, but there
748 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
749 work. */
750#define HAVE_GET_THREAD_STACK_BASE
/* mingw variant: scm_get_stack_base works in any thread there (see
   the comment above).  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}
756
757#endif /* pthread methods of get_thread_stack_base */
758
759#else /* !SCM_USE_PTHREAD_THREADS */
760
761#define HAVE_GET_THREAD_STACK_BASE
762
/* Single-threaded build: there is only the main stack.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}
768
769#endif /* !SCM_USE_PTHREAD_THREADS */
770
771#ifdef HAVE_GET_THREAD_STACK_BASE
772
/* Public entry point: initialize Guile for the calling thread, using
   the detected stack base and the default dynamic state.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
                               scm_i_default_dynamic_state);
}
779
780#endif
781
/* Run FUNC (DATA) in guile mode, entering it if necessary, with the
   default dynamic state as parent.  Returns FUNC's result.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
                                      scm_i_default_dynamic_state);
}
788
/* pthread cleanup handler: leave guile mode if the thread is
   cancelled while inside scm_i_with_guile_and_parent.  X is unused;
   the ticket is deliberately dropped.  */
static void
scm_leave_guile_cleanup (void *x)
{
  scm_leave_guile ();
}
794
/* Run FUNC (DATA) in guile mode with PARENT as the dynamic-state
   parent.  If the thread was not already in guile mode, enter it for
   the duration (leaving again afterwards, even on cancellation via
   the cleanup handler).  FUNC runs inside a continuation barrier.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  /* A local serves as the conservative stack base for this entry.  */
  SCM_STACKITEM base_item;

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
      scm_leave_guile ();
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}
815
816void *
817scm_without_guile (void *(*func)(void *), void *data)
818{
819 void *res;
820 scm_t_guile_ticket t;
821 t = scm_leave_guile ();
822 res = func (data);
823 scm_enter_guile (t);
824 return res;
825}
826
827/*** Thread creation */
828
/* Hand-off record between call-with-new-thread and the child thread.
   The parent waits on `cond' (under `mutex') until the child stores
   its handle in `thread'.  */
typedef struct {
  SCM parent;        /* dynamic state to inherit */
  SCM thunk;         /* thunk to run in the child */
  SCM handler;       /* error handler, or SCM_UNDEFINED */
  SCM thread;        /* child's thread object, filled in by the child */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
837
/* Body of a thread created by call-with-new-thread, run in guile
   mode: publish our thread object to the parent, then run the thunk
   (under a catch when a handler was supplied) and store its value as
   the thread result.  D is the launch_data, which lives on the
   parent's stack -- it must not be touched after the signal.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy what we need out of DATA before signalling the parent.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
859
/* pthread start routine for call-with-new-thread: detach (the thread
   is joined at the Scheme level, not via pthread_join) and enter
   guile mode to run really_launch.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
868
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
            (SCM thunk, SCM handler),
            "Call @code{thunk} in a new thread and with a new dynamic state,\n"
            "returning a new thread object representing the thread.  The procedure\n"
            "@var{thunk} is called via @code{with-continuation-barrier}.\n"
            "\n"
            "When @var{handler} is specified, then @var{thunk} is called from\n"
            "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
            "handler.  This catch is established inside the continuation barrier.\n"
            "\n"
            "Once @var{thunk} or @var{handler} returns, the return value is made\n"
            "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  /* DATA lives on our stack; the child copies what it needs before we
     return (we wait on the condvar for its signal).  */
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
              handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the child has published its thread object.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
912
/* C-level analogue of launch_data, used by scm_spawn_thread: the
   parent waits on `cond' until the child stores its handle in
   `thread'.  */
typedef struct {
  SCM parent;                  /* dynamic state to inherit */
  scm_t_catch_body body;       /* C body to run in the child */
  void *body_data;
  scm_t_catch_handler handler; /* error handler, or NULL */
  void *handler_data;
  SCM thread;                  /* child's thread object, set by child */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
923
/* Body of a thread created by scm_spawn_thread, run in guile mode:
   publish our thread object to the parent, then run BODY (under a
   catch when HANDLER is non-NULL) and store its value as the thread
   result.  D is the spawn_data on the parent's stack; copy fields out
   before signalling.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
                                    body, body_data,
                                    handler, handler_data);

  return 0;
}
948
/* pthread start routine for scm_spawn_thread: detach and enter guile
   mode to run really_spawn.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
957
/* C API: create a new Guile thread running BODY (BODY_DATA), with
   HANDLER (HANDLER_DATA) catching errors when non-NULL.  Blocks until
   the child is up, then returns its thread object.  Signals a system
   error if the underlying thread cannot be created.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
                  scm_t_catch_handler handler, void *handler_data)
{
  /* DATA lives on our stack; we wait for the child's signal before
     returning, so it stays valid long enough.  */
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the child has published its thread object.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
988
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
            (),
"Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Returns #t if a reschedule apparently took place (per the
     underlying sched_yield wrapper).  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
997
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
            (SCM thread),
"Asynchronously force the target @var{thread} to terminate. @var{thread} "
"cannot be the current thread, and if @var{thread} has already terminated or "
"been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  /* Flip the canceled flag under the admin mutex so that exactly one
     caller issues the underlying cancellation.  */
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Release the mutex before cancelling; the dying thread's
         tear-down takes this mutex itself.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1022
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
            (SCM thread, SCM proc),
"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
"This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f means "remove the handler"; anything else must be a thunk.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Installing a handler on a thread that is already gone (or going)
     is a silent no-op.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1046
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
            (SCM thread),
"Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* A thread that exited or was canceled reports no handler.  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1065
/* C API convenience: join THREAD with no timeout and no timeout
   value.  */
SCM scm_join_thread (SCM thread)
{
  return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
}
1070
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
            (SCM thread, SCM timeout, SCM timeoutval),
"Suspend execution of the calling thread until the target @var{thread} "
"terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  /* RES defaults to #f, or to TIMEOUTVAL when one was supplied; it is
     replaced by the thread's result on a successful join.  */
  SCM res = SCM_BOOL_F;

  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      /* Loop because block_self may return spuriously (EINTR from an
         async) before the target has actually exited.  */
      while (1)
        {
          int err = block_self (t->join_queue, thread, &t->admin_mutex,
                                timeout_ptr);
          if (err == 0)
            {
              if (t->exited)
                {
                  res = t->result;
                  break;
                }
            }
          else if (err == ETIMEDOUT)
            break;

          /* Run pending asyncs (SCM_TICK) without holding the target's
             admin mutex, then retry.  */
          scm_i_pthread_mutex_unlock (&t->admin_mutex);
          SCM_TICK;
          scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
        }
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1127
1128SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1129 (SCM obj),
1130 "Return @code{#t} if @var{obj} is a thread.")
1131#define FUNC_NAME s_scm_thread_p
1132{
1133 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1134}
1135#undef FUNC_NAME
1136
/* GC mark procedure for mutex smobs: mark the owning thread, and
   return the wait queue so the GC core marks it as well.  */
static SCM
fat_mutex_mark (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}
1144
/* GC free procedure for mutex smobs: destroy the underlying pthread
   mutex and release the fat_mutex record.  Returns 0 (no extra bytes
   freed beyond what scm_gc_free accounts for).  */
static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}
1153
/* Print procedure for mutex smobs: #<mutex HEXADDR>.  */
static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}
1163
/* Allocate and initialize a fat mutex smob.  RECURSIVE selects a
   recursive mutex (level counter starts at 0; -1 marks
   non-recursive).  UNCHECKED_UNLOCK permits unlocking an unlocked
   mutex; EXTERNAL_UNLOCK permits unlocking from a non-owner thread.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = recursive? 0 : -1;

  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Set waiting to a valid SCM before creating the smob, and only
     allocate the real queue afterwards — presumably so that a GC
     triggered by make_queue sees a consistent smob.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1183
/* C API: create a standard (non-recursive, strict-unlock) mutex.  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1188
/* Option symbols accepted by make-mutex.  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1192
1193SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1194 (SCM flags),
1195 "Create a new mutex. ")
1196#define FUNC_NAME s_scm_make_mutex_with_flags
1197{
1198 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1199
1200 SCM ptr = flags;
1201 while (! scm_is_null (ptr))
1202 {
1203 SCM flag = SCM_CAR (ptr);
1204 if (scm_is_eq (flag, unchecked_unlock_sym))
1205 unchecked_unlock = 1;
1206 else if (scm_is_eq (flag, allow_external_unlock_sym))
1207 external_unlock = 1;
1208 else if (scm_is_eq (flag, recursive_sym))
1209 recursive = 1;
1210 else
1211 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1212 ptr = SCM_CDR (ptr);
1213 }
1214 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1215}
1216#undef FUNC_NAME
1217
/* Create a recursive mutex (same as (make-mutex 'recursive)).  */
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1226
/* Throw key used when a lock is obtained on a mutex whose owner
   thread exited without unlocking it.  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1228
/* Acquire MUTEX for the current thread, waiting at most until the
   absolute time *TIMEOUT (or forever when TIMEOUT is NULL).  On
   return *RET is 1 if the lock was obtained, 0 on timeout.  The
   return value is #f on success, or a (key . message) pair describing
   an error/condition to throw (recursion error, abandoned mutex).  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM thread = scm_current_thread ();
  scm_i_thread *t = SCM_I_THREAD_DATA (thread);

  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    {
      /* Uncontended: take ownership and record the mutex in the
	 thread's owned-mutex list (for abandonment detection).  */
      m->owner = thread;
      scm_i_pthread_mutex_lock (&t->admin_mutex);
      t->mutexes = scm_cons (mutex, t->mutexes);
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      *ret = 1;
    }
  else if (scm_is_eq (m->owner, thread))
    {
      /* Re-entry by the owner: recursive mutexes (level >= 0) bump
	 the level; non-recursive ones signal an error.  */
      if (m->level >= 0)
	{
	  m->level++;
	  *ret = 1;
	}
      else
	err = scm_cons (scm_misc_error_key,
			scm_from_locale_string ("mutex already locked by "
						"current thread"));
    }
  else
    {
      /* Contended: loop blocking on the mutex's wait queue until we
	 become the owner, the owner is found to have exited, or the
	 timeout expires.  */
      int first_iteration = 1;
      while (1)
	{
	  /* Ownership may have been handed to us by an unlocker, or
	     the current owner may have exited (abandoned mutex).  */
	  if (scm_is_eq (m->owner, thread) || scm_c_thread_exited_p (m->owner))
	    {
	      scm_i_pthread_mutex_lock (&t->admin_mutex);
	      t->mutexes = scm_cons (mutex, t->mutexes);
	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	      *ret = 1;
	      if (scm_c_thread_exited_p (m->owner))
		{
		  /* We got the lock, but report the abandonment.  */
		  m->owner = thread;
		  err = scm_cons (scm_abandoned_mutex_error_key,
				  scm_from_locale_string ("lock obtained on "
							  "abandoned mutex"));
		}
	      break;
	    }
	  else if (!first_iteration)
	    {
	      /* After the first wait, check for timeout expiry and
		 give asyncs a chance to run (SCM_TICK) with m->lock
		 released.  */
	      if (timeout != NULL)
		{
		  gettimeofday (&current_time, NULL);
		  if (current_time.tv_sec > timeout->tv_sec ||
		      (current_time.tv_sec == timeout->tv_sec &&
		       current_time.tv_usec * 1000 > timeout->tv_nsec))
		    {
		      *ret = 0;
		      break;
		    }
		}
	      scm_i_pthread_mutex_unlock (&m->lock);
	      SCM_TICK;
	      scm_i_scm_pthread_mutex_lock (&m->lock);
	    }
	  else
	    first_iteration = 0;
	  /* Sleep on the wait queue; m->lock is released while
	     blocked and reacquired on wakeup.  */
	  block_self (m->waiting, mutex, &m->lock, timeout);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1307
/* C API: lock MX, blocking without a timeout.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED);
}
1312
/* lock-mutex: acquire M, optionally giving up at absolute time
   TIMEOUT.  Returns #t on success, #f on timeout; rethrows any
   (key . message) condition reported by fat_mutex_lock.  */
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 1, 0,
	    (SCM m, SCM timeout),
"Lock @var{mutex}. If the mutex is already locked, the calling thread "
"blocks until the mutex becomes available. The function returns when "
"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
"a thread already owns will succeed right away and will not block the "
"thread. That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  /* #f or absent both mean "wait forever".  */
  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  exception = fat_mutex_lock (m, waittime, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1340
/* Lock MUTEX for the duration of the current dynwind context: it is
   locked on (re)entry and unlocked on unwind, both explicitly.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1349
/* try-mutex: non-blocking lock attempt, implemented as a timed lock
   whose absolute deadline (time 0) is already past.  */
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
	    (SCM mutex),
"Try to lock @var{mutex}. If the mutex is already locked by someone "
"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  /* Deadline of 0 — presumably interpreted as an epoch time already
     in the past, so a contended lock times out immediately.  */
  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1371
1372/*** Fat condition variables */
1373
/* A fat condition variable: its own lock plus a queue of waiting
   threads.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

/* Smob predicate and data accessor for condition variables.  */
#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1381
/* Release MUTEX (one recursion level, or the lock itself), waking one
   waiter.  When COND is supplied, additionally wait on it: the unlock
   and the start of the wait are atomic with respect to m->lock, and
   when RELOCK is set the mutex is re-acquired before returning.
   WAITTIME, if non-NULL, is the absolute deadline for the wait.
   Returns 1 on success/signal, 0 on timeout.  Errors on unlocking a
   mutex we do not own, subject to the mutex's unchecked_unlock and
   allow_external_unlock options.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  /* Ownership checks: unlocking an unlocked mutex or someone else's
     mutex is an error unless the corresponding option was given at
     creation time.  */
  if (!scm_is_eq (m->owner, scm_current_thread ()))
    {
      if (scm_is_false (m->owner))
	{
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	}
      else if (!m->allow_external_unlock)
	{
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      /* unlock-and-wait on COND (wait-condition-variable path).  */
      int lock_ret = 0;

      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  /* Take the condvar lock before releasing the mutex so no
	     signal can slip between unlock and wait.  */
	  scm_i_scm_pthread_mutex_lock (&c->lock);
	  if (m->level > 0)
	    m->level--;
	  else
	    m->owner = unblock_from_queue (m->waiting);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  /* NOTE(review): block_asyncs is incremented on every loop
	     iteration but only decremented on the retry path below,
	     not when brk breaks out — verify the balance against the
	     callers/upstream history.  */
	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &c->lock, waittime);

	  if (err == 0)
	    {
	      /* Signalled.  */
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      /* Real error from the wait primitive.  */
	      errno = err;
	      scm_i_pthread_mutex_unlock (&c->lock);
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      if (relock)
		fat_mutex_lock (mutex, NULL, &lock_ret);
	      scm_i_pthread_mutex_unlock (&c->lock);
	      break;
	    }

	  /* EINTR: run asyncs, then loop to wait again.  */
	  scm_i_pthread_mutex_unlock (&c->lock);

	  t->block_asyncs--;
	  scm_async_click ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock: drop a recursion level or pass ownership to the
	 next queued waiter (#f when the queue is empty).  */
      if (m->level > 0)
	m->level--;
      else
	m->owner = unblock_from_queue (m->waiting);
      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1476
/* C API: unlock MX without waiting on a condition variable.  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1481
/* unlock-mutex: release MX; with COND also wait on the condition
   variable (without re-locking MX afterwards — compare
   wait-condition-variable, which relocks).  TIMEOUT bounds the
   wait.  */
SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
	    (SCM mx, SCM cond, SCM timeout),
"Unlocks @var{mutex} if the calling thread owns the lock on "
"@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
"thread results in undefined behaviour. Once a mutex has been unlocked, "
"one thread blocked on @var{mutex} is awakened and grabs the mutex "
"lock. Every call to @code{lock-mutex} by this thread must be matched "
"with a call to @code{unlock-mutex}. Only the last call to "
"@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! (SCM_UNBNDP (timeout)))
	{
	  to_timespec (timeout, &cwaittime);
	  waittime = &cwaittime;
	}
    }

  /* relock = 0: the mutex stays released after a condvar wait.  */
  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1510
1511SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1512 (SCM obj),
1513 "Return @code{#t} if @var{obj} is a mutex.")
1514#define FUNC_NAME s_scm_mutex_p
1515{
1516 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1517}
1518#undef FUNC_NAME
1519
/* Disabled mutex introspection procedures (mutex-owner, mutex-level);
   kept for reference but compiled out.  */
#if 0

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
	    (SCM mx),
	    "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM_VALIDATE_MUTEX (1, mx);
  return (SCM_MUTEX_DATA(mx))->owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of a recursive mutex, or -1\n"
	    "for a standard mutex.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

#endif
1544
/* GC mark procedure for condition-variable smobs: return the wait
   queue so the GC core marks it.  */
static SCM
fat_cond_mark (SCM cv)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  return c->waiting;
}
1551
1552static size_t
1553fat_cond_free (SCM mx)
1554{
1555 fat_cond *c = SCM_CONDVAR_DATA (mx);
1556 scm_i_pthread_mutex_destroy (&c->lock);
1557 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1558 return 0;
1559}
1560
/* Print procedure for condition-variable smobs:
   #<condition-variable HEXADDR>.  */
static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}
1570
/* make-condition-variable: allocate and initialize a condvar smob.  */
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  /* As in make_fat_mutex: give waiting a valid SCM before the smob
     exists, allocate the real queue afterwards.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1587
/* wait-condition-variable: atomically unlock MX and wait on CV,
   re-locking MX before returning (relock = 1).  T, when given, is the
   absolute deadline.  Returns #t if signalled, #f on timeout.  */
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
	    (SCM cv, SCM mx, SCM t),
"Wait until @var{cond-var} has been signalled.  While waiting, "
"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
"is locked again when this function returns.  When @var{time} is given, "
"it specifies a point in time where the waiting should be aborted.  It "
"can be either a integer as returned by @code{current-time} or a pair "
"as returned by @code{gettimeofday}.  When the waiting is aborted the "
"mutex is locked and @code{#f} is returned.  When the condition "
"variable is in fact signalled, the mutex is also locked and @code{#t} "
"is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1615
/* Wake one thread waiting on C (no-op when the queue is empty).  */
static void
fat_cond_signal (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  unblock_from_queue (c->waiting);
  scm_i_pthread_mutex_unlock (&c->lock);
}
1623
/* signal-condition-variable: wake at most one waiter of CV.  Always
   returns #t.  */
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1634
/* Wake every thread waiting on C by draining its queue.  */
static void
fat_cond_broadcast (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  /* unblock_from_queue returns a false value once the queue is
     empty.  */
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
  scm_i_pthread_mutex_unlock (&c->lock);
}
1643
/* broadcast-condition-variable: wake all waiters of CV.  Always
   returns #t.  */
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1654
1655SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1656 (SCM obj),
1657 "Return @code{#t} if @var{obj} is a condition variable.")
1658#define FUNC_NAME s_scm_condition_variable_p
1659{
1660 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1661}
1662#undef FUNC_NAME
1663
1664/*** Marking stacks */
1665
1666/* XXX - what to do with this? Do we need to handle this for blocked
1667 threads as well?
1668*/
/* On IA-64 the register backing store is a second stack that must
   also be scanned for SCM references; elsewhere this is a no-op.  */
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                \
    ucontext_t ctx;                                                   \
    SCM_STACKITEM * top, * bot;                                       \
    getcontext (&ctx);                                                \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,           \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM)));                                    \
    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base;  \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx);                   \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1683
/* GC hook: conservatively mark every thread's C stack, its handle,
   and its saved register set.  All threads must already be suspended
   (t->top records where each stack was captured).  */
void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      scm_gc_mark (t->handle);

#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      /* Registers saved at suspension may also hold SCM values.  */
      scm_mark_locations ((SCM_STACKITEM *) &t->regs,
			  ((size_t) sizeof(t->regs)
			   / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}
1708
1709/*** Select */
1710
/* select(2) replacement for Guile-mode threads.  The thread's sleep
   pipe is added to the read set so other threads can interrupt the
   wait; while blocked we leave guile mode so GC can proceed.
   Returns like select, with an interrupt surfacing as -1/EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  /* We always need a read set to hold the wakeup fd.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register the upcoming sleep; retry (servicing asyncs) until no
     wakeup is already pending.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  /* Preserve select's errno across re-entering guile mode.  */
  eno = errno;
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* We were woken via the sleep pipe: drain one byte (best
	 effort) and hide the wakeup fd from the caller.  If nothing
	 else was ready, report interruption.  */
      char dummy;
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1759
1760/* Convenience API for blocking while in guile mode. */
1761
1762#if SCM_USE_PTHREAD_THREADS
1763
/* Like scm_i_pthread_mutex_lock, but leaves guile mode while blocked
   so GC is not held up by the wait.  */
int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_mutex_lock (mutex);
  scm_enter_guile (t);
  return res;
}
1772
/* Dynwind unwind handler: unlock the pthread mutex passed as DATA.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1778
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is left.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1785
/* Like scm_i_pthread_cond_wait, but leaves guile mode while blocked.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_wait (cond, mutex);
  scm_enter_guile (t);
  return res;
}
1794
/* Like scm_i_pthread_cond_timedwait (absolute deadline WT), but
   leaves guile mode while blocked.  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  scm_enter_guile (t);
  return res;
}
1805
1806#endif
1807
1808unsigned long
1809scm_std_usleep (unsigned long usecs)
1810{
1811 struct timeval tv;
1812 tv.tv_usec = usecs % 1000000;
1813 tv.tv_sec = usecs / 1000000;
1814 scm_std_select (0, NULL, NULL, NULL, &tv);
1815 return tv.tv_sec * 1000000 + tv.tv_usec;
1816}
1817
1818unsigned int
1819scm_std_sleep (unsigned int secs)
1820{
1821 struct timeval tv;
1822 tv.tv_usec = 0;
1823 tv.tv_sec = secs;
1824 scm_std_select (0, NULL, NULL, NULL, &tv);
1825 return tv.tv_sec;
1826}
1827
1828/*** Misc */
1829
/* current-thread: the SCM handle of the calling thread.  */
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1838
1839static SCM
1840scm_c_make_list (size_t n, SCM fill)
1841{
1842 SCM res = SCM_EOL;
1843 while (n-- > 0)
1844 res = scm_cons (fill, res);
1845 return res;
1846}
1847
/* all-threads: a list of the handles of all live threads, excluding
   the internal signal-delivery thread.  */
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  /* The list was pre-allocated above; now fill it in place under the
     lock, truncating at the end if fewer entries were used.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1876
/* thread-exited?: Scheme-level wrapper around the C predicate
   below.  */
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1885
/* C predicate: has THREAD exited?  Validates its argument, reporting
   errors under the thread-exited? Scheme name.  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1896
/* Condition broadcast to wake threads sleeping for GC.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Flag telling all threads to park themselves for GC.  */
int scm_i_thread_go_to_sleep;
/* Set once scm_init_threads has run.  */
static int threads_initialized_p = 0;
1900
/* Stop the world for GC: raise the go-to-sleep flag and acquire every
   thread's heap mutex, which means each thread has parked.  The flag
   is cleared again once all mutexes are held.  No-op before thread
   initialization.  */
void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}
1919
/* Mark every other thread's freelists as stale so they are cleared on
   wakeup.  Caller must hold thread_admin_mutex.  */
void
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}
1930
/* Restart the world after GC: broadcast the wake-up condition,
   release every thread's heap mutex and the admin mutex, and re-enter
   guile mode ourselves.  Counterpart of scm_i_thread_put_to_sleep.  */
void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}
1945
/* Park the calling thread for GC: suspend, wait on the wake-up
   condition (releasing our heap mutex to the GC thread), resume.  */
void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}
1953
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
int scm_i_critical_section_level = 0;

/* Default mutex for scm_dynwind_critical_section when given #f.  */
static SCM dynwind_critical_section_mutex;
1960
/* Enter a critical section for the current dynwind context: lock
   MUTEX (or the shared default when MUTEX is #f) and block asyncs,
   both undone when the context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
1969
1970/*** Initialization */
1971
/* Per-thread freelist keys and the miscellaneous global mutex.  */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Attribute object for creating recursive pthread mutexes.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1978
/* Earliest thread-system setup, run before the heap exists: create
   the global mutexes, condition and thread-local keys, then register
   the main thread (phase 1) with BASE as its stack base.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex must be recursive: sections can
     nest.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
1997
/* Smob type tags for the three thread-related object types.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
2001
/* Register the thread, mutex and condition-variable smob types,
   finish main-thread setup (phase 2), and create the shared dynwind
   critical-section mutex.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
2028
/* Install a permanent copy of the current dynamic state as the
   default one inherited by new threads.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}
2035
/* Register the Scheme procedures defined by the SCM_DEFINE forms in
   this file (generated snarfing output).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
2041
2042/*
2043 Local Variables:
2044 c-file-style: "gnu"
2045 End:
2046*/