Remove double inclusion of <config.h> in `threads.c'.
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19 \f
20 #ifdef HAVE_CONFIG_H
21 # include <config.h>
22 #endif
23
24 #include "libguile/boehm-gc.h"
25 #include "libguile/_scm.h"
26
27 #if HAVE_UNISTD_H
28 #include <unistd.h>
29 #endif
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
32
33 #ifdef HAVE_STRING_H
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
35 #endif
36
37 #if HAVE_SYS_TIME_H
38 #include <sys/time.h>
39 #endif
40
41 #include "libguile/validate.h"
42 #include "libguile/root.h"
43 #include "libguile/eval.h"
44 #include "libguile/async.h"
45 #include "libguile/ports.h"
46 #include "libguile/threads.h"
47 #include "libguile/dynwind.h"
48 #include "libguile/iselect.h"
49 #include "libguile/fluids.h"
50 #include "libguile/continuations.h"
51 #include "libguile/gc.h"
52 #include "libguile/init.h"
53 #include "libguile/scmsigs.h"
54 #include "libguile/strings.h"
55
56 #ifdef __MINGW32__
57 #ifndef ETIMEDOUT
58 # define ETIMEDOUT WSAETIMEDOUT
59 #endif
60 # include <fcntl.h>
61 # include <process.h>
62 # define pipe(fd) _pipe (fd, 256, O_BINARY)
63 #endif /* __MINGW32__ */
64
/* Convert the Scheme timeout value T into *WAITTIME.  T is either a
   (SECONDS . MICROSECONDS) pair of integers, or a real number of
   seconds.  NOTE(review): the pair's cdr is scaled by 1000, i.e. it
   is taken to be in microseconds — confirm against callers.  */
static void
to_timespec (SCM t, scm_t_timespec *waittime)
{
  if (scm_is_pair (t))
    {
      waittime->tv_sec = scm_to_ulong (SCM_CAR (t));
      waittime->tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
    }
  else
    {
      /* Split a real number of seconds into whole seconds plus a
         nanosecond remainder.  */
      double time = scm_to_double (t);
      double sec = scm_c_truncate (time);

      waittime->tv_sec = (long) sec;
      waittime->tv_nsec = (long) ((time - sec) * 1000000000);
    }
}
82
83 /*** Queues */
84
85 /* Make an empty queue data structure.
86 */
87 static SCM
88 make_queue ()
89 {
90 return scm_cons (SCM_EOL, SCM_EOL);
91 }
92
/* Put T at the back of Q and return a handle that can be used with
   remqueue to remove T from Q again.
*/
static SCM
enqueue (SCM q, SCM t)
{
  SCM c = scm_cons (t, SCM_EOL);
  if (scm_is_null (SCM_CDR (q)))
    /* Queue was empty: the new cell becomes the whole list.  */
    SCM_SETCDR (q, c);
  else
    /* Append after the current last pair (kept in the car of Q).  */
    SCM_SETCDR (SCM_CAR (q), c);
  /* The new cell is now the last pair.  */
  SCM_SETCAR (q, c);
  return c;
}
107
/* Remove the element that the handle C refers to from the queue Q.  C
   must have been returned from a call to enqueue.  The return value
   is zero when the element referred to by C has already been removed.
   Otherwise, 1 is returned.
*/
static int
remqueue (SCM q, SCM c)
{
  /* Walk the item list (cdr of Q), keeping the predecessor so the
     link can be spliced out.  */
  SCM p, prev = q;
  for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
    {
      if (scm_is_eq (p, c))
	{
	  /* Keep the last-pair pointer in the car of Q consistent
	     when C was the last pair.  */
	  if (scm_is_eq (c, SCM_CAR (q)))
	    SCM_SETCAR (q, SCM_CDR (c));
	  SCM_SETCDR (prev, SCM_CDR (c));
	  return 1;
	}
      prev = p;
    }
  return 0;
}
130
/* Remove the front-most element from the queue Q and return it.
   Return SCM_BOOL_F when Q is empty.
*/
static SCM
dequeue (SCM q)
{
  SCM c = SCM_CDR (q);
  if (scm_is_null (c))
    return SCM_BOOL_F;
  else
    {
      /* Unlink the first pair; when the list becomes empty, also
         clear the last-pair pointer kept in the car of Q.  */
      SCM_SETCDR (q, SCM_CDR (c));
      if (scm_is_null (SCM_CDR (q)))
	SCM_SETCAR (q, SCM_EOL);
      return SCM_CAR (c);
    }
}
148
149 /*** Thread smob routines */
150
151
/* Smob print function for thread objects: writes
   "#<thread ID (ADDR)>", where ID is a numeric rendering of the
   pthread identifier and ADDR the address of the scm_i_thread.  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the union member whose size matches pthread_t, widening to
     scm_t_uintmax for printing.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}
186
187 static size_t
188 thread_free (SCM obj)
189 {
190 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
191 assert (t->exited);
192 scm_gc_free (t, sizeof (*t), "thread");
193 return 0;
194 }
195
196 /*** Blocking on queues. */
197
198 /* See also scm_i_queue_async_cell for how such a block is
199 interrputed.
200 */
201
/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The QUEUE data structure is assumed to be protected by MUTEX and
   the caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* A pending async means we should not sleep at all; report the
     interruption immediately.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block asyncs while we are on the queue so the wait itself is
         not re-entered by async handlers.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
255
256 /* Wake up the first thread on QUEUE, if any. The caller must hold
257 the mutex that protects QUEUE. The awoken thread is returned, or
258 #f when the queue was empty.
259 */
260 static SCM
261 unblock_from_queue (SCM queue)
262 {
263 SCM thread = dequeue (queue);
264 if (scm_is_true (thread))
265 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
266 return thread;
267 }
268
269 /* Getting into and out of guile mode.
270 */
271
272 /* Ken Raeburn observes that the implementation of suspend and resume
273 (and the things that build on top of them) are very likely not
274 correct (see below). We will need fix this eventually, and that's
275 why scm_leave_guile/scm_enter_guile are not exported in the API.
276
277 Ken writes:
278
279 Consider this sequence:
280
281 Function foo, called in Guile mode, calls suspend (maybe indirectly
282 through scm_leave_guile), which does this:
283
284 // record top of stack for the GC
285 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
286 var 't'
287 // save registers.
288 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
289 setjmp (t->regs); // here's most of the magic
290
291 ... and returns.
292
293 Function foo has a SCM value X, a handle on a non-immediate object, in
294 a caller-saved register R, and it's the only reference to the object
295 currently.
296
297 The compiler wants to use R in suspend, so it pushes the current
298 value, X, into a stack slot which will be reloaded on exit from
299 suspend; then it loads stuff into R and goes about its business. The
300 setjmp call saves (some of) the current registers, including R, which
301 no longer contains X. (This isn't a problem for a normal
302 setjmp/longjmp situation, where longjmp would be called before
303 setjmp's caller returns; the old value for X would be loaded back from
304 the stack after the longjmp, before the function returned.)
305
306 So, suspend returns, loading X back into R (and invalidating the jump
307 buffer) in the process. The caller foo then goes off and calls a
308 bunch of other functions out of Guile mode, occasionally storing X on
309 the stack again, but, say, much deeper on the stack than suspend's
310 stack frame went, and the stack slot where suspend had written X has
311 long since been overwritten with other values.
312
313 Okay, nothing actively broken so far. Now, let garbage collection
314 run, triggered by another thread.
315
316 The thread calling foo is out of Guile mode at the time, so the
317 garbage collector just scans a range of stack addresses. Too bad that
318 X isn't stored there. So the pointed-to storage goes onto the free
319 list, and I think you can see where things go from there.
320
321 Is there anything I'm missing that'll prevent this scenario from
322 happening? I mean, aside from, "well, suspend and scm_leave_guile
323 don't have many local variables, so they probably won't need to save
324 any registers on most systems, so we hope everything will wind up in
325 the jump buffer and we'll just get away with it"?
326
327 (And, going the other direction, if scm_leave_guile and suspend push
328 the stack pointer over onto a new page, and foo doesn't make further
329 function calls and thus the stack pointer no longer includes that
330 page, are we guaranteed that the kernel cannot release the now-unused
331 stack page that contains the top-of-stack pointer we just saved? I
332 don't know if any OS actually does that. If it does, we could get
333 faults in garbage collection.)
334
335 I don't think scm_without_guile has to have this problem, as it gets
336 more control over the stack handling -- but it should call setjmp
337 itself. I'd probably try something like:
338
339 // record top of stack for the GC
340 t->top = SCM_STACK_PTR (&t);
341 // save registers.
342 SCM_FLUSH_REGISTER_WINDOWS;
343 setjmp (t->regs);
344 res = func(data);
345 scm_enter_guile (t);
346
347 ... though even that's making some assumptions about the stack
348 ordering of local variables versus caller-saved registers.
349
350 For something like scm_leave_guile to work, I don't think it can just
351 rely on invalidated jump buffers. A valid jump buffer, and a handle
352 on the stack state at the point when the jump buffer was initialized,
353 together, would work fine, but I think then we're talking about macros
354 invoking setjmp in the caller's stack frame, and requiring that the
355 caller of scm_leave_guile also call scm_enter_guile before returning,
356 kind of like pthread_cleanup_push/pop calls that have to be paired up
357 in a function. (In fact, the pthread ones have to be paired up
358 syntactically, as if they might expand to a compound statement
359 incorporating the user's code, and invoking a compiler's
360 exception-handling primitives. Which might be something to think
361 about for cases where Guile is used with C++ exceptions or
362 pthread_cancel.)
363 */
364
365 scm_i_pthread_key_t scm_i_thread_key;
366
/* Mark T as being back in guile mode again: a NULL `top' tells the GC
   to scan the thread's whole stack rather than only the part above
   the recorded top.  */
static void
resume (scm_i_thread *t)
{
  t->top = NULL;
}
372
373 typedef void* scm_t_guile_ticket;
374
375 static void
376 scm_enter_guile (scm_t_guile_ticket ticket)
377 {
378 scm_i_thread *t = (scm_i_thread *)ticket;
379 if (t)
380 {
381 resume (t);
382 }
383 }
384
/* Leave guile mode: record this thread's current stack top and
   register contents so the GC can conservatively scan them while the
   thread runs outside of Guile.  See the long comment above for why
   this scheme is known to be fragile.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}
397
398 static scm_t_guile_ticket
399 scm_leave_guile ()
400 {
401 scm_i_thread *t = suspend ();
402 return (scm_t_guile_ticket) t;
403 }
404
405 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
406 static scm_i_thread *all_threads = NULL;
407 static int thread_count;
408
409 static SCM scm_i_default_dynamic_state;
410
411 /* Perform first stage of thread initialisation, in non-guile mode.
412 */
413 static void
414 guilify_self_1 (SCM_STACKITEM *base)
415 {
416 scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");
417
418 t->pthread = scm_i_pthread_self ();
419 t->handle = SCM_BOOL_F;
420 t->result = SCM_BOOL_F;
421 t->cleanup_handler = SCM_BOOL_F;
422 t->mutexes = SCM_EOL;
423 t->join_queue = SCM_EOL;
424 t->dynamic_state = SCM_BOOL_F;
425 t->dynwinds = SCM_EOL;
426 t->active_asyncs = SCM_EOL;
427 t->block_asyncs = 1;
428 t->pending_asyncs = 1;
429 t->last_debug_frame = NULL;
430 t->base = base;
431 #ifdef __ia64__
432 /* Calculate and store off the base of this thread's register
433 backing store (RBS). Unfortunately our implementation(s) of
434 scm_ia64_register_backing_store_base are only reliable for the
435 main thread. For other threads, therefore, find out the current
436 top of the RBS, and use that as a maximum. */
437 t->register_backing_store_base = scm_ia64_register_backing_store_base ();
438 {
439 ucontext_t ctx;
440 void *bsp;
441 getcontext (&ctx);
442 bsp = scm_ia64_ar_bsp (&ctx);
443 if (t->register_backing_store_base > bsp)
444 t->register_backing_store_base = bsp;
445 }
446 #endif
447 t->continuation_root = SCM_EOL;
448 t->continuation_base = base;
449 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
450 t->sleep_mutex = NULL;
451 t->sleep_object = SCM_BOOL_F;
452 t->sleep_fd = -1;
453 /* XXX - check for errors. */
454 pipe (t->sleep_pipe);
455 scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
456 t->current_mark_stack_ptr = NULL;
457 t->current_mark_stack_limit = NULL;
458 t->canceled = 0;
459 t->exited = 0;
460
461 scm_i_pthread_setspecific (scm_i_thread_key, t);
462
463 scm_i_pthread_mutex_lock (&thread_admin_mutex);
464 t->next_thread = all_threads;
465 all_threads = t;
466 thread_count++;
467 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
468 }
469
/* Perform second stage of thread initialisation, in guile mode:
   create the SCM-level objects (thread smob, continuation root,
   dynamic state, join queue) and unblock asyncs.  PARENT is the
   dynamic state to inherit from, or #f for the initial one.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Now that everything is set up, allow system asyncs again (they
     were blocked in guilify_self_1).  */
  t->block_asyncs = 0;
}
490
491 \f
492 /*** Fat mutexes */
493
494 /* We implement our own mutex type since we want them to be 'fair', we
495 want to do fancy things while waiting for them (like running
496 asyncs) and we might want to add things that are nice for
497 debugging.
498 */
499
/* A "fat" mutex: a pthread mutex plus the bookkeeping needed for
   fairness, recursion and error checking at the Scheme level.  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;
  int level; /* how much the owner owns us.  <= 1 for non-recursive mutexes */

  int recursive; /* allow recursive locking? */
  int unchecked_unlock; /* is it an error to unlock an unlocked mutex? */
  int allow_external_unlock; /* is it an error to unlock a mutex that is not
				owned by the current thread? */

  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

/* Accessors for the mutex smob.  */
#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
515
/* Perform thread tear-down, in guile mode.
 */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *) v;

  if (!scm_is_false (t->cleanup_handler))
    {
      SCM ptr = t->cleanup_handler;

      /* Clear the handler before running it so it runs at most once,
         and catch everything so tear-down always continues.  */
      t->cleanup_handler = SCM_BOOL_F;
      t->result = scm_internal_catch (SCM_BOOL_T,
				      (scm_t_catch_body) scm_call_0, ptr,
				      scm_handle_by_message_noexit, NULL);
    }

  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake up every thread blocked in join-thread on this one.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  /* Wake up threads still waiting on mutexes this thread held, so
     they get a chance to notice that the owner is gone.  */
  while (!scm_is_null (t->mutexes))
    {
      SCM mutex = SCM_CAR (t->mutexes);
      fat_mutex *m  = SCM_MUTEX_DATA (mutex);
      scm_i_pthread_mutex_lock (&m->lock);

      unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      t->mutexes = SCM_CDR (t->mutexes);
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return NULL;
}
557
/* Destructor for the scm_i_thread_key pthread key: run the guile-mode
   part of thread tear-down, then unlink the thread from the global
   list.  */
static void
on_thread_exit (void *v)
{
  /* This handler is executed in non-guile mode.  */
  scm_i_thread *t = (scm_i_thread *) v, **tp;

  /* Re-install the key value: pthreads clears it before calling the
     destructor, but the code below still needs the current thread.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Ensure the signal handling thread has been launched, because we might be
     shutting it down.  */
  scm_i_ensure_signal_delivery_thread ();

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.  */

  /* Note: `scm_with_guile ()' invokes `GC_local_malloc ()', which accesses
     thread-local storage (TLS).  If said storage is accessed using
     `pthread_getspecific ()', then it may be inaccessible at this point,
     having been destroyed earlier, since the invocation order of destructors
     associated with pthread keys is unspecified:

     http://www.opengroup.org/onlinepubs/009695399/functions/pthread_key_create.html

     Thus, `libgc' *must* be compiled with `USE_COMPILER_TLS' for this code
     to work.  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.  */
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;

  /* If there's only one other thread, it could be the signal delivery
     thread, so we need to notify it to shut down by closing its read pipe.
     If it's not the signal delivery thread, then closing the read pipe isn't
     going to hurt.  */
  if (thread_count <= 1)
    scm_i_close_signal_pipe ();

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  /* Clear the key again so it does not dangle past this destructor.  */
  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
608
609 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
610
/* Create the pthread key that holds each thread's scm_i_thread and
   registers on_thread_exit as its destructor.  Run exactly once via
   scm_i_pthread_once.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
616
617 /* Perform any initializations necessary to bring the current thread
618 into guile mode, initializing Guile itself, if necessary.
619
620 BASE is the stack base to use with GC.
621
622 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
623 which case the default dynamic state is used.
624
625 Return zero when the thread was in guile mode already; otherwise
626 return 1.
627 */
628
/* Bring the current thread into guile mode (see the comment above for
   the meaning of BASE and PARENT).  Returns 1 when the thread was not
   already in guile mode, 0 otherwise.  */
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 XXX - base might be lower than when this thread was first
	 guilified.
       */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
679
680 #if SCM_USE_PTHREAD_THREADS
681
682 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
683 /* This method for GNU/Linux and perhaps some other systems.
684 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
685 available on them. */
686 #define HAVE_GET_THREAD_STACK_BASE
687
/* Return the base of the calling thread's stack, for use as the GC
   scan root, using pthread_getattr_np/pthread_attr_getstack.  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  pthread_attr_t attr;
  void *start, *end;
  size_t size;

  pthread_getattr_np (pthread_self (), &attr);
  pthread_attr_getstack (&attr, &start, &size);
  end = (char *)start + size;

  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
     for the main thread, but we can use scm_get_stack_base in that
     case.
  */

#ifndef PTHREAD_ATTR_GETSTACK_WORKS
  /* If a local variable does not lie inside the reported range, the
     report is bogus (LinuxThreads main thread); fall back.  */
  if ((void *)&attr < start || (void *)&attr >= end)
    return (SCM_STACKITEM *) GC_stackbottom;
  else
#endif
    {
#if SCM_STACK_GROWS_UP
      return start;
#else
      return end;
#endif
    }
}
717
718 #elif HAVE_PTHREAD_GET_STACKADDR_NP
719 /* This method for MacOS X.
720 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
721 but as of 2006 there's nothing obvious at apple.com. */
722 #define HAVE_GET_THREAD_STACK_BASE
723 static SCM_STACKITEM *
724 get_thread_stack_base ()
725 {
726 return pthread_get_stackaddr_np (pthread_self ());
727 }
728
729 #elif defined (__MINGW32__)
730 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
731 in any thread. We don't like hard-coding the name of a system, but there
732 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
733 work. */
734 #define HAVE_GET_THREAD_STACK_BASE
735 static SCM_STACKITEM *
736 get_thread_stack_base ()
737 {
738 return (SCM_STACKITEM *) GC_stackbottom;
739 }
740
741 #endif /* pthread methods of get_thread_stack_base */
742
743 #else /* !SCM_USE_PTHREAD_THREADS */
744
745 #define HAVE_GET_THREAD_STACK_BASE
746
747 static SCM_STACKITEM *
748 get_thread_stack_base ()
749 {
750 return (SCM_STACKITEM *) GC_stackbottom;
751 }
752
753 #endif /* !SCM_USE_PTHREAD_THREADS */
754
755 #ifdef HAVE_GET_THREAD_STACK_BASE
756
757 void
758 scm_init_guile ()
759 {
760 scm_i_init_thread_for_guile (get_thread_stack_base (),
761 scm_i_default_dynamic_state);
762 }
763
764 #endif
765
/* Run FUNC (DATA) in guile mode with the default dynamic state as
   parent; see scm_i_with_guile_and_parent for details.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
772
/* pthread cleanup handler: leave guile mode even when the thread is
   cancelled or exits while FUNC runs.  X is unused.  */
SCM_UNUSED static void
scm_leave_guile_cleanup (void *x)
{
  scm_leave_guile ();
}
778
/* Run FUNC (DATA) in guile mode, with PARENT as the parent dynamic
   state.  The thread is entered into guile mode first if it was not
   already (and left again afterwards); either way FUNC runs inside a
   continuation barrier.  Returns FUNC's result.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
  void *res;
  int really_entered;
  SCM_STACKITEM base_item;

  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  if (really_entered)
    {
      /* The cleanup handler guarantees we leave guile mode even on
         pthread cancellation; push/pop must bracket the call
         syntactically.  */
      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
      res = scm_c_with_continuation_barrier (func, data);
      scm_i_pthread_cleanup_pop (0);
      scm_leave_guile ();
    }
  else
    res = scm_c_with_continuation_barrier (func, data);

  return res;
}
799
800 void *
801 scm_without_guile (void *(*func)(void *), void *data)
802 {
803 void *res;
804 scm_t_guile_ticket t;
805 t = scm_leave_guile ();
806 res = func (data);
807 scm_enter_guile (t);
808 return res;
809 }
810
811 /*** Thread creation */
812
/* Data handed from scm_call_with_new_thread to the new thread.  The
   mutex/cond pair is used to pass the new thread's SCM handle back to
   the creating thread before the thunk starts running.  */
typedef struct {
  SCM parent;
  SCM thunk;
  SCM handler;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
821
/* Guile-mode half of thread creation, run in the new thread: publish
   the thread handle to the creator, then run the thunk (wrapped in
   `catch' when a handler was given) and record its return value as
   the thread's result.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Hand our SCM handle back to the creator, which is blocked on
     data->cond waiting for it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
843
844 static void *
845 launch_thread (void *d)
846 {
847 launch_data *data = (launch_data *)d;
848 scm_i_pthread_detach (scm_i_pthread_self ());
849 scm_i_with_guile_and_parent (really_launch, d, data->parent);
850 return NULL;
851 }
852
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread. The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler. This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex across thread creation so the new thread cannot
     signal data.cond before we wait on it.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the new thread has stored its handle in data.thread;
     DATA lives on our stack, so we must not return before then.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
896
/* Data handed from scm_spawn_thread to the new thread; the C-level
   analogue of launch_data, with a C body/handler pair instead of
   Scheme procedures.  */
typedef struct {
  SCM parent;
  scm_t_catch_body body;
  void *body_data;
  scm_t_catch_handler handler;
  void *handler_data;
  SCM thread;
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
907
/* Guile-mode half of scm_spawn_thread, run in the new thread: publish
   the thread handle to the creator, then run BODY (under
   scm_internal_catch when a handler was given) and record its return
   value as the thread's result.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Hand our SCM handle back to the creator, which waits on
     data->cond for it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
932
933 static void *
934 spawn_thread (void *d)
935 {
936 spawn_data *data = (spawn_data *)d;
937 scm_i_pthread_detach (scm_i_pthread_self ());
938 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
939 return NULL;
940 }
941
/* C-level thread creation: run BODY (BODY_DATA) in a new Guile
   thread, with HANDLER (HANDLER_DATA) catching throws when HANDLER is
   non-NULL.  Returns the new thread's SCM handle.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  /* Hold the mutex across creation so the new thread cannot signal
     data.cond before we wait; DATA lives on our stack, so we must not
     return before the new thread is done with it.  */
  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
972
/* NOTE(review): scm_i_sched_yield presumably wraps the platform's
   yield primitive and returns nonzero on success — confirm in the
   pthreads glue.  */
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
981
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
	    (SCM thread),
	    "Asynchronously force the target @var{thread} to terminate. @var{thread} "
	    "cannot be the current thread, and if @var{thread} has already terminated or "
	    "been signaled to terminate, this function is a no-op.")
#define FUNC_NAME s_scm_cancel_thread
{
  scm_i_thread *t = NULL;

  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
  if (!t->canceled)
    {
      t->canceled = 1;
      /* Release the admin mutex before cancelling, so the target's
         exit handlers can take it during tear-down.  */
      scm_i_pthread_mutex_unlock (&t->admin_mutex);
      scm_i_pthread_cancel (t->pthread);
    }
  else
    scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1006
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
	    (SCM thread, SCM proc),
	    "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
	    "This handler will be called when the thread exits.")
#define FUNC_NAME s_scm_set_thread_cleanup_x
{
  scm_i_thread *t;

  SCM_VALIDATE_THREAD (1, thread);
  /* #f is accepted and removes any installed handler.  */
  if (!scm_is_false (proc))
    SCM_VALIDATE_THUNK (2, proc);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);

  /* Installing a handler on a thread that is already gone (or going)
     would never run it; silently ignore in that case.  */
  if (!(t->exited || t->canceled))
    t->cleanup_handler = proc;

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
1030
SCM_DEFINE (scm_thread_cleanup, "thread-cleanup", 1, 0, 0,
	    (SCM thread),
	    "Return the cleanup handler installed for the thread @var{thread}.")
#define FUNC_NAME s_scm_thread_cleanup
{
  scm_i_thread *t;
  SCM ret;

  SCM_VALIDATE_THREAD (1, thread);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_pthread_mutex_lock (&t->admin_mutex);
  /* A dead or cancelled thread reports no handler.  */
  ret = (t->exited || t->canceled) ? SCM_BOOL_F : t->cleanup_handler;
  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return ret;
}
#undef FUNC_NAME
1049
1050 SCM scm_join_thread (SCM thread)
1051 {
1052 return scm_join_thread_timed (thread, SCM_UNDEFINED, SCM_UNDEFINED);
1053 }
1054
SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
	    (SCM thread, SCM timeout, SCM timeoutval),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread_timed
{
  scm_i_thread *t;
  scm_t_timespec ctimeout, *timeout_ptr = NULL;
  SCM res = SCM_BOOL_F;

  /* RES holds the value returned on timeout; it defaults to #f.  */
  if (! (SCM_UNBNDP (timeoutval)))
    res = timeoutval;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);

  t = SCM_I_THREAD_DATA (thread);
  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);

  if (! SCM_UNBNDP (timeout))
    {
      to_timespec (timeout, &ctimeout);
      timeout_ptr = &ctimeout;
    }

  if (t->exited)
    res = t->result;
  else
    {
      /* Sleep on the join queue until the thread exits or the timeout
         expires; on EINTR, run pending asyncs (SCM_TICK) with the
         admin mutex dropped, then retry.  */
      while (1)
	{
	  int err = block_self (t->join_queue, thread, &t->admin_mutex,
				timeout_ptr);
	  if (err == 0)
	    {
	      if (t->exited)
		{
		  res = t->result;
		  break;
		}
	    }
	  else if (err == ETIMEDOUT)
	    break;

	  scm_i_pthread_mutex_unlock (&t->admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
	}
    }

  scm_i_pthread_mutex_unlock (&t->admin_mutex);

  return res;
}
#undef FUNC_NAME
1111
1112 SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
1113 (SCM obj),
1114 "Return @code{#t} if @var{obj} is a thread.")
1115 #define FUNC_NAME s_scm_thread_p
1116 {
1117 return SCM_I_IS_THREAD(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1118 }
1119 #undef FUNC_NAME
1120
1121
/* Smob free hook for fat mutexes: destroy the underlying pthread
   mutex and release the fat_mutex structure.  Returns 0 (no extra
   bytes to report to the GC).  */
static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}
1130
1131 static int
1132 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
1133 {
1134 fat_mutex *m = SCM_MUTEX_DATA (mx);
1135 scm_puts ("#<mutex ", port);
1136 scm_uintprint ((scm_t_bits)m, 16, port);
1137 scm_puts (">", port);
1138 return 1;
1139 }
1140
/* Allocate and initialize a fat mutex with the given feature flags
   and wrap it in a mutex smob.  */
static SCM
make_fat_mutex (int recursive, int unchecked_unlock, int external_unlock)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = 0;

  m->recursive = recursive;
  m->unchecked_unlock = unchecked_unlock;
  m->allow_external_unlock = external_unlock;

  /* Set `waiting' to a valid SCM before SCM_NEWSMOB so the object is
     never seen by the GC with an uninitialized field; the real queue
     is allocated afterwards (make_queue may itself trigger GC).  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
1161
/* C-level constructor for a standard (non-recursive) mutex with no
   special options.  */
SCM scm_make_mutex (void)
{
  return scm_make_mutex_with_flags (SCM_EOL);
}
1166
/* Option symbols recognized by make-mutex (see
   scm_make_mutex_with_flags).  */
SCM_SYMBOL (unchecked_unlock_sym, "unchecked-unlock");
SCM_SYMBOL (allow_external_unlock_sym, "allow-external-unlock");
SCM_SYMBOL (recursive_sym, "recursive");
1170
1171 SCM_DEFINE (scm_make_mutex_with_flags, "make-mutex", 0, 0, 1,
1172 (SCM flags),
1173 "Create a new mutex. ")
1174 #define FUNC_NAME s_scm_make_mutex_with_flags
1175 {
1176 int unchecked_unlock = 0, external_unlock = 0, recursive = 0;
1177
1178 SCM ptr = flags;
1179 while (! scm_is_null (ptr))
1180 {
1181 SCM flag = SCM_CAR (ptr);
1182 if (scm_is_eq (flag, unchecked_unlock_sym))
1183 unchecked_unlock = 1;
1184 else if (scm_is_eq (flag, allow_external_unlock_sym))
1185 external_unlock = 1;
1186 else if (scm_is_eq (flag, recursive_sym))
1187 recursive = 1;
1188 else
1189 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag));
1190 ptr = SCM_CDR (ptr);
1191 }
1192 return make_fat_mutex (recursive, unchecked_unlock, external_unlock);
1193 }
1194 #undef FUNC_NAME
1195
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* recursive = 1, unchecked_unlock = 0, external_unlock = 0.  */
  return make_fat_mutex (1, 0, 0);
}
#undef FUNC_NAME
1204
/* Error key thrown when a lock is obtained on a mutex whose previous
   owner thread exited without unlocking it.  */
SCM_SYMBOL (scm_abandoned_mutex_error_key, "abandoned-mutex-error");
1206
/* Core locking routine for fat mutexes.  Tries to lock MUTEX on
   behalf of OWNER (the current thread when OWNER is unbound),
   optionally giving up at absolute time TIMEOUT.  *RET is set to 1 on
   success, 0 on failure; the return value is #f or an error to throw
   as a (key . message) pair.  */
static SCM
fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);

  SCM new_owner = SCM_UNBNDP (owner) ? scm_current_thread() : owner;
  SCM err = SCM_BOOL_F;

  struct timeval current_time;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  while (1)
    {
      if (m->level == 0)
	{
	  /* Unowned: take it.  */
	  m->owner = new_owner;
	  m->level++;

	  /* Record the mutex in the owner's mutex list so abandonment
	     can be handled when the thread dies.  */
	  if (SCM_I_IS_THREAD (new_owner))
	    {
	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
	      scm_i_pthread_mutex_lock (&t->admin_mutex);
	      t->mutexes = scm_cons (mutex, t->mutexes);
	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
	    }
	  *ret = 1;
	  break;
	}
      else if (SCM_I_IS_THREAD (m->owner) && scm_c_thread_exited_p (m->owner))
	{
	  /* Previous owner died while holding the mutex: steal
	     ownership but report the abandonment to the caller.
	     NOTE(review): unlike the branch above, the level is left
	     as-is and the mutex is not pushed on the new owner's
	     mutex list -- confirm this asymmetry is intended.  */
	  m->owner = new_owner;
	  err = scm_cons (scm_abandoned_mutex_error_key,
			  scm_from_locale_string ("lock obtained on abandoned "
						  "mutex"));
	  *ret = 1;
	  break;
	}
      else if (scm_is_eq (m->owner, new_owner))
	{
	  /* Relocking by the same owner: OK only for recursive
	     mutexes.  */
	  if (m->recursive)
	    {
	      m->level++;
	      *ret = 1;
	    }
	  else
	    {
	      err = scm_cons (scm_misc_error_key,
			      scm_from_locale_string ("mutex already locked "
						      "by thread"));
	      *ret = 0;
	    }
	  break;
	}
      else
	{
	  /* Owned by someone else: check the deadline, then block on
	     the wait queue and retry after running asyncs.  */
	  if (timeout != NULL)
	    {
	      gettimeofday (&current_time, NULL);
	      if (current_time.tv_sec > timeout->tv_sec ||
		  (current_time.tv_sec == timeout->tv_sec &&
		   current_time.tv_usec * 1000 > timeout->tv_nsec))
		{
		  *ret = 0;
		  break;
		}
	    }
	  block_self (m->waiting, mutex, &m->lock, timeout);
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return err;
}
1283
/* Lock MX with no timeout and the current thread as owner;
   convenience wrapper around scm_lock_mutex_timed.  */
SCM scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1288
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
	    (SCM m, SCM timeout, SCM owner),
	    "Lock @var{mutex}. If the mutex is already locked, the calling thread "
	    "blocks until the mutex becomes available. The function returns when "
	    "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
	    "a thread already owns will succeed right away and will not block the "
	    "thread. That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  /* A missing or #f TIMEOUT means wait forever.  */
  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  /* fat_mutex_lock reports errors as a (key . message) pair rather
     than throwing, so the throw happens here with m->lock released.  */
  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1316
/* Within the current dynwind context, lock MUTEX on (re)entry and
   unlock it when the context is left, including on non-local exit.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1325
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
	    (SCM mutex),
	    "Try to lock @var{mutex}. If the mutex is already locked by someone "
	    "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  /* A deadline of "time 0" is always in the past, so fat_mutex_lock
     times out immediately instead of blocking.  */
  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1347
/*** Fat condition variables */

/* Internal representation of a Scheme condition variable.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects `waiting' */
  SCM waiting;			 /* the threads waiting for this condition. */
} fat_cond;

/* Accessors for the condvar smob.  */
#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1357
/* Core unlock routine for fat mutexes.  When COND is unbound this is
   a plain unlock.  Otherwise, atomically release MUTEX and wait on
   COND (up to absolute time WAITTIME, NULL meaning forever),
   relocking MUTEX on wakeup when RELOCK is nonzero.  Returns 1 on
   success / signal, 0 on timeout.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  SCM owner = m->owner;

  /* Enforce the ownership rules, honoring the mutex's
     unchecked-unlock and allow-external-unlock options.  */
  if (!scm_is_eq (owner, scm_current_thread ()))
    {
      if (m->level == 0)
	{
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = scm_current_thread ();
	}
      else if (!m->allow_external_unlock)
	{
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  /* Take the condvar lock before releasing the mutex so a
	     signal arriving in between cannot be lost.  */
	  scm_i_scm_pthread_mutex_lock (&c->lock);
	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    m->owner = unblock_from_queue (m->waiting);

	  scm_i_pthread_mutex_unlock (&m->lock);

	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &c->lock, waittime);

	  if (err == 0)
	    {
	      /* Signalled.  */
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_i_pthread_mutex_unlock (&c->lock);
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      /* NOTE(review): this exit path never decrements
		 t->block_asyncs (the EINTR path below does) -- verify
		 whether the counter is rebalanced elsewhere.  */
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      scm_i_pthread_mutex_unlock (&c->lock);
	      break;
	    }

	  scm_i_pthread_mutex_unlock (&c->lock);

	  /* EINTR: run pending asyncs, then loop and wait again.  */
	  t->block_asyncs--;
	  scm_async_click ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock: drop one level and wake the next waiter when
	 the mutex becomes free.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	m->owner = unblock_from_queue (m->waiting);

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
1456
/* Unlock MX without a condition variable; convenience wrapper around
   scm_unlock_mutex_timed.  */
SCM scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
1461
SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
	    (SCM mx, SCM cond, SCM timeout),
	    "Unlocks @var{mutex} if the calling thread owns the lock on "
	    "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
	    "thread results in undefined behaviour. Once a mutex has been unlocked, "
	    "one thread blocked on @var{mutex} is awakened and grabs the mutex "
	    "lock. Every call to @code{lock-mutex} by this thread must be matched "
	    "with a call to @code{unlock-mutex}. Only the last call to "
	    "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  /* TIMEOUT only makes sense together with COND, so it is only looked
     at when a condition variable is supplied.  */
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      if (! (SCM_UNBNDP (timeout)))
	{
	  to_timespec (timeout, &cwaittime);
	  waittime = &cwaittime;
	}
    }

  /* relock = 0: the caller keeps the mutex unlocked on return.  */
  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1490
1491 SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
1492 (SCM obj),
1493 "Return @code{#t} if @var{obj} is a mutex.")
1494 #define FUNC_NAME s_scm_mutex_p
1495 {
1496 return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
1497 }
1498 #undef FUNC_NAME
1499
SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
	    (SCM mx),
	    "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM owner;
  fat_mutex *m = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  m = SCM_MUTEX_DATA (mx);
  /* Read the owner under the mutex's own lock to get a consistent
     snapshot.  */
  scm_i_pthread_mutex_lock (&m->lock);
  owner = m->owner;
  scm_i_pthread_mutex_unlock (&m->lock);

  return owner;
}
#undef FUNC_NAME
1517
SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  /* Unlocked read: the level is a snapshot and may be stale by the
     time the caller sees it.  */
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME
1527
1528 SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
1529 (SCM mx),
1530 "Returns @code{#t} if the mutex @var{mx} is locked.")
1531 #define FUNC_NAME s_scm_mutex_locked_p
1532 {
1533 SCM_VALIDATE_MUTEX (1, mx);
1534 return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
1535 }
1536 #undef FUNC_NAME
1537
/* Smob free hook for fat condition variables: destroy the lock and
   release the fat_cond structure.  Returns 0 (no extra bytes to
   report to the GC).  */
static size_t
fat_cond_free (SCM mx)
{
  fat_cond *c = SCM_CONDVAR_DATA (mx);
  scm_i_pthread_mutex_destroy (&c->lock);
  scm_gc_free (c, sizeof (fat_cond), "condition-variable");
  return 0;
}
1546
1547 static int
1548 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1549 {
1550 fat_cond *c = SCM_CONDVAR_DATA (cv);
1551 scm_puts ("#<condition-variable ", port);
1552 scm_uintprint ((scm_t_bits)c, 16, port);
1553 scm_puts (">", port);
1554 return 1;
1555 }
1556
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  /* As in make_fat_mutex: give `waiting' a valid SCM before
     SCM_NEWSMOB so the GC never sees it uninitialized; the real queue
     is allocated afterwards.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1573
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
	    (SCM cv, SCM mx, SCM t),
	    "Wait until @var{cond-var} has been signalled. While waiting, "
	    "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
	    "is locked again when this function returns. When @var{time} is given, "
	    "it specifies a point in time where the waiting should be aborted. It "
	    "can be either a integer as returned by @code{current-time} or a pair "
	    "as returned by @code{gettimeofday}. When the waiting is aborted the "
	    "mutex is locked and @code{#f} is returned. When the condition "
	    "variable is in fact signalled, the mutex is also locked and @code{#t} "
	    "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  /* relock = 1: the mutex is reacquired before returning, whether the
     wait was signalled or timed out.  */
  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1601
/* Wake at most one thread blocked on C's wait queue.  */
static void
fat_cond_signal (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  unblock_from_queue (c->waiting);
  scm_i_pthread_mutex_unlock (&c->lock);
}
1609
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  /* Always returns #t; there is no indication whether a waiter was
     actually woken.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1620
/* Wake every thread blocked on C's wait queue by draining it;
   unblock_from_queue returns #f (false) once the queue is empty.  */
static void
fat_cond_broadcast (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
  scm_i_pthread_mutex_unlock (&c->lock);
}
1629
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  /* Always returns #t, regardless of how many waiters were woken.  */
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1640
1641 SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
1642 (SCM obj),
1643 "Return @code{#t} if @var{obj} is a condition variable.")
1644 #define FUNC_NAME s_scm_condition_variable_p
1645 {
1646 return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
1647 }
1648 #undef FUNC_NAME
1649
/*** Marking stacks */

/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?
 */
/* On IA-64 the register backing store is a second, upward-growing
   stack that must also be scanned for SCM references; on all other
   architectures this macro is a no-op.  The getcontext call flushes
   the registers so both the context and the backing store between its
   base and the current BSP can be marked.  */
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                \
    ucontext_t ctx;                                                   \
    SCM_STACKITEM * top, * bot;                                       \
    getcontext (&ctx);                                                \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,           \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM)));                                    \
    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base;  \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx);                   \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1669
1670
1671
1672 /*** Select */
1673
/* select(2) replacement that leaves guile mode while blocked and can
   be interrupted through the thread's sleep pipe (scm_i_setup_sleep
   arranges for a byte to be written to it).  Returns like select;
   a wakeup through the pipe alone is reported as -1/EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  /* We always need a read set to watch the wakeup pipe.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  /* Preserve select's errno across scm_enter_guile, which may clobber
     it.  */
  eno = errno;
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* Drain the wakeup byte and hide the pipe fd from the caller.
	 NOTE(review): the read return value is ignored; confirm a
	 short/failed read here is harmless.  */
      char dummy;
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      /* If only the wakeup fired, report interruption.  */
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1722
1723 /* Convenience API for blocking while in guile mode. */
1724
1725 #if SCM_USE_PTHREAD_THREADS
1726
/* Like scm_i_pthread_mutex_lock, but leave guile mode while blocked
   so GC is not held up by this thread.  */
int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_mutex_lock (mutex);
  scm_enter_guile (t);
  return res;
}
1735
/* Dynwind unwind handler: unlock the pthread mutex passed as DATA.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1741
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is left, including on non-local exit.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1748
/* Like scm_i_pthread_cond_wait, but leave guile mode for the duration
   of the wait.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_wait (cond, mutex);
  scm_enter_guile (t);
  return res;
}
1757
/* Like scm_i_pthread_cond_timedwait (absolute deadline WT), but leave
   guile mode for the duration of the wait.  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  scm_enter_guile (t);
  return res;
}
1768
1769 #endif
1770
1771 unsigned long
1772 scm_std_usleep (unsigned long usecs)
1773 {
1774 struct timeval tv;
1775 tv.tv_usec = usecs % 1000000;
1776 tv.tv_sec = usecs / 1000000;
1777 scm_std_select (0, NULL, NULL, NULL, &tv);
1778 return tv.tv_sec * 1000000 + tv.tv_usec;
1779 }
1780
1781 unsigned int
1782 scm_std_sleep (unsigned int secs)
1783 {
1784 struct timeval tv;
1785 tv.tv_usec = 0;
1786 tv.tv_sec = secs;
1787 scm_std_select (0, NULL, NULL, NULL, &tv);
1788 return tv.tv_sec;
1789 }
1790
1791 /*** Misc */
1792
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1801
1802 static SCM
1803 scm_c_make_list (size_t n, SCM fill)
1804 {
1805 SCM res = SCM_EOL;
1806 while (n-- > 0)
1807 res = scm_cons (fill, res);
1808 return res;
1809 }
1810
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate the list before taking the lock, then fill it in
     place.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is hidden from users.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  /* Truncate any unused pre-allocated cells.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1839
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1848
/* C-level predicate behind thread-exited?.  Reuses the Scheme
   procedure's FUNC_NAME so validation errors are reported against
   "thread-exited?".  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1859
/* Condition variable used to wake sleeping threads.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Nonzero once scm_init_threads has completed.  */
static int threads_initialized_p = 0;


/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
int scm_i_critical_section_level = 0;

/* Fallback recursive mutex used by scm_dynwind_critical_section when
   it is passed #f.  */
static SCM dynwind_critical_section_mutex;
1870
/* Enter a critical section for the current dynwind context: lock
   MUTEX (or the shared fallback when MUTEX is #f) and block asyncs,
   both undone when the context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
1879
/*** Initialization */

scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Mutex attribute selecting PTHREAD_MUTEX_RECURSIVE; set up in
   scm_threads_prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1887
/* Earliest thread-system initialization, run before the rest of Guile
   is set up.  BASE is the bottom of the main thread's C stack.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex must be recursive because critical
     sections can nest (scm_i_critical_section_level counts them).
     NOTE(review): scm_i_pthread_mutexattr_recursive is referenced
     here unconditionally but only defined under
     SCM_USE_PTHREAD_THREADS -- presumably the null-threads headers
     stub this out; confirm.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 (base);
}
1904
/* Smob type tags for threads, mutexes and condition variables;
   assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
1908
/* Register the thread/mutex/condvar smob types and finish setting up
   the main thread.  Runs after scm_threads_prehistory.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free); /* XXX: Could be removed */

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
1932
/* Capture the current dynamic state as the default for new threads,
   protecting it from GC with scm_permanent_object.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}
1939
/* Register all Scheme procedures defined in this file (the .x file is
   generated from the SCM_DEFINE forms above).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
1945
1946 /*
1947 Local Variables:
1948 c-file-style: "gnu"
1949 End:
1950 */