Small fixes. Gets to the REPL and `abort ()'s soon after.
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19 \f
20
21 #define _GNU_SOURCE
22
23 #include "libguile/_scm.h"
24
25 #if HAVE_UNISTD_H
26 #include <unistd.h>
27 #endif
28 #include <stdio.h>
29 #include <assert.h>
30 #if HAVE_SYS_TIME_H
31 #include <sys/time.h>
32 #endif
33
34 #include "libguile/validate.h"
35 #include "libguile/root.h"
36 #include "libguile/eval.h"
37 #include "libguile/async.h"
38 #include "libguile/ports.h"
39 #include "libguile/threads.h"
40 #include "libguile/dynwind.h"
41 #include "libguile/iselect.h"
42 #include "libguile/fluids.h"
43 #include "libguile/continuations.h"
44 #include "libguile/gc.h"
45 #include "libguile/init.h"
46
47 #ifdef __MINGW32__
48 #ifndef ETIMEDOUT
49 # define ETIMEDOUT WSAETIMEDOUT
50 #endif
51 # include <fcntl.h>
52 # include <process.h>
53 # define pipe(fd) _pipe (fd, 256, O_BINARY)
54 #endif /* __MINGW32__ */
55
56 /*** Queues */
57
58 /* Make an empty queue data structure.
59 */
60 static SCM
61 make_queue ()
62 {
63 return scm_cons (SCM_EOL, SCM_EOL);
64 }
65
66 /* Put T at the back of Q and return a handle that can be used with
67 remqueue to remove T from Q again.
68 */
69 static SCM
70 enqueue (SCM q, SCM t)
71 {
72 SCM c = scm_cons (t, SCM_EOL);
73 if (scm_is_null (SCM_CDR (q)))
74 SCM_SETCDR (q, c);
75 else
76 SCM_SETCDR (SCM_CAR (q), c);
77 SCM_SETCAR (q, c);
78 return c;
79 }
80
81 /* Remove the element that the handle C refers to from the queue Q. C
82 must have been returned from a call to enqueue. The return value
83 is zero when the element referred to by C has already been removed.
84 Otherwise, 1 is returned.
85 */
86 static int
87 remqueue (SCM q, SCM c)
88 {
89 SCM p, prev = q;
90 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
91 {
92 if (scm_is_eq (p, c))
93 {
94 if (scm_is_eq (c, SCM_CAR (q)))
95 SCM_SETCAR (q, SCM_CDR (c));
96 SCM_SETCDR (prev, SCM_CDR (c));
97 return 1;
98 }
99 prev = p;
100 }
101 return 0;
102 }
103
104 /* Remove the front-most element from the queue Q and return it.
105 Return SCM_BOOL_F when Q is empty.
106 */
107 static SCM
108 dequeue (SCM q)
109 {
110 SCM c = SCM_CDR (q);
111 if (scm_is_null (c))
112 return SCM_BOOL_F;
113 else
114 {
115 SCM_SETCDR (q, SCM_CDR (c));
116 if (scm_is_null (SCM_CDR (q)))
117 SCM_SETCAR (q, SCM_EOL);
118 return SCM_CAR (c);
119 }
120 }
121
122 /*** Thread smob routines */
123
/* GC mark function for the thread smob: mark every SCM field of the
   scm_i_thread structure, returning the last one so the caller can
   mark it tail-recursively.  */
static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  return t->dynamic_state;
}
135
136 static int
137 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
138 {
139 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
140 scm_puts ("#<thread ", port);
141 scm_uintprint ((size_t)t->pthread, 10, port);
142 scm_puts (" (", port);
143 scm_uintprint ((scm_t_bits)t, 16, port);
144 scm_puts (")>", port);
145 return 1;
146 }
147
/* GC free function for the thread smob.  A thread object must only
   be collected after the underlying thread has finished (do_thread_exit
   sets `exited'), hence the assert.  */
static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}
156
157 /*** Blocking on queues. */
158
159 /* See also scm_i_queue_async_cell for how such a block is
160 interrputed.
161 */
162
/* Put the current thread on QUEUE and go to sleep, waiting for it to
   be woken up by a call to 'unblock_from_queue', or to be
   interrupted.  Upon return of this function, the current thread is
   no longer on QUEUE, even when the sleep has been interrupted.

   The QUEUE data structure is assumed to be protected by MUTEX and
   the caller of block_self must hold MUTEX.  It will be atomically
   unlocked while sleeping, just as with scm_i_pthread_cond_wait.

   SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
   as MUTEX is needed.

   When WAITTIME is not NULL, the sleep will be aborted at that time.

   The return value of block_self is an errno value.  It will be zero
   when the sleep has been successfully completed by a call to
   unblock_from_queue, EINTR when it has been interrupted by the
   delivery of a system async, and ETIMEDOUT when the timeout has
   expired.

   The system asyncs themselves are not executed by block_self.
*/
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* An async was already pending when we tried to arm the sleep:
     report the interruption without ever blocking.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Asyncs must not run while we are on the queue; they are
	 delivered by the caller after we return.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
216
217 /* Wake up the first thread on QUEUE, if any. The caller must hold
218 the mutex that protects QUEUE. The awoken thread is returned, or
219 #f when the queue was empty.
220 */
221 static SCM
222 unblock_from_queue (SCM queue)
223 {
224 SCM thread = dequeue (queue);
225 if (scm_is_true (thread))
226 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
227 return thread;
228 }
229
230 /* Getting into and out of guile mode.
231 */
232
233 /* Ken Raeburn observes that the implementation of suspend and resume
234 (and the things that build on top of them) are very likely not
235 correct (see below). We will need fix this eventually, and that's
236 why scm_leave_guile/scm_enter_guile are not exported in the API.
237
238 Ken writes:
239
240 Consider this sequence:
241
242 Function foo, called in Guile mode, calls suspend (maybe indirectly
243 through scm_leave_guile), which does this:
244
245 // record top of stack for the GC
246 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
247 var 't'
248 // save registers.
249 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
250 setjmp (t->regs); // here's most of the magic
251
252 ... and returns.
253
254 Function foo has a SCM value X, a handle on a non-immediate object, in
255 a caller-saved register R, and it's the only reference to the object
256 currently.
257
258 The compiler wants to use R in suspend, so it pushes the current
259 value, X, into a stack slot which will be reloaded on exit from
260 suspend; then it loads stuff into R and goes about its business. The
261 setjmp call saves (some of) the current registers, including R, which
262 no longer contains X. (This isn't a problem for a normal
263 setjmp/longjmp situation, where longjmp would be called before
264 setjmp's caller returns; the old value for X would be loaded back from
265 the stack after the longjmp, before the function returned.)
266
267 So, suspend returns, loading X back into R (and invalidating the jump
268 buffer) in the process. The caller foo then goes off and calls a
269 bunch of other functions out of Guile mode, occasionally storing X on
270 the stack again, but, say, much deeper on the stack than suspend's
271 stack frame went, and the stack slot where suspend had written X has
272 long since been overwritten with other values.
273
274 Okay, nothing actively broken so far. Now, let garbage collection
275 run, triggered by another thread.
276
277 The thread calling foo is out of Guile mode at the time, so the
278 garbage collector just scans a range of stack addresses. Too bad that
279 X isn't stored there. So the pointed-to storage goes onto the free
280 list, and I think you can see where things go from there.
281
282 Is there anything I'm missing that'll prevent this scenario from
283 happening? I mean, aside from, "well, suspend and scm_leave_guile
284 don't have many local variables, so they probably won't need to save
285 any registers on most systems, so we hope everything will wind up in
286 the jump buffer and we'll just get away with it"?
287
288 (And, going the other direction, if scm_leave_guile and suspend push
289 the stack pointer over onto a new page, and foo doesn't make further
290 function calls and thus the stack pointer no longer includes that
291 page, are we guaranteed that the kernel cannot release the now-unused
292 stack page that contains the top-of-stack pointer we just saved? I
293 don't know if any OS actually does that. If it does, we could get
294 faults in garbage collection.)
295
296 I don't think scm_without_guile has to have this problem, as it gets
297 more control over the stack handling -- but it should call setjmp
298 itself. I'd probably try something like:
299
300 // record top of stack for the GC
301 t->top = SCM_STACK_PTR (&t);
302 // save registers.
303 SCM_FLUSH_REGISTER_WINDOWS;
304 setjmp (t->regs);
305 res = func(data);
306 scm_enter_guile (t);
307
308 ... though even that's making some assumptions about the stack
309 ordering of local variables versus caller-saved registers.
310
311 For something like scm_leave_guile to work, I don't think it can just
312 rely on invalidated jump buffers. A valid jump buffer, and a handle
313 on the stack state at the point when the jump buffer was initialized,
314 together, would work fine, but I think then we're talking about macros
315 invoking setjmp in the caller's stack frame, and requiring that the
316 caller of scm_leave_guile also call scm_enter_guile before returning,
317 kind of like pthread_cleanup_push/pop calls that have to be paired up
318 in a function. (In fact, the pthread ones have to be paired up
319 syntactically, as if they might expand to a compound statement
320 incorporating the user's code, and invoking a compiler's
321 exception-handling primitives. Which might be something to think
322 about for cases where Guile is used with C++ exceptions or
323 pthread_cancel.)
324 */
325
326 scm_i_pthread_key_t scm_i_thread_key;
327
328 static void
329 resume (scm_i_thread *t)
330 {
331 t->top = NULL;
332 if (t->clear_freelists_p)
333 {
334 *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
335 *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
336 t->clear_freelists_p = 0;
337 }
338 }
339
340 typedef void* scm_t_guile_ticket;
341
342 static void
343 scm_enter_guile (scm_t_guile_ticket ticket)
344 {
345 scm_i_thread *t = (scm_i_thread *)ticket;
346 if (t)
347 {
348 scm_i_pthread_mutex_lock (&t->heap_mutex);
349 resume (t);
350 }
351 }
352
/* Prepare the current thread to be scanned conservatively by the GC
   while it is out of guile mode: record the stack top and dump the
   registers into t->regs via setjmp.  See the long comment above for
   why this scheme is believed to be unreliable.  The exact statement
   order here is significant.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);
  return t;
}
365
366 static scm_t_guile_ticket
367 scm_leave_guile ()
368 {
369 scm_i_thread *t = suspend ();
370 scm_i_pthread_mutex_unlock (&t->heap_mutex);
371 return (scm_t_guile_ticket) t;
372 }
373
374 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
375 static scm_i_thread *all_threads = NULL;
376 static int thread_count;
377
378 static SCM scm_i_default_dynamic_state;
379
/* Perform first stage of thread initialisation, in non-guile mode.
   BASE is the stack base recorded for conservative GC scanning.
   Sets up the scm_i_thread structure and links it into the global
   thread list; SCM-level objects are created later by
   guilify_self_2.  */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
  scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");

  t->pthread = scm_i_pthread_self ();
  t->handle = SCM_BOOL_F;
  t->result = SCM_BOOL_F;
  t->join_queue = SCM_EOL;
  t->dynamic_state = SCM_BOOL_F;
  t->dynwinds = SCM_EOL;
  t->active_asyncs = SCM_EOL;
  /* Keep asyncs blocked until guilify_self_2 has finished building
     the SCM-level state.  */
  t->block_asyncs = 1;
  t->pending_asyncs = 1;
  t->last_debug_frame = NULL;
  t->base = base;
  t->continuation_root = SCM_EOL;
  t->continuation_base = base;
  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
  t->sleep_mutex = NULL;
  t->sleep_object = SCM_BOOL_F;
  t->sleep_fd = -1;
  /* XXX - check for errors. */
  pipe (t->sleep_pipe);
  scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
  t->clear_freelists_p = 0;
  t->gc_running_p = 0;
  t->exited = 0;

  /* Give this thread its own cell freelists.  */
  t->freelist = SCM_EOL;
  t->freelist2 = SCM_EOL;
  SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
  SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);

  scm_i_pthread_setspecific (scm_i_thread_key, t);

  /* Enter guile mode: from here on we hold our heap mutex.  */
  scm_i_pthread_mutex_lock (&t->heap_mutex);

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  t->next_thread = all_threads;
  all_threads = t;
  thread_count++;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
426
/* Perform second stage of thread initialisation, in guile mode.
   Creates the SCM-level objects for the current thread: its smob
   handle, continuation root, dynamic state (inheriting from PARENT
   when that is a dynamic state, otherwise a fresh initial one) and
   join queue.  Finally re-enables asyncs.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);

  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Initialisation is complete; allow asyncs to run.  */
  t->block_asyncs = 0;
}
447
/* Perform thread tear-down, in guile mode.  Marks the thread as
   exited, closes its sleep pipe and wakes every thread blocked in
   join-thread on it.  V is the scm_i_thread pointer.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v;

  /* thread_admin_mutex also protects the join queue here.  */
  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Wake all joiners, one at a time, until the queue is empty.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return NULL;
}
466
/* pthread key destructor, run when a guilified thread terminates.
   V is the scm_i_thread pointer that was stored in scm_i_thread_key.  */
static void
on_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v, **tp;

  /* The key has already been cleared by pthreads; restore it so that
     guile-mode code below can find the current thread.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.
  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.
  */
  scm_leave_guile ();
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
496
497 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
498
/* One-time initialiser (via init_thread_key_once) for the thread-
   specific key; on_thread_exit runs as its destructor when a
   guilified thread terminates.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
504
/* Perform any initializations necessary to bring the current thread
   into guile mode, initializing Guile itself, if necessary.

   BASE is the stack base to use with GC.

   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.

   Return zero when the thread was in guile mode already; otherwise
   return 1.
*/

static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.  (This also guilifies us; no need for
	     guilify_self_1/2 here.)
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 XXX - base might be lower than when this thread was first
	 guilified.
       */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
567
568 #if SCM_USE_PTHREAD_THREADS
569 #ifdef HAVE_PTHREAD_ATTR_GETSTACK
570
571 #define HAVE_GET_THREAD_STACK_BASE
572
573 static SCM_STACKITEM *
574 get_thread_stack_base ()
575 {
576 pthread_attr_t attr;
577 void *start, *end;
578 size_t size;
579
580 pthread_getattr_np (pthread_self (), &attr);
581 pthread_attr_getstack (&attr, &start, &size);
582 end = (char *)start + size;
583
584 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
585 for the main thread, but we can use scm_get_stack_base in that
586 case.
587 */
588
589 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
590 if ((void *)&attr < start || (void *)&attr >= end)
591 return scm_get_stack_base ();
592 else
593 #endif
594 {
595 #if SCM_STACK_GROWS_UP
596 return start;
597 #else
598 return end;
599 #endif
600 }
601 }
602
603 #endif /* HAVE_PTHREAD_ATTR_GETSTACK */
604
605 #else /* !SCM_USE_PTHREAD_THREADS */
606
607 #define HAVE_GET_THREAD_STACK_BASE
608
609 static SCM_STACKITEM *
610 get_thread_stack_base ()
611 {
612 return scm_get_stack_base ();
613 }
614
615 #endif /* !SCM_USE_PTHREAD_THREADS */
616
617 #ifdef HAVE_GET_THREAD_STACK_BASE
618
619 void
620 scm_init_guile ()
621 {
622 scm_i_init_thread_for_guile (get_thread_stack_base (),
623 scm_i_default_dynamic_state);
624 }
625
626 #endif
627
/* Call FUNC (DATA) in guile mode, entering it first if necessary,
   with the default dynamic state as parent.  Returns FUNC's result.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
634
/* Call FUNC (DATA) in guile mode with PARENT as the parent dynamic
   state, behind a continuation barrier.  A local stack item serves as
   the GC stack base.  Guile mode is left again afterwards only when
   it was actually entered here.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data,
			     SCM parent)
{
  void *res;
  int really_entered;
  SCM_STACKITEM base_item;
  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  res = scm_c_with_continuation_barrier (func, data);
  if (really_entered)
    scm_leave_guile ();
  return res;
}
648
649 void *
650 scm_without_guile (void *(*func)(void *), void *data)
651 {
652 void *res;
653 scm_t_guile_ticket t;
654 t = scm_leave_guile ();
655 res = func (data);
656 scm_enter_guile (t);
657 return res;
658 }
659
660 /*** Thread creation */
661
/* Arguments handed from call-with-new-thread to the new thread.  The
   struct lives on the creating thread's stack; mutex/cond let the
   child report its thread object back before the parent returns.  */
typedef struct {
  SCM parent;      /* dynamic state to inherit */
  SCM thunk;       /* thunk to run in the new thread */
  SCM handler;     /* catch handler, or SCM_UNDEFINED */
  SCM thread;      /* out: the child's thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
670
/* Body of a new Scheme thread, run in guile mode.  Publishes the
   thread object to the creator, then runs the thunk (under catch
   when a handler was given) and stores the result as the thread's
   exit value.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy thunk and handler out: DATA lives on the creator's stack
     and is dead once we signal the condition below.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the creator and wake it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
692
/* pthread start routine for call-with-new-thread.  Detaches the
   thread (joining is done at the Scheme level via join-thread, not
   via pthread_join) and enters guile mode to run really_launch.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
701
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  /* DATA is shared with the child only until it signals data.cond,
     so stack allocation is safe here.  */
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the child has published its thread object.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
745
/* C-level analogue of launch_data, used by scm_spawn_thread: carries
   a C body/handler pair instead of Scheme procedures.  */
typedef struct {
  SCM parent;                 /* dynamic state to inherit */
  scm_t_catch_body body;      /* C function to run */
  void *body_data;
  scm_t_catch_handler handler;  /* may be NULL for no catch */
  void *handler_data;
  SCM thread;                 /* out: the child's thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
756
/* Body of a thread created by scm_spawn_thread, run in guile mode.
   Mirrors really_launch, but runs a C body, optionally under
   scm_internal_catch.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy everything out: DATA is on the creator's stack and goes
     away once we signal the condition below.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Hand our thread object to the creator and wake it.  */
  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
781
/* pthread start routine for scm_spawn_thread; detaches (Scheme-level
   join-thread is used instead of pthread_join) and enters guile mode
   to run really_spawn.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
790
/* Create a new thread running the C function BODY (BODY_DATA),
   optionally under a catch-all with HANDLER (HANDLER_DATA) when
   HANDLER is non-NULL.  Blocks until the child has started and
   returns its thread object.  Signals a system-error on
   pthread-creation failure.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  /* DATA is shared with the child only until it signals data.cond,
     so stack allocation is safe here.  */
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait until the child has published its thread object.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
821
/* Scheme binding for sched_yield-style rescheduling; the boolean
   result is whatever scm_i_sched_yield reports.  */
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
830
SCM_DEFINE (scm_join_thread, "join-thread", 1, 0, 0,
	    (SCM thread),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread
{
  scm_i_thread *t;
  SCM res;

  SCM_VALIDATE_THREAD (1, thread);
  /* Joining ourselves would deadlock forever.  */
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("can not join the current thread", SCM_EOL);

  /* thread_admin_mutex protects both `exited' and the join queue.  */
  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t = SCM_I_THREAD_DATA (thread);
  if (!t->exited)
    {
      while (1)
	{
	  /* Sleep on the join queue; do_thread_exit wakes us.  An
	     async may also interrupt the sleep, hence the re-check
	     and SCM_TICK with the mutex dropped.  */
	  block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
	  if (t->exited)
	    break;
	  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
	}
    }
  res = t->result;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return res;
}
#undef FUNC_NAME
865
866 /*** Fat mutexes */
867
868 /* We implement our own mutex type since we want them to be 'fair', we
869 want to do fancy things while waiting for them (like running
870 asyncs) and we might want to add things that are nice for
871 debugging.
872 */
873
/* A 'fair' mutex implemented on top of a plain pthread mutex: the
   pthread LOCK only guards this struct, while ownership is tracked
   explicitly so waiters can queue up and run asyncs while blocked.  */
typedef struct {
  scm_i_pthread_mutex_t lock;  /* protects the fields below */
  SCM owner;                   /* owning thread, or #f when free */
  int level; /* how much the owner owns us.
		< 0 for non-recursive mutexes */
  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
884
/* GC mark function for the mutex smob: mark the owner, return the
   waiter queue for the caller to mark tail-recursively.  */
static SCM
fat_mutex_mark (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}
892
/* GC free function for the mutex smob: destroy the underlying
   pthread mutex and release the struct.  */
static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}
901
902 static int
903 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
904 {
905 fat_mutex *m = SCM_MUTEX_DATA (mx);
906 scm_puts ("#<mutex ", port);
907 scm_uintprint ((scm_t_bits)m, 16, port);
908 scm_puts (">", port);
909 return 1;
910 }
911
/* Allocate and initialise a fat mutex smob.  RECURSIVE selects a
   recursive mutex (level counts from 0) versus a plain one (level
   stays -1).  */
static SCM
make_fat_mutex (int recursive)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = recursive? 0 : -1;
  /* `waiting' is set to a safe dummy before SCM_NEWSMOB and only
     filled with the real queue afterwards: make_queue can trigger a
     GC, which must not see an uninitialised field.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
927
/* Scheme constructor for a standard (non-recursive) mutex.  */
SCM_DEFINE (scm_make_mutex, "make-mutex", 0, 0, 0,
	    (void),
	    "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex
{
  return make_fat_mutex (0);
}
#undef FUNC_NAME
936
/* Scheme constructor for a recursive mutex.  */
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1);
}
#undef FUNC_NAME
945
/* Lock MUTEX for the current thread, blocking (and allowing asyncs
   to run via SCM_TICK) until ownership is obtained.  Returns NULL on
   success or a static error-message string (re-locking a
   non-recursive mutex one already owns).  */
static char *
fat_mutex_lock (SCM mutex)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  SCM thread = scm_current_thread ();
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      /* Re-entry: allowed only for recursive mutexes (level >= 0).  */
      if (m->level >= 0)
	m->level++;
      else
	msg = "mutex already locked by current thread";
    }
  else
    {
      while (1)
	{
	  /* Sleep on the waiter queue.  fat_mutex_unlock transfers
	     ownership to the thread it wakes, hence the owner check
	     after each wakeup; other wakeups re-tick and retry.  */
	  block_self (m->waiting, mutex, &m->lock, NULL);
	  if (scm_is_eq (m->owner, thread))
	    break;
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}
978
SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
	    (SCM mx),
	    "Lock @var{mutex}. If the mutex is already locked, the calling thread "
	    "blocks until the mutex becomes available. The function returns when "
	    "the calling thread owns the lock on @var{mutex}.  Locking a mutex that "
	    "a thread already owns will succeed right away and will not block the "
	    "thread.  That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex
{
  char *msg;

  SCM_VALIDATE_MUTEX (1, mx);
  msg = fat_mutex_lock (mx);
  /* A non-NULL msg means a usage error (e.g. re-locking a
     non-recursive mutex one already owns).  */
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME
997
/* Lock MUTEX for the duration of the current dynwind context: it is
   locked on (re)entry and unlocked on unwind, including non-local
   exits.  The unwind handler is registered first so an abort between
   the two registrations cannot leave the mutex locked.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1006
/* Non-blocking lock attempt on M.  *RESP is set to 1 when the lock
   was obtained (or recursively re-entered), 0 when another thread
   owns it.  Returns NULL or a static error message, as
   fat_mutex_lock does.  */
static char *
fat_mutex_trylock (fat_mutex *m, int *resp)
{
  char *msg = NULL;
  SCM thread = scm_current_thread ();

  *resp = 1;
  scm_i_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      /* Re-entry: allowed only for recursive mutexes (level >= 0).  */
      if (m->level >= 0)
	m->level++;
      else
	msg = "mutex already locked by current thread";
    }
  else
    *resp = 0;
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}
1029
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
	    (SCM mutex),
	    "Try to lock @var{mutex}. If the mutex is already locked by someone "
	    "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  char *msg;
  int res;

  SCM_VALIDATE_MUTEX (1, mutex);

  msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
  /* A non-NULL msg means a usage error, not mere contention.  */
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return scm_from_bool (res);
}
#undef FUNC_NAME
1047
/* Release one level of M for the current thread.  When the outermost
   level is released, ownership passes directly to the first waiter
   (or #f when none), which is what makes these mutexes fair.
   Returns NULL on success or a static error message when the caller
   does not own M.  */
static char *
fat_mutex_unlock (fat_mutex *m)
{
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (!scm_is_eq (m->owner, scm_current_thread ()))
    {
      if (scm_is_false (m->owner))
	msg = "mutex not locked";
      else
	msg = "mutex not locked by current thread";
    }
  else if (m->level > 0)
    /* Recursive mutex locked more than once: just count down.  */
    m->level--;
  else
    /* Hand the mutex to the next waiter, or mark it free (#f).  */
    m->owner = unblock_from_queue (m->waiting);
  scm_i_pthread_mutex_unlock (&m->lock);

  return msg;
}
1069
SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
	    (SCM mx),
	    "Unlocks @var{mutex} if the calling thread owns the lock on "
	    "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
	    "thread results in undefined behaviour.  Once a mutex has been unlocked, "
	    "one thread blocked on @var{mutex} is awakened and grabs the mutex "
	    "lock.  Every call to @code{lock-mutex} by this thread must be matched "
	    "with a call to @code{unlock-mutex}.  Only the last call to "
	    "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex
{
  char *msg;
  SCM_VALIDATE_MUTEX (1, mx);

  msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
  /* Despite the docstring's "undefined behaviour", unlocking a mutex
     we do not own is reported as an error here.  */
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1090
/* NOTE(review): the two accessors below are compiled out with #if 0
   — presumably not yet part of the public API; confirm before
   enabling.  */
#if 0

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
	    (SCM mx),
	    "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM_VALIDATE_MUTEX (1, mx);
  return (SCM_MUTEX_DATA(mx))->owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of a recursive mutex, or -1\n"
	    "for a standard mutex.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

#endif
1115
/*** Fat condition variables */

/* A "fat" condition variable: a plain pthread mutex protecting a queue
   of Scheme threads blocked on the condvar.  Waiters are parked via
   block_self and woken via unblock_from_queue.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects `waiting' */
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

/* Type test and data accessor for condition-variable SMOBs.  */
#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1125
1126 static SCM
1127 fat_cond_mark (SCM cv)
1128 {
1129 fat_cond *c = SCM_CONDVAR_DATA (cv);
1130 return c->waiting;
1131 }
1132
1133 static size_t
1134 fat_cond_free (SCM mx)
1135 {
1136 fat_cond *c = SCM_CONDVAR_DATA (mx);
1137 scm_i_pthread_mutex_destroy (&c->lock);
1138 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1139 return 0;
1140 }
1141
1142 static int
1143 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1144 {
1145 fat_cond *c = SCM_CONDVAR_DATA (cv);
1146 scm_puts ("#<condition-variable ", port);
1147 scm_uintprint ((scm_t_bits)c, 16, port);
1148 scm_puts (">", port);
1149 return 1;
1150 }
1151
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  /* Give `waiting' a GC-safe value *before* creating the SMOB: once
     SCM_NEWSMOB runs, fat_cond_mark can see this field, and the
     make_queue call below allocates (and so may trigger GC).  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1168
/* Atomically release MUTEX and block on COND, re-acquiring MUTEX before
   returning.  WAITTIME is an absolute timeout or NULL for no timeout.
   Returns 1 when signalled, 0 on timeout; raises a Scheme error when
   the mutex is not held by the caller or on unexpected OS errors.

   Locking order is delicate: c->lock is taken *before* the mutex is
   released, so a signaller cannot slip in between the unlock and the
   enqueue (no lost wakeups).  */
static int
fat_cond_timedwait (SCM cond, SCM mutex,
		    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  fat_cond *c = SCM_CONDVAR_DATA (cond);
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  const char *msg;
  int err = 0;

  while (1)
    {
      scm_i_scm_pthread_mutex_lock (&c->lock);
      msg = fat_mutex_unlock (m);
      /* Asyncs stay blocked across the park/re-lock so they run only
	 after the mutex is held again (via scm_async_click below).  */
      t->block_asyncs++;
      if (msg == NULL)
	{
	  err = block_self (c->waiting, cond, &c->lock, waittime);
	  scm_i_pthread_mutex_unlock (&c->lock);
	  fat_mutex_lock (mutex);
	}
      else
	scm_i_pthread_mutex_unlock (&c->lock);
      t->block_asyncs--;
      scm_async_click ();

      /* The caller did not own the mutex: report it as an error.  */
      if (msg)
	scm_misc_error (NULL, msg, SCM_EOL);

      scm_remember_upto_here_2 (cond, mutex);

      if (err == 0)
	return 1;               /* signalled */
      if (err == ETIMEDOUT)
	return 0;               /* timed out */
      if (err != EINTR)
	{
	  errno = err;
	  scm_syserror (NULL);
	}
      /* EINTR: loop and wait again.  */
    }
}
1211
1212 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1213 (SCM cv, SCM mx, SCM t),
1214 "Wait until @var{cond-var} has been signalled. While waiting, "
1215 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1216 "is locked again when this function returns. When @var{time} is given, "
1217 "it specifies a point in time where the waiting should be aborted. It "
1218 "can be either a integer as returned by @code{current-time} or a pair "
1219 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1220 "mutex is locked and @code{#f} is returned. When the condition "
1221 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1222 "is returned. ")
1223 #define FUNC_NAME s_scm_timed_wait_condition_variable
1224 {
1225 scm_t_timespec waittime, *waitptr = NULL;
1226
1227 SCM_VALIDATE_CONDVAR (1, cv);
1228 SCM_VALIDATE_MUTEX (2, mx);
1229
1230 if (!SCM_UNBNDP (t))
1231 {
1232 if (scm_is_pair (t))
1233 {
1234 waittime.tv_sec = scm_to_ulong (SCM_CAR (t));
1235 waittime.tv_nsec = scm_to_ulong (SCM_CAR (t)) * 1000;
1236 }
1237 else
1238 {
1239 waittime.tv_sec = scm_to_ulong (t);
1240 waittime.tv_nsec = 0;
1241 }
1242 waitptr = &waittime;
1243 }
1244
1245 return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
1246 }
1247 #undef FUNC_NAME
1248
/* Wake at most one thread blocked on C.  unblock_from_queue returns #f
   (ignored here) when no thread was waiting.  */
static void
fat_cond_signal (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  unblock_from_queue (c->waiting);
  scm_i_pthread_mutex_unlock (&c->lock);
}
1256
1257 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1258 (SCM cv),
1259 "Wake up one thread that is waiting for @var{cv}")
1260 #define FUNC_NAME s_scm_signal_condition_variable
1261 {
1262 SCM_VALIDATE_CONDVAR (1, cv);
1263 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1264 return SCM_BOOL_T;
1265 }
1266 #undef FUNC_NAME
1267
1268 static void
1269 fat_cond_broadcast (fat_cond *c)
1270 {
1271 scm_i_scm_pthread_mutex_lock (&c->lock);
1272 while (scm_is_true (unblock_from_queue (c->waiting)))
1273 ;
1274 scm_i_pthread_mutex_unlock (&c->lock);
1275 }
1276
1277 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1278 (SCM cv),
1279 "Wake up all threads that are waiting for @var{cv}. ")
1280 #define FUNC_NAME s_scm_broadcast_condition_variable
1281 {
1282 SCM_VALIDATE_CONDVAR (1, cv);
1283 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1284 return SCM_BOOL_T;
1285 }
1286 #undef FUNC_NAME
1287
/*** Marking stacks */

/* XXX - what to do with this? Do we need to handle this for blocked
   threads as well?
*/
#ifdef __ia64__
/* ia64 keeps a second, upward-growing "register backing store" stack
   in addition to the normal memory stack; both may hold SCM values and
   must be marked.  NOTE(review): this marks only the *current*
   thread's backing store (via getcontext) — see the XXX above.  */
# define SCM_MARK_BACKING_STORE() do { \
    ucontext_t ctx; \
    SCM_STACKITEM * top, * bot; \
    getcontext (&ctx); \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM))); \
    bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
    top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1307
/* GC hook: conservatively mark every thread's C stack, saved register
   set and handle.  All threads must already be suspended (t->top set
   by the suspend machinery) when this runs.  */
void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      scm_gc_mark (t->handle);

      /* Mark the live region between stack base and current top; the
	 direction depends on how the stack grows on this platform.  */
#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      /* Registers saved at suspension time may also hold SCM values.  */
      scm_mark_locations ((SCM_STACKITEM *) t->regs,
			  ((size_t) sizeof(t->regs)
			   / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}
1332
1333 /*** Select */
1334
/* A select(2) replacement that remains interruptible by Guile asyncs:
   the calling thread's sleep pipe is added to the read set, so another
   thread can wake this one by writing a byte to it.  Runs the actual
   select outside guile mode.  Returns like select(); a wakeup with no
   other ready fd is reported as -1/EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  /* We always need a read set to put the wakeup fd in.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register the sleep pipe write end; retry after servicing pending
     asyncs if setup says we would have missed a wakeup.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  /* Save errno before re-entering guile, which may clobber it.  */
  eno = errno;
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      char dummy;
      /* Drain the wakeup byte.  NOTE(review): read's return value is
	 ignored here — TODO confirm a short/failed read is harmless.  */
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  /* Only the wakeup fired: present it to the caller as an
	     interrupted select.  */
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1383
1384 /* Convenience API for blocking while in guile mode. */
1385
1386 #if SCM_USE_PTHREAD_THREADS
1387
1388 int
1389 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1390 {
1391 scm_t_guile_ticket t = scm_leave_guile ();
1392 int res = scm_i_pthread_mutex_lock (mutex);
1393 scm_enter_guile (t);
1394 return res;
1395 }
1396
1397 static void
1398 do_unlock (void *data)
1399 {
1400 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1401 }
1402
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context exits (normally or nonlocally).  The lock must be
   taken before the handler is registered, hence the fixed order.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1409
1410 int
1411 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1412 {
1413 scm_t_guile_ticket t = scm_leave_guile ();
1414 int res = scm_i_pthread_cond_wait (cond, mutex);
1415 scm_enter_guile (t);
1416 return res;
1417 }
1418
1419 int
1420 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1421 scm_i_pthread_mutex_t *mutex,
1422 const scm_t_timespec *wt)
1423 {
1424 scm_t_guile_ticket t = scm_leave_guile ();
1425 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1426 scm_enter_guile (t);
1427 return res;
1428 }
1429
1430 #endif
1431
1432 unsigned long
1433 scm_std_usleep (unsigned long usecs)
1434 {
1435 struct timeval tv;
1436 tv.tv_usec = usecs % 1000000;
1437 tv.tv_sec = usecs / 1000000;
1438 scm_std_select (0, NULL, NULL, NULL, &tv);
1439 return tv.tv_sec * 1000000 + tv.tv_usec;
1440 }
1441
1442 unsigned int
1443 scm_std_sleep (unsigned int secs)
1444 {
1445 struct timeval tv;
1446 tv.tv_usec = 0;
1447 tv.tv_sec = secs;
1448 scm_std_select (0, NULL, NULL, NULL, &tv);
1449 return tv.tv_sec;
1450 }
1451
1452 /*** Misc */
1453
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The thread SMOB for the calling thread, kept in thread-local data.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1462
1463 static SCM
1464 scm_c_make_list (size_t n, SCM fill)
1465 {
1466 SCM res = SCM_EOL;
1467 while (n-- > 0)
1468 res = scm_cons (fill, res);
1469 return res;
1470 }
1471
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  /* So: pre-allocate a list of thread_count cells, then fill it in
     under the lock.  The count may have changed in between; extra
     cells are trimmed off via *l = SCM_EOL below.  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      SCM_SETCAR (*l, t->handle);
      l = SCM_CDRLOC (*l);
      n--;
    }
  /* Terminate the list where we stopped filling it.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1497
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  /* Thin Scheme-level wrapper around scm_c_thread_exited_p.  */
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1506
1507 int
1508 scm_c_thread_exited_p (SCM thread)
1509 #define FUNC_NAME s_scm_thread_exited_p
1510 {
1511 scm_i_thread *t;
1512 SCM_VALIDATE_THREAD (1, thread);
1513 t = SCM_I_THREAD_DATA (thread);
1514 return t->exited;
1515 }
1516 #undef FUNC_NAME
1517
/* Condition broadcast by scm_i_thread_wake_up to release threads
   parked in scm_i_thread_sleep_for_gc.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Nonzero while threads are being asked to stop for GC.  */
int scm_i_thread_go_to_sleep;
/* Set by scm_init_threads; gates the put-to-sleep/wake-up machinery.  */
static int threads_initialized_p = 0;
1521
/* Stop the world for GC: take thread_admin_mutex and every thread's
   heap_mutex.  NOTE: returns with all of those locks still held; the
   matching scm_i_thread_wake_up releases them.  No-op before thread
   initialization.  */
void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      /* Acquiring a thread's heap_mutex blocks until that thread has
	 parked itself, so this loop waits for everyone.  */
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}
1540
1541 void
1542 scm_i_thread_invalidate_freelists ()
1543 {
1544 /* thread_admin_mutex is already locked. */
1545
1546 scm_i_thread *t;
1547 for (t = all_threads; t; t = t->next_thread)
1548 if (t != SCM_I_CURRENT_THREAD)
1549 t->clear_freelists_p = 1;
1550 }
1551
/* Restart the world after GC: wake sleepers, then release every lock
   taken by scm_i_thread_put_to_sleep, and re-enter guile mode for the
   current thread.  No-op before thread initialization.  */
void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      /* Broadcast first; sleepers only actually run once their
	 heap_mutex is released below.  */
      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}
1566
/* Park the calling thread until GC is done: suspend (publishing the
   stack top for marking), wait on wake_up_cond with our heap_mutex,
   then resume.  Called when scm_i_thread_go_to_sleep is noticed.  */
void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}
1574
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
/* Nesting depth of critical sections (the mutex is recursive).  */
int scm_i_critical_section_level = 0;

/* Default mutex for scm_dynwind_critical_section when passed #f.  */
static SCM dynwind_critical_section_mutex;
1581
1582 void
1583 scm_dynwind_critical_section (SCM mutex)
1584 {
1585 if (scm_is_false (mutex))
1586 mutex = dynwind_critical_section_mutex;
1587 scm_dynwind_lock_mutex (mutex);
1588 scm_dynwind_block_asyncs ();
1589 }
1590
/*** Initialization */

/* Per-thread freelist keys, created in scm_threads_prehistory.  */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Recursive mutex attribute, shared by all recursive mutexes.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1599
/* Earliest thread-system setup, run before the heap exists: create
   global mutexes/condvars and thread-local keys, then register the
   main thread (guilify_self_1) with BASE as its stack base.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* Recursive: critical sections may nest.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
1618
/* SMOB type tags for threads, mutexes and condvars, assigned in
   scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
1622
/* Register the thread/mutex/condvar SMOB types and finish thread-system
   bootstrap.  Order matters: threads_initialized_p must be set before
   scm_make_recursive_mutex, which can trigger GC-time sleep/wake.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
1649
1650 void
1651 scm_init_threads_default_dynamic_state ()
1652 {
1653 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
1654 scm_i_default_dynamic_state = scm_permanent_object (state);
1655 }
1656
/* Register all SCM_DEFINE'd procedures from this file; threads.x is
   generated by the snarfer from the SCM_DEFINE forms above.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
1662
1663 /*
1664 Local Variables:
1665 c-file-style: "gnu"
1666 End:
1667 */