merge from 1.8 branch
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19 \f
20
#define _GNU_SOURCE

#include "libguile/_scm.h"

#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#ifdef HAVE_STRING_H
#include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
#endif

#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/threads.h"
#include "libguile/dynwind.h"
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
#include "libguile/init.h"
51
52 #ifdef __MINGW32__
53 #ifndef ETIMEDOUT
54 # define ETIMEDOUT WSAETIMEDOUT
55 #endif
56 # include <fcntl.h>
57 # include <process.h>
58 # define pipe(fd) _pipe (fd, 256, O_BINARY)
59 #endif /* __MINGW32__ */
60
61 /*** Queues */
62
63 /* Make an empty queue data structure.
64 */
65 static SCM
66 make_queue ()
67 {
68 return scm_cons (SCM_EOL, SCM_EOL);
69 }
70
71 /* Put T at the back of Q and return a handle that can be used with
72 remqueue to remove T from Q again.
73 */
74 static SCM
75 enqueue (SCM q, SCM t)
76 {
77 SCM c = scm_cons (t, SCM_EOL);
78 if (scm_is_null (SCM_CDR (q)))
79 SCM_SETCDR (q, c);
80 else
81 SCM_SETCDR (SCM_CAR (q), c);
82 SCM_SETCAR (q, c);
83 return c;
84 }
85
86 /* Remove the element that the handle C refers to from the queue Q. C
87 must have been returned from a call to enqueue. The return value
88 is zero when the element referred to by C has already been removed.
89 Otherwise, 1 is returned.
90 */
91 static int
92 remqueue (SCM q, SCM c)
93 {
94 SCM p, prev = q;
95 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
96 {
97 if (scm_is_eq (p, c))
98 {
99 if (scm_is_eq (c, SCM_CAR (q)))
100 SCM_SETCAR (q, SCM_CDR (c));
101 SCM_SETCDR (prev, SCM_CDR (c));
102 return 1;
103 }
104 prev = p;
105 }
106 return 0;
107 }
108
109 /* Remove the front-most element from the queue Q and return it.
110 Return SCM_BOOL_F when Q is empty.
111 */
112 static SCM
113 dequeue (SCM q)
114 {
115 SCM c = SCM_CDR (q);
116 if (scm_is_null (c))
117 return SCM_BOOL_F;
118 else
119 {
120 SCM_SETCDR (q, SCM_CDR (c));
121 if (scm_is_null (SCM_CDR (q)))
122 SCM_SETCAR (q, SCM_EOL);
123 return SCM_CAR (c);
124 }
125 }
126
127 /*** Thread smob routines */
128
/* GC mark function for the thread smob: mark every SCM-valued field of
   the thread structure, returning the last one so the caller can
   continue marking it (tail-mark convention for smob mark functions).  */
static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  return t->dynamic_state;
}
140
/* Print function for the thread smob: "#<thread ID (ADDR)>" where ID is
   the pthread identifier in decimal and ADDR the thread struct address
   in hex.  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_puts ("#<thread ", port);
  scm_uintprint ((size_t)t->pthread, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;   /* non-zero: printing handled here */
}
152
/* Free function for the thread smob.  A thread object must only be
   collected after the underlying thread has terminated, hence the
   assertion.  */
static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}
161
162 /*** Blocking on queues. */
163
164 /* See also scm_i_queue_async_cell for how such a block is
165 interrputed.
166 */
167
168 /* Put the current thread on QUEUE and go to sleep, waiting for it to
169 be woken up by a call to 'unblock_from_queue', or to be
170 interrupted. Upon return of this function, the current thread is
171 no longer on QUEUE, even when the sleep has been interrupted.
172
173 The QUEUE data structure is assumed to be protected by MUTEX and
174 the caller of block_self must hold MUTEX. It will be atomically
175 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
176
177 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
178 as MUTEX is needed.
179
180 When WAITTIME is not NULL, the sleep will be aborted at that time.
181
182 The return value of block_self is an errno value. It will be zero
183 when the sleep has been successfully completed by a call to
184 unblock_from_queue, EINTR when it has been interrupted by the
185 delivery of a system async, and ETIMEDOUT when the timeout has
186 expired.
187
188 The system asyncs themselves are not executed by block_self.
189 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* Announce that we are about to sleep; a non-zero return means an
     async is already pending and we must not sleep at all.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      t->block_asyncs++;   /* no asyncs may run while we sit on QUEUE */
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
221
222 /* Wake up the first thread on QUEUE, if any. The caller must hold
223 the mutex that protects QUEUE. The awoken thread is returned, or
224 #f when the queue was empty.
225 */
226 static SCM
227 unblock_from_queue (SCM queue)
228 {
229 SCM thread = dequeue (queue);
230 if (scm_is_true (thread))
231 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
232 return thread;
233 }
234
235 /* Getting into and out of guile mode.
236 */
237
238 /* Ken Raeburn observes that the implementation of suspend and resume
239 (and the things that build on top of them) are very likely not
240 correct (see below). We will need fix this eventually, and that's
241 why scm_leave_guile/scm_enter_guile are not exported in the API.
242
243 Ken writes:
244
245 Consider this sequence:
246
247 Function foo, called in Guile mode, calls suspend (maybe indirectly
248 through scm_leave_guile), which does this:
249
250 // record top of stack for the GC
251 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
252 var 't'
253 // save registers.
254 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
255 setjmp (t->regs); // here's most of the magic
256
257 ... and returns.
258
259 Function foo has a SCM value X, a handle on a non-immediate object, in
260 a caller-saved register R, and it's the only reference to the object
261 currently.
262
263 The compiler wants to use R in suspend, so it pushes the current
264 value, X, into a stack slot which will be reloaded on exit from
265 suspend; then it loads stuff into R and goes about its business. The
266 setjmp call saves (some of) the current registers, including R, which
267 no longer contains X. (This isn't a problem for a normal
268 setjmp/longjmp situation, where longjmp would be called before
269 setjmp's caller returns; the old value for X would be loaded back from
270 the stack after the longjmp, before the function returned.)
271
272 So, suspend returns, loading X back into R (and invalidating the jump
273 buffer) in the process. The caller foo then goes off and calls a
274 bunch of other functions out of Guile mode, occasionally storing X on
275 the stack again, but, say, much deeper on the stack than suspend's
276 stack frame went, and the stack slot where suspend had written X has
277 long since been overwritten with other values.
278
279 Okay, nothing actively broken so far. Now, let garbage collection
280 run, triggered by another thread.
281
282 The thread calling foo is out of Guile mode at the time, so the
283 garbage collector just scans a range of stack addresses. Too bad that
284 X isn't stored there. So the pointed-to storage goes onto the free
285 list, and I think you can see where things go from there.
286
287 Is there anything I'm missing that'll prevent this scenario from
288 happening? I mean, aside from, "well, suspend and scm_leave_guile
289 don't have many local variables, so they probably won't need to save
290 any registers on most systems, so we hope everything will wind up in
291 the jump buffer and we'll just get away with it"?
292
293 (And, going the other direction, if scm_leave_guile and suspend push
294 the stack pointer over onto a new page, and foo doesn't make further
295 function calls and thus the stack pointer no longer includes that
296 page, are we guaranteed that the kernel cannot release the now-unused
297 stack page that contains the top-of-stack pointer we just saved? I
298 don't know if any OS actually does that. If it does, we could get
299 faults in garbage collection.)
300
301 I don't think scm_without_guile has to have this problem, as it gets
302 more control over the stack handling -- but it should call setjmp
303 itself. I'd probably try something like:
304
305 // record top of stack for the GC
306 t->top = SCM_STACK_PTR (&t);
307 // save registers.
308 SCM_FLUSH_REGISTER_WINDOWS;
309 setjmp (t->regs);
310 res = func(data);
311 scm_enter_guile (t);
312
313 ... though even that's making some assumptions about the stack
314 ordering of local variables versus caller-saved registers.
315
316 For something like scm_leave_guile to work, I don't think it can just
317 rely on invalidated jump buffers. A valid jump buffer, and a handle
318 on the stack state at the point when the jump buffer was initialized,
319 together, would work fine, but I think then we're talking about macros
320 invoking setjmp in the caller's stack frame, and requiring that the
321 caller of scm_leave_guile also call scm_enter_guile before returning,
322 kind of like pthread_cleanup_push/pop calls that have to be paired up
323 in a function. (In fact, the pthread ones have to be paired up
324 syntactically, as if they might expand to a compound statement
325 incorporating the user's code, and invoking a compiler's
326 exception-handling primitives. Which might be something to think
327 about for cases where Guile is used with C++ exceptions or
328 pthread_cancel.)
329 */
330
331 scm_i_pthread_key_t scm_i_thread_key;
332
/* Second half of re-entering guile mode (called with the heap mutex
   held): clear the saved stack top so the GC scans our live stack
   again, and drop our freelists when a GC ran while we were away.  */
static void
resume (scm_i_thread *t)
{
  t->top = NULL;   /* NULL means "in guile mode; scan the real stack" */
  if (t->clear_freelists_p)
    {
      /* GC asked us to discard the cells we had cached locally.  */
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}
344
345 typedef void* scm_t_guile_ticket;
346
347 static void
348 scm_enter_guile (scm_t_guile_ticket ticket)
349 {
350 scm_i_thread *t = (scm_i_thread *)ticket;
351 if (t)
352 {
353 scm_i_pthread_mutex_lock (&t->heap_mutex);
354 resume (t);
355 }
356 }
357
/* First half of leaving guile mode: record where our stack ends and
   spill registers into the jump buffer so the conservative GC can scan
   them while we are away.  See the long comment above for why this is
   known to be only approximately correct.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  setjmp (t->regs);   /* registers end up in t->regs, which GC scans */
  return t;
}
370
371 static scm_t_guile_ticket
372 scm_leave_guile ()
373 {
374 scm_i_thread *t = suspend ();
375 scm_i_pthread_mutex_unlock (&t->heap_mutex);
376 return (scm_t_guile_ticket) t;
377 }
378
/* Protects the linked list of all threads and the associated count.  */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static int thread_count;

/* Dynamic state used as parent when none is supplied by the caller.  */
static SCM scm_i_default_dynamic_state;
384
385 /* Perform first stage of thread initialisation, in non-guile mode.
386 */
387 static void
388 guilify_self_1 (SCM_STACKITEM *base)
389 {
390 scm_i_thread *t = malloc (sizeof (scm_i_thread));
391
392 t->pthread = scm_i_pthread_self ();
393 t->handle = SCM_BOOL_F;
394 t->result = SCM_BOOL_F;
395 t->join_queue = SCM_EOL;
396 t->dynamic_state = SCM_BOOL_F;
397 t->dynwinds = SCM_EOL;
398 t->active_asyncs = SCM_EOL;
399 t->block_asyncs = 1;
400 t->pending_asyncs = 1;
401 t->last_debug_frame = NULL;
402 t->base = base;
403 t->continuation_root = SCM_EOL;
404 t->continuation_base = base;
405 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
406 t->sleep_mutex = NULL;
407 t->sleep_object = SCM_BOOL_F;
408 t->sleep_fd = -1;
409 /* XXX - check for errors. */
410 pipe (t->sleep_pipe);
411 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
412 t->clear_freelists_p = 0;
413 t->gc_running_p = 0;
414 t->exited = 0;
415
416 t->freelist = SCM_EOL;
417 t->freelist2 = SCM_EOL;
418 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
419 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
420
421 scm_i_pthread_setspecific (scm_i_thread_key, t);
422
423 scm_i_pthread_mutex_lock (&t->heap_mutex);
424
425 scm_i_pthread_mutex_lock (&thread_admin_mutex);
426 t->next_thread = all_threads;
427 all_threads = t;
428 thread_count++;
429 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
430 }
431
432 /* Perform second stage of thread initialisation, in guile mode.
433 */
/* Perform second stage of thread initialisation, in guile mode: create
   the Scheme-visible thread object and the SCM data structures that
   could not be made before the heap was usable, then unblock asyncs.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  /* PARENT is either a dynamic state to inherit from, or #f for the
     initial (root) dynamic state.  */
  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  t->block_asyncs = 0;   /* asyncs were blocked since guilify_self_1 */
}
452
453 /* Perform thread tear-down, in guile mode.
454 */
/* Perform thread tear-down, in guile mode: mark the thread as exited,
   close its wakeup pipe, and wake every thread blocked in join-thread
   on us.  Runs under thread_admin_mutex, which also protects the join
   queue here.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v;

  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  /* Drain the join queue, waking each waiter in turn.  */
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return NULL;
}
471
/* pthread key destructor, called when a guilified thread terminates.
   V is the scm_i_thread pointer that was stored under
   scm_i_thread_key.  */
static void
on_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v, **tp;

  /* Re-install the key: the TLS slot was already cleared by pthreads
     before this destructor runs, but scm_with_guile below needs it.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.
  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.
  */
  scm_leave_guile ();
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
501
/* One-time creation of the thread-local-storage key; on_thread_exit is
   registered as the key's destructor so tear-down runs automatically
   when a guilified thread terminates.  */
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;

static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
509
510 /* Perform any initializations necessary to bring the current thread
511 into guile mode, initializing Guile itself, if necessary.
512
513 BASE is the stack base to use with GC.
514
515 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
516 which case the default dynamic state is used.
517
518 Return zero when the thread was in guile mode already; otherwise
519 return 1.
520 */
521
/* Bring the current thread into guile mode, initializing Guile itself
   if this is the very first thread to enter.  BASE is the stack base
   for the GC; PARENT the parent dynamic state or #f for the default.
   Returns 1 when the thread actually entered guile mode, 0 when it was
   in guile mode already (so the caller knows whether to leave again).  */
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 XXX - base might be lower than when this thread was first
	 guilified.
      */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
572
573 #if SCM_USE_PTHREAD_THREADS
574 /* pthread_getattr_np not available on MacOS X and Solaris 10. */
575 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
576
577 #define HAVE_GET_THREAD_STACK_BASE
578
579 static SCM_STACKITEM *
580 get_thread_stack_base ()
581 {
582 pthread_attr_t attr;
583 void *start, *end;
584 size_t size;
585
586 pthread_getattr_np (pthread_self (), &attr);
587 pthread_attr_getstack (&attr, &start, &size);
588 end = (char *)start + size;
589
590 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
591 for the main thread, but we can use scm_get_stack_base in that
592 case.
593 */
594
595 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
596 if ((void *)&attr < start || (void *)&attr >= end)
597 return scm_get_stack_base ();
598 else
599 #endif
600 {
601 #if SCM_STACK_GROWS_UP
602 return start;
603 #else
604 return end;
605 #endif
606 }
607 }
608
609 #endif /* HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP */
610
611 #else /* !SCM_USE_PTHREAD_THREADS */
612
613 #define HAVE_GET_THREAD_STACK_BASE
614
615 static SCM_STACKITEM *
616 get_thread_stack_base ()
617 {
618 return scm_get_stack_base ();
619 }
620
621 #endif /* !SCM_USE_PTHREAD_THREADS */
622
623 #ifdef HAVE_GET_THREAD_STACK_BASE
624
625 void
626 scm_init_guile ()
627 {
628 scm_i_init_thread_for_guile (get_thread_stack_base (),
629 scm_i_default_dynamic_state);
630 }
631
632 #endif
633
/* Call FUNC (DATA) in guile mode, using the default dynamic state as
   parent; returns FUNC's result.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
640
/* Call FUNC (DATA) in guile mode with PARENT as the parent dynamic
   state, behind a continuation barrier.  Guile mode is entered (and,
   if we were not already in it, left again) around the call; the
   address of a local is used as the GC stack base.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data,
			     SCM parent)
{
  void *res;
  int really_entered;
  SCM_STACKITEM base_item;
  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  res = scm_c_with_continuation_barrier (func, data);
  /* Only leave guile mode when this call was the one that entered.  */
  if (really_entered)
    scm_leave_guile ();
  return res;
}
654
655 void *
656 scm_without_guile (void *(*func)(void *), void *data)
657 {
658 void *res;
659 scm_t_guile_ticket t;
660 t = scm_leave_guile ();
661 res = func (data);
662 scm_enter_guile (t);
663 return res;
664 }
665
666 /*** Thread creation */
667
/* Arguments handed from call-with-new-thread to the new thread.  The
   mutex/cond pair lets the creator wait until the child has published
   its thread object in THREAD.  */
typedef struct {
  SCM parent;      /* parent dynamic state for the child */
  SCM thunk;       /* Scheme thunk to run */
  SCM handler;     /* catch handler, or unbound */
  SCM thread;      /* filled in by the child: its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
676
/* Body of a new Scheme thread, already running in guile mode.  First
   publish our thread object and signal the creator (who is blocked in
   scm_call_with_new_thread), then run the thunk, optionally inside a
   catch-all.  NOTE: THUNK and HANDLER are copied out of *D before the
   signal, since D points into the creator's stack frame.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
698
/* pthread start routine for call-with-new-thread: detach ourselves
   (the thread is joined at the Scheme level, not via pthread_join) and
   enter guile mode with the requested parent dynamic state.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
707
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread.  The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler.  This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  /* DATA lives on our stack; the child copies what it needs before we
     return (guaranteed by the cond wait below).  */
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);   /* raises; does not return */
    }
  /* Wait until the child has published its thread object.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  /* NOTE(review): data.mutex and data.cond are never destroyed here --
     confirm whether scm_i_pthread_mutex_destroy/cond_destroy are
     required on the supported platforms.  */
  return data.thread;
}
#undef FUNC_NAME
751
/* C-level analogue of launch_data, used by scm_spawn_thread: the body
   and handler are C callbacks instead of Scheme procedures.  */
typedef struct {
  SCM parent;                  /* parent dynamic state for the child */
  scm_t_catch_body body;       /* C function to run in the new thread */
  void *body_data;
  scm_t_catch_handler handler; /* optional catch handler (may be NULL) */
  void *handler_data;
  SCM thread;                  /* filled in by the child: its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
762
/* Body of a thread created via scm_spawn_thread, running in guile
   mode.  Copies the callbacks out of *D, publishes the thread object
   to the waiting creator, then runs BODY, optionally under a catch-all
   with HANDLER.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
787
/* pthread start routine for scm_spawn_thread: detach and enter guile
   mode with the requested parent dynamic state.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
796
/* Create a new thread running the C function BODY (BODY_DATA), with
   HANDLER (HANDLER_DATA) catching all throws when non-NULL.  Blocks
   until the child has published its thread object, which is returned.
   Raises a system error when thread creation fails.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  /* DATA lives on our stack; the child copies what it needs before we
     return (guaranteed by the cond wait below).  */
  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);   /* raises; does not return */
    }
  /* Wait until the child has published its thread object.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
827
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  /* Returns #t when the scheduler actually yielded.  */
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
836
SCM_DEFINE (scm_join_thread, "join-thread", 1, 0, 0,
	    (SCM thread),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread
{
  scm_i_thread *t;
  SCM res;

  SCM_VALIDATE_THREAD (1, thread);
  /* Joining ourselves would deadlock forever.  */
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("can not join the current thread", SCM_EOL);

  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t = SCM_I_THREAD_DATA (thread);
  if (!t->exited)
    {
      while (1)
	{
	  /* Sleep on the target's join queue; do_thread_exit wakes us.  */
	  block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
	  if (t->exited)
	    break;
	  /* Interrupted: drop the lock, run pending asyncs, retry.  */
	  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
	}
    }
  res = t->result;   /* read under the lock: the thread's exit value */

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return res;
}
#undef FUNC_NAME
871
872 /*** Fat mutexes */
873
874 /* We implement our own mutex type since we want them to be 'fair', we
875 want to do fancy things while waiting for them (like running
876 asyncs) and we might want to add things that are nice for
877 debugging.
878 */
879
/* A "fat" mutex: a plain pthread lock guarding Scheme-visible state
   (owner thread, recursion level, wait queue).  */
typedef struct {
  scm_i_pthread_mutex_t lock;
  SCM owner;       /* thread object owning the mutex, or #f */
  int level;       /* how much the owner owns us.
		      < 0 for non-recursive mutexes */
  SCM waiting;     /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
890
/* GC mark function for the mutex smob: mark the owner, tail-mark the
   wait queue.  */
static SCM
fat_mutex_mark (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}
898
/* Free function for the mutex smob: release the pthread lock and the
   struct itself.  */
static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}
907
/* Print function for the mutex smob: "#<mutex ADDR>" with the struct
   address in hex.  */
static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;   /* non-zero: printing handled here */
}
917
/* Allocate and initialize a fat mutex smob.  RECURSIVE selects
   recursive semantics (level counter starts at 0) versus plain
   (level -1 means "never recursive").  */
static SCM
make_fat_mutex (int recursive)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = recursive? 0 : -1;

  /* Set waiting to a safe value before SCM_NEWSMOB can trigger GC;
     the real queue is allocated afterwards.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
933
SCM_DEFINE (scm_make_mutex, "make-mutex", 0, 0, 0,
	    (void),
	    "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex
{
  /* 0: non-recursive semantics.  */
  return make_fat_mutex (0);
}
#undef FUNC_NAME
942
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  /* 1: recursive semantics (lock level is counted).  */
  return make_fat_mutex (1);
}
#undef FUNC_NAME
951
/* Lock MUTEX for the current thread, blocking until it becomes
   available.  Returns NULL on success, or a static error message when
   a non-recursive mutex is re-locked by its owner.  */
static char *
fat_mutex_lock (SCM mutex)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  SCM thread = scm_current_thread ();
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;               /* free: take it */
  else if (scm_is_eq (m->owner, thread))
    {
      if (m->level >= 0)
	m->level++;                  /* recursive: bump lock count */
      else
	msg = "mutex already locked by current thread";
    }
  else
    {
      /* Contended: sleep on the wait queue until fat_mutex_unlock
	 hands the mutex to us (it sets m->owner to the woken thread).  */
      while (1)
	{
	  block_self (m->waiting, mutex, &m->lock, NULL);
	  if (scm_is_eq (m->owner, thread))
	    break;
	  /* Interrupted: release, run pending asyncs, retry.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}
984
SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
	    (SCM mx),
	    "Lock @var{mutex}.  If the mutex is already locked, the calling thread "
	    "blocks until the mutex becomes available. The function returns when "
	    "the calling thread owns the lock on @var{mutex}.  Locking a mutex that "
	    "a thread already owns will succeed right away and will not block the "
	    "thread.  That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex
{
  char *msg;

  SCM_VALIDATE_MUTEX (1, mx);
  msg = fat_mutex_lock (mx);
  /* fat_mutex_lock reports failure via a message; turn it into a
     Scheme error here.  */
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1003
/* Lock MUTEX for the current dynwind context: it is locked on every
   rewind and unlocked on every unwind of the enclosing dynamic-wind.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1012
/* Try to lock M without blocking.  *RESP is set to 1 when the lock was
   acquired (or its recursion level bumped), 0 when someone else holds
   it.  Returns NULL on success, or a static error message when a
   non-recursive mutex is re-locked by its owner.  */
static char *
fat_mutex_trylock (fat_mutex *m, int *resp)
{
  char *msg = NULL;
  SCM thread = scm_current_thread ();

  *resp = 1;
  scm_i_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;               /* free: take it */
  else if (scm_is_eq (m->owner, thread))
    {
      if (m->level >= 0)
	m->level++;                  /* recursive: bump lock count */
      else
	msg = "mutex already locked by current thread";
    }
  else
    *resp = 0;                       /* held by another thread */
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}
1035
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
	    (SCM mutex),
	    "Try to lock @var{mutex}. If the mutex is already locked by someone "
	    "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  char *msg;
  int res;

  SCM_VALIDATE_MUTEX (1, mutex);

  msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
  /* A message means misuse (non-recursive re-lock), not contention.  */
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return scm_from_bool (res);
}
#undef FUNC_NAME
1053
/* Unlock M, which must be owned by the current thread.  Decrements the
   recursion level when positive; otherwise ownership passes directly
   to the first waiter (or #f when nobody waits).  Returns NULL on
   success, or a static error message on misuse.  */
static char *
fat_mutex_unlock (fat_mutex *m)
{
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (!scm_is_eq (m->owner, scm_current_thread ()))
    {
      if (scm_is_false (m->owner))
	msg = "mutex not locked";
      else
	msg = "mutex not locked by current thread";
    }
  else if (m->level > 0)
    m->level--;                             /* recursive: just count down */
  else
    /* Hand-off: the woken thread becomes the owner, which is what
       the wait loop in fat_mutex_lock checks for.  */
    m->owner = unblock_from_queue (m->waiting);
  scm_i_pthread_mutex_unlock (&m->lock);

  return msg;
}
1075
SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
	    (SCM mx),
	    "Unlocks @var{mutex} if the calling thread owns the lock on "
	    "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
	    "thread results in undefined behaviour.  Once a mutex has been unlocked, "
	    "one thread blocked on @var{mutex} is awakened and grabs the mutex "
	    "lock.  Every call to @code{lock-mutex} by this thread must be matched "
	    "with a call to @code{unlock-mutex}.  Only the last call to "
	    "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex
{
  char *msg;
  SCM_VALIDATE_MUTEX (1, mx);

  msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
  /* fat_mutex_unlock reports misuse via a message; raise it here.  */
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1096
#if 0

/* Disabled: introspection accessors for fat mutexes, not currently
   part of the public interface.  */

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
	    (SCM mx),
	    "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM_VALIDATE_MUTEX (1, mx);
  return (SCM_MUTEX_DATA(mx))->owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of a recursive mutex, or -1\n"
	    "for a standard mutex.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

#endif
1121
/*** Fat condition variables */

/* A "fat" condition variable: a Scheme-visible condvar built from a
   plain pthread mutex protecting a queue of waiting threads.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects `waiting'.  */
  SCM waiting;   /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)     SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1131
1132 static SCM
1133 fat_cond_mark (SCM cv)
1134 {
1135 fat_cond *c = SCM_CONDVAR_DATA (cv);
1136 return c->waiting;
1137 }
1138
1139 static size_t
1140 fat_cond_free (SCM mx)
1141 {
1142 fat_cond *c = SCM_CONDVAR_DATA (mx);
1143 scm_i_pthread_mutex_destroy (&c->lock);
1144 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1145 return 0;
1146 }
1147
1148 static int
1149 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1150 {
1151 fat_cond *c = SCM_CONDVAR_DATA (cv);
1152 scm_puts ("#<condition-variable ", port);
1153 scm_uintprint ((scm_t_bits)c, 16, port);
1154 scm_puts (">", port);
1155 return 1;
1156 }
1157
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  /* Give `waiting' a valid SCM value before the SMOB exists, so a GC
     triggered during SCM_NEWSMOB never marks an uninitialized field;
     the real queue is attached afterwards (make_queue allocates and
     can itself trigger GC, which would otherwise see a half-built
     condvar).  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1174
/* Atomically release MUTEX and wait on COND, re-acquiring MUTEX before
   returning.  WAITTIME, when non-NULL, is an absolute timeout.
   Returns 1 when signalled, 0 on timeout; signals a Scheme error when
   the mutex is not held by the calling thread, or a system error for
   unexpected failures from block_self.  */
static int
fat_cond_timedwait (SCM cond, SCM mutex,
		    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  fat_cond *c = SCM_CONDVAR_DATA (cond);
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  const char *msg;
  int err = 0;

  while (1)
    {
      /* Take the condvar lock BEFORE releasing the fat mutex, so no
	 signal can slip in between the unlock and the wait.  */
      scm_i_scm_pthread_mutex_lock (&c->lock);
      msg = fat_mutex_unlock (m);
      /* Defer asyncs until the mutex has been re-acquired.  */
      t->block_asyncs++;
      if (msg == NULL)
	{
	  err = block_self (c->waiting, cond, &c->lock, waittime);
	  scm_i_pthread_mutex_unlock (&c->lock);
	  fat_mutex_lock (mutex);
	}
      else
	scm_i_pthread_mutex_unlock (&c->lock);
      t->block_asyncs--;
      scm_async_click ();

      if (msg)
	scm_misc_error (NULL, msg, SCM_EOL);

      scm_remember_upto_here_2 (cond, mutex);

      if (err == 0)
	return 1;
      if (err == ETIMEDOUT)
	return 0;
      if (err != EINTR)
	{
	  errno = err;
	  scm_syserror (NULL);
	}
      /* err == EINTR: treated as a spurious wakeup; loop and wait
	 again.  */
    }
}
1217
1218 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1219 (SCM cv, SCM mx, SCM t),
1220 "Wait until @var{cond-var} has been signalled. While waiting, "
1221 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1222 "is locked again when this function returns. When @var{time} is given, "
1223 "it specifies a point in time where the waiting should be aborted. It "
1224 "can be either a integer as returned by @code{current-time} or a pair "
1225 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1226 "mutex is locked and @code{#f} is returned. When the condition "
1227 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1228 "is returned. ")
1229 #define FUNC_NAME s_scm_timed_wait_condition_variable
1230 {
1231 scm_t_timespec waittime, *waitptr = NULL;
1232
1233 SCM_VALIDATE_CONDVAR (1, cv);
1234 SCM_VALIDATE_MUTEX (2, mx);
1235
1236 if (!SCM_UNBNDP (t))
1237 {
1238 if (scm_is_pair (t))
1239 {
1240 waittime.tv_sec = scm_to_ulong (SCM_CAR (t));
1241 waittime.tv_nsec = scm_to_ulong (SCM_CAR (t)) * 1000;
1242 }
1243 else
1244 {
1245 waittime.tv_sec = scm_to_ulong (t);
1246 waittime.tv_nsec = 0;
1247 }
1248 waitptr = &waittime;
1249 }
1250
1251 return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
1252 }
1253 #undef FUNC_NAME
1254
1255 static void
1256 fat_cond_signal (fat_cond *c)
1257 {
1258 scm_i_scm_pthread_mutex_lock (&c->lock);
1259 unblock_from_queue (c->waiting);
1260 scm_i_pthread_mutex_unlock (&c->lock);
1261 }
1262
1263 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1264 (SCM cv),
1265 "Wake up one thread that is waiting for @var{cv}")
1266 #define FUNC_NAME s_scm_signal_condition_variable
1267 {
1268 SCM_VALIDATE_CONDVAR (1, cv);
1269 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1270 return SCM_BOOL_T;
1271 }
1272 #undef FUNC_NAME
1273
1274 static void
1275 fat_cond_broadcast (fat_cond *c)
1276 {
1277 scm_i_scm_pthread_mutex_lock (&c->lock);
1278 while (scm_is_true (unblock_from_queue (c->waiting)))
1279 ;
1280 scm_i_pthread_mutex_unlock (&c->lock);
1281 }
1282
1283 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1284 (SCM cv),
1285 "Wake up all threads that are waiting for @var{cv}. ")
1286 #define FUNC_NAME s_scm_broadcast_condition_variable
1287 {
1288 SCM_VALIDATE_CONDVAR (1, cv);
1289 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1290 return SCM_BOOL_T;
1291 }
1292 #undef FUNC_NAME
1293
/*** Marking stacks */

/* XXX - what to do with this? Do we need to handle this for blocked
   threads as well?
*/
#ifdef __ia64__
/* IA64 keeps a second stack, the register backing store, which must be
   marked for GC as well; its bounds come from the current context.
   NOTE(review): this only covers the calling thread's backing store —
   see the XXX above.  */
# define SCM_MARK_BACKING_STORE() do { \
    ucontext_t ctx; \
    SCM_STACKITEM * top, * bot; \
    getcontext (&ctx); \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM))); \
    bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
    top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1313
/* Mark the stack of every thread for GC.  Called by the collector
   after all threads have been suspended, so each thread's `top' and
   saved register set are valid snapshots.  */
void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      scm_gc_mark (t->handle);

#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      /* Also mark the register snapshot saved at suspend time: live
	 SCM values may be held only in registers.  */
      scm_mark_locations ((SCM_STACKITEM *) t->regs,
			  ((size_t) sizeof(t->regs)
			   / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}
1338
/*** Select */

/* A select() that cooperates with the Guile thread system: the thread
   leaves guile mode while blocked (so GC can proceed), and its sleep
   pipe is added to the read set so other threads can interrupt the
   wait — an interruption surfaces as -1 with errno set to EINTR.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  /* We always need a read set, since the wakeup fd goes into it.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Announce that we are about to sleep; retry (running pending
     asyncs via SCM_TICK) until setup succeeds.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  /* Save errno before re-entering guile mode, which may clobber it.  */
  eno = errno;
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* We were poked through the sleep pipe: consume the byte and
	 hide the wakeup fd from the caller.  */
      char dummy;
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  /* Only the wakeup fd was ready: report an interruption.  */
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1389
1390 /* Convenience API for blocking while in guile mode. */
1391
1392 #if SCM_USE_PTHREAD_THREADS
1393
1394 int
1395 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1396 {
1397 scm_t_guile_ticket t = scm_leave_guile ();
1398 int res = scm_i_pthread_mutex_lock (mutex);
1399 scm_enter_guile (t);
1400 return res;
1401 }
1402
1403 static void
1404 do_unlock (void *data)
1405 {
1406 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1407 }
1408
/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is left, whether normally or non-locally.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1415
1416 int
1417 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1418 {
1419 scm_t_guile_ticket t = scm_leave_guile ();
1420 int res = scm_i_pthread_cond_wait (cond, mutex);
1421 scm_enter_guile (t);
1422 return res;
1423 }
1424
1425 int
1426 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1427 scm_i_pthread_mutex_t *mutex,
1428 const scm_t_timespec *wt)
1429 {
1430 scm_t_guile_ticket t = scm_leave_guile ();
1431 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1432 scm_enter_guile (t);
1433 return res;
1434 }
1435
1436 #endif
1437
1438 unsigned long
1439 scm_std_usleep (unsigned long usecs)
1440 {
1441 struct timeval tv;
1442 tv.tv_usec = usecs % 1000000;
1443 tv.tv_sec = usecs / 1000000;
1444 scm_std_select (0, NULL, NULL, NULL, &tv);
1445 return tv.tv_sec * 1000000 + tv.tv_usec;
1446 }
1447
1448 unsigned int
1449 scm_std_sleep (unsigned int secs)
1450 {
1451 struct timeval tv;
1452 tv.tv_usec = 0;
1453 tv.tv_sec = secs;
1454 scm_std_select (0, NULL, NULL, NULL, &tv);
1455 return tv.tv_sec;
1456 }
1457
1458 /*** Misc */
1459
1460 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1461 (void),
1462 "Return the thread that called this function.")
1463 #define FUNC_NAME s_scm_current_thread
1464 {
1465 return SCM_I_CURRENT_THREAD->handle;
1466 }
1467 #undef FUNC_NAME
1468
1469 static SCM
1470 scm_c_make_list (size_t n, SCM fill)
1471 {
1472 SCM res = SCM_EOL;
1473 while (n-- > 0)
1474 res = scm_cons (fill, res);
1475 return res;
1476 }
1477
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Preallocate the result list before taking the lock; the count may
     be stale by the time we hold it.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      SCM_SETCAR (*l, t->handle);
      l = SCM_CDRLOC (*l);
      n--;
    }
  /* Truncate the preallocated list in case threads exited between the
     snapshot of thread_count and acquiring the lock.  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1503
1504 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1505 (SCM thread),
1506 "Return @code{#t} iff @var{thread} has exited.\n")
1507 #define FUNC_NAME s_scm_thread_exited_p
1508 {
1509 return scm_from_bool (scm_c_thread_exited_p (thread));
1510 }
1511 #undef FUNC_NAME
1512
1513 int
1514 scm_c_thread_exited_p (SCM thread)
1515 #define FUNC_NAME s_scm_thread_exited_p
1516 {
1517 scm_i_thread *t;
1518 SCM_VALIDATE_THREAD (1, thread);
1519 t = SCM_I_THREAD_DATA (thread);
1520 return t->exited;
1521 }
1522 #undef FUNC_NAME
1523
/* Condition broadcast by scm_i_thread_wake_up to restart threads
   parked for GC.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Non-zero while threads must park for GC; polled via SCM_TICK.  */
int scm_i_thread_go_to_sleep;
/* Set once scm_init_threads has run; the stop-the-world machinery is
   a no-op before that.  */
static int threads_initialized_p = 0;
1527
/* Stop the world for GC: take the admin mutex and then every thread's
   heap mutex.  Running threads notice `scm_i_thread_go_to_sleep' via
   SCM_TICK and park in scm_i_thread_sleep_for_gc, which releases their
   heap mutex so the loop below can acquire it.  Everything taken here
   is released again by scm_i_thread_wake_up.  */
void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}
1546
1547 void
1548 scm_i_thread_invalidate_freelists ()
1549 {
1550 /* thread_admin_mutex is already locked. */
1551
1552 scm_i_thread *t;
1553 for (t = all_threads; t; t = t->next_thread)
1554 if (t != SCM_I_CURRENT_THREAD)
1555 t->clear_freelists_p = 1;
1556 }
1557
/* Restart the world after GC: broadcast to the threads parked in
   scm_i_thread_sleep_for_gc, release every heap mutex taken by
   scm_i_thread_put_to_sleep, drop the admin mutex, and re-enter guile
   mode in the calling thread.  */
void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}
1572
/* Park the calling thread for the duration of a GC: suspend it, wait
   until the collector broadcasts wake_up_cond (the wait releases the
   heap mutex so the collector can take it), then resume.  */
void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}
1580
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
/* Nesting depth of critical sections in the current thread.  */
int scm_i_critical_section_level = 0;

/* Fallback mutex used by scm_dynwind_critical_section when it is
   passed #f.  */
static SCM dynwind_critical_section_mutex;
1587
1588 void
1589 scm_dynwind_critical_section (SCM mutex)
1590 {
1591 if (scm_is_false (mutex))
1592 mutex = dynwind_critical_section_mutex;
1593 scm_dynwind_lock_mutex (mutex);
1594 scm_dynwind_block_asyncs ();
1595 }
1596
/*** Initialization */

/* Per-thread freelist keys; created in scm_threads_prehistory.  */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Mutex attribute shared by all recursive mutexes Guile creates.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1605
/* Earliest thread-system setup, run before the heap exists: create
   the recursive mutex attribute, the global mutexes, the GC wake-up
   condition and the freelist keys, then start initializing the first
   thread with stack base BASE.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex is recursive so that nested
     SCM_CRITICAL_SECTION_START uses do not deadlock.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
1624
/* SMOB type tags; assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
1628
/* Register the SMOB types for threads, mutexes and condition
   variables, finish initializing the first thread, and create the
   shared dynwind critical-section mutex.  NOTE(review): the smob
   registrations run before guilify_self_2, presumably because the
   latter needs the thread type — confirm before reordering.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  /* From here on the stop-the-world machinery is active.  */
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
1655
1656 void
1657 scm_init_threads_default_dynamic_state ()
1658 {
1659 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
1660 scm_i_default_dynamic_state = scm_permanent_object (state);
1661 }
1662
/* Register the Scheme procedures defined in this file; the generated
   bindings live in threads.x.  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
1668
1669 /*
1670 Local Variables:
1671 c-file-style: "gnu"
1672 End:
1673 */