7e1bfde7fcde3faf7baa3685c532cc0790db21df
[bpt/guile.git] / libguile / threads.c
1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18
19 \f
20
21 #define _GNU_SOURCE
22
23 #include "libguile/_scm.h"
24
25 #if HAVE_UNISTD_H
26 #include <unistd.h>
27 #endif
28 #include <stdio.h>
29 #include <assert.h>
30
31 #ifdef HAVE_STRING_H
32 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
33 #endif
34
35 #if HAVE_SYS_TIME_H
36 #include <sys/time.h>
37 #endif
38
39 #include "libguile/validate.h"
40 #include "libguile/root.h"
41 #include "libguile/eval.h"
42 #include "libguile/async.h"
43 #include "libguile/ports.h"
44 #include "libguile/threads.h"
45 #include "libguile/dynwind.h"
46 #include "libguile/iselect.h"
47 #include "libguile/fluids.h"
48 #include "libguile/continuations.h"
49 #include "libguile/gc.h"
50 #include "libguile/init.h"
51
52 #ifdef __MINGW32__
53 #ifndef ETIMEDOUT
54 # define ETIMEDOUT WSAETIMEDOUT
55 #endif
56 # include <fcntl.h>
57 # include <process.h>
58 # define pipe(fd) _pipe (fd, 256, O_BINARY)
59 #endif /* __MINGW32__ */
60
61 /*** Queues */
62
63 /* Make an empty queue data structure.
64 */
65 static SCM
66 make_queue ()
67 {
68 return scm_cons (SCM_EOL, SCM_EOL);
69 }
70
71 /* Put T at the back of Q and return a handle that can be used with
72 remqueue to remove T from Q again.
73 */
74 static SCM
75 enqueue (SCM q, SCM t)
76 {
77 SCM c = scm_cons (t, SCM_EOL);
78 if (scm_is_null (SCM_CDR (q)))
79 SCM_SETCDR (q, c);
80 else
81 SCM_SETCDR (SCM_CAR (q), c);
82 SCM_SETCAR (q, c);
83 return c;
84 }
85
86 /* Remove the element that the handle C refers to from the queue Q. C
87 must have been returned from a call to enqueue. The return value
88 is zero when the element referred to by C has already been removed.
89 Otherwise, 1 is returned.
90 */
91 static int
92 remqueue (SCM q, SCM c)
93 {
94 SCM p, prev = q;
95 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
96 {
97 if (scm_is_eq (p, c))
98 {
99 if (scm_is_eq (c, SCM_CAR (q)))
100 SCM_SETCAR (q, SCM_CDR (c));
101 SCM_SETCDR (prev, SCM_CDR (c));
102 return 1;
103 }
104 prev = p;
105 }
106 return 0;
107 }
108
109 /* Remove the front-most element from the queue Q and return it.
110 Return SCM_BOOL_F when Q is empty.
111 */
112 static SCM
113 dequeue (SCM q)
114 {
115 SCM c = SCM_CDR (q);
116 if (scm_is_null (c))
117 return SCM_BOOL_F;
118 else
119 {
120 SCM_SETCDR (q, SCM_CDR (c));
121 if (scm_is_null (SCM_CDR (q)))
122 SCM_SETCAR (q, SCM_EOL);
123 return SCM_CAR (c);
124 }
125 }
126
127 /*** Thread smob routines */
128
/* GC mark function for thread smobs: mark every SCM field of the
   underlying scm_i_thread, returning the last one so the GC can
   tail-mark it.  */
static SCM
thread_mark (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  scm_gc_mark (t->result);
  scm_gc_mark (t->join_queue);
  scm_gc_mark (t->dynwinds);
  scm_gc_mark (t->active_asyncs);
  scm_gc_mark (t->continuation_root);
  return t->dynamic_state;
}
140
/* Smob print function for threads.  Output looks like
   "#<thread ID (ADDR)>" where ID is the numeric pthread id in decimal
   and ADDR is the address of the thread struct in hex.  */
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
     struct.  A cast like "(unsigned long) t->pthread" is a syntax error in
     the struct case, hence we go via a union, and extract according to the
     size of pthread_t.  */
  union {
    scm_i_pthread_t p;
    unsigned short us;
    unsigned int ui;
    unsigned long ul;
    scm_t_uintmax um;
  } u;
  scm_i_thread *t = SCM_I_THREAD_DATA (exp);
  scm_i_pthread_t p = t->pthread;
  scm_t_uintmax id;
  u.p = p;
  /* Pick the union member whose size matches pthread_t; the unmatched
     bytes of U are never read.  */
  if (sizeof (p) == sizeof (unsigned short))
    id = u.us;
  else if (sizeof (p) == sizeof (unsigned int))
    id = u.ui;
  else if (sizeof (p) == sizeof (unsigned long))
    id = u.ul;
  else
    id = u.um;

  scm_puts ("#<thread ", port);
  scm_uintprint (id, 10, port);
  scm_puts (" (", port);
  scm_uintprint ((scm_t_bits)t, 16, port);
  scm_puts (")>", port);
  return 1;
}
175
/* Smob free function for threads.  A thread smob may only be freed
   after the thread has terminated (see do_thread_exit); the assert
   guards that invariant.  */
static size_t
thread_free (SCM obj)
{
  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
  assert (t->exited);
  scm_gc_free (t, sizeof (*t), "thread");
  return 0;
}
184
185 /*** Blocking on queues. */
186
/* See also scm_i_queue_async_cell for how such a block is
   interrupted.
*/
190
191 /* Put the current thread on QUEUE and go to sleep, waiting for it to
192 be woken up by a call to 'unblock_from_queue', or to be
193 interrupted. Upon return of this function, the current thread is
194 no longer on QUEUE, even when the sleep has been interrupted.
195
196 The QUEUE data structure is assumed to be protected by MUTEX and
197 the caller of block_self must hold MUTEX. It will be atomically
198 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
199
200 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
201 as MUTEX is needed.
202
203 When WAITTIME is not NULL, the sleep will be aborted at that time.
204
205 The return value of block_self is an errno value. It will be zero
206 when the sleep has been successfully completed by a call to
207 unblock_from_queue, EINTR when it has been interrupted by the
208 delivery of a system async, and ETIMEDOUT when the timeout has
209 expired.
210
211 The system asyncs themselves are not executed by block_self.
212 */
static int
block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
	    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  SCM q_handle;
  int err;

  /* scm_i_setup_sleep returns non-zero when an async is already
     pending, in which case we must not go to sleep at all.  */
  if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
    err = EINTR;
  else
    {
      /* Block asyncs while we are on the queue so that the wait is
	 only interrupted via sleep_cond, not by running handlers.  */
      t->block_asyncs++;
      q_handle = enqueue (queue, t->handle);
      if (waittime == NULL)
	err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
      else
	err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);

      /* When we are still on QUEUE, we have been interrupted.  We
	 report this only when no other error (such as a timeout) has
	 happened above.
      */
      if (remqueue (queue, q_handle) && err == 0)
	err = EINTR;
      t->block_asyncs--;
      scm_i_reset_sleep (t);
    }

  return err;
}
244
245 /* Wake up the first thread on QUEUE, if any. The caller must hold
246 the mutex that protects QUEUE. The awoken thread is returned, or
247 #f when the queue was empty.
248 */
249 static SCM
250 unblock_from_queue (SCM queue)
251 {
252 SCM thread = dequeue (queue);
253 if (scm_is_true (thread))
254 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
255 return thread;
256 }
257
258 /* Getting into and out of guile mode.
259 */
260
261 /* Ken Raeburn observes that the implementation of suspend and resume
262 (and the things that build on top of them) are very likely not
263 correct (see below). We will need fix this eventually, and that's
264 why scm_leave_guile/scm_enter_guile are not exported in the API.
265
266 Ken writes:
267
268 Consider this sequence:
269
270 Function foo, called in Guile mode, calls suspend (maybe indirectly
271 through scm_leave_guile), which does this:
272
273 // record top of stack for the GC
274 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
275 var 't'
276 // save registers.
277 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
278 setjmp (t->regs); // here's most of the magic
279
280 ... and returns.
281
282 Function foo has a SCM value X, a handle on a non-immediate object, in
283 a caller-saved register R, and it's the only reference to the object
284 currently.
285
286 The compiler wants to use R in suspend, so it pushes the current
287 value, X, into a stack slot which will be reloaded on exit from
288 suspend; then it loads stuff into R and goes about its business. The
289 setjmp call saves (some of) the current registers, including R, which
290 no longer contains X. (This isn't a problem for a normal
291 setjmp/longjmp situation, where longjmp would be called before
292 setjmp's caller returns; the old value for X would be loaded back from
293 the stack after the longjmp, before the function returned.)
294
295 So, suspend returns, loading X back into R (and invalidating the jump
296 buffer) in the process. The caller foo then goes off and calls a
297 bunch of other functions out of Guile mode, occasionally storing X on
298 the stack again, but, say, much deeper on the stack than suspend's
299 stack frame went, and the stack slot where suspend had written X has
300 long since been overwritten with other values.
301
302 Okay, nothing actively broken so far. Now, let garbage collection
303 run, triggered by another thread.
304
305 The thread calling foo is out of Guile mode at the time, so the
306 garbage collector just scans a range of stack addresses. Too bad that
307 X isn't stored there. So the pointed-to storage goes onto the free
308 list, and I think you can see where things go from there.
309
310 Is there anything I'm missing that'll prevent this scenario from
311 happening? I mean, aside from, "well, suspend and scm_leave_guile
312 don't have many local variables, so they probably won't need to save
313 any registers on most systems, so we hope everything will wind up in
314 the jump buffer and we'll just get away with it"?
315
316 (And, going the other direction, if scm_leave_guile and suspend push
317 the stack pointer over onto a new page, and foo doesn't make further
318 function calls and thus the stack pointer no longer includes that
319 page, are we guaranteed that the kernel cannot release the now-unused
320 stack page that contains the top-of-stack pointer we just saved? I
321 don't know if any OS actually does that. If it does, we could get
322 faults in garbage collection.)
323
324 I don't think scm_without_guile has to have this problem, as it gets
325 more control over the stack handling -- but it should call setjmp
326 itself. I'd probably try something like:
327
328 // record top of stack for the GC
329 t->top = SCM_STACK_PTR (&t);
330 // save registers.
331 SCM_FLUSH_REGISTER_WINDOWS;
332 setjmp (t->regs);
333 res = func(data);
334 scm_enter_guile (t);
335
336 ... though even that's making some assumptions about the stack
337 ordering of local variables versus caller-saved registers.
338
339 For something like scm_leave_guile to work, I don't think it can just
340 rely on invalidated jump buffers. A valid jump buffer, and a handle
341 on the stack state at the point when the jump buffer was initialized,
342 together, would work fine, but I think then we're talking about macros
343 invoking setjmp in the caller's stack frame, and requiring that the
344 caller of scm_leave_guile also call scm_enter_guile before returning,
345 kind of like pthread_cleanup_push/pop calls that have to be paired up
346 in a function. (In fact, the pthread ones have to be paired up
347 syntactically, as if they might expand to a compound statement
348 incorporating the user's code, and invoking a compiler's
349 exception-handling primitives. Which might be something to think
350 about for cases where Guile is used with C++ exceptions or
351 pthread_cancel.)
352 */
353
354 scm_i_pthread_key_t scm_i_thread_key;
355
/* Mark thread T as back in guile mode.  Called with T's heap_mutex
   held.  Clearing t->top tells the GC that T's stack is live again
   and must not be scanned from a recorded top-of-stack.  */
static void
resume (scm_i_thread *t)
{
  t->top = NULL;
  if (t->clear_freelists_p)
    {
      /* Drop this thread's freelists, as requested via
	 clear_freelists_p while it was suspended (presumably set by
	 the GC — NOTE(review): confirm against gc.c).  */
      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
      t->clear_freelists_p = 0;
    }
}
367
368 typedef void* scm_t_guile_ticket;
369
370 static void
371 scm_enter_guile (scm_t_guile_ticket ticket)
372 {
373 scm_i_thread *t = (scm_i_thread *)ticket;
374 if (t)
375 {
376 scm_i_pthread_mutex_lock (&t->heap_mutex);
377 resume (t);
378 }
379 }
380
/* Record the current thread's stack top and registers so the GC can
   scan them while the thread is out of guile mode.  See the long
   comment above for why this scheme is known to be fragile.  */
static scm_i_thread *
suspend (void)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* record top of stack for the GC */
  t->top = SCM_STACK_PTR (&t);
  /* save registers. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* Spill (some of) the live registers into t->regs so the GC finds
     SCM values held only in registers.  The jump buffer is never
     longjmp'd to.  */
  setjmp (t->regs);
  return t;
}
393
/* Leave guile mode: record the stack/register state for the GC, then
   release the heap mutex so the GC may run while we are away.  The
   returned ticket is passed to scm_enter_guile to come back.  The
   order (suspend before unlock) matters: the GC must be able to scan
   a consistent snapshot as soon as the mutex is free.  */
static scm_t_guile_ticket
scm_leave_guile ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_mutex_unlock (&t->heap_mutex);
  return (scm_t_guile_ticket) t;
}
401
402 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
403 static scm_i_thread *all_threads = NULL;
404 static int thread_count;
405
406 static SCM scm_i_default_dynamic_state;
407
408 /* Perform first stage of thread initialisation, in non-guile mode.
409 */
410 static void
411 guilify_self_1 (SCM_STACKITEM *base)
412 {
413 scm_i_thread *t = malloc (sizeof (scm_i_thread));
414
415 t->pthread = scm_i_pthread_self ();
416 t->handle = SCM_BOOL_F;
417 t->result = SCM_BOOL_F;
418 t->join_queue = SCM_EOL;
419 t->dynamic_state = SCM_BOOL_F;
420 t->dynwinds = SCM_EOL;
421 t->active_asyncs = SCM_EOL;
422 t->block_asyncs = 1;
423 t->pending_asyncs = 1;
424 t->last_debug_frame = NULL;
425 t->base = base;
426 t->continuation_root = SCM_EOL;
427 t->continuation_base = base;
428 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
429 t->sleep_mutex = NULL;
430 t->sleep_object = SCM_BOOL_F;
431 t->sleep_fd = -1;
432 /* XXX - check for errors. */
433 pipe (t->sleep_pipe);
434 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
435 t->clear_freelists_p = 0;
436 t->gc_running_p = 0;
437 t->exited = 0;
438
439 t->freelist = SCM_EOL;
440 t->freelist2 = SCM_EOL;
441 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
442 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
443
444 scm_i_pthread_setspecific (scm_i_thread_key, t);
445
446 scm_i_pthread_mutex_lock (&t->heap_mutex);
447
448 scm_i_pthread_mutex_lock (&thread_admin_mutex);
449 t->next_thread = all_threads;
450 all_threads = t;
451 thread_count++;
452 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
453 }
454
455 /* Perform second stage of thread initialisation, in guile mode.
456 */
/* Perform second stage of thread initialisation, in guile mode:
   create the SCM-level objects (thread smob, dynamic state, join
   queue) that could not be made before the heap was usable.  PARENT
   is a dynamic state to inherit from, or #f for the initial one.  */
static void
guilify_self_2 (SCM parent)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
  t->continuation_root = scm_cons (t->handle, SCM_EOL);
  t->continuation_base = t->base;

  if (scm_is_true (parent))
    t->dynamic_state = scm_make_dynamic_state (parent);
  else
    t->dynamic_state = scm_i_make_initial_dynamic_state ();

  t->join_queue = make_queue ();
  /* Set-up is complete; allow asyncs to run (see guilify_self_1).  */
  t->block_asyncs = 0;
}
475
476 /* Perform thread tear-down, in guile mode.
477 */
/* Perform thread tear-down, in guile mode.  Marks the thread as
   exited, closes its sleep pipe, and wakes every thread blocked in
   join-thread on us.  V is the scm_i_thread pointer.  */
static void *
do_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v;

  /* thread_admin_mutex also protects t->exited and the join queue
     here (see scm_join_thread).  */
  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t->exited = 1;
  close (t->sleep_pipe[0]);
  close (t->sleep_pipe[1]);
  while (scm_is_true (unblock_from_queue (t->join_queue)))
    ;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return NULL;
}
494
/* pthread key destructor, run when a guilified thread terminates.
   V is the scm_i_thread pointer that was stored in scm_i_thread_key.  */
static void
on_thread_exit (void *v)
{
  scm_i_thread *t = (scm_i_thread *)v, **tp;

  /* Re-install the key: the destructor runs after the TSD slot was
     cleared, but the code below still needs SCM_I_CURRENT_THREAD.  */
  scm_i_pthread_setspecific (scm_i_thread_key, v);

  /* Unblocking the joining threads needs to happen in guile mode
     since the queue is a SCM data structure.
  */
  scm_with_guile (do_thread_exit, v);

  /* Removing ourself from the list of all threads needs to happen in
     non-guile mode since all SCM values on our stack become
     unprotected once we are no longer in the list.
  */
  scm_leave_guile ();
  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
    if (*tp == t)
      {
	*tp = t->next_thread;
	break;
      }
  thread_count--;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);

  scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
524
525 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
526
/* One-time initializer (via init_thread_key_once) for the TSD key
   that holds the current scm_i_thread; on_thread_exit is the key's
   destructor.  */
static void
init_thread_key (void)
{
  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
532
533 /* Perform any initializations necessary to bring the current thread
534 into guile mode, initializing Guile itself, if necessary.
535
536 BASE is the stack base to use with GC.
537
   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
539 which case the default dynamic state is used.
540
541 Return zero when the thread was in guile mode already; otherwise
542 return 1.
543 */
544
static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
  scm_i_thread *t;

  scm_i_pthread_once (&init_thread_key_once, init_thread_key);

  if ((t = SCM_I_CURRENT_THREAD) == NULL)
    {
      /* This thread has not been guilified yet.
       */

      scm_i_pthread_mutex_lock (&scm_i_init_mutex);
      if (scm_initialized_p == 0)
	{
	  /* First thread ever to enter Guile.  Run the full
	     initialization.
	  */
	  scm_i_init_guile (base);
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	}
      else
	{
	  /* Guile is already initialized, but this thread enters it for
	     the first time.  Only initialize this thread.
	  */
	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
	  guilify_self_1 (base);
	  guilify_self_2 (parent);
	}
      return 1;
    }
  else if (t->top)
    {
      /* This thread is already guilified but not in guile mode, just
	 resume it.

	 XXX - base might be lower than when this thread was first
	 guilified.
      */
      scm_enter_guile ((scm_t_guile_ticket) t);
      return 1;
    }
  else
    {
      /* Thread is already in guile mode.  Nothing to do.
      */
      return 0;
    }
}
595
596 #if SCM_USE_PTHREAD_THREADS
597
598 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
599 /* This method for GNU/Linux and perhaps some other systems.
600 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
601 available on them. */
602 #define HAVE_GET_THREAD_STACK_BASE
603
604 static SCM_STACKITEM *
605 get_thread_stack_base ()
606 {
607 pthread_attr_t attr;
608 void *start, *end;
609 size_t size;
610
611 pthread_getattr_np (pthread_self (), &attr);
612 pthread_attr_getstack (&attr, &start, &size);
613 end = (char *)start + size;
614
615 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
616 for the main thread, but we can use scm_get_stack_base in that
617 case.
618 */
619
620 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
621 if ((void *)&attr < start || (void *)&attr >= end)
622 return scm_get_stack_base ();
623 else
624 #endif
625 {
626 #if SCM_STACK_GROWS_UP
627 return start;
628 #else
629 return end;
630 #endif
631 }
632 }
633
634 #elif HAVE_PTHREAD_GET_STACKADDR_NP
635 /* This method for MacOS X.
636 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
637 but as of 2006 there's nothing obvious at apple.com. */
638 #define HAVE_GET_THREAD_STACK_BASE
/* Return the base of the current thread's stack for GC scanning
   (MacOS X variant).  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}
644
645 #elif defined (__MINGW32__)
646 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
647 in any thread. We don't like hard-coding the name of a system, but there
648 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
649 work. */
650 #define HAVE_GET_THREAD_STACK_BASE
/* Return the base of the current thread's stack for GC scanning
   (mingw variant; scm_get_stack_base works from any thread there).  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}
656
657 #endif /* pthread methods of get_thread_stack_base */
658
659 #else /* !SCM_USE_PTHREAD_THREADS */
660
661 #define HAVE_GET_THREAD_STACK_BASE
662
/* Return the base of the stack for GC scanning (single-threaded
   build: there is only the main stack).  */
static SCM_STACKITEM *
get_thread_stack_base ()
{
  return scm_get_stack_base ();
}
668
669 #endif /* !SCM_USE_PTHREAD_THREADS */
670
671 #ifdef HAVE_GET_THREAD_STACK_BASE
672
/* Public entry point: bring the calling thread (and, if needed, Guile
   itself) into guile mode, deriving the stack base automatically.
   Only available when the platform supports get_thread_stack_base.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
			       scm_i_default_dynamic_state);
}
679
680 #endif
681
/* Run FUNC (DATA) in guile mode, using the default dynamic state as
   parent, and return its result.  See scm_i_with_guile_and_parent.  */
void *
scm_with_guile (void *(*func)(void *), void *data)
{
  return scm_i_with_guile_and_parent (func, data,
				      scm_i_default_dynamic_state);
}
688
/* Run FUNC (DATA) in guile mode with PARENT as the parent dynamic
   state, inside a continuation barrier, and return its result.  The
   thread leaves guile mode again afterwards only if this call was the
   one that entered it.  */
void *
scm_i_with_guile_and_parent (void *(*func)(void *), void *data,
			     SCM parent)
{
  void *res;
  int really_entered;
  /* A local serves as the stack base marker for the GC.  */
  SCM_STACKITEM base_item;
  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
  res = scm_c_with_continuation_barrier (func, data);
  if (really_entered)
    scm_leave_guile ();
  return res;
}
702
703 void *
704 scm_without_guile (void *(*func)(void *), void *data)
705 {
706 void *res;
707 scm_t_guile_ticket t;
708 t = scm_leave_guile ();
709 res = func (data);
710 scm_enter_guile (t);
711 return res;
712 }
713
714 /*** Thread creation */
715
/* Handshake data passed (on the parent's stack) from
   call-with-new-thread to the child thread.  The mutex/cond pair lets
   the parent wait until the child has stored its thread object.  */
typedef struct {
  SCM parent;		/* dynamic state to inherit from */
  SCM thunk;		/* thunk to run in the new thread */
  SCM handler;		/* catch handler, or SCM_UNDEFINED */
  SCM thread;		/* filled in by the child: its thread object */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} launch_data;
724
/* Body of a new Scheme thread, run in guile mode.  Copies what it
   needs out of D (which lives on the parent's stack), signals the
   parent, then runs the thunk, storing its value as the thread
   result.  */
static void *
really_launch (void *d)
{
  launch_data *data = (launch_data *)d;
  /* Copy before signalling: D is invalid once the parent returns.  */
  SCM thunk = data->thunk, handler = data->handler;
  scm_i_thread *t;

  t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (SCM_UNBNDP (handler))
    t->result = scm_call_0 (thunk);
  else
    t->result = scm_catch (SCM_BOOL_T, thunk, handler);

  return 0;
}
746
/* pthread start routine for call-with-new-thread: detach (nobody
   pthread_joins Guile threads; join-thread uses the join queue
   instead), then enter guile mode and run really_launch.  */
static void *
launch_thread (void *d)
{
  launch_data *data = (launch_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_launch, d, data->parent);
  return NULL;
}
755
SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
	    (SCM thunk, SCM handler),
	    "Call @code{thunk} in a new thread and with a new dynamic state,\n"
	    "returning a new thread object representing the thread. The procedure\n"
	    "@var{thunk} is called via @code{with-continuation-barrier}.\n"
	    "\n"
	    "When @var{handler} is specified, then @var{thunk} is called from\n"
	    "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
	    "handler. This catch is established inside the continuation barrier.\n"
	    "\n"
	    "Once @var{thunk} or @var{handler} returns, the return value is made\n"
	    "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
  launch_data data;
  scm_i_pthread_t id;
  int err;

  SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
  SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
	      handler, SCM_ARG2, FUNC_NAME);

  /* DATA lives on our stack; the child copies out of it before we
     return (see really_launch).  NOTE(review): data.mutex/data.cond
     are initialized but never destroyed — harmless on common
     platforms but worth confirming.  */
  data.parent = scm_current_dynamic_state ();
  data.thunk = thunk;
  data.handler = handler;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait for the child to publish its thread object into DATA.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
#undef FUNC_NAME
799
/* C-level analogue of launch_data, used by scm_spawn_thread: the
   thunk/handler are C callbacks instead of Scheme procedures.  */
typedef struct {
  SCM parent;			/* dynamic state to inherit from */
  scm_t_catch_body body;	/* C body callback */
  void *body_data;
  scm_t_catch_handler handler;	/* C handler, or NULL for no catch */
  void *handler_data;
  SCM thread;			/* filled in by the child */
  scm_i_pthread_mutex_t mutex;
  scm_i_pthread_cond_t cond;
} spawn_data;
810
/* Body of a thread created via scm_spawn_thread, run in guile mode.
   Mirrors really_launch but invokes C callbacks.  */
static void *
really_spawn (void *d)
{
  spawn_data *data = (spawn_data *)d;
  /* Copy before signalling: D is on the parent's stack.  */
  scm_t_catch_body body = data->body;
  void *body_data = data->body_data;
  scm_t_catch_handler handler = data->handler;
  void *handler_data = data->handler_data;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  scm_i_scm_pthread_mutex_lock (&data->mutex);
  data->thread = scm_current_thread ();
  scm_i_pthread_cond_signal (&data->cond);
  scm_i_pthread_mutex_unlock (&data->mutex);

  if (handler == NULL)
    t->result = body (body_data);
  else
    t->result = scm_internal_catch (SCM_BOOL_T,
				    body, body_data,
				    handler, handler_data);

  return 0;
}
835
/* pthread start routine for scm_spawn_thread; see launch_thread for
   why the thread detaches itself.  */
static void *
spawn_thread (void *d)
{
  spawn_data *data = (spawn_data *)d;
  scm_i_pthread_detach (scm_i_pthread_self ());
  scm_i_with_guile_and_parent (really_spawn, d, data->parent);
  return NULL;
}
844
/* C-level thread creation: run BODY (BODY_DATA) in a new Guile
   thread, optionally under scm_internal_catch with HANDLER.  Returns
   the new thread object; raises a system error if the thread cannot
   be created.  Mirrors scm_call_with_new_thread.  */
SCM
scm_spawn_thread (scm_t_catch_body body, void *body_data,
		  scm_t_catch_handler handler, void *handler_data)
{
  spawn_data data;
  scm_i_pthread_t id;
  int err;

  data.parent = scm_current_dynamic_state ();
  data.body = body;
  data.body_data = body_data;
  data.handler = handler;
  data.handler_data = handler_data;
  data.thread = SCM_BOOL_F;
  scm_i_pthread_mutex_init (&data.mutex, NULL);
  scm_i_pthread_cond_init (&data.cond, NULL);

  scm_i_scm_pthread_mutex_lock (&data.mutex);
  err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
  if (err)
    {
      scm_i_pthread_mutex_unlock (&data.mutex);
      errno = err;
      scm_syserror (NULL);
    }
  /* Wait for the child to publish its thread object into DATA, which
     lives on our stack.  */
  scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
  scm_i_pthread_mutex_unlock (&data.mutex);

  return data.thread;
}
875
/* Scheme binding for sched_yield-style rescheduling; returns #t when
   the underlying yield succeeded.  */
SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
	    (),
	    "Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
  return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
884
SCM_DEFINE (scm_join_thread, "join-thread", 1, 0, 0,
	    (SCM thread),
	    "Suspend execution of the calling thread until the target @var{thread} "
	    "terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread
{
  scm_i_thread *t;
  SCM res;

  SCM_VALIDATE_THREAD (1, thread);
  if (scm_is_eq (scm_current_thread (), thread))
    SCM_MISC_ERROR ("can not join the current thread", SCM_EOL);

  /* thread_admin_mutex protects t->exited and the join queue; see
     do_thread_exit for the signalling side.  */
  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);

  t = SCM_I_THREAD_DATA (thread);
  if (!t->exited)
    {
      while (1)
	{
	  /* Sleep on the target's join queue; block_self returns with
	     thread_admin_mutex re-held.  */
	  block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
	  if (t->exited)
	    break;
	  /* Interrupted: drop the lock, run pending asyncs, retry.  */
	  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
	}
    }
  res = t->result;

  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return res;
}
#undef FUNC_NAME
919
920 /*** Fat mutexes */
921
922 /* We implement our own mutex type since we want them to be 'fair', we
923 want to do fancy things while waiting for them (like running
924 asyncs) and we might want to add things that are nice for
925 debugging.
926 */
927
/* A "fat" mutex: a pthread mutex guarding Scheme-visible ownership
   state, so locking can be fair, interruptible, and async-friendly.  */
typedef struct {
  scm_i_pthread_mutex_t lock;	/* protects the fields below */
  SCM owner;			/* owning thread object, or #f */
  int level; /* how much the owner owns us.
		< 0 for non-recursive mutexes */
  SCM waiting;    /* the threads waiting for this mutex. */
} fat_mutex;

#define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
938
/* GC mark function for mutex smobs: mark the owner, tail-mark the
   wait queue.  */
static SCM
fat_mutex_mark (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_gc_mark (m->owner);
  return m->waiting;
}
946
/* Smob free function for mutexes: destroy the underlying pthread
   mutex and release the struct.  */
static size_t
fat_mutex_free (SCM mx)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_i_pthread_mutex_destroy (&m->lock);
  scm_gc_free (m, sizeof (fat_mutex), "mutex");
  return 0;
}
955
/* Smob print function for mutexes: "#<mutex ADDR>" with the struct
   address in hex.  */
static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_mutex *m = SCM_MUTEX_DATA (mx);
  scm_puts ("#<mutex ", port);
  scm_uintprint ((scm_t_bits)m, 16, port);
  scm_puts (">", port);
  return 1;
}
965
/* Allocate and initialize a fat mutex smob.  RECURSIVE selects
   recursive (level counts from 0) vs. plain (level fixed at -1)
   locking semantics.  */
static SCM
make_fat_mutex (int recursive)
{
  fat_mutex *m;
  SCM mx;

  m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
  scm_i_pthread_mutex_init (&m->lock, NULL);
  m->owner = SCM_BOOL_F;
  m->level = recursive? 0 : -1;
  /* waiting gets a safe placeholder first; the real queue is only
     allocated after the smob exists, so a GC during make_queue sees a
     markable value.  */
  m->waiting = SCM_EOL;
  SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
  m->waiting = make_queue ();
  return mx;
}
981
/* Scheme constructor for a plain (non-recursive) mutex.  */
SCM_DEFINE (scm_make_mutex, "make-mutex", 0, 0, 0,
	    (void),
	    "Create a new mutex. ")
#define FUNC_NAME s_scm_make_mutex
{
  return make_fat_mutex (0);
}
#undef FUNC_NAME
990
/* Scheme constructor for a recursive mutex.  */
SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
	    (void),
	    "Create a new recursive mutex. ")
#define FUNC_NAME s_scm_make_recursive_mutex
{
  return make_fat_mutex (1);
}
#undef FUNC_NAME
999
/* Lock MUTEX for the current thread, blocking until ownership is
   handed over (see fat_mutex_unlock, which assigns the next owner
   directly).  Returns NULL on success, or a static error-message
   string that the caller should raise.  */
static char *
fat_mutex_lock (SCM mutex)
{
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  SCM thread = scm_current_thread ();
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      /* Relock by the owner: count up for recursive mutexes, error
	 for plain ones (level < 0).  */
      if (m->level >= 0)
	m->level++;
      else
	msg = "mutex already locked by current thread";
    }
  else
    {
      while (1)
	{
	  /* Sleep on the wait queue; ownership is transferred to us
	     by the unlocking thread, so just re-check after waking.  */
	  block_self (m->waiting, mutex, &m->lock, NULL);
	  if (scm_is_eq (m->owner, thread))
	    break;
	  /* Interrupted: run pending asyncs, then wait again.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  SCM_TICK;
	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}
1032
SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
	    (SCM mx),
	    "Lock @var{mutex}. If the mutex is already locked, the calling thread "
	    "blocks until the mutex becomes available. The function returns when "
	    "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
	    "a thread already owns will succeed right away and will not block the "
	    "thread. That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex
{
  char *msg;

  SCM_VALIDATE_MUTEX (1, mx);
  /* fat_mutex_lock returns NULL on success, else an error message.  */
  msg = fat_mutex_lock (mx);
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1051
/* Lock MUTEX for the current dynwind context: it is locked on rewind
   and unlocked on unwind, so the lock is held exactly while the
   dynamic extent is entered.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
1060
/* Try to lock M without blocking.  *RESP is set to 1 when the lock
   was acquired (or recursively re-acquired), 0 when another thread
   owns it.  Returns NULL on success, or a static error-message
   string.  */
static char *
fat_mutex_trylock (fat_mutex *m, int *resp)
{
  char *msg = NULL;
  SCM thread = scm_current_thread ();

  *resp = 1;
  scm_i_pthread_mutex_lock (&m->lock);
  if (scm_is_false (m->owner))
    m->owner = thread;
  else if (scm_is_eq (m->owner, thread))
    {
      /* Same cases as fat_mutex_lock: recursive relock vs. error.  */
      if (m->level >= 0)
	m->level++;
      else
	msg = "mutex already locked by current thread";
    }
  else
    *resp = 0;
  scm_i_pthread_mutex_unlock (&m->lock);
  return msg;
}
1083
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
	    (SCM mutex),
	    "Try to lock @var{mutex}. If the mutex is already locked by someone "
	    "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  char *msg;
  int res;

  SCM_VALIDATE_MUTEX (1, mutex);

  /* RES gets the acquired/not-acquired flag; MSG a non-NULL error.  */
  msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
  if (msg)
    scm_misc_error (NULL, msg, SCM_EOL);
  return scm_from_bool (res);
}
#undef FUNC_NAME
1101
/* Unlock M for the current thread.  For recursive mutexes an outer
   unlock just decrements the level; the final unlock hands ownership
   directly to the first waiting thread (or #f), which is what makes
   these mutexes fair.  Returns NULL on success, or a static
   error-message string.  */
static char *
fat_mutex_unlock (fat_mutex *m)
{
  char *msg = NULL;

  scm_i_scm_pthread_mutex_lock (&m->lock);
  if (!scm_is_eq (m->owner, scm_current_thread ()))
    {
      if (scm_is_false (m->owner))
	msg = "mutex not locked";
      else
	msg = "mutex not locked by current thread";
    }
  else if (m->level > 0)
    m->level--;
  else
    /* Transfer ownership to the next waiter; unblock_from_queue
       returns #f when nobody waits, leaving the mutex free.  */
    m->owner = unblock_from_queue (m->waiting);
  scm_i_pthread_mutex_unlock (&m->lock);

  return msg;
}
1123
1124 SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
1125 (SCM mx),
1126 "Unlocks @var{mutex} if the calling thread owns the lock on "
1127 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1128 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1129 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1130 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1131 "with a call to @code{unlock-mutex}. Only the last call to "
1132 "@code{unlock-mutex} will actually unlock the mutex. ")
1133 #define FUNC_NAME s_scm_unlock_mutex
1134 {
1135 char *msg;
1136 SCM_VALIDATE_MUTEX (1, mx);
1137
1138 msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
1139 if (msg)
1140 scm_misc_error (NULL, msg, SCM_EOL);
1141 return SCM_BOOL_T;
1142 }
1143 #undef FUNC_NAME
1144
/* The introspection primitives below (mutex-owner, mutex-level) are
   compiled out.  NOTE(review): no rationale is visible here — confirm
   whether they are disabled pending API review before removing.  */
#if 0

SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
	    (SCM mx),
	    "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM_VALIDATE_MUTEX (1, mx);
  return (SCM_MUTEX_DATA(mx))->owner;
}
#undef FUNC_NAME

SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of a recursive mutex, or -1\n"
	    "for a standard mutex.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME

#endif
1169
/*** Fat condition variables */

/* A "fat" condition variable: a pthread mutex protecting a queue of
   threads blocked on the condition.  Waiting/waking goes through
   block_self/unblock_from_queue rather than pthread condvars so that
   waits are interruptible by asyncs.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects WAITING */
  SCM waiting;               /* the threads waiting for this condition. */
} fat_cond;

/* Type predicate and data accessor for the condvar smob.  */
#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
1179
1180 static SCM
1181 fat_cond_mark (SCM cv)
1182 {
1183 fat_cond *c = SCM_CONDVAR_DATA (cv);
1184 return c->waiting;
1185 }
1186
1187 static size_t
1188 fat_cond_free (SCM mx)
1189 {
1190 fat_cond *c = SCM_CONDVAR_DATA (mx);
1191 scm_i_pthread_mutex_destroy (&c->lock);
1192 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1193 return 0;
1194 }
1195
1196 static int
1197 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1198 {
1199 fat_cond *c = SCM_CONDVAR_DATA (cv);
1200 scm_puts ("#<condition-variable ", port);
1201 scm_uintprint ((scm_t_bits)c, 16, port);
1202 scm_puts (">", port);
1203 return 1;
1204 }
1205
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  fat_cond *c;
  SCM cv;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  scm_i_pthread_mutex_init (&c->lock, 0);
  /* Set WAITING to a valid SCM before the smob exists, so a GC
     triggered below never marks garbage through fat_cond_mark.  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  /* Only now allocate the real queue: make_queue can trigger GC, and
     at this point CV protects C from being collected.  */
  c->waiting = make_queue ();
  return cv;
}
#undef FUNC_NAME
1222
/* Wait on condition COND, atomically releasing MUTEX, until signalled
   or until WAITTIME (absolute; NULL means wait forever) expires.
   Returns 1 when signalled, 0 on timeout; other wait errors are
   raised as system errors.  The unlock of MUTEX happens while C's
   lock is held, so no signal can slip in between unlocking and
   enqueueing ourselves.  Asyncs are blocked across the wait and the
   re-lock so that an async cannot run while the mutex is in an
   inconsistent "released but logically held" state; EINTR wakeups
   loop back and wait again after servicing asyncs.  */
static int
fat_cond_timedwait (SCM cond, SCM mutex,
		    const scm_t_timespec *waittime)
{
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  fat_cond *c = SCM_CONDVAR_DATA (cond);
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  const char *msg;
  int err = 0;

  while (1)
    {
      scm_i_scm_pthread_mutex_lock (&c->lock);
      /* Release the mutex under C's lock; failure (e.g. mutex not
	 held by us) is reported after we drop the lock.  */
      msg = fat_mutex_unlock (m);
      t->block_asyncs++;
      if (msg == NULL)
	{
	  err = block_self (c->waiting, cond, &c->lock, waittime);
	  scm_i_pthread_mutex_unlock (&c->lock);
	  /* Re-acquire the mutex before returning to the caller.  */
	  fat_mutex_lock (mutex);
	}
      else
	scm_i_pthread_mutex_unlock (&c->lock);
      t->block_asyncs--;
      scm_async_click ();

      if (msg)
	scm_misc_error (NULL, msg, SCM_EOL);

      /* Keep the smobs alive across the blocking region.  */
      scm_remember_upto_here_2 (cond, mutex);

      if (err == 0)
	return 1;
      if (err == ETIMEDOUT)
	return 0;
      if (err != EINTR)
	{
	  errno = err;
	  scm_syserror (NULL);
	}
      /* err == EINTR: loop and wait again.  */
    }
}
1265
1266 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1267 (SCM cv, SCM mx, SCM t),
1268 "Wait until @var{cond-var} has been signalled. While waiting, "
1269 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1270 "is locked again when this function returns. When @var{time} is given, "
1271 "it specifies a point in time where the waiting should be aborted. It "
1272 "can be either a integer as returned by @code{current-time} or a pair "
1273 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1274 "mutex is locked and @code{#f} is returned. When the condition "
1275 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1276 "is returned. ")
1277 #define FUNC_NAME s_scm_timed_wait_condition_variable
1278 {
1279 scm_t_timespec waittime, *waitptr = NULL;
1280
1281 SCM_VALIDATE_CONDVAR (1, cv);
1282 SCM_VALIDATE_MUTEX (2, mx);
1283
1284 if (!SCM_UNBNDP (t))
1285 {
1286 if (scm_is_pair (t))
1287 {
1288 waittime.tv_sec = scm_to_ulong (SCM_CAR (t));
1289 waittime.tv_nsec = scm_to_ulong (SCM_CAR (t)) * 1000;
1290 }
1291 else
1292 {
1293 waittime.tv_sec = scm_to_ulong (t);
1294 waittime.tv_nsec = 0;
1295 }
1296 waitptr = &waittime;
1297 }
1298
1299 return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
1300 }
1301 #undef FUNC_NAME
1302
/* Wake one thread (if any) blocked on condition C.  Taking C's lock
   first serializes against waiters that are between releasing their
   mutex and enqueueing themselves.  */
static void
fat_cond_signal (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  unblock_from_queue (c->waiting);
  scm_i_pthread_mutex_unlock (&c->lock);
}
1310
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  /* Always returns #t, whether or not a waiter was woken.  */
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1321
/* Wake every thread blocked on condition C: drain the wait queue
   until unblock_from_queue reports it empty (returns #f).  */
static void
fat_cond_broadcast (fat_cond *c)
{
  scm_i_scm_pthread_mutex_lock (&c->lock);
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
  scm_i_pthread_mutex_unlock (&c->lock);
}
1330
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  /* Always returns #t, regardless of how many waiters existed.  */
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME
1341
/*** Marking stacks */

/* XXX - what to do with this? Do we need to handle this for blocked
   threads as well?
 */
/* On IA-64 the register backing store is a second, upward-growing
   stack that can also hold SCM references, so GC must scan it (and
   the saved register context) in addition to the memory stack.  On
   all other architectures this expands to nothing.  */
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                \
    ucontext_t ctx;                                                   \
    SCM_STACKITEM * top, * bot;                                       \
    getcontext (&ctx);                                                \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,           \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM)));                                    \
    bot = (SCM_STACKITEM *) scm_ia64_register_backing_store_base ();  \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx);                   \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1361
/* GC hook: conservatively mark the C stack and saved registers of
   every thread.  All threads must already be suspended (their TOP
   pointer recorded) — asserted below.  */
void
scm_threads_mark_stacks (void)
{
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.
       */
      assert (t->top);

      /* Keep the thread smob itself alive.  */
      scm_gc_mark (t->handle);

#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      /* Registers saved at suspend time may hold the only reference
	 to an object.  */
      scm_mark_locations ((SCM_STACKITEM *) t->regs,
			  ((size_t) sizeof(t->regs)
			   / sizeof (SCM_STACKITEM)));
    }

  SCM_MARK_BACKING_STORE ();
}
1386
1387 /*** Select */
1388
/* A select(2) wrapper that cooperates with Guile's thread machinery:
   the call happens outside guile mode (so GC is not blocked), and the
   thread's sleep pipe is added to the read set so that another thread
   can interrupt the select by writing to the pipe.  A wakeup via the
   pipe is translated into the return value -1 / errno EINTR, unless a
   real fd also became ready, in which case the wakeup fd is silently
   stripped from the result.  */
int
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  /* We need a read set to put the wakeup fd in, even if the caller
     passed none.  */
  if (readfds == NULL)
    {
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Publish that we are about to sleep on the pipe; retry (servicing
     asyncs) until no async is pending.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  t->sleep_fd = -1;
  /* Save errno before re-entering guile mode, which may clobber it.  */
  eno = errno;
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* Consume the wakeup byte and hide the pipe fd from the caller.  */
      char dummy;
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  /* Only the wakeup fired: report an interrupted call.  */
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1437
1438 /* Convenience API for blocking while in guile mode. */
1439
1440 #if SCM_USE_PTHREAD_THREADS
1441
1442 int
1443 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1444 {
1445 scm_t_guile_ticket t = scm_leave_guile ();
1446 int res = scm_i_pthread_mutex_lock (mutex);
1447 scm_enter_guile (t);
1448 return res;
1449 }
1450
/* Dynwind unwind handler: DATA is the pthread mutex to release.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
1456
/* Lock the low-level pthread MUTEX and arrange for it to be released
   when the current dynwind context is left (normally or non-locally).
   Unlike scm_dynwind_lock_mutex, no rewind handler is installed, so
   re-entering the context does not re-lock.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
1463
1464 int
1465 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1466 {
1467 scm_t_guile_ticket t = scm_leave_guile ();
1468 int res = scm_i_pthread_cond_wait (cond, mutex);
1469 scm_enter_guile (t);
1470 return res;
1471 }
1472
1473 int
1474 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1475 scm_i_pthread_mutex_t *mutex,
1476 const scm_t_timespec *wt)
1477 {
1478 scm_t_guile_ticket t = scm_leave_guile ();
1479 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1480 scm_enter_guile (t);
1481 return res;
1482 }
1483
1484 #endif
1485
1486 unsigned long
1487 scm_std_usleep (unsigned long usecs)
1488 {
1489 struct timeval tv;
1490 tv.tv_usec = usecs % 1000000;
1491 tv.tv_sec = usecs / 1000000;
1492 scm_std_select (0, NULL, NULL, NULL, &tv);
1493 return tv.tv_sec * 1000000 + tv.tv_usec;
1494 }
1495
1496 unsigned int
1497 scm_std_sleep (unsigned int secs)
1498 {
1499 struct timeval tv;
1500 tv.tv_usec = 0;
1501 tv.tv_sec = secs;
1502 scm_std_select (0, NULL, NULL, NULL, &tv);
1503 return tv.tv_sec;
1504 }
1505
1506 /*** Misc */
1507
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  /* The thread smob is cached on the per-thread state.  */
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1516
1517 static SCM
1518 scm_c_make_list (size_t n, SCM fill)
1519 {
1520 SCM res = SCM_EOL;
1521 while (n-- > 0)
1522 res = scm_cons (fill, res);
1523 return res;
1524 }
1525
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  /* So: allocate a list of THREAD_COUNT cells up front, then fill the
     cars in place under the lock.  If threads exited in between, the
     surplus tail is cut off by storing SCM_EOL through *l; if threads
     were created, the snapshot is simply short by those threads.  */
  int n = thread_count;
  scm_i_thread *t;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      SCM_SETCAR (*l, t->handle);
      l = SCM_CDRLOC (*l);
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
1551
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  /* Thin Scheme-level wrapper over scm_c_thread_exited_p.  */
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
1560
/* C-level predicate: nonzero iff THREAD has exited.  Validates that
   THREAD is a thread smob (reusing the Scheme procedure's name for
   error reporting).  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
1571
/* Condvar on which threads sleep during GC; broadcast by
   scm_i_thread_wake_up.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Flag polled by running threads telling them to park for GC.  */
int scm_i_thread_go_to_sleep;
/* Set once scm_init_threads has run; gates the put-to-sleep/wake-up
   machinery during early boot.  */
static int threads_initialized_p = 0;
1575
/* Stop the world for GC: raise the go-to-sleep flag and acquire every
   thread's heap mutex, which blocks until each thread has parked.
   NOTE: this deliberately returns with thread_admin_mutex and all
   heap mutexes still held — scm_i_thread_wake_up releases them.  */
void
scm_i_thread_put_to_sleep ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep
       */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      /* All threads are parked now; clear the flag for next time.  */
      scm_i_thread_go_to_sleep = 0;
    }
}
1594
/* Mark every other thread's cell freelists as stale so they are
   cleared before reuse (GC may have swept cells those lists point
   at).  The current thread's lists are left alone.  */
void
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}
1605
/* Restart the world after GC: wake all threads sleeping on
   wake_up_cond, release every heap mutex and the admin mutex taken by
   scm_i_thread_put_to_sleep, and re-enter guile mode ourselves.  */
void
scm_i_thread_wake_up ()
{
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}
1620
/* Called by a thread that noticed scm_i_thread_go_to_sleep: suspend
   (recording the stack top for marking) and block on wake_up_cond
   until scm_i_thread_wake_up broadcasts.  */
void
scm_i_thread_sleep_for_gc ()
{
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}
1628
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
/* Nesting depth of critical sections in the current thread (the mutex
   is recursive).  */
int scm_i_critical_section_level = 0;

/* Default mutex for scm_dynwind_critical_section when the caller
   passes #f; created in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;
1635
/* Enter a critical section for the extent of the current dynwind
   context: lock MUTEX (or the shared recursive default when MUTEX is
   #f) and block asyncs, undoing both on unwind.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
1644
/*** Initialization */

/* Per-thread keys for the two cell freelists.  */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
/* Mutex for miscellaneous low-level serialization.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Attribute object used to create recursive mutexes; set up in
   scm_threads_prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
1653
/* Earliest thread-system setup, run before the heap exists.  BASE is
   the bottom of the main thread's C stack (for conservative GC).
   Ordering matters: the recursive mutex attribute must exist before
   scm_i_critical_section_mutex is created from it, and guilify_self_1
   (which registers the current thread) must come last.  */
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
1672
/* Smob type tags for threads, mutexes and condition variables;
   assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
1676
/* Register the thread/mutex/condvar smob types with their GC and
   print hooks, finish initializing the main thread, and create the
   default critical-section mutex.  Sets threads_initialized_p, which
   enables the GC stop-the-world machinery.  */
void
scm_init_threads ()
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
}
1703
1704 void
1705 scm_init_threads_default_dynamic_state ()
1706 {
1707 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
1708 scm_i_default_dynamic_state = scm_permanent_object (state);
1709 }
1710
/* Register the Scheme procedures defined by the SCM_DEFINE forms in
   this file (the snarfed registrations live in threads.x).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
1716
1717 /*
1718 Local Variables:
1719 c-file-style: "gnu"
1720 End:
1721 */