1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18
19 \f
20
21 #define _GNU_SOURCE
22
23 #include "libguile/_scm.h"
24
25 #if HAVE_UNISTD_H
26 #include <unistd.h>
27 #endif
28 #include <stdio.h>
29 #include <assert.h>
30 #if HAVE_SYS_TIME_H
31 #include <sys/time.h>
32 #endif
33
34 #include "libguile/validate.h"
35 #include "libguile/root.h"
36 #include "libguile/eval.h"
37 #include "libguile/async.h"
38 #include "libguile/ports.h"
39 #include "libguile/threads.h"
40 #include "libguile/dynwind.h"
41 #include "libguile/iselect.h"
42 #include "libguile/fluids.h"
43 #include "libguile/continuations.h"
44 #include "libguile/init.h"
45
46 /*** Queues */
47
48 /* Make an empty queue data structure.
49 */
50 static SCM
51 make_queue ()
52 {
53 return scm_cons (SCM_EOL, SCM_EOL);
54 }
55
56 /* Put T at the back of Q and return a handle that can be used with
57 remqueue to remove T from Q again.
58 */
59 static SCM
60 enqueue (SCM q, SCM t)
61 {
62 SCM c = scm_cons (t, SCM_EOL);
63 if (scm_is_null (SCM_CDR (q)))
64 SCM_SETCDR (q, c);
65 else
66 SCM_SETCDR (SCM_CAR (q), c);
67 SCM_SETCAR (q, c);
68 return c;
69 }
70
71 /* Remove the element that the handle C refers to from the queue Q. C
72 must have been returned from a call to enqueue. The return value
73 is zero when the element referred to by C has already been removed.
74 Otherwise, 1 is returned.
75 */
76 static int
77 remqueue (SCM q, SCM c)
78 {
79 SCM p, prev = q;
80 for (p = SCM_CDR (q); !scm_is_null (p); p = SCM_CDR (p))
81 {
82 if (scm_is_eq (p, c))
83 {
84 if (scm_is_eq (c, SCM_CAR (q)))
85 SCM_SETCAR (q, SCM_CDR (c));
86 SCM_SETCDR (prev, SCM_CDR (c));
87 return 1;
88 }
89 prev = p;
90 }
91 return 0;
92 }
93
94 /* Remove the front-most element from the queue Q and return it.
95 Return SCM_BOOL_F when Q is empty.
96 */
97 static SCM
98 dequeue (SCM q)
99 {
100 SCM c = SCM_CDR (q);
101 if (scm_is_null (c))
102 return SCM_BOOL_F;
103 else
104 {
105 SCM_SETCDR (q, SCM_CDR (c));
106 if (scm_is_null (SCM_CDR (q)))
107 SCM_SETCAR (q, SCM_EOL);
108 return SCM_CAR (c);
109 }
110 }
111
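/* A note on the queue representation used above: a queue Q is a cons
   cell whose CDR holds the list of enqueued elements (oldest first)
   and whose CAR caches the last pair of that list so that enqueue can
   append in constant time; an empty queue is (() . ()).  For
   illustration, with two hypothetical SCM values A and B,

     SCM q = make_queue ();
     enqueue (q, a);
     enqueue (q, b);

   leaves (a b) in the CDR of Q and the pair (b) in the CAR of Q, and
   a subsequent dequeue (q) returns A.  The handles that enqueue
   returns are just the pairs of this list, which is what remqueue
   splices out.  */
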
112 /*** Thread smob routines */
113
114 static SCM
115 thread_mark (SCM obj)
116 {
117 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
118 scm_gc_mark (t->result);
119 scm_gc_mark (t->join_queue);
120 scm_gc_mark (t->dynwinds);
121 scm_gc_mark (t->active_asyncs);
122 scm_gc_mark (t->continuation_root);
123 return t->dynamic_state;
124 }
125
126 static int
127 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
128 {
129 scm_i_thread *t = SCM_I_THREAD_DATA (exp);
130 scm_puts ("#<thread ", port);
131 scm_uintprint ((size_t)t->pthread, 10, port);
132 scm_puts (" (", port);
133 scm_uintprint ((scm_t_bits)t, 16, port);
134 scm_puts (")>", port);
135 return 1;
136 }
137
138 static size_t
139 thread_free (SCM obj)
140 {
141 scm_i_thread *t = SCM_I_THREAD_DATA (obj);
142 assert (t->exited);
143 scm_gc_free (t, sizeof (*t), "thread");
144 return 0;
145 }
146
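/* A note on the smob routines above: as usual for smob mark
   functions, thread_mark marks most of the thread's SCM-valued fields
   explicitly and returns the remaining one (the dynamic state) for
   the garbage collector to mark, and thread_free only runs once the
   thread has exited, which the assert documents.  */
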
147 /*** Blocking on queues. */
148
149 /* See also scm_i_queue_async_cell for how such a block is
150 interrupted.
151 */
152
153 /* Put the current thread on QUEUE and go to sleep, waiting for it to
154 be woken up by a call to 'unblock_from_queue', or to be
155 interrupted. Upon return of this function, the current thread is
156 no longer on QUEUE, even when the sleep has been interrupted.
157
158 The QUEUE data structure is assumed to be protected by MUTEX and
159 the caller of block_self must hold MUTEX. It will be atomically
160 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
161
162 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
163 as MUTEX is needed.
164
165 When WAITTIME is not NULL, the sleep will be aborted at that time.
166
167 The return value of block_self is an errno value. It will be zero
168 when the sleep has been successfully completed by a call to
169 unblock_from_queue, EINTR when it has been interrupted by the
170 delivery of a system async, and ETIMEDOUT when the timeout has
171 expired.
172
173 The system asyncs themselves are not executed by block_self.
174 */
175 static int
176 block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
177 const scm_t_timespec *waittime)
178 {
179 scm_i_thread *t = SCM_I_CURRENT_THREAD;
180 SCM q_handle;
181 int err;
182
183 if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
184 err = EINTR;
185 else
186 {
187 t->block_asyncs++;
188 q_handle = enqueue (queue, t->handle);
189 if (waittime == NULL)
190 err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
191 else
192 err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
193
194 /* When we are still on QUEUE, we have been interrupted. We
195 report this only when no other error (such as a timeout) has
196 happened above.
197 */
198 if (remqueue (queue, q_handle) && err == 0)
199 err = EINTR;
200 t->block_asyncs--;
201 scm_i_reset_sleep (t);
202 }
203
204 return err;
205 }
206
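/* For illustration only, a sketch of the waiting pattern used with
   block_self by scm_join_thread and fat_mutex_lock below (the queue,
   mutex, object and predicate names are hypothetical):

     scm_i_scm_pthread_mutex_lock (&some_mutex);
     while (!condition_is_true ())
       {
         block_self (some_queue, some_object, &some_mutex, NULL);
         if (condition_is_true ())
           break;
         scm_i_pthread_mutex_unlock (&some_mutex);
         SCM_TICK;
         scm_i_scm_pthread_mutex_lock (&some_mutex);
       }
     scm_i_pthread_mutex_unlock (&some_mutex);

   The SCM_TICK in the loop is what actually runs the asyncs whose
   delivery made block_self return EINTR.  */
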
207 /* Wake up the first thread on QUEUE, if any. The caller must hold
208 the mutex that protects QUEUE. The awoken thread is returned, or
209 #f when the queue was empty.
210 */
211 static SCM
212 unblock_from_queue (SCM queue)
213 {
214 SCM thread = dequeue (queue);
215 if (scm_is_true (thread))
216 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
217 return thread;
218 }
219
220 /* Getting into and out of guile mode.
221 */
222
223 scm_i_pthread_key_t scm_i_thread_key;
224
225 static void
226 resume (scm_i_thread *t)
227 {
228 t->top = NULL;
229 if (t->clear_freelists_p)
230 {
231 *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
232 *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
233 t->clear_freelists_p = 0;
234 }
235 }
236
237 void
238 scm_enter_guile (scm_t_guile_ticket ticket)
239 {
240 scm_i_thread *t = (scm_i_thread *)ticket;
241 if (t)
242 {
243 scm_i_pthread_mutex_lock (&t->heap_mutex);
244 resume (t);
245 }
246 }
247
248 static scm_i_thread *
249 suspend (void)
250 {
251 scm_i_thread *t = SCM_I_CURRENT_THREAD;
252
253 /* record top of stack for the GC */
254 t->top = SCM_STACK_PTR (&t);
255 /* save registers. */
256 SCM_FLUSH_REGISTER_WINDOWS;
257 setjmp (t->regs);
258 return t;
259 }
260
261 scm_t_guile_ticket
262 scm_leave_guile ()
263 {
264 scm_i_thread *t = suspend ();
265 scm_i_pthread_mutex_unlock (&t->heap_mutex);
266 return (scm_t_guile_ticket) t;
267 }
268
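/* A note on the ticket protocol above: a thread is "in guile mode"
   exactly while it holds its own heap_mutex.  scm_leave_guile records
   the current stack top and register contents via suspend, so that
   scm_threads_mark_stacks can still scan this thread's stack, and
   then releases the heap_mutex; the ticket it returns is simply the
   scm_i_thread pointer, to be handed back to scm_enter_guile later.
   Acquiring every thread's heap_mutex is also how the GC stops the
   world, see scm_i_thread_put_to_sleep below.  */
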
269 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
270 static scm_i_thread *all_threads = NULL;
271 static int thread_count;
272
273 static SCM scm_i_default_dynamic_state;
274
275 /* Perform first stage of thread initialisation, in non-guile mode.
276 */
277 static void
278 guilify_self_1 (SCM_STACKITEM *base)
279 {
280 scm_i_thread *t = malloc (sizeof (scm_i_thread));
281
282 t->pthread = scm_i_pthread_self ();
283 t->handle = SCM_BOOL_F;
284 t->result = SCM_BOOL_F;
285 t->join_queue = SCM_EOL;
286 t->dynamic_state = SCM_BOOL_F;
287 t->dynwinds = SCM_EOL;
288 t->active_asyncs = SCM_EOL;
289 t->block_asyncs = 1;
290 t->pending_asyncs = 1;
291 t->last_debug_frame = NULL;
292 t->base = base;
293 t->continuation_root = SCM_EOL;
294 t->continuation_base = base;
295 scm_i_pthread_cond_init (&t->sleep_cond, NULL);
296 t->sleep_mutex = NULL;
297 t->sleep_object = SCM_BOOL_F;
298 t->sleep_fd = -1;
299 /* XXX - check for errors. */
300 pipe (t->sleep_pipe);
301 scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
302 t->clear_freelists_p = 0;
303 t->gc_running_p = 0;
304 t->exited = 0;
305
306 t->freelist = SCM_EOL;
307 t->freelist2 = SCM_EOL;
308 SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
309 SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
310
311 scm_i_pthread_setspecific (scm_i_thread_key, t);
312
313 scm_i_pthread_mutex_lock (&t->heap_mutex);
314
315 scm_i_pthread_mutex_lock (&thread_admin_mutex);
316 t->next_thread = all_threads;
317 all_threads = t;
318 thread_count++;
319 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
320 }
321
322 /* Perform second stage of thread initialisation, in guile mode.
323 */
324 static void
325 guilify_self_2 (SCM parent)
326 {
327 scm_i_thread *t = SCM_I_CURRENT_THREAD;
328
329 SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
330 scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
331 t->continuation_root = scm_cons (t->handle, SCM_EOL);
332 t->continuation_base = t->base;
333
334 if (scm_is_true (parent))
335 t->dynamic_state = scm_make_dynamic_state (parent);
336 else
337 t->dynamic_state = scm_i_make_initial_dynamic_state ();
338
339 t->join_queue = make_queue ();
340 t->block_asyncs = 0;
341 }
342
343 /* Perform thread tear-down, in guile mode.
344 */
345 static void *
346 do_thread_exit (void *v)
347 {
348 scm_i_thread *t = (scm_i_thread *)v;
349
350 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
351
352 t->exited = 1;
353 close (t->sleep_pipe[0]);
354 close (t->sleep_pipe[1]);
355 while (scm_is_true (unblock_from_queue (t->join_queue)))
356 ;
357
358 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
359 return NULL;
360 }
361
362 static void
363 on_thread_exit (void *v)
364 {
365 scm_i_thread *t = (scm_i_thread *)v, **tp;
366
367 scm_i_pthread_setspecific (scm_i_thread_key, v);
368
369 /* Unblocking the joining threads needs to happen in guile mode
370 since the queue is a SCM data structure.
371 */
372 scm_with_guile (do_thread_exit, v);
373
374 /* Removing ourselves from the list of all threads needs to happen in
375 non-guile mode since all SCM values on our stack become
376 unprotected once we are no longer in the list.
377 */
378 scm_leave_guile ();
379 scm_i_pthread_mutex_lock (&thread_admin_mutex);
380 for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
381 if (*tp == t)
382 {
383 *tp = t->next_thread;
384 break;
385 }
386 thread_count--;
387 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
388
389 scm_i_pthread_setspecific (scm_i_thread_key, NULL);
390 }
391
392 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
393
394 static void
395 init_thread_key (void)
396 {
397 scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
398 }
399
400 /* Perform any initializations necessary to bring the current thread
401 into guile mode, initializing Guile itself, if necessary.
402
403 BASE is the stack base to use with GC.
404
405 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
406 which case the default dynamic state is used.
407
408 Return zero when the thread was in guile mode already; otherwise
409 return 1.
410 */
411
412 static int
413 scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
414 {
415 scm_i_thread *t;
416
417 scm_i_pthread_once (&init_thread_key_once, init_thread_key);
418
419 if ((t = SCM_I_CURRENT_THREAD) == NULL)
420 {
421 /* This thread has not been guilified yet.
422 */
423
424 scm_i_pthread_mutex_lock (&scm_i_init_mutex);
425 if (scm_initialized_p == 0)
426 {
427 /* First thread ever to enter Guile. Run the full
428 initialization.
429 */
430 scm_i_init_guile (base);
431 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
432 }
433 else
434 {
435 /* Guile is already initialized, but this thread enters it for
436 the first time. Only initialize this thread.
437 */
438 scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
439 guilify_self_1 (base);
440 guilify_self_2 (parent);
441 }
442 return 1;
443 }
444 else if (t->top)
445 {
446 /* This thread is already guilified but not in guile mode, just
447 resume it.
448
449 XXX - base might be lower than when this thread was first
450 guilified.
451 */
452 scm_enter_guile ((scm_t_guile_ticket) t);
453 return 1;
454 }
455 else
456 {
457 /* Thread is already in guile mode. Nothing to do.
458 */
459 return 0;
460 }
461 }
462
463 #ifdef HAVE_LIBC_STACK_END
464
465 extern void *__libc_stack_end;
466
467 #if SCM_USE_PTHREAD_THREADS
468 #ifdef HAVE_PTHREAD_ATTR_GETSTACK
469
470 #define HAVE_GET_THREAD_STACK_BASE
471
472 static SCM_STACKITEM *
473 get_thread_stack_base ()
474 {
475 pthread_attr_t attr;
476 void *start, *end;
477 size_t size;
478
479 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
480 for the main thread, but we can use __libc_stack_end in that
481 case.
482 */
483
484 pthread_getattr_np (pthread_self (), &attr);
485 pthread_attr_getstack (&attr, &start, &size);
486 end = (char *)start + size;
487
488 if ((void *)&attr < start || (void *)&attr >= end)
489 return __libc_stack_end;
490 else
491 {
492 #if SCM_STACK_GROWS_UP
493 return start;
494 #else
495 return end;
496 #endif
497 }
498 }
499
500 #endif /* HAVE_PTHREAD_ATTR_GETSTACK */
501
502 #else /* !SCM_USE_PTHREAD_THREADS */
503
504 #define HAVE_GET_THREAD_STACK_BASE
505
506 static SCM_STACKITEM *
507 get_thread_stack_base ()
508 {
509 return __libc_stack_end;
510 }
511
512 #endif /* !SCM_USE_PTHREAD_THREADS */
513 #endif /* HAVE_LIBC_STACK_END */
514
515 #ifdef HAVE_GET_THREAD_STACK_BASE
516
517 void
518 scm_init_guile ()
519 {
520 scm_i_init_thread_for_guile (get_thread_stack_base (),
521 scm_i_default_dynamic_state);
522 }
523
524 #endif
525
526 void *
527 scm_with_guile (void *(*func)(void *), void *data)
528 {
529 return scm_i_with_guile_and_parent (func, data,
530 scm_i_default_dynamic_state);
531 }
532
533 void *
534 scm_i_with_guile_and_parent (void *(*func)(void *), void *data,
535 SCM parent)
536 {
537 void *res;
538 int really_entered;
539 SCM_STACKITEM base_item;
540 really_entered = scm_i_init_thread_for_guile (&base_item, parent);
541 res = scm_c_with_continuation_barrier (func, data);
542 if (really_entered)
543 scm_leave_guile ();
544 return res;
545 }
546
547 void *
548 scm_without_guile (void *(*func)(void *), void *data)
549 {
550 void *res;
551 scm_t_guile_ticket t;
552 t = scm_leave_guile ();
553 res = func (data);
554 scm_enter_guile (t);
555 return res;
556 }
557
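/* For illustration only, a hypothetical standalone program (not part
   of this file) showing the intended use of the two entry points
   above: work that needs guile mode is wrapped in scm_with_guile,
   and long-running non-Guile work is wrapped in scm_without_guile so
   that it does not hold up the garbage collector:

     static void *
     do_work (void *data)
     {
       scm_c_eval_string ("(display \"hello from guile mode\\n\")");
       return NULL;
     }

     int
     main (int argc, char **argv)
     {
       scm_with_guile (do_work, NULL);
       return 0;
     }
*/
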
558 /*** Thread creation */
559
560 typedef struct {
561 SCM parent;
562 SCM thunk;
563 SCM handler;
564 SCM thread;
565 scm_i_pthread_mutex_t mutex;
566 scm_i_pthread_cond_t cond;
567 } launch_data;
568
569 static void *
570 really_launch (void *d)
571 {
572 launch_data *data = (launch_data *)d;
573 SCM thunk = data->thunk, handler = data->handler;
574 scm_i_thread *t;
575
576 t = SCM_I_CURRENT_THREAD;
577
578 scm_i_scm_pthread_mutex_lock (&data->mutex);
579 data->thread = scm_current_thread ();
580 scm_i_pthread_cond_signal (&data->cond);
581 scm_i_pthread_mutex_unlock (&data->mutex);
582
583 if (SCM_UNBNDP (handler))
584 t->result = scm_call_0 (thunk);
585 else
586 t->result = scm_catch (SCM_BOOL_T, thunk, handler);
587
588 return 0;
589 }
590
591 static void *
592 launch_thread (void *d)
593 {
594 launch_data *data = (launch_data *)d;
595 scm_i_pthread_detach (scm_i_pthread_self ());
596 scm_i_with_guile_and_parent (really_launch, d, data->parent);
597 return NULL;
598 }
599
600 SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
601 (SCM thunk, SCM handler),
602 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
603 "returning a new thread object representing the thread. The procedure\n"
604 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
605 "\n"
606 "When @var{handler} is specified, then @var{thunk} is called from\n"
607 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
608 "handler. This catch is established inside the continuation barrier.\n"
609 "\n"
610 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
611 "the @emph{exit value} of the thread and the thread is terminated.")
612 #define FUNC_NAME s_scm_call_with_new_thread
613 {
614 launch_data data;
615 scm_i_pthread_t id;
616 int err;
617
618 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
619 SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
620 handler, SCM_ARG2, FUNC_NAME);
621
622 data.parent = scm_current_dynamic_state ();
623 data.thunk = thunk;
624 data.handler = handler;
625 data.thread = SCM_BOOL_F;
626 scm_i_pthread_mutex_init (&data.mutex, NULL);
627 scm_i_pthread_cond_init (&data.cond, NULL);
628
629 scm_i_scm_pthread_mutex_lock (&data.mutex);
630 err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
631 if (err)
632 {
633 scm_i_pthread_mutex_unlock (&data.mutex);
634 errno = err;
635 scm_syserror (NULL);
636 }
637 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
638 scm_i_pthread_mutex_unlock (&data.mutex);
639
640 return data.thread;
641 }
642 #undef FUNC_NAME
643
644 typedef struct {
645 SCM parent;
646 scm_t_catch_body body;
647 void *body_data;
648 scm_t_catch_handler handler;
649 void *handler_data;
650 SCM thread;
651 scm_i_pthread_mutex_t mutex;
652 scm_i_pthread_cond_t cond;
653 } spawn_data;
654
655 static void *
656 really_spawn (void *d)
657 {
658 spawn_data *data = (spawn_data *)d;
659 scm_t_catch_body body = data->body;
660 void *body_data = data->body_data;
661 scm_t_catch_handler handler = data->handler;
662 void *handler_data = data->handler_data;
663 scm_i_thread *t = SCM_I_CURRENT_THREAD;
664
665 scm_i_scm_pthread_mutex_lock (&data->mutex);
666 data->thread = scm_current_thread ();
667 scm_i_pthread_cond_signal (&data->cond);
668 scm_i_pthread_mutex_unlock (&data->mutex);
669
670 if (handler == NULL)
671 t->result = body (body_data);
672 else
673 t->result = scm_internal_catch (SCM_BOOL_T,
674 body, body_data,
675 handler, handler_data);
676
677 return 0;
678 }
679
680 static void *
681 spawn_thread (void *d)
682 {
683 spawn_data *data = (spawn_data *)d;
684 scm_i_pthread_detach (scm_i_pthread_self ());
685 scm_i_with_guile_and_parent (really_spawn, d, data->parent);
686 return NULL;
687 }
688
689 SCM
690 scm_spawn_thread (scm_t_catch_body body, void *body_data,
691 scm_t_catch_handler handler, void *handler_data)
692 {
693 spawn_data data;
694 scm_i_pthread_t id;
695 int err;
696
697 data.parent = scm_current_dynamic_state ();
698 data.body = body;
699 data.body_data = body_data;
700 data.handler = handler;
701 data.handler_data = handler_data;
702 data.thread = SCM_BOOL_F;
703 scm_i_pthread_mutex_init (&data.mutex, NULL);
704 scm_i_pthread_cond_init (&data.cond, NULL);
705
706 scm_i_scm_pthread_mutex_lock (&data.mutex);
707 err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
708 if (err)
709 {
710 scm_i_pthread_mutex_unlock (&data.mutex);
711 errno = err;
712 scm_syserror (NULL);
713 }
714 scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
715 scm_i_pthread_mutex_unlock (&data.mutex);
716
717 return data.thread;
718 }
719
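/* For illustration only, a sketch of how a C caller might use
   scm_spawn_thread (the callback names are hypothetical; BODY and
   HANDLER have the scm_t_catch_body and scm_t_catch_handler
   signatures from throw.h):

     static SCM
     my_body (void *data)
     {
       return scm_from_int (42);
     }

     static SCM
     my_handler (void *data, SCM key, SCM args)
     {
       return SCM_BOOL_F;
     }

     static SCM
     spawn_and_join (void)
     {
       SCM thread = scm_spawn_thread (my_body, NULL, my_handler, NULL);
       return scm_join_thread (thread);
     }
*/
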
720 SCM_DEFINE (scm_yield, "yield", 0, 0, 0,
721 (),
722 "Move the calling thread to the end of the scheduling queue.")
723 #define FUNC_NAME s_scm_yield
724 {
725 return scm_from_bool (scm_i_sched_yield ());
726 }
727 #undef FUNC_NAME
728
729 SCM_DEFINE (scm_join_thread, "join-thread", 1, 0, 0,
730 (SCM thread),
731 "Suspend execution of the calling thread until the target @var{thread} "
732 "terminates, unless the target @var{thread} has already terminated. ")
733 #define FUNC_NAME s_scm_join_thread
734 {
735 scm_i_thread *t;
736 SCM res;
737
738 SCM_VALIDATE_THREAD (1, thread);
739 if (scm_is_eq (scm_current_thread (), thread))
740 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL);
741
742 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
743
744 t = SCM_I_THREAD_DATA (thread);
745 if (!t->exited)
746 {
747 while (1)
748 {
749 block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
750 if (t->exited)
751 break;
752 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
753 SCM_TICK;
754 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
755 }
756 }
757 res = t->result;
758
759 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
760 return res;
761 }
762 #undef FUNC_NAME
763
764 /*** Fat mutexes */
765
766 /* We implement our own mutex type since we want them to be 'fair', we
767 want to do fancy things while waiting for them (like running
768 asyncs) and we might want to add things that are nice for
769 debugging.
770 */
771
772 typedef struct {
773 scm_i_pthread_mutex_t lock;
774 SCM owner;
775 int level; /* how much the owner owns us.
776 < 0 for non-recursive mutexes */
777 SCM waiting; /* the threads waiting for this mutex. */
778 } fat_mutex;
779
780 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
781 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
782
783 static SCM
784 fat_mutex_mark (SCM mx)
785 {
786 fat_mutex *m = SCM_MUTEX_DATA (mx);
787 scm_gc_mark (m->owner);
788 return m->waiting;
789 }
790
791 static size_t
792 fat_mutex_free (SCM mx)
793 {
794 fat_mutex *m = SCM_MUTEX_DATA (mx);
795 scm_i_pthread_mutex_destroy (&m->lock);
796 scm_gc_free (m, sizeof (fat_mutex), "mutex");
797 return 0;
798 }
799
800 static int
801 fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
802 {
803 fat_mutex *m = SCM_MUTEX_DATA (mx);
804 scm_puts ("#<mutex ", port);
805 scm_uintprint ((scm_t_bits)m, 16, port);
806 scm_puts (">", port);
807 return 1;
808 }
809
810 static SCM
811 make_fat_mutex (int recursive)
812 {
813 fat_mutex *m;
814 SCM mx;
815
816 m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
817 scm_i_pthread_mutex_init (&m->lock, NULL);
818 m->owner = SCM_BOOL_F;
819 m->level = recursive? 0 : -1;
820 m->waiting = SCM_EOL;
821 SCM_NEWSMOB (mx, scm_tc16_mutex, (scm_t_bits) m);
822 m->waiting = make_queue ();
823 return mx;
824 }
825
826 SCM_DEFINE (scm_make_mutex, "make-mutex", 0, 0, 0,
827 (void),
828 "Create a new mutex. ")
829 #define FUNC_NAME s_scm_make_mutex
830 {
831 return make_fat_mutex (0);
832 }
833 #undef FUNC_NAME
834
835 SCM_DEFINE (scm_make_recursive_mutex, "make-recursive-mutex", 0, 0, 0,
836 (void),
837 "Create a new recursive mutex. ")
838 #define FUNC_NAME s_scm_make_recursive_mutex
839 {
840 return make_fat_mutex (1);
841 }
842 #undef FUNC_NAME
843
844 static char *
845 fat_mutex_lock (SCM mutex)
846 {
847 fat_mutex *m = SCM_MUTEX_DATA (mutex);
848 SCM thread = scm_current_thread ();
849 char *msg = NULL;
850
851 scm_i_scm_pthread_mutex_lock (&m->lock);
852 if (scm_is_false (m->owner))
853 m->owner = thread;
854 else if (scm_is_eq (m->owner, thread))
855 {
856 if (m->level >= 0)
857 m->level++;
858 else
859 msg = "mutex already locked by current thread";
860 }
861 else
862 {
863 while (1)
864 {
865 block_self (m->waiting, mutex, &m->lock, NULL);
866 if (scm_is_eq (m->owner, thread))
867 break;
868 scm_i_pthread_mutex_unlock (&m->lock);
869 SCM_TICK;
870 scm_i_scm_pthread_mutex_lock (&m->lock);
871 }
872 }
873 scm_i_pthread_mutex_unlock (&m->lock);
874 return msg;
875 }
876
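/* A note on fairness: fat_mutex_unlock below hands the mutex directly
   to the first waiting thread by making it the new owner before
   signalling it.  That is why the wait loop in fat_mutex_lock only
   checks whether we have become the owner after waking up, instead of
   racing with the other waiters to re-acquire the lock.  */
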
877 SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
878 (SCM mx),
879 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
880 "blocks until the mutex becomes available. The function returns when "
881 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
882 "a thread already owns will succeed right away and will not block the "
883 "thread. That is, Guile's mutexes are @emph{recursive}. ")
884 #define FUNC_NAME s_scm_lock_mutex
885 {
886 char *msg;
887 SCM_VALIDATE_MUTEX (1, mx);
888
889 msg = fat_mutex_lock (mx);
890 if (msg)
891 scm_misc_error (NULL, msg, SCM_EOL);
892 return SCM_BOOL_T;
893 }
894 #undef FUNC_NAME
895
896 void
897 scm_frame_lock_mutex (SCM mutex)
898 {
899 scm_frame_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
900 SCM_F_WIND_EXPLICITLY);
901 scm_frame_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
902 SCM_F_WIND_EXPLICITLY);
903 }
904
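/* For illustration only, a sketch of how scm_frame_lock_mutex is
   meant to be used (assuming the scm_frame_begin/scm_frame_end pair
   from dynwind.h, and a hypothetical THUNK_THAT_MIGHT_THROW):

     scm_frame_begin (0);
     scm_frame_lock_mutex (mutex);
     scm_call_0 (thunk_that_might_throw);
     scm_frame_end ();

   The mutex is unlocked when the frame is left, whether by
   scm_frame_end or by a non-local exit.  */
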
905 static char *
906 fat_mutex_trylock (fat_mutex *m, int *resp)
907 {
908 char *msg = NULL;
909 SCM thread = scm_current_thread ();
910
911 *resp = 1;
912 scm_i_pthread_mutex_lock (&m->lock);
913 if (scm_is_false (m->owner))
914 m->owner = thread;
915 else if (scm_is_eq (m->owner, thread))
916 {
917 if (m->level >= 0)
918 m->level++;
919 else
920 msg = "mutex already locked by current thread";
921 }
922 else
923 *resp = 0;
924 scm_i_pthread_mutex_unlock (&m->lock);
925 return msg;
926 }
927
928 SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
929 (SCM mutex),
930 "Try to lock @var{mutex}. If the mutex is already locked by someone "
931 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
932 #define FUNC_NAME s_scm_try_mutex
933 {
934 char *msg;
935 int res;
936
937 SCM_VALIDATE_MUTEX (1, mutex);
938
939 msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
940 if (msg)
941 scm_misc_error (NULL, msg, SCM_EOL);
942 return scm_from_bool (res);
943 }
944 #undef FUNC_NAME
945
946 static char *
947 fat_mutex_unlock (fat_mutex *m)
948 {
949 char *msg = NULL;
950
951 scm_i_scm_pthread_mutex_lock (&m->lock);
952 if (!scm_is_eq (m->owner, scm_current_thread ()))
953 {
954 if (scm_is_false (m->owner))
955 msg = "mutex not locked";
956 else
957 msg = "mutex not locked by current thread";
958 }
959 else if (m->level > 0)
960 m->level--;
961 else
962 m->owner = unblock_from_queue (m->waiting);
963 scm_i_pthread_mutex_unlock (&m->lock);
964
965 return msg;
966 }
967
968 SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
969 (SCM mx),
970 "Unlocks @var{mutex} if the calling thread owns the lock on "
971 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
972 "thread results in undefined behaviour. Once a mutex has been unlocked, "
973 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
974 "lock. Every call to @code{lock-mutex} by this thread must be matched "
975 "with a call to @code{unlock-mutex}. Only the last call to "
976 "@code{unlock-mutex} will actually unlock the mutex. ")
977 #define FUNC_NAME s_scm_unlock_mutex
978 {
979 char *msg;
980 SCM_VALIDATE_MUTEX (1, mx);
981
982 msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
983 if (msg)
984 scm_misc_error (NULL, msg, SCM_EOL);
985 return SCM_BOOL_T;
986 }
987 #undef FUNC_NAME
988
989 #if 0
990
991 SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
992 (SCM mx),
993 "Return the thread owning @var{mx}, or @code{#f}.")
994 #define FUNC_NAME s_scm_mutex_owner
995 {
996 SCM_VALIDATE_MUTEX (1, mx);
997 return (SCM_MUTEX_DATA(mx))->owner;
998 }
999 #undef FUNC_NAME
1000
1001 SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
1002 (SCM mx),
1003 "Return the lock level of a recursive mutex, or -1\n"
1004 "for a standard mutex.")
1005 #define FUNC_NAME s_scm_mutex_level
1006 {
1007 SCM_VALIDATE_MUTEX (1, mx);
1008 return scm_from_int (SCM_MUTEX_DATA(mx)->level);
1009 }
1010 #undef FUNC_NAME
1011
1012 #endif
1013
1014 /*** Fat condition variables */
1015
1016 typedef struct {
1017 scm_i_pthread_mutex_t lock;
1018 SCM waiting; /* the threads waiting for this condition. */
1019 } fat_cond;
1020
1021 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1022 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1023
1024 static SCM
1025 fat_cond_mark (SCM cv)
1026 {
1027 fat_cond *c = SCM_CONDVAR_DATA (cv);
1028 return c->waiting;
1029 }
1030
1031 static size_t
1032 fat_cond_free (SCM mx)
1033 {
1034 fat_cond *c = SCM_CONDVAR_DATA (mx);
1035 scm_i_pthread_mutex_destroy (&c->lock);
1036 scm_gc_free (c, sizeof (fat_cond), "condition-variable");
1037 return 0;
1038 }
1039
1040 static int
1041 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
1042 {
1043 fat_cond *c = SCM_CONDVAR_DATA (cv);
1044 scm_puts ("#<condition-variable ", port);
1045 scm_uintprint ((scm_t_bits)c, 16, port);
1046 scm_puts (">", port);
1047 return 1;
1048 }
1049
1050 SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
1051 (void),
1052 "Make a new condition variable.")
1053 #define FUNC_NAME s_scm_make_condition_variable
1054 {
1055 fat_cond *c;
1056 SCM cv;
1057
1058 c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
1059 scm_i_pthread_mutex_init (&c->lock, 0);
1060 c->waiting = SCM_EOL;
1061 SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
1062 c->waiting = make_queue ();
1063 return cv;
1064 }
1065 #undef FUNC_NAME
1066
1067 static int
1068 fat_cond_timedwait (SCM cond, SCM mutex,
1069 const scm_t_timespec *waittime)
1070 {
1071 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1072 fat_cond *c = SCM_CONDVAR_DATA (cond);
1073 fat_mutex *m = SCM_MUTEX_DATA (mutex);
1074 const char *msg;
1075 int err = 0;
1076
1077 while (1)
1078 {
1079 scm_i_scm_pthread_mutex_lock (&c->lock);
1080 msg = fat_mutex_unlock (m);
1081 t->block_asyncs++;
1082 if (msg == NULL)
1083 {
1084 err = block_self (c->waiting, cond, &c->lock, waittime);
1085 scm_i_pthread_mutex_unlock (&c->lock);
1086 fat_mutex_lock (mutex);
1087 }
1088 else
1089 scm_i_pthread_mutex_unlock (&c->lock);
1090 t->block_asyncs--;
1091 scm_async_click ();
1092
1093 if (msg)
1094 scm_misc_error (NULL, msg, SCM_EOL);
1095
1096 scm_remember_upto_here_2 (cond, mutex);
1097
1098 if (err == 0)
1099 return 1;
1100 if (err == ETIMEDOUT)
1101 return 0;
1102 if (err != EINTR)
1103 {
1104 errno = err;
1105 scm_syserror (NULL);
1106 }
1107 }
1108 }
1109
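/* A note on the loop above: block_self returns EINTR when a system
   async interrupts the wait; the async is run by scm_async_click and
   the wait is then retried with the condition variable.  Only a real
   wakeup (err == 0) or a timeout (ETIMEDOUT) ends the loop, yielding
   the 1 and 0 return values used by wait-condition-variable below.  */
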
1110 SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
1111 (SCM cv, SCM mx, SCM t),
1112 "Wait until @var{cond-var} has been signalled. While waiting, "
1113 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1114 "is locked again when this function returns. When @var{time} is given, "
1115 "it specifies a point in time where the waiting should be aborted. It "
1116 "can be either a integer as returned by @code{current-time} or a pair "
1117 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1118 "mutex is locked and @code{#f} is returned. When the condition "
1119 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1120 "is returned. ")
1121 #define FUNC_NAME s_scm_timed_wait_condition_variable
1122 {
1123 scm_t_timespec waittime, *waitptr = NULL;
1124
1125 SCM_VALIDATE_CONDVAR (1, cv);
1126 SCM_VALIDATE_MUTEX (2, mx);
1127
1128 if (!SCM_UNBNDP (t))
1129 {
1130 if (scm_is_pair (t))
1131 {
1132 waittime.tv_sec = scm_to_ulong (SCM_CAR (t));
1133 waittime.tv_nsec = scm_to_ulong (SCM_CDR (t)) * 1000;
1134 }
1135 else
1136 {
1137 waittime.tv_sec = scm_to_ulong (t);
1138 waittime.tv_nsec = 0;
1139 }
1140 waitptr = &waittime;
1141 }
1142
1143 return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
1144 }
1145 #undef FUNC_NAME
1146
1147 static void
1148 fat_cond_signal (fat_cond *c)
1149 {
1150 scm_i_scm_pthread_mutex_lock (&c->lock);
1151 unblock_from_queue (c->waiting);
1152 scm_i_pthread_mutex_unlock (&c->lock);
1153 }
1154
1155 SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
1156 (SCM cv),
1157 "Wake up one thread that is waiting for @var{cv}")
1158 #define FUNC_NAME s_scm_signal_condition_variable
1159 {
1160 SCM_VALIDATE_CONDVAR (1, cv);
1161 fat_cond_signal (SCM_CONDVAR_DATA (cv));
1162 return SCM_BOOL_T;
1163 }
1164 #undef FUNC_NAME
1165
1166 static void
1167 fat_cond_broadcast (fat_cond *c)
1168 {
1169 scm_i_scm_pthread_mutex_lock (&c->lock);
1170 while (scm_is_true (unblock_from_queue (c->waiting)))
1171 ;
1172 scm_i_pthread_mutex_unlock (&c->lock);
1173 }
1174
1175 SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
1176 (SCM cv),
1177 "Wake up all threads that are waiting for @var{cv}. ")
1178 #define FUNC_NAME s_scm_broadcast_condition_variable
1179 {
1180 SCM_VALIDATE_CONDVAR (1, cv);
1181 fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
1182 return SCM_BOOL_T;
1183 }
1184 #undef FUNC_NAME
1185
1186 /*** Marking stacks */
1187
1188 /* XXX - what to do with this? Do we need to handle this for blocked
1189 threads as well?
1190 */
1191 #ifdef __ia64__
1192 # define SCM_MARK_BACKING_STORE() do { \
1193 ucontext_t ctx; \
1194 SCM_STACKITEM * top, * bot; \
1195 getcontext (&ctx); \
1196 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1197 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1198 / sizeof (SCM_STACKITEM))); \
1199 bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
1200 top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
1201 scm_mark_locations (bot, top - bot); } while (0)
1202 #else
1203 # define SCM_MARK_BACKING_STORE()
1204 #endif
1205
1206 void
1207 scm_threads_mark_stacks (void)
1208 {
1209 scm_i_thread *t;
1210 for (t = all_threads; t; t = t->next_thread)
1211 {
1212 /* Check that thread has indeed been suspended.
1213 */
1214 assert (t->top);
1215
1216 scm_gc_mark (t->handle);
1217
1218 #if SCM_STACK_GROWS_UP
1219 scm_mark_locations (t->base, t->top - t->base);
1220 #else
1221 scm_mark_locations (t->top, t->base - t->top);
1222 #endif
1223 scm_mark_locations ((SCM_STACKITEM *) t->regs,
1224 ((size_t) sizeof(t->regs)
1225 / sizeof (SCM_STACKITEM)));
1226 }
1227
1228 SCM_MARK_BACKING_STORE ();
1229 }
1230
1231 /*** Select */
1232
1233 int
1234 scm_std_select (int nfds,
1235 SELECT_TYPE *readfds,
1236 SELECT_TYPE *writefds,
1237 SELECT_TYPE *exceptfds,
1238 struct timeval *timeout)
1239 {
1240 fd_set my_readfds;
1241 int res, eno, wakeup_fd;
1242 scm_i_thread *t = SCM_I_CURRENT_THREAD;
1243 scm_t_guile_ticket ticket;
1244
1245 if (readfds == NULL)
1246 {
1247 FD_ZERO (&my_readfds);
1248 readfds = &my_readfds;
1249 }
1250
1251 while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
1252 SCM_TICK;
1253
1254 wakeup_fd = t->sleep_pipe[0];
1255 ticket = scm_leave_guile ();
1256 FD_SET (wakeup_fd, readfds);
1257 if (wakeup_fd >= nfds)
1258 nfds = wakeup_fd+1;
1259 res = select (nfds, readfds, writefds, exceptfds, timeout);
1260 t->sleep_fd = -1;
1261 eno = errno;
1262 scm_enter_guile (ticket);
1263
1264 scm_i_reset_sleep (t);
1265
1266 if (res > 0 && FD_ISSET (wakeup_fd, readfds))
1267 {
1268 char dummy;
1269 read (wakeup_fd, &dummy, 1);
1270 FD_CLR (wakeup_fd, readfds);
1271 res -= 1;
1272 if (res == 0)
1273 {
1274 eno = EINTR;
1275 res = -1;
1276 }
1277 }
1278 errno = eno;
1279 return res;
1280 }
1281
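/* A note on the wakeup mechanism above: scm_i_setup_sleep registers
   the writing end of the per-thread sleep pipe with the async
   machinery (see scm_i_queue_async_cell in async.c), while the
   reading end is added to READFDS here.  Queueing an async for a
   thread blocked in select therefore makes select return; the byte is
   drained and, if nothing else was ready, the call reports EINTR so
   that the caller gets a chance to run the pending asyncs.  */
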
1282 /* Convenience API for blocking while in guile mode. */
1283
1284 #if SCM_USE_PTHREAD_THREADS
1285
1286 int
1287 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1288 {
1289 scm_t_guile_ticket t = scm_leave_guile ();
1290 int res = scm_i_pthread_mutex_lock (mutex);
1291 scm_enter_guile (t);
1292 return res;
1293 }
1294
1295 static void
1296 unlock (void *data)
1297 {
1298 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
1299 }
1300
1301 void
1302 scm_frame_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
1303 {
1304 scm_i_scm_pthread_mutex_lock (mutex);
1305 scm_frame_unwind_handler (unlock, mutex, SCM_F_WIND_EXPLICITLY);
1306 }
1307
1308 int
1309 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
1310 {
1311 scm_t_guile_ticket t = scm_leave_guile ();
1312 int res = scm_i_pthread_cond_wait (cond, mutex);
1313 scm_enter_guile (t);
1314 return res;
1315 }
1316
1317 int
1318 scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
1319 scm_i_pthread_mutex_t *mutex,
1320 const scm_t_timespec *wt)
1321 {
1322 scm_t_guile_ticket t = scm_leave_guile ();
1323 int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
1324 scm_enter_guile (t);
1325 return res;
1326 }
1327
1328 #endif
1329
1330 unsigned long
1331 scm_std_usleep (unsigned long usecs)
1332 {
1333 struct timeval tv;
1334 tv.tv_usec = usecs % 1000000;
1335 tv.tv_sec = usecs / 1000000;
1336 scm_std_select (0, NULL, NULL, NULL, &tv);
1337 return tv.tv_sec * 1000000 + tv.tv_usec;
1338 }
1339
1340 unsigned int
1341 scm_std_sleep (unsigned int secs)
1342 {
1343 struct timeval tv;
1344 tv.tv_usec = 0;
1345 tv.tv_sec = secs;
1346 scm_std_select (0, NULL, NULL, NULL, &tv);
1347 return tv.tv_sec;
1348 }
1349
1350 /*** Misc */
1351
1352 SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
1353 (void),
1354 "Return the thread that called this function.")
1355 #define FUNC_NAME s_scm_current_thread
1356 {
1357 return SCM_I_CURRENT_THREAD->handle;
1358 }
1359 #undef FUNC_NAME
1360
1361 static SCM
1362 scm_c_make_list (size_t n, SCM fill)
1363 {
1364 SCM res = SCM_EOL;
1365 while (n-- > 0)
1366 res = scm_cons (fill, res);
1367 return res;
1368 }
1369
1370 SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
1371 (void),
1372 "Return a list of all threads.")
1373 #define FUNC_NAME s_scm_all_threads
1374 {
1375 /* We cannot allocate while holding the thread_admin_mutex because
1376 of the way GC is done.
1377 */
1378 int n = thread_count;
1379 scm_i_thread *t;
1380 SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
1381
1382 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1383 l = &list;
1384 for (t = all_threads; t && n > 0; t = t->next_thread)
1385 {
1386 SCM_SETCAR (*l, t->handle);
1387 l = SCM_CDRLOC (*l);
1388 n--;
1389 }
1390 *l = SCM_EOL;
1391 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1392 return list;
1393 }
1394 #undef FUNC_NAME
1395
1396 SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
1397 (SCM thread),
1398 "Return @code{#t} iff @var{thread} has exited.\n")
1399 #define FUNC_NAME s_scm_thread_exited_p
1400 {
1401 return scm_from_bool (scm_c_thread_exited_p (thread));
1402 }
1403 #undef FUNC_NAME
1404
1405 int
1406 scm_c_thread_exited_p (SCM thread)
1407 #define FUNC_NAME s_scm_thread_exited_p
1408 {
1409 scm_i_thread *t;
1410 SCM_VALIDATE_THREAD (1, thread);
1411 t = SCM_I_THREAD_DATA (thread);
1412 return t->exited;
1413 }
1414 #undef FUNC_NAME
1415
1416 static scm_i_pthread_cond_t wake_up_cond;
1417 int scm_i_thread_go_to_sleep;
1418 static int threads_initialized_p = 0;
1419
1420 void
1421 scm_i_thread_put_to_sleep ()
1422 {
1423 if (threads_initialized_p)
1424 {
1425 scm_i_thread *t;
1426
1427 scm_leave_guile ();
1428 scm_i_pthread_mutex_lock (&thread_admin_mutex);
1429
1430 /* Signal all threads to go to sleep
1431 */
1432 scm_i_thread_go_to_sleep = 1;
1433 for (t = all_threads; t; t = t->next_thread)
1434 scm_i_pthread_mutex_lock (&t->heap_mutex);
1435 scm_i_thread_go_to_sleep = 0;
1436 }
1437 }
1438
1439 void
1440 scm_i_thread_invalidate_freelists ()
1441 {
1442 /* thread_admin_mutex is already locked. */
1443
1444 scm_i_thread *t;
1445 for (t = all_threads; t; t = t->next_thread)
1446 if (t != SCM_I_CURRENT_THREAD)
1447 t->clear_freelists_p = 1;
1448 }
1449
1450 void
1451 scm_i_thread_wake_up ()
1452 {
1453 if (threads_initialized_p)
1454 {
1455 scm_i_thread *t;
1456
1457 scm_i_pthread_cond_broadcast (&wake_up_cond);
1458 for (t = all_threads; t; t = t->next_thread)
1459 scm_i_pthread_mutex_unlock (&t->heap_mutex);
1460 scm_i_pthread_mutex_unlock (&thread_admin_mutex);
1461 scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
1462 }
1463 }
1464
1465 void
1466 scm_i_thread_sleep_for_gc ()
1467 {
1468 scm_i_thread *t = suspend ();
1469 scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
1470 resume (t);
1471 }
1472
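/* A note on the stop-the-world protocol above: a thread in guile mode
   holds its own heap_mutex, so scm_i_thread_put_to_sleep can only
   acquire that mutex once the thread either leaves guile mode or,
   having noticed scm_i_thread_go_to_sleep at a safe point (for
   example while allocating cells), calls scm_i_thread_sleep_for_gc,
   whose cond_wait releases the heap_mutex for the duration of the
   collection.  scm_i_thread_wake_up broadcasts wake_up_cond and
   unlocks all the heap mutexes again afterwards.  */
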
1473 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
1474 */
1475 scm_i_pthread_mutex_t scm_i_critical_section_mutex =
1476 SCM_I_PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
1477 int scm_i_critical_section_level = 0;
1478
1479 static SCM framed_critical_section_mutex;
1480
1481 void
1482 scm_frame_critical_section (SCM mutex)
1483 {
1484 if (scm_is_false (mutex))
1485 mutex = framed_critical_section_mutex;
1486 scm_frame_lock_mutex (mutex);
1487 scm_frame_block_asyncs ();
1488 }
1489
1490 /*** Initialization */
1491
1492 scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
1493 scm_i_pthread_mutex_t scm_i_misc_mutex;
1494
1495 void
1496 scm_threads_prehistory (SCM_STACKITEM *base)
1497 {
1498 scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
1499 scm_i_pthread_cond_init (&wake_up_cond, NULL);
1500 scm_i_pthread_key_create (&scm_i_freelist, NULL);
1501 scm_i_pthread_key_create (&scm_i_freelist2, NULL);
1502
1503 guilify_self_1 (base);
1504 }
1505
1506 scm_t_bits scm_tc16_thread;
1507 scm_t_bits scm_tc16_mutex;
1508 scm_t_bits scm_tc16_condvar;
1509
1510 void
1511 scm_init_threads ()
1512 {
1513 scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
1514 scm_set_smob_mark (scm_tc16_thread, thread_mark);
1515 scm_set_smob_print (scm_tc16_thread, thread_print);
1516 scm_set_smob_free (scm_tc16_thread, thread_free);
1517
1518 scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
1519 scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
1520 scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
1521 scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
1522
1523 scm_tc16_condvar = scm_make_smob_type ("condition-variable",
1524 sizeof (fat_cond));
1525 scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
1526 scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
1527 scm_set_smob_free (scm_tc16_condvar, fat_cond_free);
1528
1529 scm_i_default_dynamic_state = SCM_BOOL_F;
1530 guilify_self_2 (SCM_BOOL_F);
1531 threads_initialized_p = 1;
1532
1533 framed_critical_section_mutex =
1534 scm_permanent_object (scm_make_recursive_mutex ());
1535 }
1536
1537 void
1538 scm_init_threads_default_dynamic_state ()
1539 {
1540 SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
1541 scm_i_default_dynamic_state = scm_permanent_object (state);
1542 }
1543
1544 void
1545 scm_init_thread_procs ()
1546 {
1547 #include "libguile/threads.x"
1548 }
1549
1550 /*
1551 Local Variables:
1552 c-file-style: "gnu"
1553 End:
1554 */