1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libguile/_scm.h"
32 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
39 #include "libguile/validate.h"
40 #include "libguile/root.h"
41 #include "libguile/eval.h"
42 #include "libguile/async.h"
43 #include "libguile/ports.h"
44 #include "libguile/threads.h"
45 #include "libguile/dynwind.h"
46 #include "libguile/iselect.h"
47 #include "libguile/fluids.h"
48 #include "libguile/continuations.h"
49 #include "libguile/gc.h"
50 #include "libguile/init.h"
54 # define ETIMEDOUT WSAETIMEDOUT
58 # define pipe(fd) _pipe (fd, 256, O_BINARY)
59 #endif /* __MINGW32__ */
63 /* Make an empty queue data structure.
68 return scm_cons (SCM_EOL
, SCM_EOL
);
71 /* Put T at the back of Q and return a handle that can be used with
72 remqueue to remove T from Q again.
75 enqueue (SCM q
, SCM t
)
77 SCM c
= scm_cons (t
, SCM_EOL
);
78 if (scm_is_null (SCM_CDR (q
)))
81 SCM_SETCDR (SCM_CAR (q
), c
);
86 /* Remove the element that the handle C refers to from the queue Q. C
87 must have been returned from a call to enqueue. The return value
88 is zero when the element referred to by C has already been removed.
89 Otherwise, 1 is returned.
92 remqueue (SCM q
, SCM c
)
95 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
99 if (scm_is_eq (c
, SCM_CAR (q
)))
100 SCM_SETCAR (q
, SCM_CDR (c
));
101 SCM_SETCDR (prev
, SCM_CDR (c
));
109 /* Remove the front-most element from the queue Q and return it.
110 Return SCM_BOOL_F when Q is empty.
120 SCM_SETCDR (q
, SCM_CDR (c
));
121 if (scm_is_null (SCM_CDR (q
)))
122 SCM_SETCAR (q
, SCM_EOL
);
127 /*** Thread smob routines */
130 thread_mark (SCM obj
)
132 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
133 scm_gc_mark (t
->result
);
134 scm_gc_mark (t
->join_queue
);
135 scm_gc_mark (t
->dynwinds
);
136 scm_gc_mark (t
->active_asyncs
);
137 scm_gc_mark (t
->continuation_root
);
138 return t
->dynamic_state
;
142 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
144 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
145 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
146 the struct case, hence we go via a union, and extract according to the
147 size of pthread_t. */
155 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
156 scm_i_pthread_t p
= t
->pthread
;
159 if (sizeof (p
) == sizeof (unsigned short))
161 else if (sizeof (p
) == sizeof (unsigned int))
163 else if (sizeof (p
) == sizeof (unsigned long))
168 scm_puts ("#<thread ", port
);
169 scm_uintprint (id
, 10, port
);
170 scm_puts (" (", port
);
171 scm_uintprint ((scm_t_bits
)t
, 16, port
);
172 scm_puts (")>", port
);
177 thread_free (SCM obj
)
179 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
181 scm_gc_free (t
, sizeof (*t
), "thread");
185 /*** Blocking on queues. */
187 /* See also scm_i_queue_async_cell for how such a block is
191 /* Put the current thread on QUEUE and go to sleep, waiting for it to
192 be woken up by a call to 'unblock_from_queue', or to be
193 interrupted. Upon return of this function, the current thread is
194 no longer on QUEUE, even when the sleep has been interrupted.
196 The QUEUE data structure is assumed to be protected by MUTEX and
197 the caller of block_self must hold MUTEX. It will be atomically
198 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
200 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
203 When WAITTIME is not NULL, the sleep will be aborted at that time.
205 The return value of block_self is an errno value. It will be zero
206 when the sleep has been successfully completed by a call to
207 unblock_from_queue, EINTR when it has been interrupted by the
208 delivery of a system async, and ETIMEDOUT when the timeout has
211 The system asyncs themselves are not executed by block_self.
214 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
215 const scm_t_timespec
*waittime
)
217 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
221 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
226 q_handle
= enqueue (queue
, t
->handle
);
227 if (waittime
== NULL
)
228 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
230 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
232 /* When we are still on QUEUE, we have been interrupted. We
233 report this only when no other error (such as a timeout) has
236 if (remqueue (queue
, q_handle
) && err
== 0)
239 scm_i_reset_sleep (t
);
245 /* Wake up the first thread on QUEUE, if any. The caller must hold
246 the mutex that protects QUEUE. The awoken thread is returned, or
247 #f when the queue was empty.
250 unblock_from_queue (SCM queue
)
252 SCM thread
= dequeue (queue
);
253 if (scm_is_true (thread
))
254 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
258 /* Getting into and out of guile mode.
261 /* Ken Raeburn observes that the implementation of suspend and resume
262 (and the things that build on top of them) are very likely not
263 correct (see below). We will need fix this eventually, and that's
264 why scm_leave_guile/scm_enter_guile are not exported in the API.
268 Consider this sequence:
270 Function foo, called in Guile mode, calls suspend (maybe indirectly
271 through scm_leave_guile), which does this:
273 // record top of stack for the GC
274 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
277 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
278 setjmp (t->regs); // here's most of the magic
282 Function foo has a SCM value X, a handle on a non-immediate object, in
283 a caller-saved register R, and it's the only reference to the object
286 The compiler wants to use R in suspend, so it pushes the current
287 value, X, into a stack slot which will be reloaded on exit from
288 suspend; then it loads stuff into R and goes about its business. The
289 setjmp call saves (some of) the current registers, including R, which
290 no longer contains X. (This isn't a problem for a normal
291 setjmp/longjmp situation, where longjmp would be called before
292 setjmp's caller returns; the old value for X would be loaded back from
293 the stack after the longjmp, before the function returned.)
295 So, suspend returns, loading X back into R (and invalidating the jump
296 buffer) in the process. The caller foo then goes off and calls a
297 bunch of other functions out of Guile mode, occasionally storing X on
298 the stack again, but, say, much deeper on the stack than suspend's
299 stack frame went, and the stack slot where suspend had written X has
300 long since been overwritten with other values.
302 Okay, nothing actively broken so far. Now, let garbage collection
303 run, triggered by another thread.
305 The thread calling foo is out of Guile mode at the time, so the
306 garbage collector just scans a range of stack addresses. Too bad that
307 X isn't stored there. So the pointed-to storage goes onto the free
308 list, and I think you can see where things go from there.
310 Is there anything I'm missing that'll prevent this scenario from
311 happening? I mean, aside from, "well, suspend and scm_leave_guile
312 don't have many local variables, so they probably won't need to save
313 any registers on most systems, so we hope everything will wind up in
314 the jump buffer and we'll just get away with it"?
316 (And, going the other direction, if scm_leave_guile and suspend push
317 the stack pointer over onto a new page, and foo doesn't make further
318 function calls and thus the stack pointer no longer includes that
319 page, are we guaranteed that the kernel cannot release the now-unused
320 stack page that contains the top-of-stack pointer we just saved? I
321 don't know if any OS actually does that. If it does, we could get
322 faults in garbage collection.)
324 I don't think scm_without_guile has to have this problem, as it gets
325 more control over the stack handling -- but it should call setjmp
326 itself. I'd probably try something like:
328 // record top of stack for the GC
329 t->top = SCM_STACK_PTR (&t);
331 SCM_FLUSH_REGISTER_WINDOWS;
336 ... though even that's making some assumptions about the stack
337 ordering of local variables versus caller-saved registers.
339 For something like scm_leave_guile to work, I don't think it can just
340 rely on invalidated jump buffers. A valid jump buffer, and a handle
341 on the stack state at the point when the jump buffer was initialized,
342 together, would work fine, but I think then we're talking about macros
343 invoking setjmp in the caller's stack frame, and requiring that the
344 caller of scm_leave_guile also call scm_enter_guile before returning,
345 kind of like pthread_cleanup_push/pop calls that have to be paired up
346 in a function. (In fact, the pthread ones have to be paired up
347 syntactically, as if they might expand to a compound statement
348 incorporating the user's code, and invoking a compiler's
349 exception-handling primitives. Which might be something to think
350 about for cases where Guile is used with C++ exceptions or
354 scm_i_pthread_key_t scm_i_thread_key
;
357 resume (scm_i_thread
*t
)
360 if (t
->clear_freelists_p
)
362 *SCM_FREELIST_LOC (scm_i_freelist
) = SCM_EOL
;
363 *SCM_FREELIST_LOC (scm_i_freelist2
) = SCM_EOL
;
364 t
->clear_freelists_p
= 0;
368 typedef void* scm_t_guile_ticket
;
371 scm_enter_guile (scm_t_guile_ticket ticket
)
373 scm_i_thread
*t
= (scm_i_thread
*)ticket
;
376 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
381 static scm_i_thread
*
384 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
386 /* record top of stack for the GC */
387 t
->top
= SCM_STACK_PTR (&t
);
388 /* save registers. */
389 SCM_FLUSH_REGISTER_WINDOWS
;
394 static scm_t_guile_ticket
397 scm_i_thread
*t
= suspend ();
398 scm_i_pthread_mutex_unlock (&t
->heap_mutex
);
399 return (scm_t_guile_ticket
) t
;
402 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
403 static scm_i_thread
*all_threads
= NULL
;
404 static int thread_count
;
406 static SCM scm_i_default_dynamic_state
;
408 /* Perform first stage of thread initialisation, in non-guile mode.
411 guilify_self_1 (SCM_STACKITEM
*base
)
413 scm_i_thread
*t
= malloc (sizeof (scm_i_thread
));
415 t
->pthread
= scm_i_pthread_self ();
416 t
->handle
= SCM_BOOL_F
;
417 t
->result
= SCM_BOOL_F
;
418 t
->join_queue
= SCM_EOL
;
419 t
->dynamic_state
= SCM_BOOL_F
;
420 t
->dynwinds
= SCM_EOL
;
421 t
->active_asyncs
= SCM_EOL
;
423 t
->pending_asyncs
= 1;
424 t
->last_debug_frame
= NULL
;
426 t
->continuation_root
= SCM_EOL
;
427 t
->continuation_base
= base
;
428 scm_i_pthread_cond_init (&t
->sleep_cond
, NULL
);
429 t
->sleep_mutex
= NULL
;
430 t
->sleep_object
= SCM_BOOL_F
;
432 /* XXX - check for errors. */
433 pipe (t
->sleep_pipe
);
434 scm_i_pthread_mutex_init (&t
->heap_mutex
, NULL
);
435 t
->clear_freelists_p
= 0;
439 t
->freelist
= SCM_EOL
;
440 t
->freelist2
= SCM_EOL
;
441 SCM_SET_FREELIST_LOC (scm_i_freelist
, &t
->freelist
);
442 SCM_SET_FREELIST_LOC (scm_i_freelist2
, &t
->freelist2
);
444 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
446 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
448 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
449 t
->next_thread
= all_threads
;
452 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
455 /* Perform second stage of thread initialisation, in guile mode.
458 guilify_self_2 (SCM parent
)
460 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
462 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
463 scm_gc_register_collectable_memory (t
, sizeof (scm_i_thread
), "thread");
464 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
465 t
->continuation_base
= t
->base
;
467 if (scm_is_true (parent
))
468 t
->dynamic_state
= scm_make_dynamic_state (parent
);
470 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
472 t
->join_queue
= make_queue ();
476 /* Perform thread tear-down, in guile mode.
479 do_thread_exit (void *v
)
481 scm_i_thread
*t
= (scm_i_thread
*)v
;
483 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex
);
486 close (t
->sleep_pipe
[0]);
487 close (t
->sleep_pipe
[1]);
488 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
491 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
496 on_thread_exit (void *v
)
498 scm_i_thread
*t
= (scm_i_thread
*)v
, **tp
;
500 scm_i_pthread_setspecific (scm_i_thread_key
, v
);
502 /* Unblocking the joining threads needs to happen in guile mode
503 since the queue is a SCM data structure.
505 scm_with_guile (do_thread_exit
, v
);
507 /* Removing ourself from the list of all threads needs to happen in
508 non-guile mode since all SCM values on our stack become
509 unprotected once we are no longer in the list.
512 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
513 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
516 *tp
= t
->next_thread
;
520 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
522 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
525 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
528 init_thread_key (void)
530 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
533 /* Perform any initializations necessary to bring the current thread
534 into guile mode, initializing Guile itself, if necessary.
536 BASE is the stack base to use with GC.
538 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
539 which case the default dynamic state is used.
541 Return zero when the thread was in guile mode already; otherwise
546 scm_i_init_thread_for_guile (SCM_STACKITEM
*base
, SCM parent
)
550 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
552 if ((t
= SCM_I_CURRENT_THREAD
) == NULL
)
554 /* This thread has not been guilified yet.
557 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
558 if (scm_initialized_p
== 0)
560 /* First thread ever to enter Guile. Run the full
563 scm_i_init_guile (base
);
564 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
568 /* Guile is already initialized, but this thread enters it for
569 the first time. Only initialize this thread.
571 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
572 guilify_self_1 (base
);
573 guilify_self_2 (parent
);
579 /* This thread is already guilified but not in guile mode, just
582 XXX - base might be lower than when this thread was first
585 scm_enter_guile ((scm_t_guile_ticket
) t
);
590 /* Thread is already in guile mode. Nothing to do.
596 #if SCM_USE_PTHREAD_THREADS
598 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
599 /* This method for GNU/Linux and perhaps some other systems.
600 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
601 available on them. */
602 #define HAVE_GET_THREAD_STACK_BASE
604 static SCM_STACKITEM
*
605 get_thread_stack_base ()
611 pthread_getattr_np (pthread_self (), &attr
);
612 pthread_attr_getstack (&attr
, &start
, &size
);
613 end
= (char *)start
+ size
;
615 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
616 for the main thread, but we can use scm_get_stack_base in that
620 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
621 if ((void *)&attr
< start
|| (void *)&attr
>= end
)
622 return scm_get_stack_base ();
626 #if SCM_STACK_GROWS_UP
634 #elif HAVE_PTHREAD_GET_STACKADDR_NP
635 /* This method for MacOS X.
636 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
637 but as of 2006 there's nothing obvious at apple.com. */
638 #define HAVE_GET_THREAD_STACK_BASE
639 static SCM_STACKITEM
*
640 get_thread_stack_base ()
642 return pthread_get_stackaddr_np (pthread_self ());
645 #elif defined (__MINGW32__)
646 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
647 in any thread. We don't like hard-coding the name of a system, but there
648 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
650 #define HAVE_GET_THREAD_STACK_BASE
651 static SCM_STACKITEM
*
652 get_thread_stack_base ()
654 return scm_get_stack_base ();
657 #endif /* pthread methods of get_thread_stack_base */
659 #else /* !SCM_USE_PTHREAD_THREADS */
661 #define HAVE_GET_THREAD_STACK_BASE
663 static SCM_STACKITEM
*
664 get_thread_stack_base ()
666 return scm_get_stack_base ();
669 #endif /* !SCM_USE_PTHREAD_THREADS */
671 #ifdef HAVE_GET_THREAD_STACK_BASE
676 scm_i_init_thread_for_guile (get_thread_stack_base (),
677 scm_i_default_dynamic_state
);
683 scm_with_guile (void *(*func
)(void *), void *data
)
685 return scm_i_with_guile_and_parent (func
, data
,
686 scm_i_default_dynamic_state
);
690 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
,
695 SCM_STACKITEM base_item
;
696 really_entered
= scm_i_init_thread_for_guile (&base_item
, parent
);
697 res
= scm_c_with_continuation_barrier (func
, data
);
704 scm_without_guile (void *(*func
)(void *), void *data
)
707 scm_t_guile_ticket t
;
708 t
= scm_leave_guile ();
714 /*** Thread creation */
721 scm_i_pthread_mutex_t mutex
;
722 scm_i_pthread_cond_t cond
;
726 really_launch (void *d
)
728 launch_data
*data
= (launch_data
*)d
;
729 SCM thunk
= data
->thunk
, handler
= data
->handler
;
732 t
= SCM_I_CURRENT_THREAD
;
734 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
735 data
->thread
= scm_current_thread ();
736 scm_i_pthread_cond_signal (&data
->cond
);
737 scm_i_pthread_mutex_unlock (&data
->mutex
);
739 if (SCM_UNBNDP (handler
))
740 t
->result
= scm_call_0 (thunk
);
742 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
748 launch_thread (void *d
)
750 launch_data
*data
= (launch_data
*)d
;
751 scm_i_pthread_detach (scm_i_pthread_self ());
752 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
756 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
757 (SCM thunk
, SCM handler
),
758 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
759 "returning a new thread object representing the thread. The procedure\n"
760 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
762 "When @var{handler} is specified, then @var{thunk} is called from\n"
763 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
764 "handler. This catch is established inside the continuation barrier.\n"
766 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
767 "the @emph{exit value} of the thread and the thread is terminated.")
768 #define FUNC_NAME s_scm_call_with_new_thread
774 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
775 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
776 handler
, SCM_ARG2
, FUNC_NAME
);
778 data
.parent
= scm_current_dynamic_state ();
780 data
.handler
= handler
;
781 data
.thread
= SCM_BOOL_F
;
782 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
783 scm_i_pthread_cond_init (&data
.cond
, NULL
);
785 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
786 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
789 scm_i_pthread_mutex_unlock (&data
.mutex
);
793 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
794 scm_i_pthread_mutex_unlock (&data
.mutex
);
802 scm_t_catch_body body
;
804 scm_t_catch_handler handler
;
807 scm_i_pthread_mutex_t mutex
;
808 scm_i_pthread_cond_t cond
;
812 really_spawn (void *d
)
814 spawn_data
*data
= (spawn_data
*)d
;
815 scm_t_catch_body body
= data
->body
;
816 void *body_data
= data
->body_data
;
817 scm_t_catch_handler handler
= data
->handler
;
818 void *handler_data
= data
->handler_data
;
819 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
821 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
822 data
->thread
= scm_current_thread ();
823 scm_i_pthread_cond_signal (&data
->cond
);
824 scm_i_pthread_mutex_unlock (&data
->mutex
);
827 t
->result
= body (body_data
);
829 t
->result
= scm_internal_catch (SCM_BOOL_T
,
831 handler
, handler_data
);
837 spawn_thread (void *d
)
839 spawn_data
*data
= (spawn_data
*)d
;
840 scm_i_pthread_detach (scm_i_pthread_self ());
841 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
846 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
847 scm_t_catch_handler handler
, void *handler_data
)
853 data
.parent
= scm_current_dynamic_state ();
855 data
.body_data
= body_data
;
856 data
.handler
= handler
;
857 data
.handler_data
= handler_data
;
858 data
.thread
= SCM_BOOL_F
;
859 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
860 scm_i_pthread_cond_init (&data
.cond
, NULL
);
862 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
863 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
866 scm_i_pthread_mutex_unlock (&data
.mutex
);
870 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
871 scm_i_pthread_mutex_unlock (&data
.mutex
);
876 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
878 "Move the calling thread to the end of the scheduling queue.")
879 #define FUNC_NAME s_scm_yield
881 return scm_from_bool (scm_i_sched_yield ());
885 SCM_DEFINE (scm_join_thread
, "join-thread", 1, 0, 0,
887 "Suspend execution of the calling thread until the target @var{thread} "
888 "terminates, unless the target @var{thread} has already terminated. ")
889 #define FUNC_NAME s_scm_join_thread
894 SCM_VALIDATE_THREAD (1, thread
);
895 if (scm_is_eq (scm_current_thread (), thread
))
896 SCM_MISC_ERROR ("can not join the current thread", SCM_EOL
);
898 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex
);
900 t
= SCM_I_THREAD_DATA (thread
);
905 block_self (t
->join_queue
, thread
, &thread_admin_mutex
, NULL
);
908 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
910 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex
);
915 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
922 /* We implement our own mutex type since we want them to be 'fair', we
923 want to do fancy things while waiting for them (like running
924 asyncs) and we might want to add things that are nice for
929 scm_i_pthread_mutex_t lock
;
931 int level
; /* how much the owner owns us.
932 < 0 for non-recursive mutexes */
933 SCM waiting
; /* the threads waiting for this mutex. */
936 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
937 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
940 fat_mutex_mark (SCM mx
)
942 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
943 scm_gc_mark (m
->owner
);
948 fat_mutex_free (SCM mx
)
950 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
951 scm_i_pthread_mutex_destroy (&m
->lock
);
952 scm_gc_free (m
, sizeof (fat_mutex
), "mutex");
957 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
959 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
960 scm_puts ("#<mutex ", port
);
961 scm_uintprint ((scm_t_bits
)m
, 16, port
);
962 scm_puts (">", port
);
967 make_fat_mutex (int recursive
)
972 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
973 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
974 m
->owner
= SCM_BOOL_F
;
975 m
->level
= recursive
? 0 : -1;
976 m
->waiting
= SCM_EOL
;
977 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
978 m
->waiting
= make_queue ();
982 SCM_DEFINE (scm_make_mutex
, "make-mutex", 0, 0, 0,
984 "Create a new mutex. ")
985 #define FUNC_NAME s_scm_make_mutex
987 return make_fat_mutex (0);
991 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
993 "Create a new recursive mutex. ")
994 #define FUNC_NAME s_scm_make_recursive_mutex
996 return make_fat_mutex (1);
1001 fat_mutex_lock (SCM mutex
)
1003 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1004 SCM thread
= scm_current_thread ();
1007 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1008 if (scm_is_false (m
->owner
))
1010 else if (scm_is_eq (m
->owner
, thread
))
1015 msg
= "mutex already locked by current thread";
1021 block_self (m
->waiting
, mutex
, &m
->lock
, NULL
);
1022 if (scm_is_eq (m
->owner
, thread
))
1024 scm_i_pthread_mutex_unlock (&m
->lock
);
1026 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1029 scm_i_pthread_mutex_unlock (&m
->lock
);
1033 SCM_DEFINE (scm_lock_mutex
, "lock-mutex", 1, 0, 0,
1035 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1036 "blocks until the mutex becomes available. The function returns when "
1037 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1038 "a thread already owns will succeed right away and will not block the "
1039 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1040 #define FUNC_NAME s_scm_lock_mutex
1044 SCM_VALIDATE_MUTEX (1, mx
);
1045 msg
= fat_mutex_lock (mx
);
1047 scm_misc_error (NULL
, msg
, SCM_EOL
);
1053 scm_dynwind_lock_mutex (SCM mutex
)
1055 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM
))scm_unlock_mutex
, mutex
,
1056 SCM_F_WIND_EXPLICITLY
);
1057 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM
))scm_lock_mutex
, mutex
,
1058 SCM_F_WIND_EXPLICITLY
);
1062 fat_mutex_trylock (fat_mutex
*m
, int *resp
)
1065 SCM thread
= scm_current_thread ();
1068 scm_i_pthread_mutex_lock (&m
->lock
);
1069 if (scm_is_false (m
->owner
))
1071 else if (scm_is_eq (m
->owner
, thread
))
1076 msg
= "mutex already locked by current thread";
1080 scm_i_pthread_mutex_unlock (&m
->lock
);
1084 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1086 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1087 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1088 #define FUNC_NAME s_scm_try_mutex
1093 SCM_VALIDATE_MUTEX (1, mutex
);
1095 msg
= fat_mutex_trylock (SCM_MUTEX_DATA (mutex
), &res
);
1097 scm_misc_error (NULL
, msg
, SCM_EOL
);
1098 return scm_from_bool (res
);
1103 fat_mutex_unlock (fat_mutex
*m
)
1107 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1108 if (!scm_is_eq (m
->owner
, scm_current_thread ()))
1110 if (scm_is_false (m
->owner
))
1111 msg
= "mutex not locked";
1113 msg
= "mutex not locked by current thread";
1115 else if (m
->level
> 0)
1118 m
->owner
= unblock_from_queue (m
->waiting
);
1119 scm_i_pthread_mutex_unlock (&m
->lock
);
1124 SCM_DEFINE (scm_unlock_mutex
, "unlock-mutex", 1, 0, 0,
1126 "Unlocks @var{mutex} if the calling thread owns the lock on "
1127 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1128 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1129 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1130 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1131 "with a call to @code{unlock-mutex}. Only the last call to "
1132 "@code{unlock-mutex} will actually unlock the mutex. ")
1133 #define FUNC_NAME s_scm_unlock_mutex
1136 SCM_VALIDATE_MUTEX (1, mx
);
1138 msg
= fat_mutex_unlock (SCM_MUTEX_DATA (mx
));
1140 scm_misc_error (NULL
, msg
, SCM_EOL
);
1147 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1149 "Return the thread owning @var{mx}, or @code{#f}.")
1150 #define FUNC_NAME s_scm_mutex_owner
1152 SCM_VALIDATE_MUTEX (1, mx
);
1153 return (SCM_MUTEX_DATA(mx
))->owner
;
1157 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1159 "Return the lock level of a recursive mutex, or -1\n"
1160 "for a standard mutex.")
1161 #define FUNC_NAME s_scm_mutex_level
1163 SCM_VALIDATE_MUTEX (1, mx
);
1164 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1170 /*** Fat condition variables */
1173 scm_i_pthread_mutex_t lock
;
1174 SCM waiting
; /* the threads waiting for this condition. */
1177 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1178 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1181 fat_cond_mark (SCM cv
)
1183 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1188 fat_cond_free (SCM mx
)
1190 fat_cond
*c
= SCM_CONDVAR_DATA (mx
);
1191 scm_i_pthread_mutex_destroy (&c
->lock
);
1192 scm_gc_free (c
, sizeof (fat_cond
), "condition-variable");
1197 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1199 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1200 scm_puts ("#<condition-variable ", port
);
1201 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1202 scm_puts (">", port
);
1206 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1208 "Make a new condition variable.")
1209 #define FUNC_NAME s_scm_make_condition_variable
1214 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1215 scm_i_pthread_mutex_init (&c
->lock
, 0);
1216 c
->waiting
= SCM_EOL
;
1217 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1218 c
->waiting
= make_queue ();
1224 fat_cond_timedwait (SCM cond
, SCM mutex
,
1225 const scm_t_timespec
*waittime
)
1227 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1228 fat_cond
*c
= SCM_CONDVAR_DATA (cond
);
1229 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1235 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1236 msg
= fat_mutex_unlock (m
);
1240 err
= block_self (c
->waiting
, cond
, &c
->lock
, waittime
);
1241 scm_i_pthread_mutex_unlock (&c
->lock
);
1242 fat_mutex_lock (mutex
);
1245 scm_i_pthread_mutex_unlock (&c
->lock
);
1250 scm_misc_error (NULL
, msg
, SCM_EOL
);
1252 scm_remember_upto_here_2 (cond
, mutex
);
1256 if (err
== ETIMEDOUT
)
1261 scm_syserror (NULL
);
1266 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1267 (SCM cv
, SCM mx
, SCM t
),
1268 "Wait until @var{cond-var} has been signalled. While waiting, "
1269 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1270 "is locked again when this function returns. When @var{time} is given, "
1271 "it specifies a point in time where the waiting should be aborted. It "
1272 "can be either a integer as returned by @code{current-time} or a pair "
1273 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1274 "mutex is locked and @code{#f} is returned. When the condition "
1275 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1277 #define FUNC_NAME s_scm_timed_wait_condition_variable
1279 scm_t_timespec waittime
, *waitptr
= NULL
;
1281 SCM_VALIDATE_CONDVAR (1, cv
);
1282 SCM_VALIDATE_MUTEX (2, mx
);
1284 if (!SCM_UNBNDP (t
))
1286 if (scm_is_pair (t
))
1288 waittime
.tv_sec
= scm_to_ulong (SCM_CAR (t
));
1289 waittime
.tv_nsec
= scm_to_ulong (SCM_CAR (t
)) * 1000;
1293 waittime
.tv_sec
= scm_to_ulong (t
);
1294 waittime
.tv_nsec
= 0;
1296 waitptr
= &waittime
;
1299 return scm_from_bool (fat_cond_timedwait (cv
, mx
, waitptr
));
1304 fat_cond_signal (fat_cond
*c
)
1306 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1307 unblock_from_queue (c
->waiting
);
1308 scm_i_pthread_mutex_unlock (&c
->lock
);
1311 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1313 "Wake up one thread that is waiting for @var{cv}")
1314 #define FUNC_NAME s_scm_signal_condition_variable
1316 SCM_VALIDATE_CONDVAR (1, cv
);
1317 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1323 fat_cond_broadcast (fat_cond
*c
)
1325 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1326 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1328 scm_i_pthread_mutex_unlock (&c
->lock
);
1331 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1333 "Wake up all threads that are waiting for @var{cv}. ")
1334 #define FUNC_NAME s_scm_broadcast_condition_variable
1336 SCM_VALIDATE_CONDVAR (1, cv
);
1337 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
/*** Marking stacks */

/* XXX - what to do with this?  Do we need to handle this for blocked
   threads as well?
*/
#ifdef __ia64__
/* On IA64 the register backing store is a second, upward-growing stack
   that can hold SCM values; it must be marked in addition to the
   ordinary C stack.  getcontext flushes the registers into the
   context, which is itself scanned conservatively.  */
# define SCM_MARK_BACKING_STORE() do { \
    ucontext_t ctx; \
    SCM_STACKITEM * top, * bot; \
    getcontext (&ctx); \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM))); \
    bot = (SCM_STACKITEM *) scm_ia64_register_backing_store_base (); \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
    scm_mark_locations (bot, top - bot); } while (0)
#else
/* No backing store on other architectures: expands to nothing.  */
# define SCM_MARK_BACKING_STORE()
#endif
scm_threads_mark_stacks (void)
{
  /* GC mark phase: mark every thread's handle, its C stack, and its
     saved register set so that SCM values living there stay alive.
     All threads are expected to be suspended when this runs (the
     assert below checks that each thread recorded a stack top).  */
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    {
      /* Check that thread has indeed been suspended.  */
      assert (t->top);

      scm_gc_mark (t->handle);

#if SCM_STACK_GROWS_UP
      scm_mark_locations (t->base, t->top - t->base);
#else
      scm_mark_locations (t->top, t->base - t->top);
#endif
      /* Mark the register state saved at suspension time; it may hold
         SCM values not written out to the stack.  */
      scm_mark_locations ((SCM_STACKITEM *) t->regs,
			  ((size_t) sizeof(t->regs)
			   / sizeof (SCM_STACKITEM)));
    }

  /* IA64-only: also mark the register backing store (no-op elsewhere).  */
  SCM_MARK_BACKING_STORE ();
}
scm_std_select (int nfds,
		SELECT_TYPE *readfds,
		SELECT_TYPE *writefds,
		SELECT_TYPE *exceptfds,
		struct timeval *timeout)
{
  /* Interruptible select(2) replacement: the current thread's sleep
     pipe (read end) is added to the read set so that another thread
     can wake us by writing to the pipe.  The actual select runs
     outside guile mode so GC is not blocked while we wait.  */
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  scm_t_guile_ticket ticket;

  if (readfds == NULL)
    {
      /* Caller passed no read set; we still need one for the wakeup
         fd.  */
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register our sleep pipe with the wakeup machinery; retry (running
     asyncs) while scm_i_setup_sleep reports a pending wakeup.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  ticket = scm_leave_guile ();
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;
  res = select (nfds, readfds, writefds, exceptfds, timeout);
  eno = errno;			/* save errno across scm_enter_guile */
  scm_enter_guile (ticket);

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* We were woken through the sleep pipe: drain one byte and hide
         the wakeup fd from the caller.  NOTE(review): the read return
         value is ignored here — confirm a wakeup always writes exactly
         one byte.  */
      char dummy;
      read (wakeup_fd, &dummy, 1);
      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  /* Only the wakeup fired: report it as an interruption.  */
	  errno = EINTR;
	  res = -1;
	}
    }
  else
    errno = eno;

  return res;
}
1438 /* Convenience API for blocking while in guile mode. */
1440 #if SCM_USE_PTHREAD_THREADS
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  /* Like scm_i_pthread_mutex_lock, but leaves guile mode first so the
     rest of the system (GC in particular) is not blocked while we
     wait for MUTEX.  Returns the pthread error code.  */
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_mutex_lock (mutex);
  scm_enter_guile (t);
  return res;
}
do_unlock (void *data)
{
  /* Dynwind unwind handler: DATA is the pthread mutex to unlock.  */
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  /* Lock MUTEX and arrange for it to be unlocked when the current
     dynwind context is left, whether normally or non-locally.  */
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  /* pthread_cond_wait wrapper that blocks outside guile mode so GC
     can proceed while we wait.  Returns the pthread error code.  */
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_wait (cond, mutex);
  scm_enter_guile (t);
  return res;
}
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  /* pthread_cond_timedwait wrapper that blocks outside guile mode.
     WT is an absolute timespec; returns 0 on wakeup or an error code
     such as ETIMEDOUT.  */
  scm_t_guile_ticket t = scm_leave_guile ();
  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  scm_enter_guile (t);
  return res;
}

#endif
scm_std_usleep (unsigned long usecs)
{
  /* Sleep for USECS microseconds via scm_std_select, so the sleep can
     be interrupted by other threads.  NOTE(review): the return value
     relies on select updating TV with the unslept time, which POSIX
     does not guarantee on all platforms — verify where portability
     matters.  */
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}
scm_std_sleep (unsigned int secs)
{
  /* Interruptible sleep for SECS seconds, implemented with
     scm_std_select.  NOTE(review): the return value relies on select
     updating TV with the unslept time, which is not guaranteed on all
     platforms.  */
  struct timeval tv;
  tv.tv_sec = secs;
  tv.tv_usec = 0;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}
/* Scheme binding: return the SCM handle of the calling thread.  */
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
1518 scm_c_make_list (size_t n
, SCM fill
)
1522 res
= scm_cons (fill
, res
);
/* Scheme binding: snapshot the global thread list as a Scheme list of
   thread handles.  */
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
  scm_i_thread *t;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  /* Fill the pre-allocated list in place; stop after N entries in
     case threads were created since the count was read.  */
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      SCM_SETCAR (*l, t->handle);
      l = SCM_CDRLOC (*l);
      n--;
    }
  /* Truncate any unused tail (threads may have exited meanwhile).  */
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
/* Scheme binding: has THREAD terminated?  */
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  /* C-level predicate: nonzero iff THREAD's per-thread data records
     that it has exited.  Validates THREAD and may raise a Scheme
     error for a non-thread argument.  */
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
/* Condition variable on which threads sleeping for GC wait to be
   woken again (see scm_i_thread_sleep_for_gc / scm_i_thread_wake_up).  */
static scm_i_pthread_cond_t wake_up_cond;
/* Flag polled by running threads; when set, they should go to sleep
   so GC can run.  */
int scm_i_thread_go_to_sleep;
/* Becomes 1 once scm_init_threads has run; the put-to-sleep/wake-up
   machinery is a no-op before that.  */
static int threads_initialized_p = 0;
scm_i_thread_put_to_sleep ()
{
  /* Stop all threads at a safe point so GC can run: raise the
     go-to-sleep flag and acquire every thread's heap mutex.  We
     return with thread_admin_mutex and all heap mutexes held; the
     caller (GC) releases them via scm_i_thread_wake_up.  */
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_leave_guile ();
      scm_i_pthread_mutex_lock (&thread_admin_mutex);

      /* Signal all threads to go to sleep */
      scm_i_thread_go_to_sleep = 1;
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_lock (&t->heap_mutex);
      scm_i_thread_go_to_sleep = 0;
    }
}
scm_i_thread_invalidate_freelists ()
{
  /* thread_admin_mutex is already locked. */

  /* Mark every other thread's cell freelists as needing to be
     cleared — presumably because GC has invalidated the cached cells;
     confirm against the GC callers.  */
  scm_i_thread *t;
  for (t = all_threads; t; t = t->next_thread)
    if (t != SCM_I_CURRENT_THREAD)
      t->clear_freelists_p = 1;
}
scm_i_thread_wake_up ()
{
  /* Undo scm_i_thread_put_to_sleep: broadcast the wake-up condition,
     release every thread's heap mutex and the admin mutex, then
     re-enter guile mode ourselves.  */
  if (threads_initialized_p)
    {
      scm_i_thread *t;

      scm_i_pthread_cond_broadcast (&wake_up_cond);
      for (t = all_threads; t; t = t->next_thread)
	scm_i_pthread_mutex_unlock (&t->heap_mutex);
      scm_i_pthread_mutex_unlock (&thread_admin_mutex);
      scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
    }
}
scm_i_thread_sleep_for_gc ()
{
  /* Called by a thread that noticed scm_i_thread_go_to_sleep: suspend
     (which presumably records the stack state for the GC marker — see
     suspend's definition), block on wake_up_cond until GC is over,
     then resume.  */
  scm_i_thread *t = suspend ();
  scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
  resume (t);
}
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
/* Presumably the nesting depth of critical sections in the current
   thread — confirm against SCM_CRITICAL_SECTION_START/END.  */
int scm_i_critical_section_level = 0;

/* Mutex used by scm_dynwind_critical_section when the caller passes
   #f; a recursive mutex created in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;
scm_dynwind_critical_section (SCM mutex)
{
  /* Enter a dynwind-protected critical section: lock MUTEX (or the
     default critical-section mutex when MUTEX is #f) and block asyncs
     until the dynwind context is left.  */
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
/*** Initialization */

/* Per-thread freelist keys, created in scm_threads_prehistory.  */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Mutex attribute for creating recursive mutexes; initialized in
   scm_threads_prehistory.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
scm_threads_prehistory (SCM_STACKITEM *base)
{
  /* Earliest thread-system setup, run before the rest of libguile is
     initialized.  BASE is presumably the bottom of the main thread's
     C stack — confirm against the caller in init.c.  Creates the
     global mutexes, the wake-up condvar and the freelist keys, then
     registers the current thread via guilify_self_1.  */
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex is recursive so that nested critical
     sections within one thread do not deadlock.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);
  scm_i_pthread_key_create (&scm_i_freelist, NULL);
  scm_i_pthread_key_create (&scm_i_freelist2, NULL);

  guilify_self_1 (base);
}
/* Smob type tags for threads, mutexes and condition variables,
   assigned in scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
  /* Register the thread, mutex and condition-variable smob types with
     their mark/print/free hooks.  */
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_mark (scm_tc16_thread, thread_mark);
  scm_set_smob_print (scm_tc16_thread, thread_print);
  scm_set_smob_free (scm_tc16_thread, thread_free);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
  scm_set_smob_free (scm_tc16_condvar, fat_cond_free);

  /* The real default dynamic state is installed later by
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex =
    scm_permanent_object (scm_make_recursive_mutex ());
scm_init_threads_default_dynamic_state ()
{
  /* Create the dynamic state that new threads will start with,
     derived from the current one, and protect it from GC
     permanently.  */
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = scm_permanent_object (state);
}
scm_init_thread_procs ()
{
  /* Register the Scheme procedures defined with SCM_DEFINE in this
     file (snarfed registration code).  */
#include "libguile/threads.x"
}