1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libguile/_scm.h"
34 #include "libguile/validate.h"
35 #include "libguile/root.h"
36 #include "libguile/eval.h"
37 #include "libguile/async.h"
38 #include "libguile/ports.h"
39 #include "libguile/threads.h"
40 #include "libguile/dynwind.h"
41 #include "libguile/iselect.h"
42 #include "libguile/fluids.h"
43 #include "libguile/continuations.h"
44 #include "libguile/init.h"
48 # define ETIMEDOUT WSAETIMEDOUT
52 # define pipe(fd) _pipe (fd, 256, O_BINARY)
53 #endif /* __MINGW32__ */
57 /* Make an empty queue data structure.
62 return scm_cons (SCM_EOL
, SCM_EOL
);
65 /* Put T at the back of Q and return a handle that can be used with
66 remqueue to remove T from Q again.
69 enqueue (SCM q
, SCM t
)
71 SCM c
= scm_cons (t
, SCM_EOL
);
72 if (scm_is_null (SCM_CDR (q
)))
75 SCM_SETCDR (SCM_CAR (q
), c
);
80 /* Remove the element that the handle C refers to from the queue Q. C
81 must have been returned from a call to enqueue. The return value
82 is zero when the element referred to by C has already been removed.
83 Otherwise, 1 is returned.
86 remqueue (SCM q
, SCM c
)
89 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
93 if (scm_is_eq (c
, SCM_CAR (q
)))
94 SCM_SETCAR (q
, SCM_CDR (c
));
95 SCM_SETCDR (prev
, SCM_CDR (c
));
103 /* Remove the front-most element from the queue Q and return it.
104 Return SCM_BOOL_F when Q is empty.
114 SCM_SETCDR (q
, SCM_CDR (c
));
115 if (scm_is_null (SCM_CDR (q
)))
116 SCM_SETCAR (q
, SCM_EOL
);
121 /*** Thread smob routines */
124 thread_mark (SCM obj
)
126 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
127 scm_gc_mark (t
->result
);
128 scm_gc_mark (t
->join_queue
);
129 scm_gc_mark (t
->dynwinds
);
130 scm_gc_mark (t
->active_asyncs
);
131 scm_gc_mark (t
->continuation_root
);
132 return t
->dynamic_state
;
136 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
138 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
139 scm_puts ("#<thread ", port
);
140 scm_uintprint ((size_t)t
->pthread
, 10, port
);
141 scm_puts (" (", port
);
142 scm_uintprint ((scm_t_bits
)t
, 16, port
);
143 scm_puts (")>", port
);
148 thread_free (SCM obj
)
150 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
152 scm_gc_free (t
, sizeof (*t
), "thread");
156 /*** Blocking on queues. */
158 /* See also scm_i_queue_async_cell for how such a block is
162 /* Put the current thread on QUEUE and go to sleep, waiting for it to
163 be woken up by a call to 'unblock_from_queue', or to be
164 interrupted. Upon return of this function, the current thread is
165 no longer on QUEUE, even when the sleep has been interrupted.
167 The QUEUE data structure is assumed to be protected by MUTEX and
168 the caller of block_self must hold MUTEX. It will be atomically
169 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
171 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
174 When WAITTIME is not NULL, the sleep will be aborted at that time.
176 The return value of block_self is an errno value. It will be zero
177 when the sleep has been successfully completed by a call to
178 unblock_from_queue, EINTR when it has been interrupted by the
179 delivery of a system async, and ETIMEDOUT when the timeout has
182 The system asyncs themselves are not executed by block_self.
185 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
186 const scm_t_timespec
*waittime
)
188 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
192 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
197 q_handle
= enqueue (queue
, t
->handle
);
198 if (waittime
== NULL
)
199 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
201 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
203 /* When we are still on QUEUE, we have been interrupted. We
204 report this only when no other error (such as a timeout) has
207 if (remqueue (queue
, q_handle
) && err
== 0)
210 scm_i_reset_sleep (t
);
216 /* Wake up the first thread on QUEUE, if any. The caller must hold
217 the mutex that protects QUEUE. The awoken thread is returned, or
218 #f when the queue was empty.
221 unblock_from_queue (SCM queue
)
223 SCM thread
= dequeue (queue
);
224 if (scm_is_true (thread
))
225 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
229 /* Getting into and out of guile mode.
232 /* Ken Raeburn observes that the implementation of suspend and resume
233 (and the things that build on top of them) are very likely not
234 correct (see below). We will need fix this eventually, and that's
235 why scm_leave_guile/scm_enter_guile are not exported in the API.
239 Consider this sequence:
241 Function foo, called in Guile mode, calls suspend (maybe indirectly
242 through scm_leave_guile), which does this:
244 // record top of stack for the GC
245 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
248 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
249 setjmp (t->regs); // here's most of the magic
253 Function foo has a SCM value X, a handle on a non-immediate object, in
254 a caller-saved register R, and it's the only reference to the object
257 The compiler wants to use R in suspend, so it pushes the current
258 value, X, into a stack slot which will be reloaded on exit from
259 suspend; then it loads stuff into R and goes about its business. The
260 setjmp call saves (some of) the current registers, including R, which
261 no longer contains X. (This isn't a problem for a normal
262 setjmp/longjmp situation, where longjmp would be called before
263 setjmp's caller returns; the old value for X would be loaded back from
264 the stack after the longjmp, before the function returned.)
266 So, suspend returns, loading X back into R (and invalidating the jump
267 buffer) in the process. The caller foo then goes off and calls a
268 bunch of other functions out of Guile mode, occasionally storing X on
269 the stack again, but, say, much deeper on the stack than suspend's
270 stack frame went, and the stack slot where suspend had written X has
271 long since been overwritten with other values.
273 Okay, nothing actively broken so far. Now, let garbage collection
274 run, triggered by another thread.
276 The thread calling foo is out of Guile mode at the time, so the
277 garbage collector just scans a range of stack addresses. Too bad that
278 X isn't stored there. So the pointed-to storage goes onto the free
279 list, and I think you can see where things go from there.
281 Is there anything I'm missing that'll prevent this scenario from
282 happening? I mean, aside from, "well, suspend and scm_leave_guile
283 don't have many local variables, so they probably won't need to save
284 any registers on most systems, so we hope everything will wind up in
285 the jump buffer and we'll just get away with it"?
287 (And, going the other direction, if scm_leave_guile and suspend push
288 the stack pointer over onto a new page, and foo doesn't make further
289 function calls and thus the stack pointer no longer includes that
290 page, are we guaranteed that the kernel cannot release the now-unused
291 stack page that contains the top-of-stack pointer we just saved? I
292 don't know if any OS actually does that. If it does, we could get
293 faults in garbage collection.)
295 I don't think scm_without_guile has to have this problem, as it gets
296 more control over the stack handling -- but it should call setjmp
297 itself. I'd probably try something like:
299 // record top of stack for the GC
300 t->top = SCM_STACK_PTR (&t);
302 SCM_FLUSH_REGISTER_WINDOWS;
307 ... though even that's making some assumptions about the stack
308 ordering of local variables versus caller-saved registers.
310 For something like scm_leave_guile to work, I don't think it can just
311 rely on invalidated jump buffers. A valid jump buffer, and a handle
312 on the stack state at the point when the jump buffer was initialized,
313 together, would work fine, but I think then we're talking about macros
314 invoking setjmp in the caller's stack frame, and requiring that the
315 caller of scm_leave_guile also call scm_enter_guile before returning,
316 kind of like pthread_cleanup_push/pop calls that have to be paired up
317 in a function. (In fact, the pthread ones have to be paired up
318 syntactically, as if they might expand to a compound statement
319 incorporating the user's code, and invoking a compiler's
320 exception-handling primitives. Which might be something to think
321 about for cases where Guile is used with C++ exceptions or
325 scm_i_pthread_key_t scm_i_thread_key
;
328 resume (scm_i_thread
*t
)
331 if (t
->clear_freelists_p
)
333 *SCM_FREELIST_LOC (scm_i_freelist
) = SCM_EOL
;
334 *SCM_FREELIST_LOC (scm_i_freelist2
) = SCM_EOL
;
335 t
->clear_freelists_p
= 0;
339 typedef void* scm_t_guile_ticket
;
342 scm_enter_guile (scm_t_guile_ticket ticket
)
344 scm_i_thread
*t
= (scm_i_thread
*)ticket
;
347 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
352 static scm_i_thread
*
355 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
357 /* record top of stack for the GC */
358 t
->top
= SCM_STACK_PTR (&t
);
359 /* save registers. */
360 SCM_FLUSH_REGISTER_WINDOWS
;
365 static scm_t_guile_ticket
368 scm_i_thread
*t
= suspend ();
369 scm_i_pthread_mutex_unlock (&t
->heap_mutex
);
370 return (scm_t_guile_ticket
) t
;
373 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
374 static scm_i_thread
*all_threads
= NULL
;
375 static int thread_count
;
377 static SCM scm_i_default_dynamic_state
;
379 /* Perform first stage of thread initialisation, in non-guile mode.
382 guilify_self_1 (SCM_STACKITEM
*base
)
384 scm_i_thread
*t
= malloc (sizeof (scm_i_thread
));
386 t
->pthread
= scm_i_pthread_self ();
387 t
->handle
= SCM_BOOL_F
;
388 t
->result
= SCM_BOOL_F
;
389 t
->join_queue
= SCM_EOL
;
390 t
->dynamic_state
= SCM_BOOL_F
;
391 t
->dynwinds
= SCM_EOL
;
392 t
->active_asyncs
= SCM_EOL
;
394 t
->pending_asyncs
= 1;
395 t
->last_debug_frame
= NULL
;
397 t
->continuation_root
= SCM_EOL
;
398 t
->continuation_base
= base
;
399 scm_i_pthread_cond_init (&t
->sleep_cond
, NULL
);
400 t
->sleep_mutex
= NULL
;
401 t
->sleep_object
= SCM_BOOL_F
;
403 /* XXX - check for errors. */
404 pipe (t
->sleep_pipe
);
405 scm_i_pthread_mutex_init (&t
->heap_mutex
, NULL
);
406 t
->clear_freelists_p
= 0;
410 t
->freelist
= SCM_EOL
;
411 t
->freelist2
= SCM_EOL
;
412 SCM_SET_FREELIST_LOC (scm_i_freelist
, &t
->freelist
);
413 SCM_SET_FREELIST_LOC (scm_i_freelist2
, &t
->freelist2
);
415 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
417 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
419 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
420 t
->next_thread
= all_threads
;
423 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
426 /* Perform second stage of thread initialisation, in guile mode.
429 guilify_self_2 (SCM parent
)
431 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
433 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
434 scm_gc_register_collectable_memory (t
, sizeof (scm_i_thread
), "thread");
435 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
436 t
->continuation_base
= t
->base
;
438 if (scm_is_true (parent
))
439 t
->dynamic_state
= scm_make_dynamic_state (parent
);
441 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
443 t
->join_queue
= make_queue ();
447 /* Perform thread tear-down, in guile mode.
450 do_thread_exit (void *v
)
452 scm_i_thread
*t
= (scm_i_thread
*)v
;
454 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex
);
457 close (t
->sleep_pipe
[0]);
458 close (t
->sleep_pipe
[1]);
459 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
462 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
467 on_thread_exit (void *v
)
469 scm_i_thread
*t
= (scm_i_thread
*)v
, **tp
;
471 scm_i_pthread_setspecific (scm_i_thread_key
, v
);
473 /* Unblocking the joining threads needs to happen in guile mode
474 since the queue is a SCM data structure.
476 scm_with_guile (do_thread_exit
, v
);
478 /* Removing ourself from the list of all threads needs to happen in
479 non-guile mode since all SCM values on our stack become
480 unprotected once we are no longer in the list.
483 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
484 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
487 *tp
= t
->next_thread
;
491 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
493 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
496 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
499 init_thread_key (void)
501 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
504 /* Perform any initializations necessary to bring the current thread
505 into guile mode, initializing Guile itself, if necessary.
507 BASE is the stack base to use with GC.
509 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
510 which case the default dynamic state is used.
512 Return zero when the thread was in guile mode already; otherwise
517 scm_i_init_thread_for_guile (SCM_STACKITEM
*base
, SCM parent
)
521 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
523 if ((t
= SCM_I_CURRENT_THREAD
) == NULL
)
525 /* This thread has not been guilified yet.
528 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
529 if (scm_initialized_p
== 0)
531 /* First thread ever to enter Guile. Run the full
534 scm_i_init_guile (base
);
535 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
539 /* Guile is already initialized, but this thread enters it for
540 the first time. Only initialize this thread.
542 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
543 guilify_self_1 (base
);
544 guilify_self_2 (parent
);
550 /* This thread is already guilified but not in guile mode, just
553 XXX - base might be lower than when this thread was first
556 scm_enter_guile ((scm_t_guile_ticket
) t
);
561 /* Thread is already in guile mode. Nothing to do.
567 #ifdef HAVE_LIBC_STACK_END
569 extern void *__libc_stack_end
;
571 #if SCM_USE_PTHREAD_THREADS
572 #ifdef HAVE_PTHREAD_ATTR_GETSTACK
574 #define HAVE_GET_THREAD_STACK_BASE
576 static SCM_STACKITEM
*
577 get_thread_stack_base ()
583 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
584 for the main thread, but we can use __libc_stack_end in that
588 pthread_getattr_np (pthread_self (), &attr
);
589 pthread_attr_getstack (&attr
, &start
, &size
);
590 end
= (char *)start
+ size
;
592 if ((void *)&attr
< start
|| (void *)&attr
>= end
)
593 return __libc_stack_end
;
596 #if SCM_STACK_GROWS_UP
604 #endif /* HAVE_PTHREAD_ATTR_GETSTACK */
606 #else /* !SCM_USE_PTHREAD_THREADS */
608 #define HAVE_GET_THREAD_STACK_BASE
610 static SCM_STACKITEM
*
611 get_thread_stack_base ()
613 return __libc_stack_end
;
616 #endif /* !SCM_USE_PTHREAD_THREADS */
617 #endif /* HAVE_LIBC_STACK_END */
619 #ifdef HAVE_GET_THREAD_STACK_BASE
624 scm_i_init_thread_for_guile (get_thread_stack_base (),
625 scm_i_default_dynamic_state
);
631 scm_with_guile (void *(*func
)(void *), void *data
)
633 return scm_i_with_guile_and_parent (func
, data
,
634 scm_i_default_dynamic_state
);
638 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
,
643 SCM_STACKITEM base_item
;
644 really_entered
= scm_i_init_thread_for_guile (&base_item
, parent
);
645 res
= scm_c_with_continuation_barrier (func
, data
);
652 scm_without_guile (void *(*func
)(void *), void *data
)
655 scm_t_guile_ticket t
;
656 t
= scm_leave_guile ();
662 /*** Thread creation */
669 scm_i_pthread_mutex_t mutex
;
670 scm_i_pthread_cond_t cond
;
674 really_launch (void *d
)
676 launch_data
*data
= (launch_data
*)d
;
677 SCM thunk
= data
->thunk
, handler
= data
->handler
;
680 t
= SCM_I_CURRENT_THREAD
;
682 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
683 data
->thread
= scm_current_thread ();
684 scm_i_pthread_cond_signal (&data
->cond
);
685 scm_i_pthread_mutex_unlock (&data
->mutex
);
687 if (SCM_UNBNDP (handler
))
688 t
->result
= scm_call_0 (thunk
);
690 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
696 launch_thread (void *d
)
698 launch_data
*data
= (launch_data
*)d
;
699 scm_i_pthread_detach (scm_i_pthread_self ());
700 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
704 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
705 (SCM thunk
, SCM handler
),
706 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
707 "returning a new thread object representing the thread. The procedure\n"
708 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
710 "When @var{handler} is specified, then @var{thunk} is called from\n"
711 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
712 "handler. This catch is established inside the continuation barrier.\n"
714 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
715 "the @emph{exit value} of the thread and the thread is terminated.")
716 #define FUNC_NAME s_scm_call_with_new_thread
722 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
723 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
724 handler
, SCM_ARG2
, FUNC_NAME
);
726 data
.parent
= scm_current_dynamic_state ();
728 data
.handler
= handler
;
729 data
.thread
= SCM_BOOL_F
;
730 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
731 scm_i_pthread_cond_init (&data
.cond
, NULL
);
733 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
734 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
737 scm_i_pthread_mutex_unlock (&data
.mutex
);
741 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
742 scm_i_pthread_mutex_unlock (&data
.mutex
);
750 scm_t_catch_body body
;
752 scm_t_catch_handler handler
;
755 scm_i_pthread_mutex_t mutex
;
756 scm_i_pthread_cond_t cond
;
760 really_spawn (void *d
)
762 spawn_data
*data
= (spawn_data
*)d
;
763 scm_t_catch_body body
= data
->body
;
764 void *body_data
= data
->body_data
;
765 scm_t_catch_handler handler
= data
->handler
;
766 void *handler_data
= data
->handler_data
;
767 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
769 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
770 data
->thread
= scm_current_thread ();
771 scm_i_pthread_cond_signal (&data
->cond
);
772 scm_i_pthread_mutex_unlock (&data
->mutex
);
775 t
->result
= body (body_data
);
777 t
->result
= scm_internal_catch (SCM_BOOL_T
,
779 handler
, handler_data
);
785 spawn_thread (void *d
)
787 spawn_data
*data
= (spawn_data
*)d
;
788 scm_i_pthread_detach (scm_i_pthread_self ());
789 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
794 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
795 scm_t_catch_handler handler
, void *handler_data
)
801 data
.parent
= scm_current_dynamic_state ();
803 data
.body_data
= body_data
;
804 data
.handler
= handler
;
805 data
.handler_data
= handler_data
;
806 data
.thread
= SCM_BOOL_F
;
807 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
808 scm_i_pthread_cond_init (&data
.cond
, NULL
);
810 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
811 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
814 scm_i_pthread_mutex_unlock (&data
.mutex
);
818 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
819 scm_i_pthread_mutex_unlock (&data
.mutex
);
824 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
826 "Move the calling thread to the end of the scheduling queue.")
827 #define FUNC_NAME s_scm_yield
829 return scm_from_bool (scm_i_sched_yield ());
833 SCM_DEFINE (scm_join_thread
, "join-thread", 1, 0, 0,
835 "Suspend execution of the calling thread until the target @var{thread} "
836 "terminates, unless the target @var{thread} has already terminated. ")
837 #define FUNC_NAME s_scm_join_thread
842 SCM_VALIDATE_THREAD (1, thread
);
843 if (scm_is_eq (scm_current_thread (), thread
))
844 SCM_MISC_ERROR ("can not join the current thread", SCM_EOL
);
846 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex
);
848 t
= SCM_I_THREAD_DATA (thread
);
853 block_self (t
->join_queue
, thread
, &thread_admin_mutex
, NULL
);
856 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
858 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex
);
863 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
870 /* We implement our own mutex type since we want them to be 'fair', we
871 want to do fancy things while waiting for them (like running
872 asyncs) and we might want to add things that are nice for
877 scm_i_pthread_mutex_t lock
;
879 int level
; /* how much the owner owns us.
880 < 0 for non-recursive mutexes */
881 SCM waiting
; /* the threads waiting for this mutex. */
884 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
885 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
888 fat_mutex_mark (SCM mx
)
890 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
891 scm_gc_mark (m
->owner
);
896 fat_mutex_free (SCM mx
)
898 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
899 scm_i_pthread_mutex_destroy (&m
->lock
);
900 scm_gc_free (m
, sizeof (fat_mutex
), "mutex");
905 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
907 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
908 scm_puts ("#<mutex ", port
);
909 scm_uintprint ((scm_t_bits
)m
, 16, port
);
910 scm_puts (">", port
);
915 make_fat_mutex (int recursive
)
920 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
921 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
922 m
->owner
= SCM_BOOL_F
;
923 m
->level
= recursive
? 0 : -1;
924 m
->waiting
= SCM_EOL
;
925 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
926 m
->waiting
= make_queue ();
930 SCM_DEFINE (scm_make_mutex
, "make-mutex", 0, 0, 0,
932 "Create a new mutex. ")
933 #define FUNC_NAME s_scm_make_mutex
935 return make_fat_mutex (0);
939 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
941 "Create a new recursive mutex. ")
942 #define FUNC_NAME s_scm_make_recursive_mutex
944 return make_fat_mutex (1);
949 fat_mutex_lock (SCM mutex
)
951 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
952 SCM thread
= scm_current_thread ();
955 scm_i_scm_pthread_mutex_lock (&m
->lock
);
956 if (scm_is_false (m
->owner
))
958 else if (scm_is_eq (m
->owner
, thread
))
963 msg
= "mutex already locked by current thread";
969 block_self (m
->waiting
, mutex
, &m
->lock
, NULL
);
970 if (scm_is_eq (m
->owner
, thread
))
972 scm_i_pthread_mutex_unlock (&m
->lock
);
974 scm_i_scm_pthread_mutex_lock (&m
->lock
);
977 scm_i_pthread_mutex_unlock (&m
->lock
);
981 SCM_DEFINE (scm_lock_mutex
, "lock-mutex", 1, 0, 0,
983 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
984 "blocks until the mutex becomes available. The function returns when "
985 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
986 "a thread already owns will succeed right away and will not block the "
987 "thread. That is, Guile's mutexes are @emph{recursive}. ")
988 #define FUNC_NAME s_scm_lock_mutex
992 SCM_VALIDATE_MUTEX (1, mx
);
993 msg
= fat_mutex_lock (mx
);
995 scm_misc_error (NULL
, msg
, SCM_EOL
);
1001 scm_dynwind_lock_mutex (SCM mutex
)
1003 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM
))scm_unlock_mutex
, mutex
,
1004 SCM_F_WIND_EXPLICITLY
);
1005 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM
))scm_lock_mutex
, mutex
,
1006 SCM_F_WIND_EXPLICITLY
);
1010 fat_mutex_trylock (fat_mutex
*m
, int *resp
)
1013 SCM thread
= scm_current_thread ();
1016 scm_i_pthread_mutex_lock (&m
->lock
);
1017 if (scm_is_false (m
->owner
))
1019 else if (scm_is_eq (m
->owner
, thread
))
1024 msg
= "mutex already locked by current thread";
1028 scm_i_pthread_mutex_unlock (&m
->lock
);
1032 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1034 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1035 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1036 #define FUNC_NAME s_scm_try_mutex
1041 SCM_VALIDATE_MUTEX (1, mutex
);
1043 msg
= fat_mutex_trylock (SCM_MUTEX_DATA (mutex
), &res
);
1045 scm_misc_error (NULL
, msg
, SCM_EOL
);
1046 return scm_from_bool (res
);
1051 fat_mutex_unlock (fat_mutex
*m
)
1055 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1056 if (!scm_is_eq (m
->owner
, scm_current_thread ()))
1058 if (scm_is_false (m
->owner
))
1059 msg
= "mutex not locked";
1061 msg
= "mutex not locked by current thread";
1063 else if (m
->level
> 0)
1066 m
->owner
= unblock_from_queue (m
->waiting
);
1067 scm_i_pthread_mutex_unlock (&m
->lock
);
1072 SCM_DEFINE (scm_unlock_mutex
, "unlock-mutex", 1, 0, 0,
1074 "Unlocks @var{mutex} if the calling thread owns the lock on "
1075 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1076 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1077 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1078 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1079 "with a call to @code{unlock-mutex}. Only the last call to "
1080 "@code{unlock-mutex} will actually unlock the mutex. ")
1081 #define FUNC_NAME s_scm_unlock_mutex
1084 SCM_VALIDATE_MUTEX (1, mx
);
1086 msg
= fat_mutex_unlock (SCM_MUTEX_DATA (mx
));
1088 scm_misc_error (NULL
, msg
, SCM_EOL
);
1095 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1097 "Return the thread owning @var{mx}, or @code{#f}.")
1098 #define FUNC_NAME s_scm_mutex_owner
1100 SCM_VALIDATE_MUTEX (1, mx
);
1101 return (SCM_MUTEX_DATA(mx
))->owner
;
1105 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1107 "Return the lock level of a recursive mutex, or -1\n"
1108 "for a standard mutex.")
1109 #define FUNC_NAME s_scm_mutex_level
1111 SCM_VALIDATE_MUTEX (1, mx
);
1112 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1118 /*** Fat condition variables */
1121 scm_i_pthread_mutex_t lock
;
1122 SCM waiting
; /* the threads waiting for this condition. */
1125 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1126 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1129 fat_cond_mark (SCM cv
)
1131 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1136 fat_cond_free (SCM mx
)
1138 fat_cond
*c
= SCM_CONDVAR_DATA (mx
);
1139 scm_i_pthread_mutex_destroy (&c
->lock
);
1140 scm_gc_free (c
, sizeof (fat_cond
), "condition-variable");
1145 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1147 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1148 scm_puts ("#<condition-variable ", port
);
1149 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1150 scm_puts (">", port
);
1154 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1156 "Make a new condition variable.")
1157 #define FUNC_NAME s_scm_make_condition_variable
1162 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1163 scm_i_pthread_mutex_init (&c
->lock
, 0);
1164 c
->waiting
= SCM_EOL
;
1165 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1166 c
->waiting
= make_queue ();
1172 fat_cond_timedwait (SCM cond
, SCM mutex
,
1173 const scm_t_timespec
*waittime
)
1175 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1176 fat_cond
*c
= SCM_CONDVAR_DATA (cond
);
1177 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1183 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1184 msg
= fat_mutex_unlock (m
);
1188 err
= block_self (c
->waiting
, cond
, &c
->lock
, waittime
);
1189 scm_i_pthread_mutex_unlock (&c
->lock
);
1190 fat_mutex_lock (mutex
);
1193 scm_i_pthread_mutex_unlock (&c
->lock
);
1198 scm_misc_error (NULL
, msg
, SCM_EOL
);
1200 scm_remember_upto_here_2 (cond
, mutex
);
1204 if (err
== ETIMEDOUT
)
1209 scm_syserror (NULL
);
1214 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1215 (SCM cv
, SCM mx
, SCM t
),
1216 "Wait until @var{cond-var} has been signalled. While waiting, "
1217 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1218 "is locked again when this function returns. When @var{time} is given, "
1219 "it specifies a point in time where the waiting should be aborted. It "
1220 "can be either a integer as returned by @code{current-time} or a pair "
1221 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1222 "mutex is locked and @code{#f} is returned. When the condition "
1223 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1225 #define FUNC_NAME s_scm_timed_wait_condition_variable
1227 scm_t_timespec waittime
, *waitptr
= NULL
;
1229 SCM_VALIDATE_CONDVAR (1, cv
);
1230 SCM_VALIDATE_MUTEX (2, mx
);
1232 if (!SCM_UNBNDP (t
))
1234 if (scm_is_pair (t
))
1236 waittime
.tv_sec
= scm_to_ulong (SCM_CAR (t
));
1237 waittime
.tv_nsec
= scm_to_ulong (SCM_CAR (t
)) * 1000;
1241 waittime
.tv_sec
= scm_to_ulong (t
);
1242 waittime
.tv_nsec
= 0;
1244 waitptr
= &waittime
;
1247 return scm_from_bool (fat_cond_timedwait (cv
, mx
, waitptr
));
1252 fat_cond_signal (fat_cond
*c
)
1254 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1255 unblock_from_queue (c
->waiting
);
1256 scm_i_pthread_mutex_unlock (&c
->lock
);
1259 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1261 "Wake up one thread that is waiting for @var{cv}")
1262 #define FUNC_NAME s_scm_signal_condition_variable
1264 SCM_VALIDATE_CONDVAR (1, cv
);
1265 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1271 fat_cond_broadcast (fat_cond
*c
)
1273 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1274 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1276 scm_i_pthread_mutex_unlock (&c
->lock
);
1279 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1281 "Wake up all threads that are waiting for @var{cv}. ")
1282 #define FUNC_NAME s_scm_broadcast_condition_variable
1284 SCM_VALIDATE_CONDVAR (1, cv
);
1285 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1290 /*** Marking stacks */
1292 /* XXX - what to do with this? Do we need to handle this for blocked
1296 # define SCM_MARK_BACKING_STORE() do { \
1298 SCM_STACKITEM * top, * bot; \
1299 getcontext (&ctx); \
1300 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1301 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1302 / sizeof (SCM_STACKITEM))); \
1303 bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
1304 top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
1305 scm_mark_locations (bot, top - bot); } while (0)
1307 # define SCM_MARK_BACKING_STORE()
1311 scm_threads_mark_stacks (void)
1314 for (t
= all_threads
; t
; t
= t
->next_thread
)
1316 /* Check that thread has indeed been suspended.
1320 scm_gc_mark (t
->handle
);
1322 #if SCM_STACK_GROWS_UP
1323 scm_mark_locations (t
->base
, t
->top
- t
->base
);
1325 scm_mark_locations (t
->top
, t
->base
- t
->top
);
1327 scm_mark_locations ((SCM_STACKITEM
*) t
->regs
,
1328 ((size_t) sizeof(t
->regs
)
1329 / sizeof (SCM_STACKITEM
)));
1332 SCM_MARK_BACKING_STORE ();
1338 scm_std_select (int nfds
,
1339 SELECT_TYPE
*readfds
,
1340 SELECT_TYPE
*writefds
,
1341 SELECT_TYPE
*exceptfds
,
1342 struct timeval
*timeout
)
1345 int res
, eno
, wakeup_fd
;
1346 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1347 scm_t_guile_ticket ticket
;
1349 if (readfds
== NULL
)
1351 FD_ZERO (&my_readfds
);
1352 readfds
= &my_readfds
;
1355 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1358 wakeup_fd
= t
->sleep_pipe
[0];
1359 ticket
= scm_leave_guile ();
1360 FD_SET (wakeup_fd
, readfds
);
1361 if (wakeup_fd
>= nfds
)
1363 res
= select (nfds
, readfds
, writefds
, exceptfds
, timeout
);
1366 scm_enter_guile (ticket
);
1368 scm_i_reset_sleep (t
);
1370 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1373 read (wakeup_fd
, &dummy
, 1);
1374 FD_CLR (wakeup_fd
, readfds
);
1386 /* Convenience API for blocking while in guile mode. */
1388 #if SCM_USE_PTHREAD_THREADS
1391 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1393 scm_t_guile_ticket t
= scm_leave_guile ();
1394 int res
= scm_i_pthread_mutex_lock (mutex
);
1395 scm_enter_guile (t
);
1402 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1406 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1408 scm_i_scm_pthread_mutex_lock (mutex
);
1409 scm_dynwind_unwind_handler (unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1413 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1415 scm_t_guile_ticket t
= scm_leave_guile ();
1416 int res
= scm_i_pthread_cond_wait (cond
, mutex
);
1417 scm_enter_guile (t
);
1422 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1423 scm_i_pthread_mutex_t
*mutex
,
1424 const scm_t_timespec
*wt
)
1426 scm_t_guile_ticket t
= scm_leave_guile ();
1427 int res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1428 scm_enter_guile (t
);
/* Sleep for USECS microseconds via scm_std_select, so the sleep can be
   interrupted by the wakeup machinery.  Returns the time left in the
   timeout.  NOTE(review): this relies on select() updating the timeval
   with the remaining time, which is Linux behavior and not guaranteed
   by POSIX — confirm on other platforms.  */
unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}
/* Sleep for SECS seconds via scm_std_select (interruptible).  Returns
   the seconds remaining in the timeout, mirroring sleep(3).  See the
   portability note on scm_std_usleep about select() updating the
   timeval.  */
unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_sec = secs;
  tv.tv_usec = 0;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}
1456 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1458 "Return the thread that called this function.")
1459 #define FUNC_NAME s_scm_current_thread
1461 return SCM_I_CURRENT_THREAD
->handle
;
1466 scm_c_make_list (size_t n
, SCM fill
)
1470 res
= scm_cons (fill
, res
);
1474 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1476 "Return a list of all threads.")
1477 #define FUNC_NAME s_scm_all_threads
1479 /* We can not allocate while holding the thread_admin_mutex because
1480 of the way GC is done.
1482 int n
= thread_count
;
1484 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1486 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1488 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
1490 SCM_SETCAR (*l
, t
->handle
);
1491 l
= SCM_CDRLOC (*l
);
1495 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1500 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1502 "Return @code{#t} iff @var{thread} has exited.\n")
1503 #define FUNC_NAME s_scm_thread_exited_p
1505 return scm_from_bool (scm_c_thread_exited_p (thread
));
1510 scm_c_thread_exited_p (SCM thread
)
1511 #define FUNC_NAME s_scm_thread_exited_p
1514 SCM_VALIDATE_THREAD (1, thread
);
1515 t
= SCM_I_THREAD_DATA (thread
);
1520 static scm_i_pthread_cond_t wake_up_cond
;
1521 int scm_i_thread_go_to_sleep
;
1522 static int threads_initialized_p
= 0;
1525 scm_i_thread_put_to_sleep ()
1527 if (threads_initialized_p
)
1532 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1534 /* Signal all threads to go to sleep
1536 scm_i_thread_go_to_sleep
= 1;
1537 for (t
= all_threads
; t
; t
= t
->next_thread
)
1538 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
1539 scm_i_thread_go_to_sleep
= 0;
1544 scm_i_thread_invalidate_freelists ()
1546 /* thread_admin_mutex is already locked. */
1549 for (t
= all_threads
; t
; t
= t
->next_thread
)
1550 if (t
!= SCM_I_CURRENT_THREAD
)
1551 t
->clear_freelists_p
= 1;
1555 scm_i_thread_wake_up ()
1557 if (threads_initialized_p
)
1561 scm_i_pthread_cond_broadcast (&wake_up_cond
);
1562 for (t
= all_threads
; t
; t
= t
->next_thread
)
1563 scm_i_pthread_mutex_unlock (&t
->heap_mutex
);
1564 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1565 scm_enter_guile ((scm_t_guile_ticket
) SCM_I_CURRENT_THREAD
);
1570 scm_i_thread_sleep_for_gc ()
1572 scm_i_thread
*t
= suspend ();
1573 scm_i_pthread_cond_wait (&wake_up_cond
, &t
->heap_mutex
);
1577 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
1579 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
1580 int scm_i_critical_section_level
= 0;
1582 static SCM dynwind_critical_section_mutex
;
1585 scm_dynwind_critical_section (SCM mutex
)
1587 if (scm_is_false (mutex
))
1588 mutex
= dynwind_critical_section_mutex
;
1589 scm_dynwind_lock_mutex (mutex
);
1590 scm_dynwind_block_asyncs ();
1593 /*** Initialization */
1595 scm_i_pthread_key_t scm_i_freelist
, scm_i_freelist2
;
1596 scm_i_pthread_mutex_t scm_i_misc_mutex
;
1598 #if SCM_USE_PTHREAD_THREADS
1599 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
1603 scm_threads_prehistory (SCM_STACKITEM
*base
)
1605 #if SCM_USE_PTHREAD_THREADS
1606 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
1607 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
1608 PTHREAD_MUTEX_RECURSIVE
);
1611 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
1612 scm_i_pthread_mutexattr_recursive
);
1613 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
1614 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
1615 scm_i_pthread_key_create (&scm_i_freelist
, NULL
);
1616 scm_i_pthread_key_create (&scm_i_freelist2
, NULL
);
1618 guilify_self_1 (base
);
1621 scm_t_bits scm_tc16_thread
;
1622 scm_t_bits scm_tc16_mutex
;
1623 scm_t_bits scm_tc16_condvar
;
1628 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
1629 scm_set_smob_mark (scm_tc16_thread
, thread_mark
);
1630 scm_set_smob_print (scm_tc16_thread
, thread_print
);
1631 scm_set_smob_free (scm_tc16_thread
, thread_free
);
1633 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
1634 scm_set_smob_mark (scm_tc16_mutex
, fat_mutex_mark
);
1635 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
1636 scm_set_smob_free (scm_tc16_mutex
, fat_mutex_free
);
1638 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
1640 scm_set_smob_mark (scm_tc16_condvar
, fat_cond_mark
);
1641 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
1642 scm_set_smob_free (scm_tc16_condvar
, fat_cond_free
);
1644 scm_i_default_dynamic_state
= SCM_BOOL_F
;
1645 guilify_self_2 (SCM_BOOL_F
);
1646 threads_initialized_p
= 1;
1648 dynwind_critical_section_mutex
=
1649 scm_permanent_object (scm_make_recursive_mutex ());
1653 scm_init_threads_default_dynamic_state ()
1655 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
1656 scm_i_default_dynamic_state
= scm_permanent_object (state
);
1660 scm_init_thread_procs ()
1662 #include "libguile/threads.x"