/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
23 #include "libguile/_scm.h"
34 #include "libguile/validate.h"
35 #include "libguile/root.h"
36 #include "libguile/eval.h"
37 #include "libguile/async.h"
38 #include "libguile/ports.h"
39 #include "libguile/threads.h"
40 #include "libguile/dynwind.h"
41 #include "libguile/iselect.h"
42 #include "libguile/fluids.h"
43 #include "libguile/continuations.h"
44 #include "libguile/gc.h"
45 #include "libguile/init.h"
49 # define ETIMEDOUT WSAETIMEDOUT
53 # define pipe(fd) _pipe (fd, 256, O_BINARY)
54 #endif /* __MINGW32__ */
58 /* Make an empty queue data structure.
63 return scm_cons (SCM_EOL
, SCM_EOL
);
66 /* Put T at the back of Q and return a handle that can be used with
67 remqueue to remove T from Q again.
70 enqueue (SCM q
, SCM t
)
72 SCM c
= scm_cons (t
, SCM_EOL
);
73 if (scm_is_null (SCM_CDR (q
)))
76 SCM_SETCDR (SCM_CAR (q
), c
);
81 /* Remove the element that the handle C refers to from the queue Q. C
82 must have been returned from a call to enqueue. The return value
83 is zero when the element referred to by C has already been removed.
84 Otherwise, 1 is returned.
87 remqueue (SCM q
, SCM c
)
90 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
94 if (scm_is_eq (c
, SCM_CAR (q
)))
95 SCM_SETCAR (q
, SCM_CDR (c
));
96 SCM_SETCDR (prev
, SCM_CDR (c
));
104 /* Remove the front-most element from the queue Q and return it.
105 Return SCM_BOOL_F when Q is empty.
115 SCM_SETCDR (q
, SCM_CDR (c
));
116 if (scm_is_null (SCM_CDR (q
)))
117 SCM_SETCAR (q
, SCM_EOL
);
122 /*** Thread smob routines */
125 thread_mark (SCM obj
)
127 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
128 scm_gc_mark (t
->result
);
129 scm_gc_mark (t
->join_queue
);
130 scm_gc_mark (t
->dynwinds
);
131 scm_gc_mark (t
->active_asyncs
);
132 scm_gc_mark (t
->continuation_root
);
133 return t
->dynamic_state
;
137 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
139 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
140 scm_puts ("#<thread ", port
);
141 scm_uintprint ((size_t)t
->pthread
, 10, port
);
142 scm_puts (" (", port
);
143 scm_uintprint ((scm_t_bits
)t
, 16, port
);
144 scm_puts (")>", port
);
149 thread_free (SCM obj
)
151 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
153 scm_gc_free (t
, sizeof (*t
), "thread");
157 /*** Blocking on queues. */
159 /* See also scm_i_queue_async_cell for how such a block is
163 /* Put the current thread on QUEUE and go to sleep, waiting for it to
164 be woken up by a call to 'unblock_from_queue', or to be
165 interrupted. Upon return of this function, the current thread is
166 no longer on QUEUE, even when the sleep has been interrupted.
168 The QUEUE data structure is assumed to be protected by MUTEX and
169 the caller of block_self must hold MUTEX. It will be atomically
170 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
172 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
175 When WAITTIME is not NULL, the sleep will be aborted at that time.
177 The return value of block_self is an errno value. It will be zero
178 when the sleep has been successfully completed by a call to
179 unblock_from_queue, EINTR when it has been interrupted by the
180 delivery of a system async, and ETIMEDOUT when the timeout has
183 The system asyncs themselves are not executed by block_self.
186 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
187 const scm_t_timespec
*waittime
)
189 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
193 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
198 q_handle
= enqueue (queue
, t
->handle
);
199 if (waittime
== NULL
)
200 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
202 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
204 /* When we are still on QUEUE, we have been interrupted. We
205 report this only when no other error (such as a timeout) has
208 if (remqueue (queue
, q_handle
) && err
== 0)
211 scm_i_reset_sleep (t
);
217 /* Wake up the first thread on QUEUE, if any. The caller must hold
218 the mutex that protects QUEUE. The awoken thread is returned, or
219 #f when the queue was empty.
222 unblock_from_queue (SCM queue
)
224 SCM thread
= dequeue (queue
);
225 if (scm_is_true (thread
))
226 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
230 /* Getting into and out of guile mode.
233 /* Ken Raeburn observes that the implementation of suspend and resume
234 (and the things that build on top of them) are very likely not
235 correct (see below). We will need fix this eventually, and that's
236 why scm_leave_guile/scm_enter_guile are not exported in the API.
240 Consider this sequence:
242 Function foo, called in Guile mode, calls suspend (maybe indirectly
243 through scm_leave_guile), which does this:
245 // record top of stack for the GC
246 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
249 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
250 setjmp (t->regs); // here's most of the magic
254 Function foo has a SCM value X, a handle on a non-immediate object, in
255 a caller-saved register R, and it's the only reference to the object
258 The compiler wants to use R in suspend, so it pushes the current
259 value, X, into a stack slot which will be reloaded on exit from
260 suspend; then it loads stuff into R and goes about its business. The
261 setjmp call saves (some of) the current registers, including R, which
262 no longer contains X. (This isn't a problem for a normal
263 setjmp/longjmp situation, where longjmp would be called before
264 setjmp's caller returns; the old value for X would be loaded back from
265 the stack after the longjmp, before the function returned.)
267 So, suspend returns, loading X back into R (and invalidating the jump
268 buffer) in the process. The caller foo then goes off and calls a
269 bunch of other functions out of Guile mode, occasionally storing X on
270 the stack again, but, say, much deeper on the stack than suspend's
271 stack frame went, and the stack slot where suspend had written X has
272 long since been overwritten with other values.
274 Okay, nothing actively broken so far. Now, let garbage collection
275 run, triggered by another thread.
277 The thread calling foo is out of Guile mode at the time, so the
278 garbage collector just scans a range of stack addresses. Too bad that
279 X isn't stored there. So the pointed-to storage goes onto the free
280 list, and I think you can see where things go from there.
282 Is there anything I'm missing that'll prevent this scenario from
283 happening? I mean, aside from, "well, suspend and scm_leave_guile
284 don't have many local variables, so they probably won't need to save
285 any registers on most systems, so we hope everything will wind up in
286 the jump buffer and we'll just get away with it"?
288 (And, going the other direction, if scm_leave_guile and suspend push
289 the stack pointer over onto a new page, and foo doesn't make further
290 function calls and thus the stack pointer no longer includes that
291 page, are we guaranteed that the kernel cannot release the now-unused
292 stack page that contains the top-of-stack pointer we just saved? I
293 don't know if any OS actually does that. If it does, we could get
294 faults in garbage collection.)
296 I don't think scm_without_guile has to have this problem, as it gets
297 more control over the stack handling -- but it should call setjmp
298 itself. I'd probably try something like:
300 // record top of stack for the GC
301 t->top = SCM_STACK_PTR (&t);
303 SCM_FLUSH_REGISTER_WINDOWS;
308 ... though even that's making some assumptions about the stack
309 ordering of local variables versus caller-saved registers.
311 For something like scm_leave_guile to work, I don't think it can just
312 rely on invalidated jump buffers. A valid jump buffer, and a handle
313 on the stack state at the point when the jump buffer was initialized,
314 together, would work fine, but I think then we're talking about macros
315 invoking setjmp in the caller's stack frame, and requiring that the
316 caller of scm_leave_guile also call scm_enter_guile before returning,
317 kind of like pthread_cleanup_push/pop calls that have to be paired up
318 in a function. (In fact, the pthread ones have to be paired up
319 syntactically, as if they might expand to a compound statement
320 incorporating the user's code, and invoking a compiler's
321 exception-handling primitives. Which might be something to think
322 about for cases where Guile is used with C++ exceptions or
326 scm_i_pthread_key_t scm_i_thread_key
;
329 resume (scm_i_thread
*t
)
332 if (t
->clear_freelists_p
)
334 *SCM_FREELIST_LOC (scm_i_freelist
) = SCM_EOL
;
335 *SCM_FREELIST_LOC (scm_i_freelist2
) = SCM_EOL
;
336 t
->clear_freelists_p
= 0;
340 typedef void* scm_t_guile_ticket
;
343 scm_enter_guile (scm_t_guile_ticket ticket
)
345 scm_i_thread
*t
= (scm_i_thread
*)ticket
;
348 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
353 static scm_i_thread
*
356 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
358 /* record top of stack for the GC */
359 t
->top
= SCM_STACK_PTR (&t
);
360 /* save registers. */
361 SCM_FLUSH_REGISTER_WINDOWS
;
366 static scm_t_guile_ticket
369 scm_i_thread
*t
= suspend ();
370 scm_i_pthread_mutex_unlock (&t
->heap_mutex
);
371 return (scm_t_guile_ticket
) t
;
374 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
375 static scm_i_thread
*all_threads
= NULL
;
376 static int thread_count
;
378 static SCM scm_i_default_dynamic_state
;
380 /* Perform first stage of thread initialisation, in non-guile mode.
383 guilify_self_1 (SCM_STACKITEM
*base
)
385 scm_i_thread
*t
= malloc (sizeof (scm_i_thread
));
387 t
->pthread
= scm_i_pthread_self ();
388 t
->handle
= SCM_BOOL_F
;
389 t
->result
= SCM_BOOL_F
;
390 t
->join_queue
= SCM_EOL
;
391 t
->dynamic_state
= SCM_BOOL_F
;
392 t
->dynwinds
= SCM_EOL
;
393 t
->active_asyncs
= SCM_EOL
;
395 t
->pending_asyncs
= 1;
396 t
->last_debug_frame
= NULL
;
398 t
->continuation_root
= SCM_EOL
;
399 t
->continuation_base
= base
;
400 scm_i_pthread_cond_init (&t
->sleep_cond
, NULL
);
401 t
->sleep_mutex
= NULL
;
402 t
->sleep_object
= SCM_BOOL_F
;
404 /* XXX - check for errors. */
405 pipe (t
->sleep_pipe
);
406 scm_i_pthread_mutex_init (&t
->heap_mutex
, NULL
);
407 t
->clear_freelists_p
= 0;
411 t
->freelist
= SCM_EOL
;
412 t
->freelist2
= SCM_EOL
;
413 SCM_SET_FREELIST_LOC (scm_i_freelist
, &t
->freelist
);
414 SCM_SET_FREELIST_LOC (scm_i_freelist2
, &t
->freelist2
);
416 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
418 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
420 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
421 t
->next_thread
= all_threads
;
424 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
427 /* Perform second stage of thread initialisation, in guile mode.
430 guilify_self_2 (SCM parent
)
432 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
434 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
435 scm_gc_register_collectable_memory (t
, sizeof (scm_i_thread
), "thread");
436 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
437 t
->continuation_base
= t
->base
;
439 if (scm_is_true (parent
))
440 t
->dynamic_state
= scm_make_dynamic_state (parent
);
442 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
444 t
->join_queue
= make_queue ();
448 /* Perform thread tear-down, in guile mode.
451 do_thread_exit (void *v
)
453 scm_i_thread
*t
= (scm_i_thread
*)v
;
455 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex
);
458 close (t
->sleep_pipe
[0]);
459 close (t
->sleep_pipe
[1]);
460 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
463 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
468 on_thread_exit (void *v
)
470 scm_i_thread
*t
= (scm_i_thread
*)v
, **tp
;
472 scm_i_pthread_setspecific (scm_i_thread_key
, v
);
474 /* Unblocking the joining threads needs to happen in guile mode
475 since the queue is a SCM data structure.
477 scm_with_guile (do_thread_exit
, v
);
479 /* Removing ourself from the list of all threads needs to happen in
480 non-guile mode since all SCM values on our stack become
481 unprotected once we are no longer in the list.
484 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
485 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
488 *tp
= t
->next_thread
;
492 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
494 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
497 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
500 init_thread_key (void)
502 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
505 /* Perform any initializations necessary to bring the current thread
506 into guile mode, initializing Guile itself, if necessary.
508 BASE is the stack base to use with GC.
510 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
511 which case the default dynamic state is used.
513 Return zero when the thread was in guile mode already; otherwise
518 scm_i_init_thread_for_guile (SCM_STACKITEM
*base
, SCM parent
)
522 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
524 if ((t
= SCM_I_CURRENT_THREAD
) == NULL
)
526 /* This thread has not been guilified yet.
529 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
530 if (scm_initialized_p
== 0)
532 /* First thread ever to enter Guile. Run the full
535 scm_i_init_guile (base
);
536 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
540 /* Guile is already initialized, but this thread enters it for
541 the first time. Only initialize this thread.
543 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
544 guilify_self_1 (base
);
545 guilify_self_2 (parent
);
551 /* This thread is already guilified but not in guile mode, just
554 XXX - base might be lower than when this thread was first
557 scm_enter_guile ((scm_t_guile_ticket
) t
);
562 /* Thread is already in guile mode. Nothing to do.
568 #if SCM_USE_PTHREAD_THREADS
569 #ifdef HAVE_PTHREAD_ATTR_GETSTACK
571 #define HAVE_GET_THREAD_STACK_BASE
573 static SCM_STACKITEM
*
574 get_thread_stack_base ()
580 pthread_getattr_np (pthread_self (), &attr
);
581 pthread_attr_getstack (&attr
, &start
, &size
);
582 end
= (char *)start
+ size
;
584 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
585 for the main thread, but we can use scm_get_stack_base in that
589 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
590 if ((void *)&attr
< start
|| (void *)&attr
>= end
)
591 return scm_get_stack_base ();
595 #if SCM_STACK_GROWS_UP
603 #endif /* HAVE_PTHREAD_ATTR_GETSTACK */
605 #else /* !SCM_USE_PTHREAD_THREADS */
607 #define HAVE_GET_THREAD_STACK_BASE
609 static SCM_STACKITEM
*
610 get_thread_stack_base ()
612 return scm_get_stack_base ();
615 #endif /* !SCM_USE_PTHREAD_THREADS */
617 #ifdef HAVE_GET_THREAD_STACK_BASE
622 scm_i_init_thread_for_guile (get_thread_stack_base (),
623 scm_i_default_dynamic_state
);
629 scm_with_guile (void *(*func
)(void *), void *data
)
631 return scm_i_with_guile_and_parent (func
, data
,
632 scm_i_default_dynamic_state
);
636 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
,
641 SCM_STACKITEM base_item
;
642 really_entered
= scm_i_init_thread_for_guile (&base_item
, parent
);
643 res
= scm_c_with_continuation_barrier (func
, data
);
650 scm_without_guile (void *(*func
)(void *), void *data
)
653 scm_t_guile_ticket t
;
654 t
= scm_leave_guile ();
660 /*** Thread creation */
667 scm_i_pthread_mutex_t mutex
;
668 scm_i_pthread_cond_t cond
;
672 really_launch (void *d
)
674 launch_data
*data
= (launch_data
*)d
;
675 SCM thunk
= data
->thunk
, handler
= data
->handler
;
678 t
= SCM_I_CURRENT_THREAD
;
680 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
681 data
->thread
= scm_current_thread ();
682 scm_i_pthread_cond_signal (&data
->cond
);
683 scm_i_pthread_mutex_unlock (&data
->mutex
);
685 if (SCM_UNBNDP (handler
))
686 t
->result
= scm_call_0 (thunk
);
688 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
694 launch_thread (void *d
)
696 launch_data
*data
= (launch_data
*)d
;
697 scm_i_pthread_detach (scm_i_pthread_self ());
698 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
702 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
703 (SCM thunk
, SCM handler
),
704 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
705 "returning a new thread object representing the thread. The procedure\n"
706 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
708 "When @var{handler} is specified, then @var{thunk} is called from\n"
709 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
710 "handler. This catch is established inside the continuation barrier.\n"
712 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
713 "the @emph{exit value} of the thread and the thread is terminated.")
714 #define FUNC_NAME s_scm_call_with_new_thread
720 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
721 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
722 handler
, SCM_ARG2
, FUNC_NAME
);
724 data
.parent
= scm_current_dynamic_state ();
726 data
.handler
= handler
;
727 data
.thread
= SCM_BOOL_F
;
728 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
729 scm_i_pthread_cond_init (&data
.cond
, NULL
);
731 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
732 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
735 scm_i_pthread_mutex_unlock (&data
.mutex
);
739 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
740 scm_i_pthread_mutex_unlock (&data
.mutex
);
748 scm_t_catch_body body
;
750 scm_t_catch_handler handler
;
753 scm_i_pthread_mutex_t mutex
;
754 scm_i_pthread_cond_t cond
;
758 really_spawn (void *d
)
760 spawn_data
*data
= (spawn_data
*)d
;
761 scm_t_catch_body body
= data
->body
;
762 void *body_data
= data
->body_data
;
763 scm_t_catch_handler handler
= data
->handler
;
764 void *handler_data
= data
->handler_data
;
765 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
767 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
768 data
->thread
= scm_current_thread ();
769 scm_i_pthread_cond_signal (&data
->cond
);
770 scm_i_pthread_mutex_unlock (&data
->mutex
);
773 t
->result
= body (body_data
);
775 t
->result
= scm_internal_catch (SCM_BOOL_T
,
777 handler
, handler_data
);
783 spawn_thread (void *d
)
785 spawn_data
*data
= (spawn_data
*)d
;
786 scm_i_pthread_detach (scm_i_pthread_self ());
787 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
792 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
793 scm_t_catch_handler handler
, void *handler_data
)
799 data
.parent
= scm_current_dynamic_state ();
801 data
.body_data
= body_data
;
802 data
.handler
= handler
;
803 data
.handler_data
= handler_data
;
804 data
.thread
= SCM_BOOL_F
;
805 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
806 scm_i_pthread_cond_init (&data
.cond
, NULL
);
808 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
809 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
812 scm_i_pthread_mutex_unlock (&data
.mutex
);
816 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
817 scm_i_pthread_mutex_unlock (&data
.mutex
);
822 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
824 "Move the calling thread to the end of the scheduling queue.")
825 #define FUNC_NAME s_scm_yield
827 return scm_from_bool (scm_i_sched_yield ());
831 SCM_DEFINE (scm_join_thread
, "join-thread", 1, 0, 0,
833 "Suspend execution of the calling thread until the target @var{thread} "
834 "terminates, unless the target @var{thread} has already terminated. ")
835 #define FUNC_NAME s_scm_join_thread
840 SCM_VALIDATE_THREAD (1, thread
);
841 if (scm_is_eq (scm_current_thread (), thread
))
842 SCM_MISC_ERROR ("can not join the current thread", SCM_EOL
);
844 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex
);
846 t
= SCM_I_THREAD_DATA (thread
);
851 block_self (t
->join_queue
, thread
, &thread_admin_mutex
, NULL
);
854 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
856 scm_i_scm_pthread_mutex_lock (&thread_admin_mutex
);
861 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
868 /* We implement our own mutex type since we want them to be 'fair', we
869 want to do fancy things while waiting for them (like running
870 asyncs) and we might want to add things that are nice for
875 scm_i_pthread_mutex_t lock
;
877 int level
; /* how much the owner owns us.
878 < 0 for non-recursive mutexes */
879 SCM waiting
; /* the threads waiting for this mutex. */
882 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
883 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
886 fat_mutex_mark (SCM mx
)
888 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
889 scm_gc_mark (m
->owner
);
894 fat_mutex_free (SCM mx
)
896 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
897 scm_i_pthread_mutex_destroy (&m
->lock
);
898 scm_gc_free (m
, sizeof (fat_mutex
), "mutex");
903 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
905 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
906 scm_puts ("#<mutex ", port
);
907 scm_uintprint ((scm_t_bits
)m
, 16, port
);
908 scm_puts (">", port
);
913 make_fat_mutex (int recursive
)
918 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
919 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
920 m
->owner
= SCM_BOOL_F
;
921 m
->level
= recursive
? 0 : -1;
922 m
->waiting
= SCM_EOL
;
923 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
924 m
->waiting
= make_queue ();
928 SCM_DEFINE (scm_make_mutex
, "make-mutex", 0, 0, 0,
930 "Create a new mutex. ")
931 #define FUNC_NAME s_scm_make_mutex
933 return make_fat_mutex (0);
937 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
939 "Create a new recursive mutex. ")
940 #define FUNC_NAME s_scm_make_recursive_mutex
942 return make_fat_mutex (1);
947 fat_mutex_lock (SCM mutex
)
949 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
950 SCM thread
= scm_current_thread ();
953 scm_i_scm_pthread_mutex_lock (&m
->lock
);
954 if (scm_is_false (m
->owner
))
956 else if (scm_is_eq (m
->owner
, thread
))
961 msg
= "mutex already locked by current thread";
967 block_self (m
->waiting
, mutex
, &m
->lock
, NULL
);
968 if (scm_is_eq (m
->owner
, thread
))
970 scm_i_pthread_mutex_unlock (&m
->lock
);
972 scm_i_scm_pthread_mutex_lock (&m
->lock
);
975 scm_i_pthread_mutex_unlock (&m
->lock
);
979 SCM_DEFINE (scm_lock_mutex
, "lock-mutex", 1, 0, 0,
981 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
982 "blocks until the mutex becomes available. The function returns when "
983 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
984 "a thread already owns will succeed right away and will not block the "
985 "thread. That is, Guile's mutexes are @emph{recursive}. ")
986 #define FUNC_NAME s_scm_lock_mutex
990 SCM_VALIDATE_MUTEX (1, mx
);
991 msg
= fat_mutex_lock (mx
);
993 scm_misc_error (NULL
, msg
, SCM_EOL
);
999 scm_dynwind_lock_mutex (SCM mutex
)
1001 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM
))scm_unlock_mutex
, mutex
,
1002 SCM_F_WIND_EXPLICITLY
);
1003 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM
))scm_lock_mutex
, mutex
,
1004 SCM_F_WIND_EXPLICITLY
);
1008 fat_mutex_trylock (fat_mutex
*m
, int *resp
)
1011 SCM thread
= scm_current_thread ();
1014 scm_i_pthread_mutex_lock (&m
->lock
);
1015 if (scm_is_false (m
->owner
))
1017 else if (scm_is_eq (m
->owner
, thread
))
1022 msg
= "mutex already locked by current thread";
1026 scm_i_pthread_mutex_unlock (&m
->lock
);
1030 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1032 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1033 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1034 #define FUNC_NAME s_scm_try_mutex
1039 SCM_VALIDATE_MUTEX (1, mutex
);
1041 msg
= fat_mutex_trylock (SCM_MUTEX_DATA (mutex
), &res
);
1043 scm_misc_error (NULL
, msg
, SCM_EOL
);
1044 return scm_from_bool (res
);
1049 fat_mutex_unlock (fat_mutex
*m
)
1053 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1054 if (!scm_is_eq (m
->owner
, scm_current_thread ()))
1056 if (scm_is_false (m
->owner
))
1057 msg
= "mutex not locked";
1059 msg
= "mutex not locked by current thread";
1061 else if (m
->level
> 0)
1064 m
->owner
= unblock_from_queue (m
->waiting
);
1065 scm_i_pthread_mutex_unlock (&m
->lock
);
1070 SCM_DEFINE (scm_unlock_mutex
, "unlock-mutex", 1, 0, 0,
1072 "Unlocks @var{mutex} if the calling thread owns the lock on "
1073 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1074 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1075 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1076 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1077 "with a call to @code{unlock-mutex}. Only the last call to "
1078 "@code{unlock-mutex} will actually unlock the mutex. ")
1079 #define FUNC_NAME s_scm_unlock_mutex
1082 SCM_VALIDATE_MUTEX (1, mx
);
1084 msg
= fat_mutex_unlock (SCM_MUTEX_DATA (mx
));
1086 scm_misc_error (NULL
, msg
, SCM_EOL
);
1093 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1095 "Return the thread owning @var{mx}, or @code{#f}.")
1096 #define FUNC_NAME s_scm_mutex_owner
1098 SCM_VALIDATE_MUTEX (1, mx
);
1099 return (SCM_MUTEX_DATA(mx
))->owner
;
1103 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1105 "Return the lock level of a recursive mutex, or -1\n"
1106 "for a standard mutex.")
1107 #define FUNC_NAME s_scm_mutex_level
1109 SCM_VALIDATE_MUTEX (1, mx
);
1110 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1116 /*** Fat condition variables */
1119 scm_i_pthread_mutex_t lock
;
1120 SCM waiting
; /* the threads waiting for this condition. */
1123 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1124 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1127 fat_cond_mark (SCM cv
)
1129 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1134 fat_cond_free (SCM mx
)
1136 fat_cond
*c
= SCM_CONDVAR_DATA (mx
);
1137 scm_i_pthread_mutex_destroy (&c
->lock
);
1138 scm_gc_free (c
, sizeof (fat_cond
), "condition-variable");
1143 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1145 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1146 scm_puts ("#<condition-variable ", port
);
1147 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1148 scm_puts (">", port
);
1152 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1154 "Make a new condition variable.")
1155 #define FUNC_NAME s_scm_make_condition_variable
1160 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1161 scm_i_pthread_mutex_init (&c
->lock
, 0);
1162 c
->waiting
= SCM_EOL
;
1163 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1164 c
->waiting
= make_queue ();
1170 fat_cond_timedwait (SCM cond
, SCM mutex
,
1171 const scm_t_timespec
*waittime
)
1173 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1174 fat_cond
*c
= SCM_CONDVAR_DATA (cond
);
1175 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1181 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1182 msg
= fat_mutex_unlock (m
);
1186 err
= block_self (c
->waiting
, cond
, &c
->lock
, waittime
);
1187 scm_i_pthread_mutex_unlock (&c
->lock
);
1188 fat_mutex_lock (mutex
);
1191 scm_i_pthread_mutex_unlock (&c
->lock
);
1196 scm_misc_error (NULL
, msg
, SCM_EOL
);
1198 scm_remember_upto_here_2 (cond
, mutex
);
1202 if (err
== ETIMEDOUT
)
1207 scm_syserror (NULL
);
1212 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1213 (SCM cv
, SCM mx
, SCM t
),
1214 "Wait until @var{cond-var} has been signalled. While waiting, "
1215 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1216 "is locked again when this function returns. When @var{time} is given, "
1217 "it specifies a point in time where the waiting should be aborted. It "
1218 "can be either a integer as returned by @code{current-time} or a pair "
1219 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1220 "mutex is locked and @code{#f} is returned. When the condition "
1221 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1223 #define FUNC_NAME s_scm_timed_wait_condition_variable
1225 scm_t_timespec waittime
, *waitptr
= NULL
;
1227 SCM_VALIDATE_CONDVAR (1, cv
);
1228 SCM_VALIDATE_MUTEX (2, mx
);
1230 if (!SCM_UNBNDP (t
))
1232 if (scm_is_pair (t
))
1234 waittime
.tv_sec
= scm_to_ulong (SCM_CAR (t
));
1235 waittime
.tv_nsec
= scm_to_ulong (SCM_CAR (t
)) * 1000;
1239 waittime
.tv_sec
= scm_to_ulong (t
);
1240 waittime
.tv_nsec
= 0;
1242 waitptr
= &waittime
;
1245 return scm_from_bool (fat_cond_timedwait (cv
, mx
, waitptr
));
1250 fat_cond_signal (fat_cond
*c
)
1252 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1253 unblock_from_queue (c
->waiting
);
1254 scm_i_pthread_mutex_unlock (&c
->lock
);
1257 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1259 "Wake up one thread that is waiting for @var{cv}")
1260 #define FUNC_NAME s_scm_signal_condition_variable
1262 SCM_VALIDATE_CONDVAR (1, cv
);
1263 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1269 fat_cond_broadcast (fat_cond
*c
)
1271 scm_i_scm_pthread_mutex_lock (&c
->lock
);
1272 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1274 scm_i_pthread_mutex_unlock (&c
->lock
);
1277 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1279 "Wake up all threads that are waiting for @var{cv}. ")
1280 #define FUNC_NAME s_scm_broadcast_condition_variable
1282 SCM_VALIDATE_CONDVAR (1, cv
);
1283 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1288 /*** Marking stacks */
1290 /* XXX - what to do with this? Do we need to handle this for blocked
1294 # define SCM_MARK_BACKING_STORE() do { \
1296 SCM_STACKITEM * top, * bot; \
1297 getcontext (&ctx); \
1298 scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
1299 ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
1300 / sizeof (SCM_STACKITEM))); \
1301 bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
1302 top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
1303 scm_mark_locations (bot, top - bot); } while (0)
1305 # define SCM_MARK_BACKING_STORE()
1309 scm_threads_mark_stacks (void)
1312 for (t
= all_threads
; t
; t
= t
->next_thread
)
1314 /* Check that thread has indeed been suspended.
1318 scm_gc_mark (t
->handle
);
1320 #if SCM_STACK_GROWS_UP
1321 scm_mark_locations (t
->base
, t
->top
- t
->base
);
1323 scm_mark_locations (t
->top
, t
->base
- t
->top
);
1325 scm_mark_locations ((SCM_STACKITEM
*) t
->regs
,
1326 ((size_t) sizeof(t
->regs
)
1327 / sizeof (SCM_STACKITEM
)));
1330 SCM_MARK_BACKING_STORE ();
1336 scm_std_select (int nfds
,
1337 SELECT_TYPE
*readfds
,
1338 SELECT_TYPE
*writefds
,
1339 SELECT_TYPE
*exceptfds
,
1340 struct timeval
*timeout
)
1343 int res
, eno
, wakeup_fd
;
1344 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1345 scm_t_guile_ticket ticket
;
1347 if (readfds
== NULL
)
1349 FD_ZERO (&my_readfds
);
1350 readfds
= &my_readfds
;
1353 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1356 wakeup_fd
= t
->sleep_pipe
[0];
1357 ticket
= scm_leave_guile ();
1358 FD_SET (wakeup_fd
, readfds
);
1359 if (wakeup_fd
>= nfds
)
1361 res
= select (nfds
, readfds
, writefds
, exceptfds
, timeout
);
1364 scm_enter_guile (ticket
);
1366 scm_i_reset_sleep (t
);
1368 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1371 read (wakeup_fd
, &dummy
, 1);
1372 FD_CLR (wakeup_fd
, readfds
);
1384 /* Convenience API for blocking while in guile mode. */
1386 #if SCM_USE_PTHREAD_THREADS
1389 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1391 scm_t_guile_ticket t
= scm_leave_guile ();
1392 int res
= scm_i_pthread_mutex_lock (mutex
);
1393 scm_enter_guile (t
);
1398 do_unlock (void *data
)
1400 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1404 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1406 scm_i_scm_pthread_mutex_lock (mutex
);
1407 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1411 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1413 scm_t_guile_ticket t
= scm_leave_guile ();
1414 int res
= scm_i_pthread_cond_wait (cond
, mutex
);
1415 scm_enter_guile (t
);
1420 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1421 scm_i_pthread_mutex_t
*mutex
,
1422 const scm_t_timespec
*wt
)
1424 scm_t_guile_ticket t
= scm_leave_guile ();
1425 int res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1426 scm_enter_guile (t
);
1433 scm_std_usleep (unsigned long usecs
)
1436 tv
.tv_usec
= usecs
% 1000000;
1437 tv
.tv_sec
= usecs
/ 1000000;
1438 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1439 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
1443 scm_std_sleep (unsigned int secs
)
1448 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1454 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1456 "Return the thread that called this function.")
1457 #define FUNC_NAME s_scm_current_thread
1459 return SCM_I_CURRENT_THREAD
->handle
;
1464 scm_c_make_list (size_t n
, SCM fill
)
1468 res
= scm_cons (fill
, res
);
1472 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1474 "Return a list of all threads.")
1475 #define FUNC_NAME s_scm_all_threads
1477 /* We can not allocate while holding the thread_admin_mutex because
1478 of the way GC is done.
1480 int n
= thread_count
;
1482 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1484 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1486 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
1488 SCM_SETCAR (*l
, t
->handle
);
1489 l
= SCM_CDRLOC (*l
);
1493 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1498 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1500 "Return @code{#t} iff @var{thread} has exited.\n")
1501 #define FUNC_NAME s_scm_thread_exited_p
1503 return scm_from_bool (scm_c_thread_exited_p (thread
));
1508 scm_c_thread_exited_p (SCM thread
)
1509 #define FUNC_NAME s_scm_thread_exited_p
1512 SCM_VALIDATE_THREAD (1, thread
);
1513 t
= SCM_I_THREAD_DATA (thread
);
/* State for the stop-the-world ("put to sleep") protocol used by GC. */

/* Condition variable that threads sleeping for GC wait on; broadcast
   by scm_i_thread_wake_up when GC has finished. */
static scm_i_pthread_cond_t wake_up_cond;

/* Flag raised by scm_i_thread_put_to_sleep while it collects every
   thread's heap_mutex, and cleared once all threads are asleep. */
int scm_i_thread_go_to_sleep;

/* Set to 1 by scm_init_threads once the thread system is fully up;
   the sleep/wake-up entry points are no-ops before that. */
static int threads_initialized_p = 0;
1523 scm_i_thread_put_to_sleep ()
1525 if (threads_initialized_p
)
1530 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1532 /* Signal all threads to go to sleep
1534 scm_i_thread_go_to_sleep
= 1;
1535 for (t
= all_threads
; t
; t
= t
->next_thread
)
1536 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
1537 scm_i_thread_go_to_sleep
= 0;
1542 scm_i_thread_invalidate_freelists ()
1544 /* thread_admin_mutex is already locked. */
1547 for (t
= all_threads
; t
; t
= t
->next_thread
)
1548 if (t
!= SCM_I_CURRENT_THREAD
)
1549 t
->clear_freelists_p
= 1;
1553 scm_i_thread_wake_up ()
1555 if (threads_initialized_p
)
1559 scm_i_pthread_cond_broadcast (&wake_up_cond
);
1560 for (t
= all_threads
; t
; t
= t
->next_thread
)
1561 scm_i_pthread_mutex_unlock (&t
->heap_mutex
);
1562 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1563 scm_enter_guile ((scm_t_guile_ticket
) SCM_I_CURRENT_THREAD
);
1568 scm_i_thread_sleep_for_gc ()
1570 scm_i_thread
*t
= suspend ();
1571 scm_i_pthread_cond_wait (&wake_up_cond
, &t
->heap_mutex
);
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
/* NOTE(review): presumably the nesting depth of critical sections —
   only the zero-initialization is visible here; confirm against the
   SCM_CRITICAL_SECTION_START/END macros. */
int scm_i_critical_section_level = 0;

/* Default mutex used by scm_dynwind_critical_section when it is passed
   #f; created (as a recursive mutex) in scm_init_threads. */
static SCM dynwind_critical_section_mutex;
1583 scm_dynwind_critical_section (SCM mutex
)
1585 if (scm_is_false (mutex
))
1586 mutex
= dynwind_critical_section_mutex
;
1587 scm_dynwind_lock_mutex (mutex
);
1588 scm_dynwind_block_asyncs ();
1591 /*** Initialization */
1593 scm_i_pthread_key_t scm_i_freelist
, scm_i_freelist2
;
1594 scm_i_pthread_mutex_t scm_i_misc_mutex
;
1596 #if SCM_USE_PTHREAD_THREADS
1597 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
1601 scm_threads_prehistory (SCM_STACKITEM
*base
)
1603 #if SCM_USE_PTHREAD_THREADS
1604 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
1605 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
1606 PTHREAD_MUTEX_RECURSIVE
);
1609 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
1610 scm_i_pthread_mutexattr_recursive
);
1611 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
1612 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
1613 scm_i_pthread_key_create (&scm_i_freelist
, NULL
);
1614 scm_i_pthread_key_create (&scm_i_freelist2
, NULL
);
1616 guilify_self_1 (base
);
/* Smob type tags for the thread primitives; all three are assigned by
   scm_make_smob_type calls in scm_init_threads. */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
1626 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
1627 scm_set_smob_mark (scm_tc16_thread
, thread_mark
);
1628 scm_set_smob_print (scm_tc16_thread
, thread_print
);
1629 scm_set_smob_free (scm_tc16_thread
, thread_free
);
1631 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
1632 scm_set_smob_mark (scm_tc16_mutex
, fat_mutex_mark
);
1633 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
1634 scm_set_smob_free (scm_tc16_mutex
, fat_mutex_free
);
1636 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
1638 scm_set_smob_mark (scm_tc16_condvar
, fat_cond_mark
);
1639 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
1640 scm_set_smob_free (scm_tc16_condvar
, fat_cond_free
);
1642 scm_i_default_dynamic_state
= SCM_BOOL_F
;
1643 guilify_self_2 (SCM_BOOL_F
);
1644 threads_initialized_p
= 1;
1646 dynwind_critical_section_mutex
=
1647 scm_permanent_object (scm_make_recursive_mutex ());
1651 scm_init_threads_default_dynamic_state ()
1653 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
1654 scm_i_default_dynamic_state
= scm_permanent_object (state
);
1658 scm_init_thread_procs ()
1660 #include "libguile/threads.x"