1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "libguile/_scm.h"
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
41 #include "libguile/validate.h"
42 #include "libguile/root.h"
43 #include "libguile/eval.h"
44 #include "libguile/async.h"
45 #include "libguile/ports.h"
46 #include "libguile/threads.h"
47 #include "libguile/dynwind.h"
48 #include "libguile/iselect.h"
49 #include "libguile/fluids.h"
50 #include "libguile/continuations.h"
51 #include "libguile/gc.h"
52 #include "libguile/init.h"
53 #include "libguile/scmsigs.h"
54 #include "libguile/strings.h"
58 # define ETIMEDOUT WSAETIMEDOUT
62 # define pipe(fd) _pipe (fd, 256, O_BINARY)
63 #endif /* __MINGW32__ */
65 #include <full-read.h>
69 to_timespec (SCM t
, scm_t_timespec
*waittime
)
73 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
74 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
78 double time
= scm_to_double (t
);
79 double sec
= scm_c_truncate (time
);
81 waittime
->tv_sec
= (long) sec
;
82 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
88 /* Make an empty queue data structure.
93 return scm_cons (SCM_EOL
, SCM_EOL
);
96 /* Put T at the back of Q and return a handle that can be used with
97 remqueue to remove T from Q again.
100 enqueue (SCM q
, SCM t
)
102 SCM c
= scm_cons (t
, SCM_EOL
);
103 SCM_CRITICAL_SECTION_START
;
104 if (scm_is_null (SCM_CDR (q
)))
107 SCM_SETCDR (SCM_CAR (q
), c
);
109 SCM_CRITICAL_SECTION_END
;
113 /* Remove the element that the handle C refers to from the queue Q. C
114 must have been returned from a call to enqueue. The return value
115 is zero when the element referred to by C has already been removed.
116 Otherwise, 1 is returned.
119 remqueue (SCM q
, SCM c
)
122 SCM_CRITICAL_SECTION_START
;
123 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
125 if (scm_is_eq (p
, c
))
127 if (scm_is_eq (c
, SCM_CAR (q
)))
128 SCM_SETCAR (q
, SCM_CDR (c
));
129 SCM_SETCDR (prev
, SCM_CDR (c
));
130 SCM_CRITICAL_SECTION_END
;
135 SCM_CRITICAL_SECTION_END
;
139 /* Remove the front-most element from the queue Q and return it.
140 Return SCM_BOOL_F when Q is empty.
146 SCM_CRITICAL_SECTION_START
;
150 SCM_CRITICAL_SECTION_END
;
155 SCM_SETCDR (q
, SCM_CDR (c
));
156 if (scm_is_null (SCM_CDR (q
)))
157 SCM_SETCAR (q
, SCM_EOL
);
158 SCM_CRITICAL_SECTION_END
;
163 /*** Thread smob routines */
166 thread_mark (SCM obj
)
168 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
169 scm_gc_mark (t
->result
);
170 scm_gc_mark (t
->cleanup_handler
);
171 scm_gc_mark (t
->join_queue
);
172 scm_gc_mark (t
->mutexes
);
173 scm_gc_mark (t
->dynwinds
);
174 scm_gc_mark (t
->active_asyncs
);
175 scm_gc_mark (t
->continuation_root
);
177 return t
->dynamic_state
;
181 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
183 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
184 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
185 the struct case, hence we go via a union, and extract according to the
186 size of pthread_t. */
194 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
195 scm_i_pthread_t p
= t
->pthread
;
198 if (sizeof (p
) == sizeof (unsigned short))
200 else if (sizeof (p
) == sizeof (unsigned int))
202 else if (sizeof (p
) == sizeof (unsigned long))
207 scm_puts ("#<thread ", port
);
208 scm_uintprint (id
, 10, port
);
209 scm_puts (" (", port
);
210 scm_uintprint ((scm_t_bits
)t
, 16, port
);
211 scm_puts (")>", port
);
216 thread_free (SCM obj
)
218 scm_i_thread
*t
= SCM_I_THREAD_DATA (obj
);
220 scm_gc_free (t
, sizeof (*t
), "thread");
224 /*** Blocking on queues. */
226 /* See also scm_i_queue_async_cell for how such a block is
230 /* Put the current thread on QUEUE and go to sleep, waiting for it to
231 be woken up by a call to 'unblock_from_queue', or to be
232 interrupted. Upon return of this function, the current thread is
233 no longer on QUEUE, even when the sleep has been interrupted.
235 The caller of block_self must hold MUTEX. It will be atomically
236 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
238 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
241 When WAITTIME is not NULL, the sleep will be aborted at that time.
243 The return value of block_self is an errno value. It will be zero
244 when the sleep has been successfully completed by a call to
245 unblock_from_queue, EINTR when it has been interrupted by the
246 delivery of a system async, and ETIMEDOUT when the timeout has
249 The system asyncs themselves are not executed by block_self.
252 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
253 const scm_t_timespec
*waittime
)
255 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
259 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
264 q_handle
= enqueue (queue
, t
->handle
);
265 if (waittime
== NULL
)
266 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
268 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
270 /* When we are still on QUEUE, we have been interrupted. We
271 report this only when no other error (such as a timeout) has
274 if (remqueue (queue
, q_handle
) && err
== 0)
277 scm_i_reset_sleep (t
);
283 /* Wake up the first thread on QUEUE, if any. The awoken thread is
284 returned, or #f if the queue was empty.
287 unblock_from_queue (SCM queue
)
289 SCM thread
= dequeue (queue
);
290 if (scm_is_true (thread
))
291 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
295 /* Getting into and out of guile mode.
298 /* Ken Raeburn observes that the implementation of suspend and resume
299 (and the things that build on top of them) are very likely not
300 correct (see below). We will need fix this eventually, and that's
301 why scm_leave_guile/scm_enter_guile are not exported in the API.
305 Consider this sequence:
307 Function foo, called in Guile mode, calls suspend (maybe indirectly
308 through scm_leave_guile), which does this:
310 // record top of stack for the GC
311 t->top = SCM_STACK_PTR (&t); // just takes address of automatic
314 SCM_FLUSH_REGISTER_WINDOWS; // sparc only
315 SCM_I_SETJMP (t->regs); // here's most of the magic
319 Function foo has a SCM value X, a handle on a non-immediate object, in
320 a caller-saved register R, and it's the only reference to the object
323 The compiler wants to use R in suspend, so it pushes the current
324 value, X, into a stack slot which will be reloaded on exit from
325 suspend; then it loads stuff into R and goes about its business. The
326 setjmp call saves (some of) the current registers, including R, which
327 no longer contains X. (This isn't a problem for a normal
328 setjmp/longjmp situation, where longjmp would be called before
329 setjmp's caller returns; the old value for X would be loaded back from
330 the stack after the longjmp, before the function returned.)
332 So, suspend returns, loading X back into R (and invalidating the jump
333 buffer) in the process. The caller foo then goes off and calls a
334 bunch of other functions out of Guile mode, occasionally storing X on
335 the stack again, but, say, much deeper on the stack than suspend's
336 stack frame went, and the stack slot where suspend had written X has
337 long since been overwritten with other values.
339 Okay, nothing actively broken so far. Now, let garbage collection
340 run, triggered by another thread.
342 The thread calling foo is out of Guile mode at the time, so the
343 garbage collector just scans a range of stack addresses. Too bad that
344 X isn't stored there. So the pointed-to storage goes onto the free
345 list, and I think you can see where things go from there.
347 Is there anything I'm missing that'll prevent this scenario from
348 happening? I mean, aside from, "well, suspend and scm_leave_guile
349 don't have many local variables, so they probably won't need to save
350 any registers on most systems, so we hope everything will wind up in
351 the jump buffer and we'll just get away with it"?
353 (And, going the other direction, if scm_leave_guile and suspend push
354 the stack pointer over onto a new page, and foo doesn't make further
355 function calls and thus the stack pointer no longer includes that
356 page, are we guaranteed that the kernel cannot release the now-unused
357 stack page that contains the top-of-stack pointer we just saved? I
358 don't know if any OS actually does that. If it does, we could get
359 faults in garbage collection.)
361 I don't think scm_without_guile has to have this problem, as it gets
362 more control over the stack handling -- but it should call setjmp
363 itself. I'd probably try something like:
365 // record top of stack for the GC
366 t->top = SCM_STACK_PTR (&t);
368 SCM_FLUSH_REGISTER_WINDOWS;
369 SCM_I_SETJMP (t->regs);
373 ... though even that's making some assumptions about the stack
374 ordering of local variables versus caller-saved registers.
376 For something like scm_leave_guile to work, I don't think it can just
377 rely on invalidated jump buffers. A valid jump buffer, and a handle
378 on the stack state at the point when the jump buffer was initialized,
379 together, would work fine, but I think then we're talking about macros
380 invoking setjmp in the caller's stack frame, and requiring that the
381 caller of scm_leave_guile also call scm_enter_guile before returning,
382 kind of like pthread_cleanup_push/pop calls that have to be paired up
383 in a function. (In fact, the pthread ones have to be paired up
384 syntactically, as if they might expand to a compound statement
385 incorporating the user's code, and invoking a compiler's
386 exception-handling primitives. Which might be something to think
387 about for cases where Guile is used with C++ exceptions or
391 scm_i_pthread_key_t scm_i_thread_key
;
394 resume (scm_i_thread
*t
)
397 if (t
->clear_freelists_p
)
399 *SCM_FREELIST_LOC (scm_i_freelist
) = SCM_EOL
;
400 *SCM_FREELIST_LOC (scm_i_freelist2
) = SCM_EOL
;
401 t
->clear_freelists_p
= 0;
405 typedef void* scm_t_guile_ticket
;
408 scm_enter_guile (scm_t_guile_ticket ticket
)
410 scm_i_thread
*t
= (scm_i_thread
*)ticket
;
413 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
414 t
->heap_mutex_locked_by_self
= 1;
419 static scm_i_thread
*
422 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
424 /* record top of stack for the GC */
425 t
->top
= SCM_STACK_PTR (&t
);
426 /* save registers. */
427 SCM_FLUSH_REGISTER_WINDOWS
;
428 SCM_I_SETJMP (t
->regs
);
432 static scm_t_guile_ticket
435 scm_i_thread
*t
= suspend ();
436 if (t
->heap_mutex_locked_by_self
)
438 t
->heap_mutex_locked_by_self
= 0;
439 scm_i_pthread_mutex_unlock (&t
->heap_mutex
);
441 return (scm_t_guile_ticket
) t
;
444 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
445 static scm_i_thread
*all_threads
= NULL
;
446 static int thread_count
;
448 static SCM scm_i_default_dynamic_state
;
450 /* Perform first stage of thread initialisation, in non-guile mode.
453 guilify_self_1 (SCM_STACKITEM
*base
)
455 scm_i_thread
*t
= malloc (sizeof (scm_i_thread
));
457 t
->pthread
= scm_i_pthread_self ();
458 t
->handle
= SCM_BOOL_F
;
459 t
->result
= SCM_BOOL_F
;
460 t
->cleanup_handler
= SCM_BOOL_F
;
461 t
->mutexes
= SCM_EOL
;
462 t
->held_mutex
= NULL
;
463 t
->join_queue
= SCM_EOL
;
464 t
->dynamic_state
= SCM_BOOL_F
;
465 t
->dynwinds
= SCM_EOL
;
466 t
->active_asyncs
= SCM_EOL
;
468 t
->pending_asyncs
= 1;
469 t
->last_debug_frame
= NULL
;
472 /* Calculate and store off the base of this thread's register
473 backing store (RBS). Unfortunately our implementation(s) of
474 scm_ia64_register_backing_store_base are only reliable for the
475 main thread. For other threads, therefore, find out the current
476 top of the RBS, and use that as a maximum. */
477 t
->register_backing_store_base
= scm_ia64_register_backing_store_base ();
482 bsp
= scm_ia64_ar_bsp (&ctx
);
483 if (t
->register_backing_store_base
> bsp
)
484 t
->register_backing_store_base
= bsp
;
487 t
->continuation_root
= SCM_EOL
;
488 t
->continuation_base
= base
;
489 scm_i_pthread_cond_init (&t
->sleep_cond
, NULL
);
490 t
->sleep_mutex
= NULL
;
491 t
->sleep_object
= SCM_BOOL_F
;
494 if (pipe (t
->sleep_pipe
) != 0)
495 /* FIXME: Error conditions during the initialization phase are handled
496 gracelessly since public functions such as `scm_init_guile ()'
497 currently have type `void'. */
500 scm_i_pthread_mutex_init (&t
->heap_mutex
, NULL
);
501 t
->heap_mutex_locked_by_self
= 0;
502 scm_i_pthread_mutex_init (&t
->admin_mutex
, NULL
);
503 t
->clear_freelists_p
= 0;
508 t
->freelist
= SCM_EOL
;
509 t
->freelist2
= SCM_EOL
;
510 SCM_SET_FREELIST_LOC (scm_i_freelist
, &t
->freelist
);
511 SCM_SET_FREELIST_LOC (scm_i_freelist2
, &t
->freelist2
);
513 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
515 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
516 t
->heap_mutex_locked_by_self
= 1;
518 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
519 t
->next_thread
= all_threads
;
522 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
525 /* Perform second stage of thread initialisation, in guile mode.
528 guilify_self_2 (SCM parent
)
530 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
532 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
533 scm_gc_register_collectable_memory (t
, sizeof (scm_i_thread
), "thread");
534 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
535 t
->continuation_base
= t
->base
;
538 if (scm_is_true (parent
))
539 t
->dynamic_state
= scm_make_dynamic_state (parent
);
541 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
543 t
->join_queue
= make_queue ();
550 /* We implement our own mutex type since we want them to be 'fair', we
551 want to do fancy things while waiting for them (like running
552 asyncs) and we might want to add things that are nice for
557 scm_i_pthread_mutex_t lock
;
559 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
561 int recursive
; /* allow recursive locking? */
562 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
563 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
564 owned by the current thread? */
566 SCM waiting
; /* the threads waiting for this mutex. */
569 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
570 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
572 /* Perform thread tear-down, in guile mode.
575 do_thread_exit (void *v
)
577 scm_i_thread
*t
= (scm_i_thread
*) v
;
579 if (!scm_is_false (t
->cleanup_handler
))
581 SCM ptr
= t
->cleanup_handler
;
583 t
->cleanup_handler
= SCM_BOOL_F
;
584 t
->result
= scm_internal_catch (SCM_BOOL_T
,
585 (scm_t_catch_body
) scm_call_0
, ptr
,
586 scm_handle_by_message_noexit
, NULL
);
589 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
592 close (t
->sleep_pipe
[0]);
593 close (t
->sleep_pipe
[1]);
594 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
597 while (!scm_is_null (t
->mutexes
))
599 SCM mutex
= SCM_CAR (t
->mutexes
);
600 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
601 scm_i_pthread_mutex_lock (&m
->lock
);
603 unblock_from_queue (m
->waiting
);
605 scm_i_pthread_mutex_unlock (&m
->lock
);
606 t
->mutexes
= SCM_CDR (t
->mutexes
);
609 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
615 on_thread_exit (void *v
)
617 /* This handler is executed in non-guile mode. */
618 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
620 /* If this thread was cancelled while doing a cond wait, it will
621 still have a mutex locked, so we unlock it here. */
624 scm_i_pthread_mutex_unlock (t
->held_mutex
);
625 t
->held_mutex
= NULL
;
628 scm_i_pthread_setspecific (scm_i_thread_key
, v
);
630 /* Ensure the signal handling thread has been launched, because we might be
632 scm_i_ensure_signal_delivery_thread ();
634 /* Unblocking the joining threads needs to happen in guile mode
635 since the queue is a SCM data structure. */
636 scm_with_guile (do_thread_exit
, v
);
638 /* Removing ourself from the list of all threads needs to happen in
639 non-guile mode since all SCM values on our stack become
640 unprotected once we are no longer in the list. */
641 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
642 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
645 *tp
= t
->next_thread
;
650 /* If there's only one other thread, it could be the signal delivery
651 thread, so we need to notify it to shut down by closing its read pipe.
652 If it's not the signal delivery thread, then closing the read pipe isn't
654 if (thread_count
<= 1)
655 scm_i_close_signal_pipe ();
657 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
659 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
662 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
665 init_thread_key (void)
667 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
670 /* Perform any initializations necessary to bring the current thread
671 into guile mode, initializing Guile itself, if necessary.
673 BASE is the stack base to use with GC.
675 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
676 which case the default dynamic state is used.
678 Return zero when the thread was in guile mode already; otherwise
683 scm_i_init_thread_for_guile (SCM_STACKITEM
*base
, SCM parent
)
687 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
689 if ((t
= SCM_I_CURRENT_THREAD
) == NULL
)
691 /* This thread has not been guilified yet.
694 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
695 if (scm_initialized_p
== 0)
697 /* First thread ever to enter Guile. Run the full
700 scm_i_init_guile (base
);
701 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
705 /* Guile is already initialized, but this thread enters it for
706 the first time. Only initialize this thread.
708 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
709 guilify_self_1 (base
);
710 guilify_self_2 (parent
);
716 /* This thread is already guilified but not in guile mode, just
719 A user call to scm_with_guile() will lead us to here. This could
720 happen from anywhere on the stack, and in particular lower on the
721 stack than when it was when this thread was first guilified. Thus,
722 `base' must be updated. */
723 #if SCM_STACK_GROWS_UP
731 scm_enter_guile ((scm_t_guile_ticket
) t
);
736 /* Thread is already in guile mode. Nothing to do.
742 #if SCM_USE_PTHREAD_THREADS
744 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
745 /* This method for GNU/Linux and perhaps some other systems.
746 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
747 available on them. */
748 #define HAVE_GET_THREAD_STACK_BASE
750 static SCM_STACKITEM
*
751 get_thread_stack_base ()
757 pthread_getattr_np (pthread_self (), &attr
);
758 pthread_attr_getstack (&attr
, &start
, &size
);
759 end
= (char *)start
+ size
;
761 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
762 for the main thread, but we can use scm_get_stack_base in that
766 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
767 if ((void *)&attr
< start
|| (void *)&attr
>= end
)
768 return scm_get_stack_base ();
772 #if SCM_STACK_GROWS_UP
780 #elif HAVE_PTHREAD_GET_STACKADDR_NP
781 /* This method for MacOS X.
782 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
783 but as of 2006 there's nothing obvious at apple.com. */
784 #define HAVE_GET_THREAD_STACK_BASE
785 static SCM_STACKITEM
*
786 get_thread_stack_base ()
788 return pthread_get_stackaddr_np (pthread_self ());
791 #elif defined (__MINGW32__)
792 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
793 in any thread. We don't like hard-coding the name of a system, but there
794 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
796 #define HAVE_GET_THREAD_STACK_BASE
797 static SCM_STACKITEM
*
798 get_thread_stack_base ()
800 return scm_get_stack_base ();
803 #endif /* pthread methods of get_thread_stack_base */
805 #else /* !SCM_USE_PTHREAD_THREADS */
807 #define HAVE_GET_THREAD_STACK_BASE
809 static SCM_STACKITEM
*
810 get_thread_stack_base ()
812 return scm_get_stack_base ();
815 #endif /* !SCM_USE_PTHREAD_THREADS */
817 #ifdef HAVE_GET_THREAD_STACK_BASE
822 scm_i_init_thread_for_guile (get_thread_stack_base (),
823 scm_i_default_dynamic_state
);
829 scm_with_guile (void *(*func
)(void *), void *data
)
831 return scm_i_with_guile_and_parent (func
, data
,
832 scm_i_default_dynamic_state
);
835 SCM_UNUSED
static void
836 scm_leave_guile_cleanup (void *x
)
842 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
846 SCM_STACKITEM base_item
;
848 really_entered
= scm_i_init_thread_for_guile (&base_item
, parent
);
851 scm_i_pthread_cleanup_push (scm_leave_guile_cleanup
, NULL
);
852 res
= scm_c_with_continuation_barrier (func
, data
);
853 scm_i_pthread_cleanup_pop (0);
857 res
= scm_c_with_continuation_barrier (func
, data
);
863 scm_without_guile (void *(*func
)(void *), void *data
)
866 scm_t_guile_ticket t
;
867 t
= scm_leave_guile ();
873 /*** Thread creation */
880 scm_i_pthread_mutex_t mutex
;
881 scm_i_pthread_cond_t cond
;
885 really_launch (void *d
)
887 launch_data
*data
= (launch_data
*)d
;
888 SCM thunk
= data
->thunk
, handler
= data
->handler
;
891 t
= SCM_I_CURRENT_THREAD
;
893 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
894 data
->thread
= scm_current_thread ();
895 scm_i_pthread_cond_signal (&data
->cond
);
896 scm_i_pthread_mutex_unlock (&data
->mutex
);
898 if (SCM_UNBNDP (handler
))
899 t
->result
= scm_call_0 (thunk
);
901 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
907 launch_thread (void *d
)
909 launch_data
*data
= (launch_data
*)d
;
910 scm_i_pthread_detach (scm_i_pthread_self ());
911 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
915 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
916 (SCM thunk
, SCM handler
),
917 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
918 "returning a new thread object representing the thread. The procedure\n"
919 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
921 "When @var{handler} is specified, then @var{thunk} is called from\n"
922 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
923 "handler. This catch is established inside the continuation barrier.\n"
925 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
926 "the @emph{exit value} of the thread and the thread is terminated.")
927 #define FUNC_NAME s_scm_call_with_new_thread
933 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
934 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
935 handler
, SCM_ARG2
, FUNC_NAME
);
937 data
.parent
= scm_current_dynamic_state ();
939 data
.handler
= handler
;
940 data
.thread
= SCM_BOOL_F
;
941 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
942 scm_i_pthread_cond_init (&data
.cond
, NULL
);
944 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
945 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
948 scm_i_pthread_mutex_unlock (&data
.mutex
);
952 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
953 scm_i_pthread_mutex_unlock (&data
.mutex
);
961 scm_t_catch_body body
;
963 scm_t_catch_handler handler
;
966 scm_i_pthread_mutex_t mutex
;
967 scm_i_pthread_cond_t cond
;
971 really_spawn (void *d
)
973 spawn_data
*data
= (spawn_data
*)d
;
974 scm_t_catch_body body
= data
->body
;
975 void *body_data
= data
->body_data
;
976 scm_t_catch_handler handler
= data
->handler
;
977 void *handler_data
= data
->handler_data
;
978 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
980 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
981 data
->thread
= scm_current_thread ();
982 scm_i_pthread_cond_signal (&data
->cond
);
983 scm_i_pthread_mutex_unlock (&data
->mutex
);
986 t
->result
= body (body_data
);
988 t
->result
= scm_internal_catch (SCM_BOOL_T
,
990 handler
, handler_data
);
996 spawn_thread (void *d
)
998 spawn_data
*data
= (spawn_data
*)d
;
999 scm_i_pthread_detach (scm_i_pthread_self ());
1000 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
1005 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
1006 scm_t_catch_handler handler
, void *handler_data
)
1012 data
.parent
= scm_current_dynamic_state ();
1014 data
.body_data
= body_data
;
1015 data
.handler
= handler
;
1016 data
.handler_data
= handler_data
;
1017 data
.thread
= SCM_BOOL_F
;
1018 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1019 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1021 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1022 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1025 scm_i_pthread_mutex_unlock (&data
.mutex
);
1027 scm_syserror (NULL
);
1029 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1030 scm_i_pthread_mutex_unlock (&data
.mutex
);
1035 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1037 "Move the calling thread to the end of the scheduling queue.")
1038 #define FUNC_NAME s_scm_yield
1040 return scm_from_bool (scm_i_sched_yield ());
1044 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1046 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1047 "cannot be the current thread, and if @var{thread} has already terminated or "
1048 "been signaled to terminate, this function is a no-op.")
1049 #define FUNC_NAME s_scm_cancel_thread
1051 scm_i_thread
*t
= NULL
;
1053 SCM_VALIDATE_THREAD (1, thread
);
1054 t
= SCM_I_THREAD_DATA (thread
);
1055 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1059 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1060 scm_i_pthread_cancel (t
->pthread
);
1063 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1065 return SCM_UNSPECIFIED
;
1069 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1070 (SCM thread
, SCM proc
),
1071 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1072 "This handler will be called when the thread exits.")
1073 #define FUNC_NAME s_scm_set_thread_cleanup_x
1077 SCM_VALIDATE_THREAD (1, thread
);
1078 if (!scm_is_false (proc
))
1079 SCM_VALIDATE_THUNK (2, proc
);
1081 t
= SCM_I_THREAD_DATA (thread
);
1082 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1084 if (!(t
->exited
|| t
->canceled
))
1085 t
->cleanup_handler
= proc
;
1087 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1089 return SCM_UNSPECIFIED
;
1093 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1095 "Return the cleanup handler installed for the thread @var{thread}.")
1096 #define FUNC_NAME s_scm_thread_cleanup
1101 SCM_VALIDATE_THREAD (1, thread
);
1103 t
= SCM_I_THREAD_DATA (thread
);
1104 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1105 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1106 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1112 SCM
scm_join_thread (SCM thread
)
1114 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1117 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1118 (SCM thread
, SCM timeout
, SCM timeoutval
),
1119 "Suspend execution of the calling thread until the target @var{thread} "
1120 "terminates, unless the target @var{thread} has already terminated. ")
1121 #define FUNC_NAME s_scm_join_thread_timed
1124 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1125 SCM res
= SCM_BOOL_F
;
1127 if (! (SCM_UNBNDP (timeoutval
)))
1130 SCM_VALIDATE_THREAD (1, thread
);
1131 if (scm_is_eq (scm_current_thread (), thread
))
1132 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1134 t
= SCM_I_THREAD_DATA (thread
);
1135 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1137 if (! SCM_UNBNDP (timeout
))
1139 to_timespec (timeout
, &ctimeout
);
1140 timeout_ptr
= &ctimeout
;
1149 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1159 else if (err
== ETIMEDOUT
)
1162 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1164 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1166 /* Check for exit again, since we just released and
1167 reacquired the admin mutex, before the next block_self
1168 call (which would block forever if t has already
1178 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1184 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1186 "Return @code{#t} if @var{obj} is a thread.")
1187 #define FUNC_NAME s_scm_thread_p
1189 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1194 fat_mutex_mark (SCM mx
)
1196 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1197 scm_gc_mark (m
->owner
);
1202 fat_mutex_free (SCM mx
)
1204 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1205 scm_i_pthread_mutex_destroy (&m
->lock
);
1206 scm_gc_free (m
, sizeof (fat_mutex
), "mutex");
1211 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1213 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1214 scm_puts ("#<mutex ", port
);
1215 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1216 scm_puts (">", port
);
1221 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1226 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1227 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
1228 m
->owner
= SCM_BOOL_F
;
1231 m
->recursive
= recursive
;
1232 m
->unchecked_unlock
= unchecked_unlock
;
1233 m
->allow_external_unlock
= external_unlock
;
1235 m
->waiting
= SCM_EOL
;
1236 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1237 m
->waiting
= make_queue ();
1241 SCM
scm_make_mutex (void)
1243 return scm_make_mutex_with_flags (SCM_EOL
);
1246 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1247 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1248 SCM_SYMBOL (recursive_sym
, "recursive");
1250 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1252 "Create a new mutex. ")
1253 #define FUNC_NAME s_scm_make_mutex_with_flags
1255 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1258 while (! scm_is_null (ptr
))
1260 SCM flag
= SCM_CAR (ptr
);
1261 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1262 unchecked_unlock
= 1;
1263 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1264 external_unlock
= 1;
1265 else if (scm_is_eq (flag
, recursive_sym
))
1268 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1269 ptr
= SCM_CDR (ptr
);
1271 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1275 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1277 "Create a new recursive mutex. ")
1278 #define FUNC_NAME s_scm_make_recursive_mutex
1280 return make_fat_mutex (1, 0, 0);
1284 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1287 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1289 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1291 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1292 SCM err
= SCM_BOOL_F
;
1294 struct timeval current_time
;
1296 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1302 m
->owner
= new_owner
;
1305 if (SCM_I_IS_THREAD (new_owner
))
1307 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1308 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1309 t
->mutexes
= scm_cons (mutex
, t
->mutexes
);
1310 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1315 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1317 m
->owner
= new_owner
;
1318 err
= scm_cons (scm_abandoned_mutex_error_key
,
1319 scm_from_locale_string ("lock obtained on abandoned "
1324 else if (scm_is_eq (m
->owner
, new_owner
))
1333 err
= scm_cons (scm_misc_error_key
,
1334 scm_from_locale_string ("mutex already locked "
1342 if (timeout
!= NULL
)
1344 gettimeofday (¤t_time
, NULL
);
1345 if (current_time
.tv_sec
> timeout
->tv_sec
||
1346 (current_time
.tv_sec
== timeout
->tv_sec
&&
1347 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1353 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1354 scm_i_pthread_mutex_unlock (&m
->lock
);
1356 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1359 scm_i_pthread_mutex_unlock (&m
->lock
);
1363 SCM
scm_lock_mutex (SCM mx
)
1365 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1368 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1369 (SCM m
, SCM timeout
, SCM owner
),
1370 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1371 "blocks until the mutex becomes available. The function returns when "
1372 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1373 "a thread already owns will succeed right away and will not block the "
1374 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1375 #define FUNC_NAME s_scm_lock_mutex_timed
1379 scm_t_timespec cwaittime
, *waittime
= NULL
;
1381 SCM_VALIDATE_MUTEX (1, m
);
1383 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1385 to_timespec (timeout
, &cwaittime
);
1386 waittime
= &cwaittime
;
1389 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1390 if (!scm_is_false (exception
))
1391 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1392 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1397 scm_dynwind_lock_mutex (SCM mutex
)
1399 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM
))scm_unlock_mutex
, mutex
,
1400 SCM_F_WIND_EXPLICITLY
);
1401 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM
))scm_lock_mutex
, mutex
,
1402 SCM_F_WIND_EXPLICITLY
);
1405 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1407 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1408 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1409 #define FUNC_NAME s_scm_try_mutex
1413 scm_t_timespec cwaittime
, *waittime
= NULL
;
1415 SCM_VALIDATE_MUTEX (1, mutex
);
1417 to_timespec (scm_from_int(0), &cwaittime
);
1418 waittime
= &cwaittime
;
1420 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1421 if (!scm_is_false (exception
))
1422 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1423 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1427 /*** Fat condition variables */
1430 scm_i_pthread_mutex_t lock
;
1431 SCM waiting
; /* the threads waiting for this condition. */
1434 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1435 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1438 fat_mutex_unlock (SCM mutex
, SCM cond
,
1439 const scm_t_timespec
*waittime
, int relock
)
1441 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1443 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1444 int err
= 0, ret
= 0;
1446 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1448 SCM owner
= m
->owner
;
1450 if (!scm_is_eq (owner
, scm_current_thread ()))
1454 if (!m
->unchecked_unlock
)
1456 scm_i_pthread_mutex_unlock (&m
->lock
);
1457 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1459 owner
= scm_current_thread ();
1461 else if (!m
->allow_external_unlock
)
1463 scm_i_pthread_mutex_unlock (&m
->lock
);
1464 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
1468 if (! (SCM_UNBNDP (cond
)))
1470 c
= SCM_CONDVAR_DATA (cond
);
1478 m
->owner
= unblock_from_queue (m
->waiting
);
1482 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1483 scm_i_pthread_mutex_unlock (&m
->lock
);
1490 else if (err
== ETIMEDOUT
)
1495 else if (err
!= EINTR
)
1498 scm_syserror (NULL
);
1504 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1512 scm_remember_upto_here_2 (cond
, mutex
);
1514 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1522 m
->owner
= unblock_from_queue (m
->waiting
);
1524 scm_i_pthread_mutex_unlock (&m
->lock
);
1531 SCM
scm_unlock_mutex (SCM mx
)
1533 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1536 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1537 (SCM mx
, SCM cond
, SCM timeout
),
1538 "Unlocks @var{mutex} if the calling thread owns the lock on "
1539 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1540 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1541 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1542 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1543 "with a call to @code{unlock-mutex}. Only the last call to "
1544 "@code{unlock-mutex} will actually unlock the mutex. ")
1545 #define FUNC_NAME s_scm_unlock_mutex_timed
1547 scm_t_timespec cwaittime
, *waittime
= NULL
;
1549 SCM_VALIDATE_MUTEX (1, mx
);
1550 if (! (SCM_UNBNDP (cond
)))
1552 SCM_VALIDATE_CONDVAR (2, cond
);
1554 if (! (SCM_UNBNDP (timeout
)))
1556 to_timespec (timeout
, &cwaittime
);
1557 waittime
= &cwaittime
;
1561 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1565 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1567 "Return @code{#t} if @var{obj} is a mutex.")
1568 #define FUNC_NAME s_scm_mutex_p
1570 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1574 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1576 "Return the thread owning @var{mx}, or @code{#f}.")
1577 #define FUNC_NAME s_scm_mutex_owner
1580 fat_mutex
*m
= NULL
;
1582 SCM_VALIDATE_MUTEX (1, mx
);
1583 m
= SCM_MUTEX_DATA (mx
);
1584 scm_i_pthread_mutex_lock (&m
->lock
);
1586 scm_i_pthread_mutex_unlock (&m
->lock
);
1592 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1594 "Return the lock level of mutex @var{mx}.")
1595 #define FUNC_NAME s_scm_mutex_level
1597 SCM_VALIDATE_MUTEX (1, mx
);
1598 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1602 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1604 "Returns @code{#t} if the mutex @var{mx} is locked.")
1605 #define FUNC_NAME s_scm_mutex_locked_p
1607 SCM_VALIDATE_MUTEX (1, mx
);
1608 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1613 fat_cond_mark (SCM cv
)
1615 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1620 fat_cond_free (SCM mx
)
1622 fat_cond
*c
= SCM_CONDVAR_DATA (mx
);
1623 scm_gc_free (c
, sizeof (fat_cond
), "condition-variable");
1628 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1630 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1631 scm_puts ("#<condition-variable ", port
);
1632 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1633 scm_puts (">", port
);
1637 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1639 "Make a new condition variable.")
1640 #define FUNC_NAME s_scm_make_condition_variable
1645 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1646 c
->waiting
= SCM_EOL
;
1647 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1648 c
->waiting
= make_queue ();
1653 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1654 (SCM cv
, SCM mx
, SCM t
),
1655 "Wait until @var{cond-var} has been signalled. While waiting, "
1656 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1657 "is locked again when this function returns. When @var{time} is given, "
1658 "it specifies a point in time where the waiting should be aborted. It "
1659 "can be either a integer as returned by @code{current-time} or a pair "
1660 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1661 "mutex is locked and @code{#f} is returned. When the condition "
1662 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1664 #define FUNC_NAME s_scm_timed_wait_condition_variable
1666 scm_t_timespec waittime
, *waitptr
= NULL
;
1668 SCM_VALIDATE_CONDVAR (1, cv
);
1669 SCM_VALIDATE_MUTEX (2, mx
);
1671 if (!SCM_UNBNDP (t
))
1673 to_timespec (t
, &waittime
);
1674 waitptr
= &waittime
;
1677 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1682 fat_cond_signal (fat_cond
*c
)
1684 unblock_from_queue (c
->waiting
);
1687 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1689 "Wake up one thread that is waiting for @var{cv}")
1690 #define FUNC_NAME s_scm_signal_condition_variable
1692 SCM_VALIDATE_CONDVAR (1, cv
);
1693 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1699 fat_cond_broadcast (fat_cond
*c
)
1701 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1705 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1707 "Wake up all threads that are waiting for @var{cv}. ")
1708 #define FUNC_NAME s_scm_broadcast_condition_variable
1710 SCM_VALIDATE_CONDVAR (1, cv
);
1711 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1716 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1718 "Return @code{#t} if @var{obj} is a condition variable.")
1719 #define FUNC_NAME s_scm_condition_variable_p
1721 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
/*** Marking stacks */

/* XXX - what to do with this? Do we need to handle this for blocked
   threads as well?
*/
/* On IA-64, registers spill into a separate backing store that the GC
   must also scan.  NOTE(review): the #ifdef/declaration lines were
   missing from the dump; restored from upstream -- verify.  */
#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do { \
    ucontext_t ctx; \
    SCM_STACKITEM * top, * bot; \
    getcontext (&ctx); \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
       / sizeof (SCM_STACKITEM))); \
    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
1746 scm_threads_mark_stacks (void)
1749 for (t
= all_threads
; t
; t
= t
->next_thread
)
1751 /* Check that thread has indeed been suspended.
1755 scm_gc_mark (t
->handle
);
1757 #if SCM_STACK_GROWS_UP
1758 scm_mark_locations (t
->base
, t
->top
- t
->base
);
1760 scm_mark_locations (t
->top
, t
->base
- t
->top
);
1762 scm_mark_locations ((void *) &t
->regs
,
1763 ((size_t) sizeof(t
->regs
)
1764 / sizeof (SCM_STACKITEM
)));
1767 SCM_MARK_BACKING_STORE ();
1773 scm_std_select (int nfds
,
1774 SELECT_TYPE
*readfds
,
1775 SELECT_TYPE
*writefds
,
1776 SELECT_TYPE
*exceptfds
,
1777 struct timeval
*timeout
)
1780 int res
, eno
, wakeup_fd
;
1781 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1782 scm_t_guile_ticket ticket
;
1784 if (readfds
== NULL
)
1786 FD_ZERO (&my_readfds
);
1787 readfds
= &my_readfds
;
1790 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1793 wakeup_fd
= t
->sleep_pipe
[0];
1794 ticket
= scm_leave_guile ();
1795 FD_SET (wakeup_fd
, readfds
);
1796 if (wakeup_fd
>= nfds
)
1798 res
= select (nfds
, readfds
, writefds
, exceptfds
, timeout
);
1801 scm_enter_guile (ticket
);
1803 scm_i_reset_sleep (t
);
1805 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1808 full_read (wakeup_fd
, &dummy
, 1);
1810 FD_CLR (wakeup_fd
, readfds
);
1822 /* Convenience API for blocking while in guile mode. */
1824 #if SCM_USE_PTHREAD_THREADS
1827 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1829 if (scm_i_pthread_mutex_trylock (mutex
) == 0)
1833 scm_t_guile_ticket t
= scm_leave_guile ();
1834 int res
= scm_i_pthread_mutex_lock (mutex
);
1835 scm_enter_guile (t
);
1841 do_unlock (void *data
)
1843 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1847 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1849 scm_i_scm_pthread_mutex_lock (mutex
);
1850 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1854 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1856 scm_t_guile_ticket t
= scm_leave_guile ();
1857 ((scm_i_thread
*)t
)->held_mutex
= mutex
;
1858 int res
= scm_i_pthread_cond_wait (cond
, mutex
);
1859 ((scm_i_thread
*)t
)->held_mutex
= NULL
;
1860 scm_enter_guile (t
);
1865 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1866 scm_i_pthread_mutex_t
*mutex
,
1867 const scm_t_timespec
*wt
)
1869 scm_t_guile_ticket t
= scm_leave_guile ();
1870 ((scm_i_thread
*)t
)->held_mutex
= mutex
;
1871 int res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1872 ((scm_i_thread
*)t
)->held_mutex
= NULL
;
1873 scm_enter_guile (t
);
1880 scm_std_usleep (unsigned long usecs
)
1883 tv
.tv_usec
= usecs
% 1000000;
1884 tv
.tv_sec
= usecs
/ 1000000;
1885 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1886 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
1890 scm_std_sleep (unsigned int secs
)
1895 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1901 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1903 "Return the thread that called this function.")
1904 #define FUNC_NAME s_scm_current_thread
1906 return SCM_I_CURRENT_THREAD
->handle
;
1911 scm_c_make_list (size_t n
, SCM fill
)
1915 res
= scm_cons (fill
, res
);
1919 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1921 "Return a list of all threads.")
1922 #define FUNC_NAME s_scm_all_threads
1924 /* We can not allocate while holding the thread_admin_mutex because
1925 of the way GC is done.
1927 int n
= thread_count
;
1929 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1931 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1933 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
1935 if (t
!= scm_i_signal_delivery_thread
)
1937 SCM_SETCAR (*l
, t
->handle
);
1938 l
= SCM_CDRLOC (*l
);
1943 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1948 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1950 "Return @code{#t} iff @var{thread} has exited.\n")
1951 #define FUNC_NAME s_scm_thread_exited_p
1953 return scm_from_bool (scm_c_thread_exited_p (thread
));
1958 scm_c_thread_exited_p (SCM thread
)
1959 #define FUNC_NAME s_scm_thread_exited_p
1962 SCM_VALIDATE_THREAD (1, thread
);
1963 t
= SCM_I_THREAD_DATA (thread
);
1968 static scm_i_pthread_cond_t wake_up_cond
;
1969 int scm_i_thread_go_to_sleep
;
1970 static int threads_initialized_p
= 0;
1973 scm_i_thread_put_to_sleep ()
1975 if (threads_initialized_p
)
1980 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1982 /* Signal all threads to go to sleep
1984 scm_i_thread_go_to_sleep
= 1;
1985 for (t
= all_threads
; t
; t
= t
->next_thread
)
1986 scm_i_pthread_mutex_lock (&t
->heap_mutex
);
1987 scm_i_thread_go_to_sleep
= 0;
1992 scm_i_thread_invalidate_freelists ()
1994 /* thread_admin_mutex is already locked. */
1997 for (t
= all_threads
; t
; t
= t
->next_thread
)
1998 if (t
!= SCM_I_CURRENT_THREAD
)
1999 t
->clear_freelists_p
= 1;
2003 scm_i_thread_wake_up ()
2005 if (threads_initialized_p
)
2009 scm_i_pthread_cond_broadcast (&wake_up_cond
);
2010 for (t
= all_threads
; t
; t
= t
->next_thread
)
2011 scm_i_pthread_mutex_unlock (&t
->heap_mutex
);
2012 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
2013 scm_enter_guile ((scm_t_guile_ticket
) SCM_I_CURRENT_THREAD
);
2018 scm_i_thread_sleep_for_gc ()
2020 scm_i_thread
*t
= suspend ();
2022 /* Don't put t->heap_mutex in t->held_mutex here, because if the
2023 thread is cancelled during the cond wait, the thread's cleanup
2024 function (scm_leave_guile_cleanup) will handle unlocking the
2025 heap_mutex, so we don't need to do that again in on_thread_exit.
2027 scm_i_pthread_cond_wait (&wake_up_cond
, &t
->heap_mutex
);
2032 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2034 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
2035 int scm_i_critical_section_level
= 0;
2037 static SCM dynwind_critical_section_mutex
;
2040 scm_dynwind_critical_section (SCM mutex
)
2042 if (scm_is_false (mutex
))
2043 mutex
= dynwind_critical_section_mutex
;
2044 scm_dynwind_lock_mutex (mutex
);
2045 scm_dynwind_block_asyncs ();
2048 /*** Initialization */
2050 scm_i_pthread_key_t scm_i_freelist
, scm_i_freelist2
;
2051 scm_i_pthread_mutex_t scm_i_misc_mutex
;
2053 #if SCM_USE_PTHREAD_THREADS
2054 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
2058 scm_threads_prehistory (SCM_STACKITEM
*base
)
2060 #if SCM_USE_PTHREAD_THREADS
2061 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2062 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2063 PTHREAD_MUTEX_RECURSIVE
);
2066 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2067 scm_i_pthread_mutexattr_recursive
);
2068 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2069 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
2070 scm_i_pthread_key_create (&scm_i_freelist
, NULL
);
2071 scm_i_pthread_key_create (&scm_i_freelist2
, NULL
);
2073 guilify_self_1 (base
);
2076 scm_t_bits scm_tc16_thread
;
2077 scm_t_bits scm_tc16_mutex
;
2078 scm_t_bits scm_tc16_condvar
;
2083 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2084 scm_set_smob_mark (scm_tc16_thread
, thread_mark
);
2085 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2086 scm_set_smob_free (scm_tc16_thread
, thread_free
);
2088 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2089 scm_set_smob_mark (scm_tc16_mutex
, fat_mutex_mark
);
2090 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2091 scm_set_smob_free (scm_tc16_mutex
, fat_mutex_free
);
2093 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2095 scm_set_smob_mark (scm_tc16_condvar
, fat_cond_mark
);
2096 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
2097 scm_set_smob_free (scm_tc16_condvar
, fat_cond_free
);
2099 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2100 guilify_self_2 (SCM_BOOL_F
);
2101 threads_initialized_p
= 1;
2103 dynwind_critical_section_mutex
=
2104 scm_permanent_object (scm_make_recursive_mutex ());
2108 scm_init_threads_default_dynamic_state ()
2110 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2111 scm_i_default_dynamic_state
= scm_permanent_object (state
);
2115 scm_init_thread_procs ()
2117 #include "libguile/threads.x"