1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
43 #include "libguile/validate.h"
44 #include "libguile/root.h"
45 #include "libguile/eval.h"
46 #include "libguile/async.h"
47 #include "libguile/ports.h"
48 #include "libguile/threads.h"
49 #include "libguile/dynwind.h"
50 #include "libguile/iselect.h"
51 #include "libguile/fluids.h"
52 #include "libguile/continuations.h"
53 #include "libguile/gc.h"
54 #include "libguile/init.h"
55 #include "libguile/scmsigs.h"
56 #include "libguile/strings.h"
57 #include "libguile/weaks.h"
61 # define ETIMEDOUT WSAETIMEDOUT
65 # define pipe(fd) _pipe (fd, 256, O_BINARY)
66 #endif /* __MINGW32__ */
68 #include <full-read.h>
73 /* First some libgc shims. */
75 /* Make sure GC_fn_type is defined; it is missing from the public
76 headers of GC 7.1 and earlier. */
77 #ifndef HAVE_GC_FN_TYPE
/* Function-pointer type accepted and returned by the libgc entry points
   used below; GC 7.1 and earlier do not expose it in their public
   headers, so declare it ourselves.  (The extraction had split this
   declaration across two lines; rejoined.)  */
typedef void * (* GC_fn_type) (void *);
86 #ifndef GC_UNIMPLEMENTED
87 #define GC_UNIMPLEMENTED 3
90 /* Likewise struct GC_stack_base is missing before 7.1. */
91 #ifndef HAVE_GC_STACK_BASE
92 struct GC_stack_base
{
93 void * mem_base
; /* Base of memory stack. */
95 void * reg_base
; /* Base of separate register stack. */
100 GC_register_my_thread (struct GC_stack_base
*stack_base
)
102 return GC_UNIMPLEMENTED
;
106 GC_unregister_my_thread ()
110 #if !SCM_USE_PTHREAD_THREADS
111 /* No threads; we can just use GC_stackbottom. */
113 get_thread_stack_base ()
115 return GC_stackbottom
;
118 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
119 && defined PTHREAD_ATTR_GETSTACK_WORKS
120 /* This method for GNU/Linux and perhaps some other systems.
121 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
122 available on them. */
124 get_thread_stack_base ()
130 pthread_getattr_np (pthread_self (), &attr
);
131 pthread_attr_getstack (&attr
, &start
, &size
);
132 end
= (char *)start
+ size
;
134 #if SCM_STACK_GROWS_UP
141 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
142 /* This method for MacOS X.
143 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
144 but as of 2006 there's nothing obvious at apple.com. */
146 get_thread_stack_base ()
148 return pthread_get_stackaddr_np (pthread_self ());
152 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
156 GC_get_stack_base (struct GC_stack_base
*stack_base
)
158 stack_base
->mem_base
= get_thread_stack_base ();
160 /* Calculate and store off the base of this thread's register
161 backing store (RBS). Unfortunately our implementation(s) of
162 scm_ia64_register_backing_store_base are only reliable for the
163 main thread. For other threads, therefore, find out the current
164 top of the RBS, and use that as a maximum. */
165 stack_base
->reg_base
= scm_ia64_register_backing_store_base ();
170 bsp
= scm_ia64_ar_bsp (&ctx
);
171 if (stack_base
->reg_base
> bsp
)
172 stack_base
->reg_base
= bsp
;
179 GC_call_with_stack_base(void * (*fn
) (struct GC_stack_base
*, void*), void *arg
)
181 struct GC_stack_base stack_base
;
183 stack_base
.mem_base
= (void*)&stack_base
;
185 /* FIXME: Untested. */
189 stack_base
.reg_base
= scm_ia64_ar_bsp (&ctx
);
193 return fn (&stack_base
, arg
);
195 #endif /* HAVE_GC_STACK_BASE */
198 /* Now define with_gc_active and with_gc_inactive. */
200 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
202 /* We have a sufficiently new libgc (7.2 or newer). */
205 with_gc_inactive (GC_fn_type func
, void *data
)
207 return GC_do_blocking (func
, data
);
211 with_gc_active (GC_fn_type func
, void *data
)
213 return GC_call_with_gc_active (func
, data
);
218 /* libgc not new enough, so never actually deactivate GC.
220 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
221 GC_call_with_gc_active. */
224 with_gc_inactive (GC_fn_type func
, void *data
)
230 with_gc_active (GC_fn_type func
, void *data
)
235 #endif /* HAVE_GC_DO_BLOCKING */
240 to_timespec (SCM t
, scm_t_timespec
*waittime
)
244 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
245 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
249 double time
= scm_to_double (t
);
250 double sec
= scm_c_truncate (time
);
252 waittime
->tv_sec
= (long) sec
;
253 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
260 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
261 the risk of false references leading to unbounded retained space as
262 described in "Bounding Space Usage of Conservative Garbage Collectors",
265 /* Make an empty queue data structure.
270 return scm_cons (SCM_EOL
, SCM_EOL
);
273 /* Put T at the back of Q and return a handle that can be used with
274 remqueue to remove T from Q again.
277 enqueue (SCM q
, SCM t
)
279 SCM c
= scm_cons (t
, SCM_EOL
);
280 SCM_CRITICAL_SECTION_START
;
281 if (scm_is_null (SCM_CDR (q
)))
284 SCM_SETCDR (SCM_CAR (q
), c
);
286 SCM_CRITICAL_SECTION_END
;
290 /* Remove the element that the handle C refers to from the queue Q. C
291 must have been returned from a call to enqueue. The return value
292 is zero when the element referred to by C has already been removed.
293 Otherwise, 1 is returned.
296 remqueue (SCM q
, SCM c
)
299 SCM_CRITICAL_SECTION_START
;
300 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
302 if (scm_is_eq (p
, c
))
304 if (scm_is_eq (c
, SCM_CAR (q
)))
305 SCM_SETCAR (q
, SCM_CDR (c
));
306 SCM_SETCDR (prev
, SCM_CDR (c
));
309 SCM_SETCDR (c
, SCM_EOL
);
311 SCM_CRITICAL_SECTION_END
;
316 SCM_CRITICAL_SECTION_END
;
320 /* Remove the front-most element from the queue Q and return it.
321 Return SCM_BOOL_F when Q is empty.
327 SCM_CRITICAL_SECTION_START
;
331 SCM_CRITICAL_SECTION_END
;
336 SCM_SETCDR (q
, SCM_CDR (c
));
337 if (scm_is_null (SCM_CDR (q
)))
338 SCM_SETCAR (q
, SCM_EOL
);
339 SCM_CRITICAL_SECTION_END
;
342 SCM_SETCDR (c
, SCM_EOL
);
348 /*** Thread smob routines */
352 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
354 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
355 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
356 the struct case, hence we go via a union, and extract according to the
357 size of pthread_t. */
365 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
366 scm_i_pthread_t p
= t
->pthread
;
369 if (sizeof (p
) == sizeof (unsigned short))
371 else if (sizeof (p
) == sizeof (unsigned int))
373 else if (sizeof (p
) == sizeof (unsigned long))
378 scm_puts ("#<thread ", port
);
379 scm_uintprint (id
, 10, port
);
380 scm_puts (" (", port
);
381 scm_uintprint ((scm_t_bits
)t
, 16, port
);
382 scm_puts (")>", port
);
387 /*** Blocking on queues. */
389 /* See also scm_i_queue_async_cell for how such a block is
393 /* Put the current thread on QUEUE and go to sleep, waiting for it to
394 be woken up by a call to 'unblock_from_queue', or to be
395 interrupted. Upon return of this function, the current thread is
396 no longer on QUEUE, even when the sleep has been interrupted.
398 The caller of block_self must hold MUTEX. It will be atomically
399 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
401 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
404 When WAITTIME is not NULL, the sleep will be aborted at that time.
406 The return value of block_self is an errno value. It will be zero
407 when the sleep has been successfully completed by a call to
408 unblock_from_queue, EINTR when it has been interrupted by the
409 delivery of a system async, and ETIMEDOUT when the timeout has
412 The system asyncs themselves are not executed by block_self.
415 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
416 const scm_t_timespec
*waittime
)
418 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
422 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
427 q_handle
= enqueue (queue
, t
->handle
);
428 if (waittime
== NULL
)
429 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
431 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
433 /* When we are still on QUEUE, we have been interrupted. We
434 report this only when no other error (such as a timeout) has
437 if (remqueue (queue
, q_handle
) && err
== 0)
440 scm_i_reset_sleep (t
);
446 /* Wake up the first thread on QUEUE, if any. The awoken thread is
447 returned, or #f if the queue was empty.
450 unblock_from_queue (SCM queue
)
452 SCM thread
= dequeue (queue
);
453 if (scm_is_true (thread
))
454 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
459 /* Getting into and out of guile mode.
462 /* Key used to attach a cleanup handler to a given thread. Also, if
463 thread-local storage is unavailable, this key is used to retrieve the
464 current thread with `pthread_getspecific ()'. */
465 scm_i_pthread_key_t scm_i_thread_key
;
468 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
470 /* When thread-local storage (TLS) is available, a pointer to the
471 current-thread object is kept in TLS. Note that storing the thread-object
472 itself in TLS (rather than a pointer to some malloc'd memory) is not
473 possible since thread objects may live longer than the actual thread they
475 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
477 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
480 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
481 static scm_i_thread
*all_threads
= NULL
;
482 static int thread_count
;
484 static SCM scm_i_default_dynamic_state
;
486 /* Perform first stage of thread initialisation, in non-guile mode.
489 guilify_self_1 (struct GC_stack_base
*base
)
493 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
494 before allocating anything in this thread, because allocation could
495 cause GC to run, and GC could cause finalizers, which could invoke
496 Scheme functions, which need the current thread to be set. */
498 t
.pthread
= scm_i_pthread_self ();
499 t
.handle
= SCM_BOOL_F
;
500 t
.result
= SCM_BOOL_F
;
501 t
.cleanup_handler
= SCM_BOOL_F
;
504 t
.join_queue
= SCM_EOL
;
505 t
.dynamic_state
= SCM_BOOL_F
;
506 t
.dynwinds
= SCM_EOL
;
507 t
.active_asyncs
= SCM_EOL
;
509 t
.pending_asyncs
= 1;
510 t
.critical_section_level
= 0;
511 t
.base
= base
->mem_base
;
513 t
.register_backing_store_base
= base
->reg
-base
;
515 t
.continuation_root
= SCM_EOL
;
516 t
.continuation_base
= t
.base
;
517 scm_i_pthread_cond_init (&t
.sleep_cond
, NULL
);
518 t
.sleep_mutex
= NULL
;
519 t
.sleep_object
= SCM_BOOL_F
;
522 if (pipe (t
.sleep_pipe
) != 0)
523 /* FIXME: Error conditions during the initialization phase are handled
524 gracelessly since public functions such as `scm_init_guile ()'
525 currently have type `void'. */
528 scm_i_pthread_mutex_init (&t
.admin_mutex
, NULL
);
529 t
.current_mark_stack_ptr
= NULL
;
530 t
.current_mark_stack_limit
= NULL
;
535 /* The switcheroo. */
537 scm_i_thread
*t_ptr
= &t
;
540 t_ptr
= GC_malloc (sizeof (scm_i_thread
));
541 memcpy (t_ptr
, &t
, sizeof t
);
543 scm_i_pthread_setspecific (scm_i_thread_key
, t_ptr
);
545 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
546 /* Cache the current thread in TLS for faster lookup. */
547 scm_i_current_thread
= t_ptr
;
550 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
551 t_ptr
->next_thread
= all_threads
;
554 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
560 /* Perform second stage of thread initialisation, in guile mode.
563 guilify_self_2 (SCM parent
)
565 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
569 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
571 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
572 t
->continuation_base
= t
->base
;
575 if (scm_is_true (parent
))
576 t
->dynamic_state
= scm_make_dynamic_state (parent
);
578 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
580 t
->join_queue
= make_queue ();
587 /* We implement our own mutex type since we want them to be 'fair', we
588 want to do fancy things while waiting for them (like running
589 asyncs) and we might want to add things that are nice for
594 scm_i_pthread_mutex_t lock
;
596 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
598 int recursive
; /* allow recursive locking? */
599 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
600 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
601 owned by the current thread? */
603 SCM waiting
; /* the threads waiting for this mutex. */
606 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
607 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
609 /* Perform thread tear-down, in guile mode.
612 do_thread_exit (void *v
)
614 scm_i_thread
*t
= (scm_i_thread
*) v
;
616 if (!scm_is_false (t
->cleanup_handler
))
618 SCM ptr
= t
->cleanup_handler
;
620 t
->cleanup_handler
= SCM_BOOL_F
;
621 t
->result
= scm_internal_catch (SCM_BOOL_T
,
622 (scm_t_catch_body
) scm_call_0
, ptr
,
623 scm_handle_by_message_noexit
, NULL
);
626 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
629 close (t
->sleep_pipe
[0]);
630 close (t
->sleep_pipe
[1]);
631 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
634 while (!scm_is_null (t
->mutexes
))
636 SCM mutex
= SCM_WEAK_PAIR_CAR (t
->mutexes
);
638 if (!SCM_UNBNDP (mutex
))
640 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
642 scm_i_pthread_mutex_lock (&m
->lock
);
644 /* Since MUTEX is in `t->mutexes', T must be its owner. */
645 assert (scm_is_eq (m
->owner
, t
->handle
));
647 unblock_from_queue (m
->waiting
);
649 scm_i_pthread_mutex_unlock (&m
->lock
);
652 t
->mutexes
= SCM_WEAK_PAIR_CDR (t
->mutexes
);
655 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
661 do_thread_exit_trampoline (struct GC_stack_base
*sb
, void *v
)
663 /* Won't hurt if we are already registered. */
664 GC_register_my_thread (sb
);
666 return scm_with_guile (do_thread_exit
, v
);
670 on_thread_exit (void *v
)
672 /* This handler is executed in non-guile mode. */
673 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
675 /* If this thread was cancelled while doing a cond wait, it will
676 still have a mutex locked, so we unlock it here. */
679 scm_i_pthread_mutex_unlock (t
->held_mutex
);
680 t
->held_mutex
= NULL
;
683 /* Reinstate the current thread for purposes of scm_with_guile
684 guile-mode cleanup handlers. Only really needed in the non-TLS
685 case but it doesn't hurt to be consistent. */
686 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
688 /* Ensure the signal handling thread has been launched, because we might be
690 scm_i_ensure_signal_delivery_thread ();
692 /* Scheme-level thread finalizers and other cleanup needs to happen in
694 GC_call_with_stack_base (do_thread_exit_trampoline
, t
);
696 /* Removing ourself from the list of all threads needs to happen in
697 non-guile mode since all SCM values on our stack become
698 unprotected once we are no longer in the list. */
699 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
700 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
703 *tp
= t
->next_thread
;
706 t
->next_thread
= NULL
;
712 /* If there's only one other thread, it could be the signal delivery
713 thread, so we need to notify it to shut down by closing its read pipe.
714 If it's not the signal delivery thread, then closing the read pipe isn't
716 if (thread_count
<= 1)
717 scm_i_close_signal_pipe ();
719 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
721 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
723 #if !SCM_USE_NULL_THREADS
724 GC_unregister_my_thread ();
728 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
731 init_thread_key (void)
733 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
736 /* Perform any initializations necessary to make the current thread
737 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
740 BASE is the stack base to use with GC.
742 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
743 which case the default dynamic state is used.
745 Returns zero when the thread was known to guile already; otherwise
748 Note that it could be the case that the thread was known
749 to Guile, but not in guile mode (because we are within a
750 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
751 be sure. New threads are put into guile mode implicitly. */
754 scm_i_init_thread_for_guile (struct GC_stack_base
*base
, SCM parent
)
756 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
758 if (SCM_I_CURRENT_THREAD
)
760 /* Thread is already known to Guile.
766 /* This thread has not been guilified yet.
769 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
770 if (scm_initialized_p
== 0)
772 /* First thread ever to enter Guile. Run the full
775 scm_i_init_guile (base
);
777 #ifdef HAVE_GC_ALLOW_REGISTER_THREADS
778 /* Allow other threads to come in later. */
779 GC_allow_register_threads ();
782 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
786 /* Guile is already initialized, but this thread enters it for
787 the first time. Only initialize this thread.
789 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
791 /* Register this thread with libgc. */
792 GC_register_my_thread (base
);
794 guilify_self_1 (base
);
795 guilify_self_2 (parent
);
804 struct GC_stack_base stack_base
;
806 if (GC_get_stack_base (&stack_base
) == GC_SUCCESS
)
807 scm_i_init_thread_for_guile (&stack_base
,
808 scm_i_default_dynamic_state
);
811 fprintf (stderr
, "Failed to get stack base for current thread.\n");
816 SCM_UNUSED
static void
817 scm_leave_guile_cleanup (void *x
)
819 on_thread_exit (SCM_I_CURRENT_THREAD
);
822 struct with_guile_args
830 with_guile_trampoline (void *data
)
832 struct with_guile_args
*args
= data
;
834 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
838 with_guile_and_parent (struct GC_stack_base
*base
, void *data
)
843 struct with_guile_args
*args
= data
;
845 new_thread
= scm_i_init_thread_for_guile (base
, args
->parent
);
846 t
= SCM_I_CURRENT_THREAD
;
849 /* We are in Guile mode. */
850 assert (t
->guile_mode
);
852 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
854 /* Leave Guile mode. */
857 else if (t
->guile_mode
)
859 /* Already in Guile mode. */
860 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
864 /* We are not in Guile mode, either because we are not within a
865 scm_with_guile, or because we are within a scm_without_guile.
867 This call to scm_with_guile() could happen from anywhere on the
868 stack, and in particular lower on the stack than when it was
869 when this thread was first guilified. Thus, `base' must be
871 #if SCM_STACK_GROWS_UP
872 if (SCM_STACK_PTR (base
->mem_base
) < t
->base
)
873 t
->base
= SCM_STACK_PTR (base
->mem_base
);
875 if (SCM_STACK_PTR (base
->mem_base
) > t
->base
)
876 t
->base
= SCM_STACK_PTR (base
->mem_base
);
880 res
= with_gc_active (with_guile_trampoline
, args
);
887 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
889 struct with_guile_args args
;
893 args
.parent
= parent
;
895 return GC_call_with_stack_base (with_guile_and_parent
, &args
);
899 scm_with_guile (void *(*func
)(void *), void *data
)
901 return scm_i_with_guile_and_parent (func
, data
,
902 scm_i_default_dynamic_state
);
906 scm_without_guile (void *(*func
)(void *), void *data
)
909 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
913 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
914 result
= with_gc_inactive (func
, data
);
915 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
918 /* Otherwise we're not in guile mode, so nothing to do. */
919 result
= func (data
);
925 /*** Thread creation */
932 scm_i_pthread_mutex_t mutex
;
933 scm_i_pthread_cond_t cond
;
937 really_launch (void *d
)
939 launch_data
*data
= (launch_data
*)d
;
940 SCM thunk
= data
->thunk
, handler
= data
->handler
;
943 t
= SCM_I_CURRENT_THREAD
;
945 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
946 data
->thread
= scm_current_thread ();
947 scm_i_pthread_cond_signal (&data
->cond
);
948 scm_i_pthread_mutex_unlock (&data
->mutex
);
950 if (SCM_UNBNDP (handler
))
951 t
->result
= scm_call_0 (thunk
);
953 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
959 launch_thread (void *d
)
961 launch_data
*data
= (launch_data
*)d
;
962 scm_i_pthread_detach (scm_i_pthread_self ());
963 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
967 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
968 (SCM thunk
, SCM handler
),
969 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
970 "returning a new thread object representing the thread. The procedure\n"
971 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
973 "When @var{handler} is specified, then @var{thunk} is called from\n"
974 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
975 "handler. This catch is established inside the continuation barrier.\n"
977 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
978 "the @emph{exit value} of the thread and the thread is terminated.")
979 #define FUNC_NAME s_scm_call_with_new_thread
985 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
986 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
987 handler
, SCM_ARG2
, FUNC_NAME
);
989 data
.parent
= scm_current_dynamic_state ();
991 data
.handler
= handler
;
992 data
.thread
= SCM_BOOL_F
;
993 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
994 scm_i_pthread_cond_init (&data
.cond
, NULL
);
996 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
997 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
1000 scm_i_pthread_mutex_unlock (&data
.mutex
);
1002 scm_syserror (NULL
);
1004 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1005 scm_i_pthread_mutex_unlock (&data
.mutex
);
1013 scm_t_catch_body body
;
1015 scm_t_catch_handler handler
;
1018 scm_i_pthread_mutex_t mutex
;
1019 scm_i_pthread_cond_t cond
;
1023 really_spawn (void *d
)
1025 spawn_data
*data
= (spawn_data
*)d
;
1026 scm_t_catch_body body
= data
->body
;
1027 void *body_data
= data
->body_data
;
1028 scm_t_catch_handler handler
= data
->handler
;
1029 void *handler_data
= data
->handler_data
;
1030 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1032 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
1033 data
->thread
= scm_current_thread ();
1034 scm_i_pthread_cond_signal (&data
->cond
);
1035 scm_i_pthread_mutex_unlock (&data
->mutex
);
1037 if (handler
== NULL
)
1038 t
->result
= body (body_data
);
1040 t
->result
= scm_internal_catch (SCM_BOOL_T
,
1042 handler
, handler_data
);
1048 spawn_thread (void *d
)
1050 spawn_data
*data
= (spawn_data
*)d
;
1051 scm_i_pthread_detach (scm_i_pthread_self ());
1052 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
1057 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
1058 scm_t_catch_handler handler
, void *handler_data
)
1064 data
.parent
= scm_current_dynamic_state ();
1066 data
.body_data
= body_data
;
1067 data
.handler
= handler
;
1068 data
.handler_data
= handler_data
;
1069 data
.thread
= SCM_BOOL_F
;
1070 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1071 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1073 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1074 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1077 scm_i_pthread_mutex_unlock (&data
.mutex
);
1079 scm_syserror (NULL
);
1081 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1082 scm_i_pthread_mutex_unlock (&data
.mutex
);
1087 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1089 "Move the calling thread to the end of the scheduling queue.")
1090 #define FUNC_NAME s_scm_yield
1092 return scm_from_bool (scm_i_sched_yield ());
1096 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1098 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1099 "cannot be the current thread, and if @var{thread} has already terminated or "
1100 "been signaled to terminate, this function is a no-op.")
1101 #define FUNC_NAME s_scm_cancel_thread
1103 scm_i_thread
*t
= NULL
;
1105 SCM_VALIDATE_THREAD (1, thread
);
1106 t
= SCM_I_THREAD_DATA (thread
);
1107 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1111 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1112 scm_i_pthread_cancel (t
->pthread
);
1115 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1117 return SCM_UNSPECIFIED
;
1121 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1122 (SCM thread
, SCM proc
),
1123 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1124 "This handler will be called when the thread exits.")
1125 #define FUNC_NAME s_scm_set_thread_cleanup_x
1129 SCM_VALIDATE_THREAD (1, thread
);
1130 if (!scm_is_false (proc
))
1131 SCM_VALIDATE_THUNK (2, proc
);
1133 t
= SCM_I_THREAD_DATA (thread
);
1134 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1136 if (!(t
->exited
|| t
->canceled
))
1137 t
->cleanup_handler
= proc
;
1139 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1141 return SCM_UNSPECIFIED
;
1145 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1147 "Return the cleanup handler installed for the thread @var{thread}.")
1148 #define FUNC_NAME s_scm_thread_cleanup
1153 SCM_VALIDATE_THREAD (1, thread
);
1155 t
= SCM_I_THREAD_DATA (thread
);
1156 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1157 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1158 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1164 SCM
scm_join_thread (SCM thread
)
1166 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1169 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1170 (SCM thread
, SCM timeout
, SCM timeoutval
),
1171 "Suspend execution of the calling thread until the target @var{thread} "
1172 "terminates, unless the target @var{thread} has already terminated. ")
1173 #define FUNC_NAME s_scm_join_thread_timed
1176 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1177 SCM res
= SCM_BOOL_F
;
1179 if (! (SCM_UNBNDP (timeoutval
)))
1182 SCM_VALIDATE_THREAD (1, thread
);
1183 if (scm_is_eq (scm_current_thread (), thread
))
1184 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1186 t
= SCM_I_THREAD_DATA (thread
);
1187 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1189 if (! SCM_UNBNDP (timeout
))
1191 to_timespec (timeout
, &ctimeout
);
1192 timeout_ptr
= &ctimeout
;
1201 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1211 else if (err
== ETIMEDOUT
)
1214 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1216 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1218 /* Check for exit again, since we just released and
1219 reacquired the admin mutex, before the next block_self
1220 call (which would block forever if t has already
1230 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1236 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1238 "Return @code{#t} if @var{obj} is a thread.")
1239 #define FUNC_NAME s_scm_thread_p
1241 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1247 fat_mutex_free (SCM mx
)
1249 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1250 scm_i_pthread_mutex_destroy (&m
->lock
);
1255 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1257 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1258 scm_puts ("#<mutex ", port
);
1259 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1260 scm_puts (">", port
);
1265 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1270 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1271 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
1272 m
->owner
= SCM_BOOL_F
;
1275 m
->recursive
= recursive
;
1276 m
->unchecked_unlock
= unchecked_unlock
;
1277 m
->allow_external_unlock
= external_unlock
;
1279 m
->waiting
= SCM_EOL
;
1280 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1281 m
->waiting
= make_queue ();
1285 SCM
scm_make_mutex (void)
1287 return scm_make_mutex_with_flags (SCM_EOL
);
1290 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1291 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1292 SCM_SYMBOL (recursive_sym
, "recursive");
1294 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1296 "Create a new mutex. ")
1297 #define FUNC_NAME s_scm_make_mutex_with_flags
1299 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1302 while (! scm_is_null (ptr
))
1304 SCM flag
= SCM_CAR (ptr
);
1305 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1306 unchecked_unlock
= 1;
1307 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1308 external_unlock
= 1;
1309 else if (scm_is_eq (flag
, recursive_sym
))
1312 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1313 ptr
= SCM_CDR (ptr
);
1315 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1319 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1321 "Create a new recursive mutex. ")
1322 #define FUNC_NAME s_scm_make_recursive_mutex
1324 return make_fat_mutex (1, 0, 0);
1328 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1331 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1333 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1335 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1336 SCM err
= SCM_BOOL_F
;
1338 struct timeval current_time
;
1340 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1346 m
->owner
= new_owner
;
1349 if (SCM_I_IS_THREAD (new_owner
))
1351 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1352 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1354 /* Only keep a weak reference to MUTEX so that it's not
1355 retained when not referenced elsewhere (bug #27450).
1356 The weak pair itself is eventually removed when MUTEX
1357 is unlocked. Note that `t->mutexes' lists mutexes
1358 currently held by T, so it should be small. */
1359 t
->mutexes
= scm_weak_car_pair (mutex
, t
->mutexes
);
1361 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1366 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1368 m
->owner
= new_owner
;
1369 err
= scm_cons (scm_abandoned_mutex_error_key
,
1370 scm_from_locale_string ("lock obtained on abandoned "
1375 else if (scm_is_eq (m
->owner
, new_owner
))
1384 err
= scm_cons (scm_misc_error_key
,
1385 scm_from_locale_string ("mutex already locked "
1393 if (timeout
!= NULL
)
1395 gettimeofday (¤t_time
, NULL
);
1396 if (current_time
.tv_sec
> timeout
->tv_sec
||
1397 (current_time
.tv_sec
== timeout
->tv_sec
&&
1398 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1404 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1405 scm_i_pthread_mutex_unlock (&m
->lock
);
1407 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1410 scm_i_pthread_mutex_unlock (&m
->lock
);
1414 SCM
scm_lock_mutex (SCM mx
)
1416 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1419 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1420 (SCM m
, SCM timeout
, SCM owner
),
1421 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1422 "blocks until the mutex becomes available. The function returns when "
1423 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1424 "a thread already owns will succeed right away and will not block the "
1425 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1426 #define FUNC_NAME s_scm_lock_mutex_timed
1430 scm_t_timespec cwaittime
, *waittime
= NULL
;
1432 SCM_VALIDATE_MUTEX (1, m
);
1434 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1436 to_timespec (timeout
, &cwaittime
);
1437 waittime
= &cwaittime
;
1440 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1441 if (!scm_is_false (exception
))
1442 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1443 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1448 scm_dynwind_lock_mutex (SCM mutex
)
1450 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM
))scm_unlock_mutex
, mutex
,
1451 SCM_F_WIND_EXPLICITLY
);
1452 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM
))scm_lock_mutex
, mutex
,
1453 SCM_F_WIND_EXPLICITLY
);
1456 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1458 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1459 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1460 #define FUNC_NAME s_scm_try_mutex
1464 scm_t_timespec cwaittime
, *waittime
= NULL
;
1466 SCM_VALIDATE_MUTEX (1, mutex
);
1468 to_timespec (scm_from_int(0), &cwaittime
);
1469 waittime
= &cwaittime
;
1471 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1472 if (!scm_is_false (exception
))
1473 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1474 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1478 /*** Fat condition variables */
/* NOTE(review): the `typedef struct { ... } fat_cond;' header/footer of
   this declaration appear to have been lost in extraction; only the two
   fields below are visible -- confirm against the original file.  */
1481 scm_i_pthread_mutex_t lock
;
1482 SCM waiting
; /* the threads waiting for this condition. */
/* Smob predicate and data accessor for condition-variable objects.  */
1485 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1486 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
/* Unlock MUTEX; when COND is bound, atomically wait on COND (with optional
   absolute WAITTIME) and, when RELOCK is nonzero, re-acquire the mutex
   before returning.  NOTE(review): many interior lines (braces, loop
   headers, level bookkeeping) were lost in extraction; only the visible
   fragments are annotated below.  */
1489 fat_mutex_unlock (SCM mutex
, SCM cond
,
1490 const scm_t_timespec
*waittime
, int relock
)
1493 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1495 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1496 int err
= 0, ret
= 0;
1498 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Caller is not the owner: reject unless the mutex's flags permit an
   unchecked or external unlock.  */
1502 if (!scm_is_eq (owner
, t
->handle
))
1506 if (!m
->unchecked_unlock
)
1508 scm_i_pthread_mutex_unlock (&m
->lock
);
1509 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1513 else if (!m
->allow_external_unlock
)
1515 scm_i_pthread_mutex_unlock (&m
->lock
);
1516 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
/* COND bound: unlock, then wait on the condition variable.  */
1520 if (! (SCM_UNBNDP (cond
)))
1522 c
= SCM_CONDVAR_DATA (cond
);
1531 /* Change the owner of MUTEX. */
1532 t
->mutexes
= scm_delq_x (mutex
, t
->mutexes
);
1533 m
->owner
= unblock_from_queue (m
->waiting
);
/* Block on the condvar's queue; err is 0, ETIMEDOUT, EINTR, or a
   system error from block_self.  */
1538 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1539 scm_i_pthread_mutex_unlock (&m
->lock
);
1546 else if (err
== ETIMEDOUT
)
1551 else if (err
!= EINTR
)
1554 scm_syserror (NULL
);
/* Re-acquire the mutex for the previous owner when RELOCK was
   requested (used by wait-condition-variable).  */
1560 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1568 scm_remember_upto_here_2 (cond
, mutex
);
1570 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* No condition variable: plain unlock, waking one waiter.  */
1579 /* Change the owner of MUTEX. */
1580 t
->mutexes
= scm_delq_x (mutex
, t
->mutexes
);
1581 m
->owner
= unblock_from_queue (m
->waiting
);
1584 scm_i_pthread_mutex_unlock (&m
->lock
);
1591 SCM
scm_unlock_mutex (SCM mx
)
1593 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1596 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1597 (SCM mx
, SCM cond
, SCM timeout
),
1598 "Unlocks @var{mutex} if the calling thread owns the lock on "
1599 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1600 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1601 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1602 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1603 "with a call to @code{unlock-mutex}. Only the last call to "
1604 "@code{unlock-mutex} will actually unlock the mutex. ")
1605 #define FUNC_NAME s_scm_unlock_mutex_timed
1607 scm_t_timespec cwaittime
, *waittime
= NULL
;
1609 SCM_VALIDATE_MUTEX (1, mx
);
1610 if (! (SCM_UNBNDP (cond
)))
1612 SCM_VALIDATE_CONDVAR (2, cond
);
1614 if (! (SCM_UNBNDP (timeout
)))
1616 to_timespec (timeout
, &cwaittime
);
1617 waittime
= &cwaittime
;
1621 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1625 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1627 "Return @code{#t} if @var{obj} is a mutex.")
1628 #define FUNC_NAME s_scm_mutex_p
1630 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1634 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1636 "Return the thread owning @var{mx}, or @code{#f}.")
1637 #define FUNC_NAME s_scm_mutex_owner
1640 fat_mutex
*m
= NULL
;
1642 SCM_VALIDATE_MUTEX (1, mx
);
1643 m
= SCM_MUTEX_DATA (mx
);
1644 scm_i_pthread_mutex_lock (&m
->lock
);
1646 scm_i_pthread_mutex_unlock (&m
->lock
);
1652 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1654 "Return the lock level of mutex @var{mx}.")
1655 #define FUNC_NAME s_scm_mutex_level
1657 SCM_VALIDATE_MUTEX (1, mx
);
1658 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1662 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1664 "Returns @code{#t} if the mutex @var{mx} is locked.")
1665 #define FUNC_NAME s_scm_mutex_locked_p
1667 SCM_VALIDATE_MUTEX (1, mx
);
1668 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1673 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1675 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1676 scm_puts ("#<condition-variable ", port
);
1677 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1678 scm_puts (">", port
);
1682 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1684 "Make a new condition variable.")
1685 #define FUNC_NAME s_scm_make_condition_variable
1690 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1691 c
->waiting
= SCM_EOL
;
1692 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1693 c
->waiting
= make_queue ();
1698 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1699 (SCM cv
, SCM mx
, SCM t
),
1700 "Wait until @var{cond-var} has been signalled. While waiting, "
1701 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1702 "is locked again when this function returns. When @var{time} is given, "
1703 "it specifies a point in time where the waiting should be aborted. It "
1704 "can be either a integer as returned by @code{current-time} or a pair "
1705 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1706 "mutex is locked and @code{#f} is returned. When the condition "
1707 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1709 #define FUNC_NAME s_scm_timed_wait_condition_variable
1711 scm_t_timespec waittime
, *waitptr
= NULL
;
1713 SCM_VALIDATE_CONDVAR (1, cv
);
1714 SCM_VALIDATE_MUTEX (2, mx
);
1716 if (!SCM_UNBNDP (t
))
1718 to_timespec (t
, &waittime
);
1719 waitptr
= &waittime
;
1722 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1727 fat_cond_signal (fat_cond
*c
)
1729 unblock_from_queue (c
->waiting
);
1732 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1734 "Wake up one thread that is waiting for @var{cv}")
1735 #define FUNC_NAME s_scm_signal_condition_variable
1737 SCM_VALIDATE_CONDVAR (1, cv
);
1738 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1744 fat_cond_broadcast (fat_cond
*c
)
1746 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1750 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1752 "Wake up all threads that are waiting for @var{cv}. ")
1753 #define FUNC_NAME s_scm_broadcast_condition_variable
1755 SCM_VALIDATE_CONDVAR (1, cv
);
1756 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1761 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1763 "Return @code{#t} if @var{obj} is a condition variable.")
1764 #define FUNC_NAME s_scm_condition_variable_p
1766 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
/* NOTE(review): fragment of `struct select_args', the argument record
   passed to do_std_select below.  The struct header and its remaining
   fields (presumably nfds, result and errno_value, which do_std_select
   and scm_std_select reference) were lost in extraction -- confirm
   against the original file.  */
1777 SELECT_TYPE
*read_fds
;
1778 SELECT_TYPE
*write_fds
;
1779 SELECT_TYPE
*except_fds
;
1780 struct timeval
*timeout
;
1787 do_std_select (void *args
)
1789 struct select_args
*select_args
;
1791 select_args
= (struct select_args
*) args
;
1793 select_args
->result
=
1794 select (select_args
->nfds
,
1795 select_args
->read_fds
, select_args
->write_fds
,
1796 select_args
->except_fds
, select_args
->timeout
);
1797 select_args
->errno_value
= errno
;
/* select(2) replacement that cooperates with Guile: it also watches the
   current thread's sleep pipe so the call can be interrupted by asyncs.
   NOTE(review): several interior lines (the `my_readfds' declaration,
   nfds adjustment, result extraction and the EINTR conversion when only
   the wakeup fd fired) were lost in extraction.  */
1803 scm_std_select (int nfds
,
1804 SELECT_TYPE
*readfds
,
1805 SELECT_TYPE
*writefds
,
1806 SELECT_TYPE
*exceptfds
,
1807 struct timeval
*timeout
)
1810 int res
, eno
, wakeup_fd
;
1811 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1812 struct select_args args
;
/* Substitute an empty local set when the caller passed no read set, so
   the wakeup fd can always be added.  */
1814 if (readfds
== NULL
)
1816 FD_ZERO (&my_readfds
);
1817 readfds
= &my_readfds
;
/* Register with the sleep machinery; the write end of the pipe is used
   by other threads to wake us.  */
1820 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1823 wakeup_fd
= t
->sleep_pipe
[0];
1824 FD_SET (wakeup_fd
, readfds
);
1825 if (wakeup_fd
>= nfds
)
1829 args
.read_fds
= readfds
;
1830 args
.write_fds
= writefds
;
1831 args
.except_fds
= exceptfds
;
1832 args
.timeout
= timeout
;
1834 /* Explicitly cooperate with the GC. */
1835 scm_without_guile (do_std_select
, &args
);
1838 eno
= args
.errno_value
;
1841 scm_i_reset_sleep (t
);
/* Drain the wakeup byte and hide the wakeup fd from the caller.  */
1843 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1846 full_read (wakeup_fd
, &dummy
, 1);
1848 FD_CLR (wakeup_fd
, readfds
);
1860 /* Convenience API for blocking while in guile mode. */
1862 #if SCM_USE_PTHREAD_THREADS
1864 /* It seems reasonable to not run procedures related to mutex and condition
1865 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1866 without it, and (ii) the only potential gain would be GC latency. See
1867 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1868 for a discussion of the pros and cons. */
1871 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1873 int res
= scm_i_pthread_mutex_lock (mutex
);
1878 do_unlock (void *data
)
1880 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1884 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1886 scm_i_scm_pthread_mutex_lock (mutex
);
1887 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1891 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1894 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1896 t
->held_mutex
= mutex
;
1897 res
= scm_i_pthread_cond_wait (cond
, mutex
);
1898 t
->held_mutex
= NULL
;
1904 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1905 scm_i_pthread_mutex_t
*mutex
,
1906 const scm_t_timespec
*wt
)
1909 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1911 t
->held_mutex
= mutex
;
1912 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1913 t
->held_mutex
= NULL
;
1921 scm_std_usleep (unsigned long usecs
)
1924 tv
.tv_usec
= usecs
% 1000000;
1925 tv
.tv_sec
= usecs
/ 1000000;
1926 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1927 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
/* Sleep for SECS seconds via scm_std_select so the sleep cooperates with
   Guile asyncs.  NOTE(review): the declaration/initialisation of `tv'
   (presumably tv.tv_sec = secs; tv.tv_usec = 0;) and the return statement
   were lost in extraction -- confirm against the original file.  */
1931 scm_std_sleep (unsigned int secs
)
1936 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1942 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1944 "Return the thread that called this function.")
1945 #define FUNC_NAME s_scm_current_thread
1947 return SCM_I_CURRENT_THREAD
->handle
;
1952 scm_c_make_list (size_t n
, SCM fill
)
1956 res
= scm_cons (fill
, res
);
1960 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1962 "Return a list of all threads.")
1963 #define FUNC_NAME s_scm_all_threads
1965 /* We can not allocate while holding the thread_admin_mutex because
1966 of the way GC is done, so the list is pre-allocated before taking
the lock and then filled in place.  */
/* NOTE(review): the declarations of the iteration variables and the
   list-truncation epilogue were lost in extraction.  */
1968 int n
= thread_count
;
1970 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1972 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
/* Walk the global thread list, copying at most N handles.  */
1974 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
/* The internal signal-delivery thread is hidden from users.  */
1976 if (t
!= scm_i_signal_delivery_thread
)
1978 SCM_SETCAR (*l
, t
->handle
);
1979 l
= SCM_CDRLOC (*l
);
1984 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1989 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1991 "Return @code{#t} iff @var{thread} has exited.\n")
1992 #define FUNC_NAME s_scm_thread_exited_p
1994 return scm_from_bool (scm_c_thread_exited_p (thread
));
1999 scm_c_thread_exited_p (SCM thread
)
2000 #define FUNC_NAME s_scm_thread_exited_p
2003 SCM_VALIDATE_THREAD (1, thread
);
2004 t
= SCM_I_THREAD_DATA (thread
);
/* Condvar used by sleeping threads so they can be woken explicitly.  */
2009 static scm_i_pthread_cond_t wake_up_cond
;
/* Set to 1 once scm_init_threads has run.  */
2010 static int threads_initialized_p
= 0;
2013 /* This mutex is used by SCM_CRITICAL_SECTION_START/END. */
2015 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
/* Recursive mutex used by scm_dynwind_critical_section when the caller
   passes #f (created in scm_init_threads below).  */
2017 static SCM dynwind_critical_section_mutex
;
2020 scm_dynwind_critical_section (SCM mutex
)
2022 if (scm_is_false (mutex
))
2023 mutex
= dynwind_critical_section_mutex
;
2024 scm_dynwind_lock_mutex (mutex
);
2025 scm_dynwind_block_asyncs ();
2028 /*** Initialization */
/* General-purpose mutex for miscellaneous small critical sections.  */
2030 scm_i_pthread_mutex_t scm_i_misc_mutex
;
/* NOTE(review): the matching #endif for this conditional was lost in
   extraction.  */
2032 #if SCM_USE_PTHREAD_THREADS
/* Mutex attribute configured as PTHREAD_MUTEX_RECURSIVE in
   scm_threads_prehistory; declared as a 1-element array so it can be
   passed directly where a pointer is expected.  */
2033 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
2037 scm_threads_prehistory (void *base
)
2039 #if SCM_USE_PTHREAD_THREADS
2040 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2041 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2042 PTHREAD_MUTEX_RECURSIVE
);
2045 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2046 scm_i_pthread_mutexattr_recursive
);
2047 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2048 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
2050 guilify_self_1 ((struct GC_stack_base
*) base
);
/* Smob type tags for threads, mutexes and condition variables.  */
2053 scm_t_bits scm_tc16_thread
;
2054 scm_t_bits scm_tc16_mutex
;
2055 scm_t_bits scm_tc16_condvar
;
/* NOTE(review): the `scm_init_threads' function header and several
   interior lines (e.g. the sizeof argument of the condvar smob type and
   a free function registration) were lost in extraction.  */
2060 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2061 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2063 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2064 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2065 scm_set_smob_free (scm_tc16_mutex
, fat_mutex_free
);
2067 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2069 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
/* Finish bootstrapping the first thread and mark the subsystem live.  */
2071 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2072 guilify_self_2 (SCM_BOOL_F
);
2073 threads_initialized_p
= 1;
2075 dynwind_critical_section_mutex
= scm_make_recursive_mutex ();
2079 scm_init_threads_default_dynamic_state ()
2081 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2082 scm_i_default_dynamic_state
= state
;
/* Register the Scheme procedures defined by the SCM_DEFINE forms in this
   file (snarfed into threads.x).  NOTE(review): the function header and
   braces were lost in extraction.  */
2086 scm_init_thread_procs ()
2088 #include "libguile/threads.x"
2092 /* IA64-specific things. */
/* NOTE(review): the enclosing `#ifdef __ia64__' / per-OS `#ifdef' lines,
   function return types, braces and some declarations (e.g. `int i',
   `ucontext_t *ctx', `void *bsp') of this section were lost in
   extraction; only the #endif at the very end survives.  */
2096 # include <sys/param.h>
2097 # include <sys/pstat.h>
/* HP-UX variant: locate the RSE backing store by scanning the process's
   VM regions for the PS_RSESTACK entry.  */
2099 scm_ia64_register_backing_store_base (void)
2101 struct pst_vm_status vm_status
;
2103 while (pstat_getprocvm (&vm_status
, sizeof (vm_status
), 0, i
++) == 1)
2104 if (vm_status
.pst_type
== PS_RSESTACK
)
2105 return (void *) vm_status
.pst_vaddr
;
/* HP-UX variant: read the ar.bsp register out of a ucontext.  */
2109 scm_ia64_ar_bsp (const void *ctx
)
2112 __uc_get_ar_bsp (ctx
, &bsp
);
2113 return (void *) bsp
;
2117 # include <ucontext.h>
/* glibc/Linux variant: the backing store base is exported by libc.  */
2119 scm_ia64_register_backing_store_base (void)
2121 extern void *__libc_ia64_register_backing_store_base
;
2122 return __libc_ia64_register_backing_store_base
;
/* glibc/Linux variant: ar.bsp lives in the machine context.  */
2125 scm_ia64_ar_bsp (const void *opaque
)
2127 const ucontext_t
*ctx
= opaque
;
2128 return (void *) ctx
->uc_mcontext
.sc_ar_bsp
;
2131 #endif /* __ia64__ */