1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
3 * Free Software Foundation, Inc.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
27 #include "libguile/bdw-gc.h"
28 #include "libguile/_scm.h"
37 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
45 # include <pthread_np.h>
48 #include <sys/select.h>
54 #include "libguile/validate.h"
55 #include "libguile/root.h"
56 #include "libguile/eval.h"
57 #include "libguile/async.h"
58 #include "libguile/ports.h"
59 #include "libguile/threads.h"
60 #include "libguile/dynwind.h"
61 #include "libguile/iselect.h"
62 #include "libguile/fluids.h"
63 #include "libguile/continuations.h"
64 #include "libguile/gc.h"
65 #include "libguile/init.h"
66 #include "libguile/scmsigs.h"
67 #include "libguile/strings.h"
68 #include "libguile/weaks.h"
70 #include <full-read.h>
75 /* First some libgc shims. */
77 /* Make sure GC_fn_type is defined; it is missing from the public
78 headers of GC 7.1 and earlier. */
79 #ifndef HAVE_GC_FN_TYPE
80 typedef void * (* GC_fn_type
) (void *);
88 #ifndef GC_UNIMPLEMENTED
89 #define GC_UNIMPLEMENTED 3
92 /* Likewise struct GC_stack_base is missing before 7.1. */
93 #ifndef HAVE_GC_STACK_BASE
94 struct GC_stack_base
{
95 void * mem_base
; /* Base of memory stack. */
97 void * reg_base
; /* Base of separate register stack. */
102 GC_register_my_thread (struct GC_stack_base
*stack_base
)
104 return GC_UNIMPLEMENTED
;
108 GC_unregister_my_thread ()
112 #if !SCM_USE_PTHREAD_THREADS
113 /* No threads; we can just use GC_stackbottom. */
115 get_thread_stack_base ()
117 return GC_stackbottom
;
120 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
121 && defined PTHREAD_ATTR_GETSTACK_WORKS
122 /* This method for GNU/Linux and perhaps some other systems.
123 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
124 available on them. */
126 get_thread_stack_base ()
132 pthread_getattr_np (pthread_self (), &attr
);
133 pthread_attr_getstack (&attr
, &start
, &size
);
134 end
= (char *)start
+ size
;
136 #if SCM_STACK_GROWS_UP
143 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
144 /* This method for MacOS X.
145 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
146 but as of 2006 there's nothing obvious at apple.com. */
148 get_thread_stack_base ()
150 return pthread_get_stackaddr_np (pthread_self ());
153 #elif HAVE_PTHREAD_ATTR_GET_NP
154 /* This one is for FreeBSD 9. */
156 get_thread_stack_base ()
162 pthread_attr_init (&attr
);
163 pthread_attr_get_np (pthread_self (), &attr
);
164 pthread_attr_getstack (&attr
, &start
, &size
);
165 pthread_attr_destroy (&attr
);
167 end
= (char *)start
+ size
;
169 #if SCM_STACK_GROWS_UP
177 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
181 GC_get_stack_base (struct GC_stack_base
*stack_base
)
183 stack_base
->mem_base
= get_thread_stack_base ();
185 /* Calculate and store off the base of this thread's register
186 backing store (RBS). Unfortunately our implementation(s) of
187 scm_ia64_register_backing_store_base are only reliable for the
188 main thread. For other threads, therefore, find out the current
189 top of the RBS, and use that as a maximum. */
190 stack_base
->reg_base
= scm_ia64_register_backing_store_base ();
195 bsp
= scm_ia64_ar_bsp (&ctx
);
196 if (stack_base
->reg_base
> bsp
)
197 stack_base
->reg_base
= bsp
;
204 GC_call_with_stack_base(void * (*fn
) (struct GC_stack_base
*, void*), void *arg
)
206 struct GC_stack_base stack_base
;
208 stack_base
.mem_base
= (void*)&stack_base
;
210 /* FIXME: Untested. */
214 stack_base
.reg_base
= scm_ia64_ar_bsp (&ctx
);
218 return fn (&stack_base
, arg
);
220 #endif /* HAVE_GC_STACK_BASE */
223 /* Now define with_gc_active and with_gc_inactive. */
225 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
227 /* We have a sufficiently new libgc (7.2 or newer). */
230 with_gc_inactive (GC_fn_type func
, void *data
)
232 return GC_do_blocking (func
, data
);
236 with_gc_active (GC_fn_type func
, void *data
)
238 return GC_call_with_gc_active (func
, data
);
243 /* libgc not new enough, so never actually deactivate GC.
245 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
246 GC_call_with_gc_active. */
249 with_gc_inactive (GC_fn_type func
, void *data
)
255 with_gc_active (GC_fn_type func
, void *data
)
260 #endif /* HAVE_GC_DO_BLOCKING */
265 to_timespec (SCM t
, scm_t_timespec
*waittime
)
269 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
270 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
274 double time
= scm_to_double (t
);
275 double sec
= scm_c_truncate (time
);
277 waittime
->tv_sec
= (long) sec
;
278 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
285 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
286 the risk of false references leading to unbounded retained space as
287 described in "Bounding Space Usage of Conservative Garbage Collectors",
290 /* Make an empty queue data structure.
295 return scm_cons (SCM_EOL
, SCM_EOL
);
298 /* Put T at the back of Q and return a handle that can be used with
299 remqueue to remove T from Q again.
302 enqueue (SCM q
, SCM t
)
304 SCM c
= scm_cons (t
, SCM_EOL
);
305 SCM_CRITICAL_SECTION_START
;
306 if (scm_is_null (SCM_CDR (q
)))
309 SCM_SETCDR (SCM_CAR (q
), c
);
311 SCM_CRITICAL_SECTION_END
;
315 /* Remove the element that the handle C refers to from the queue Q. C
316 must have been returned from a call to enqueue. The return value
317 is zero when the element referred to by C has already been removed.
318 Otherwise, 1 is returned.
321 remqueue (SCM q
, SCM c
)
324 SCM_CRITICAL_SECTION_START
;
325 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
327 if (scm_is_eq (p
, c
))
329 if (scm_is_eq (c
, SCM_CAR (q
)))
330 SCM_SETCAR (q
, scm_is_eq (prev
, q
) ? SCM_EOL
: prev
);
331 SCM_SETCDR (prev
, SCM_CDR (c
));
334 SCM_SETCDR (c
, SCM_EOL
);
336 SCM_CRITICAL_SECTION_END
;
341 SCM_CRITICAL_SECTION_END
;
345 /* Remove the front-most element from the queue Q and return it.
346 Return SCM_BOOL_F when Q is empty.
352 SCM_CRITICAL_SECTION_START
;
356 SCM_CRITICAL_SECTION_END
;
361 SCM_SETCDR (q
, SCM_CDR (c
));
362 if (scm_is_null (SCM_CDR (q
)))
363 SCM_SETCAR (q
, SCM_EOL
);
364 SCM_CRITICAL_SECTION_END
;
367 SCM_SETCDR (c
, SCM_EOL
);
373 /*** Thread smob routines */
377 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
379 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
380 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
381 the struct case, hence we go via a union, and extract according to the
382 size of pthread_t. */
390 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
391 scm_i_pthread_t p
= t
->pthread
;
394 if (sizeof (p
) == sizeof (unsigned short))
396 else if (sizeof (p
) == sizeof (unsigned int))
398 else if (sizeof (p
) == sizeof (unsigned long))
403 scm_puts ("#<thread ", port
);
404 scm_uintprint (id
, 10, port
);
405 scm_puts (" (", port
);
406 scm_uintprint ((scm_t_bits
)t
, 16, port
);
407 scm_puts (")>", port
);
412 /*** Blocking on queues. */
414 /* See also scm_i_queue_async_cell for how such a block is
418 /* Put the current thread on QUEUE and go to sleep, waiting for it to
419 be woken up by a call to 'unblock_from_queue', or to be
420 interrupted. Upon return of this function, the current thread is
421 no longer on QUEUE, even when the sleep has been interrupted.
423 The caller of block_self must hold MUTEX. It will be atomically
424 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
426 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
429 When WAITTIME is not NULL, the sleep will be aborted at that time.
431 The return value of block_self is an errno value. It will be zero
432 when the sleep has been successfully completed by a call to
433 unblock_from_queue, EINTR when it has been interrupted by the
434 delivery of a system async, and ETIMEDOUT when the timeout has
437 The system asyncs themselves are not executed by block_self.
440 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
441 const scm_t_timespec
*waittime
)
443 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
447 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
452 q_handle
= enqueue (queue
, t
->handle
);
453 if (waittime
== NULL
)
454 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
456 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
458 /* When we are still on QUEUE, we have been interrupted. We
459 report this only when no other error (such as a timeout) has
462 if (remqueue (queue
, q_handle
) && err
== 0)
465 scm_i_reset_sleep (t
);
471 /* Wake up the first thread on QUEUE, if any. The awoken thread is
472 returned, or #f if the queue was empty.
475 unblock_from_queue (SCM queue
)
477 SCM thread
= dequeue (queue
);
478 if (scm_is_true (thread
))
479 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
484 /* Getting into and out of guile mode.
487 /* Key used to attach a cleanup handler to a given thread. Also, if
488 thread-local storage is unavailable, this key is used to retrieve the
489 current thread with `pthread_getspecific ()'. */
490 scm_i_pthread_key_t scm_i_thread_key
;
493 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
495 /* When thread-local storage (TLS) is available, a pointer to the
496 current-thread object is kept in TLS. Note that storing the thread-object
497 itself in TLS (rather than a pointer to some malloc'd memory) is not
498 possible since thread objects may live longer than the actual thread they
500 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
502 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
505 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
506 static scm_i_thread
*all_threads
= NULL
;
507 static int thread_count
;
509 static SCM scm_i_default_dynamic_state
;
511 /* Run when a fluid is collected. */
513 scm_i_reset_fluid (size_t n
)
517 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
518 for (t
= all_threads
; t
; t
= t
->next_thread
)
519 if (SCM_I_DYNAMIC_STATE_P (t
->dynamic_state
))
521 SCM v
= SCM_I_DYNAMIC_STATE_FLUIDS (t
->dynamic_state
);
523 if (n
< SCM_SIMPLE_VECTOR_LENGTH (v
))
524 SCM_SIMPLE_VECTOR_SET (v
, n
, SCM_UNDEFINED
);
526 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
529 /* Perform first stage of thread initialisation, in non-guile mode.
532 guilify_self_1 (struct GC_stack_base
*base
)
536 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
537 before allocating anything in this thread, because allocation could
538 cause GC to run, and GC could cause finalizers, which could invoke
539 Scheme functions, which need the current thread to be set. */
541 t
.pthread
= scm_i_pthread_self ();
542 t
.handle
= SCM_BOOL_F
;
543 t
.result
= SCM_BOOL_F
;
544 t
.cleanup_handler
= SCM_BOOL_F
;
547 t
.join_queue
= SCM_EOL
;
548 t
.dynamic_state
= SCM_BOOL_F
;
549 t
.dynwinds
= SCM_EOL
;
550 t
.active_asyncs
= SCM_EOL
;
552 t
.pending_asyncs
= 1;
553 t
.critical_section_level
= 0;
554 t
.base
= base
->mem_base
;
556 t
.register_backing_store_base
= base
->reg_base
;
558 t
.continuation_root
= SCM_EOL
;
559 t
.continuation_base
= t
.base
;
560 scm_i_pthread_cond_init (&t
.sleep_cond
, NULL
);
561 t
.sleep_mutex
= NULL
;
562 t
.sleep_object
= SCM_BOOL_F
;
565 if (pipe2 (t
.sleep_pipe
, O_CLOEXEC
) != 0)
566 /* FIXME: Error conditions during the initialization phase are handled
567 gracelessly since public functions such as `scm_init_guile ()'
568 currently have type `void'. */
571 scm_i_pthread_mutex_init (&t
.admin_mutex
, NULL
);
572 t
.current_mark_stack_ptr
= NULL
;
573 t
.current_mark_stack_limit
= NULL
;
578 /* The switcheroo. */
580 scm_i_thread
*t_ptr
= &t
;
583 t_ptr
= GC_malloc (sizeof (scm_i_thread
));
584 memcpy (t_ptr
, &t
, sizeof t
);
586 scm_i_pthread_setspecific (scm_i_thread_key
, t_ptr
);
588 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
589 /* Cache the current thread in TLS for faster lookup. */
590 scm_i_current_thread
= t_ptr
;
593 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
594 t_ptr
->next_thread
= all_threads
;
597 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
603 /* Perform second stage of thread initialisation, in guile mode.
606 guilify_self_2 (SCM parent
)
608 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
612 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
614 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
615 t
->continuation_base
= t
->base
;
618 if (scm_is_true (parent
))
619 t
->dynamic_state
= scm_make_dynamic_state (parent
);
621 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
623 t
->join_queue
= make_queue ();
626 /* See note in finalizers.c:queue_finalizer_async(). */
627 GC_invoke_finalizers ();
633 /* We implement our own mutex type since we want them to be 'fair', we
634 want to do fancy things while waiting for them (like running
635 asyncs) and we might want to add things that are nice for
640 scm_i_pthread_mutex_t lock
;
642 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
644 int recursive
; /* allow recursive locking? */
645 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
646 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
647 owned by the current thread? */
649 SCM waiting
; /* the threads waiting for this mutex. */
652 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
653 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
656 call_cleanup (void *data
)
659 return scm_call_0 (*proc_p
);
662 /* Perform thread tear-down, in guile mode.
665 do_thread_exit (void *v
)
667 scm_i_thread
*t
= (scm_i_thread
*) v
;
669 /* Ensure the signal handling thread has been launched, because we might be
670 shutting it down. This needs to be done in Guile mode. */
671 scm_i_ensure_signal_delivery_thread ();
673 if (!scm_is_false (t
->cleanup_handler
))
675 SCM ptr
= t
->cleanup_handler
;
677 t
->cleanup_handler
= SCM_BOOL_F
;
678 t
->result
= scm_internal_catch (SCM_BOOL_T
,
680 scm_handle_by_message_noexit
, NULL
);
683 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
686 close (t
->sleep_pipe
[0]);
687 close (t
->sleep_pipe
[1]);
688 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
691 while (!scm_is_null (t
->mutexes
))
693 SCM mutex
= SCM_WEAK_PAIR_CAR (t
->mutexes
);
695 if (!SCM_UNBNDP (mutex
))
697 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
699 scm_i_pthread_mutex_lock (&m
->lock
);
701 /* Check whether T owns MUTEX. This is usually the case, unless
702 T abandoned MUTEX; in that case, T is no longer its owner (see
703 `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
704 if (scm_is_eq (m
->owner
, t
->handle
))
705 unblock_from_queue (m
->waiting
);
707 scm_i_pthread_mutex_unlock (&m
->lock
);
710 t
->mutexes
= SCM_WEAK_PAIR_CDR (t
->mutexes
);
713 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
719 do_thread_exit_trampoline (struct GC_stack_base
*sb
, void *v
)
721 /* Won't hurt if we are already registered. */
722 #if SCM_USE_PTHREAD_THREADS
723 GC_register_my_thread (sb
);
726 return scm_with_guile (do_thread_exit
, v
);
730 on_thread_exit (void *v
)
732 /* This handler is executed in non-guile mode. */
733 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
735 /* If we were canceled, we were unable to clear `t->guile_mode', so do
739 /* If this thread was cancelled while doing a cond wait, it will
740 still have a mutex locked, so we unlock it here. */
743 scm_i_pthread_mutex_unlock (t
->held_mutex
);
744 t
->held_mutex
= NULL
;
747 /* Reinstate the current thread for purposes of scm_with_guile
748 guile-mode cleanup handlers. Only really needed in the non-TLS
749 case but it doesn't hurt to be consistent. */
750 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
752 /* Scheme-level thread finalizers and other cleanup needs to happen in
754 GC_call_with_stack_base (do_thread_exit_trampoline
, t
);
756 /* Removing ourself from the list of all threads needs to happen in
757 non-guile mode since all SCM values on our stack become
758 unprotected once we are no longer in the list. */
759 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
760 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
763 *tp
= t
->next_thread
;
766 t
->next_thread
= NULL
;
772 /* If there's only one other thread, it could be the signal delivery
773 thread, so we need to notify it to shut down by closing its read pipe.
774 If it's not the signal delivery thread, then closing the read pipe isn't
776 if (thread_count
<= 1)
777 scm_i_close_signal_pipe ();
779 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
781 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
783 #if SCM_USE_PTHREAD_THREADS
784 GC_unregister_my_thread ();
788 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
791 init_thread_key (void)
793 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
796 /* Perform any initializations necessary to make the current thread
797 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
800 BASE is the stack base to use with GC.
802 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
803 which case the default dynamic state is used.
805 Returns zero when the thread was known to guile already; otherwise
808 Note that it could be the case that the thread was known
809 to Guile, but not in guile mode (because we are within a
810 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
811 be sure. New threads are put into guile mode implicitly. */
814 scm_i_init_thread_for_guile (struct GC_stack_base
*base
, SCM parent
)
816 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
818 if (SCM_I_CURRENT_THREAD
)
820 /* Thread is already known to Guile.
826 /* This thread has not been guilified yet.
829 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
830 if (scm_initialized_p
== 0)
832 /* First thread ever to enter Guile. Run the full
835 scm_i_init_guile (base
);
837 #if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
838 /* Allow other threads to come in later. */
839 GC_allow_register_threads ();
842 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
846 /* Guile is already initialized, but this thread enters it for
847 the first time. Only initialize this thread.
849 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
851 /* Register this thread with libgc. */
852 #if SCM_USE_PTHREAD_THREADS
853 GC_register_my_thread (base
);
856 guilify_self_1 (base
);
857 guilify_self_2 (parent
);
866 struct GC_stack_base stack_base
;
868 if (GC_get_stack_base (&stack_base
) == GC_SUCCESS
)
869 scm_i_init_thread_for_guile (&stack_base
,
870 scm_i_default_dynamic_state
);
873 fprintf (stderr
, "Failed to get stack base for current thread.\n");
878 struct with_guile_args
886 with_guile_trampoline (void *data
)
888 struct with_guile_args
*args
= data
;
890 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
894 with_guile_and_parent (struct GC_stack_base
*base
, void *data
)
899 struct with_guile_args
*args
= data
;
901 new_thread
= scm_i_init_thread_for_guile (base
, args
->parent
);
902 t
= SCM_I_CURRENT_THREAD
;
905 /* We are in Guile mode. */
906 assert (t
->guile_mode
);
908 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
910 /* Leave Guile mode. */
913 else if (t
->guile_mode
)
915 /* Already in Guile mode. */
916 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
920 /* We are not in Guile mode, either because we are not within a
921 scm_with_guile, or because we are within a scm_without_guile.
923 This call to scm_with_guile() could happen from anywhere on the
924 stack, and in particular lower on the stack than when it was
925 when this thread was first guilified. Thus, `base' must be
927 #if SCM_STACK_GROWS_UP
928 if (SCM_STACK_PTR (base
->mem_base
) < t
->base
)
929 t
->base
= SCM_STACK_PTR (base
->mem_base
);
931 if (SCM_STACK_PTR (base
->mem_base
) > t
->base
)
932 t
->base
= SCM_STACK_PTR (base
->mem_base
);
936 res
= with_gc_active (with_guile_trampoline
, args
);
943 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
945 struct with_guile_args args
;
949 args
.parent
= parent
;
951 return GC_call_with_stack_base (with_guile_and_parent
, &args
);
955 scm_with_guile (void *(*func
)(void *), void *data
)
957 return scm_i_with_guile_and_parent (func
, data
,
958 scm_i_default_dynamic_state
);
962 scm_without_guile (void *(*func
)(void *), void *data
)
965 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
969 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
970 result
= with_gc_inactive (func
, data
);
971 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
974 /* Otherwise we're not in guile mode, so nothing to do. */
975 result
= func (data
);
981 /*** Thread creation */
988 scm_i_pthread_mutex_t mutex
;
989 scm_i_pthread_cond_t cond
;
993 really_launch (void *d
)
995 launch_data
*data
= (launch_data
*)d
;
996 SCM thunk
= data
->thunk
, handler
= data
->handler
;
999 t
= SCM_I_CURRENT_THREAD
;
1001 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
1002 data
->thread
= scm_current_thread ();
1003 scm_i_pthread_cond_signal (&data
->cond
);
1004 scm_i_pthread_mutex_unlock (&data
->mutex
);
1006 if (SCM_UNBNDP (handler
))
1007 t
->result
= scm_call_0 (thunk
);
1009 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
1015 launch_thread (void *d
)
1017 launch_data
*data
= (launch_data
*)d
;
1018 scm_i_pthread_detach (scm_i_pthread_self ());
1019 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
1023 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
1024 (SCM thunk
, SCM handler
),
1025 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
1026 "returning a new thread object representing the thread. The procedure\n"
1027 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
1029 "When @var{handler} is specified, then @var{thunk} is called from\n"
1030 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
1031 "handler. This catch is established inside the continuation barrier.\n"
1033 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
1034 "the @emph{exit value} of the thread and the thread is terminated.")
1035 #define FUNC_NAME s_scm_call_with_new_thread
1041 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
1042 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
1043 handler
, SCM_ARG2
, FUNC_NAME
);
1045 GC_collect_a_little ();
1046 data
.parent
= scm_current_dynamic_state ();
1048 data
.handler
= handler
;
1049 data
.thread
= SCM_BOOL_F
;
1050 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1051 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1053 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1054 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
1057 scm_i_pthread_mutex_unlock (&data
.mutex
);
1059 scm_syserror (NULL
);
1062 while (scm_is_false (data
.thread
))
1063 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1065 scm_i_pthread_mutex_unlock (&data
.mutex
);
1073 scm_t_catch_body body
;
1075 scm_t_catch_handler handler
;
1078 scm_i_pthread_mutex_t mutex
;
1079 scm_i_pthread_cond_t cond
;
1083 really_spawn (void *d
)
1085 spawn_data
*data
= (spawn_data
*)d
;
1086 scm_t_catch_body body
= data
->body
;
1087 void *body_data
= data
->body_data
;
1088 scm_t_catch_handler handler
= data
->handler
;
1089 void *handler_data
= data
->handler_data
;
1090 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1092 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
1093 data
->thread
= scm_current_thread ();
1094 scm_i_pthread_cond_signal (&data
->cond
);
1095 scm_i_pthread_mutex_unlock (&data
->mutex
);
1097 if (handler
== NULL
)
1098 t
->result
= body (body_data
);
1100 t
->result
= scm_internal_catch (SCM_BOOL_T
,
1102 handler
, handler_data
);
1108 spawn_thread (void *d
)
1110 spawn_data
*data
= (spawn_data
*)d
;
1111 scm_i_pthread_detach (scm_i_pthread_self ());
1112 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
1117 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
1118 scm_t_catch_handler handler
, void *handler_data
)
1124 data
.parent
= scm_current_dynamic_state ();
1126 data
.body_data
= body_data
;
1127 data
.handler
= handler
;
1128 data
.handler_data
= handler_data
;
1129 data
.thread
= SCM_BOOL_F
;
1130 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1131 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1133 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1134 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1137 scm_i_pthread_mutex_unlock (&data
.mutex
);
1139 scm_syserror (NULL
);
1142 while (scm_is_false (data
.thread
))
1143 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1145 scm_i_pthread_mutex_unlock (&data
.mutex
);
1147 assert (SCM_I_IS_THREAD (data
.thread
));
1152 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1154 "Move the calling thread to the end of the scheduling queue.")
1155 #define FUNC_NAME s_scm_yield
1157 return scm_from_bool (scm_i_sched_yield ());
1161 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1163 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1164 "cannot be the current thread, and if @var{thread} has already terminated or "
1165 "been signaled to terminate, this function is a no-op.")
1166 #define FUNC_NAME s_scm_cancel_thread
1168 scm_i_thread
*t
= NULL
;
1170 SCM_VALIDATE_THREAD (1, thread
);
1171 t
= SCM_I_THREAD_DATA (thread
);
1172 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1176 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1177 scm_i_pthread_cancel (t
->pthread
);
1180 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1182 return SCM_UNSPECIFIED
;
1186 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1187 (SCM thread
, SCM proc
),
1188 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1189 "This handler will be called when the thread exits.")
1190 #define FUNC_NAME s_scm_set_thread_cleanup_x
1194 SCM_VALIDATE_THREAD (1, thread
);
1195 if (!scm_is_false (proc
))
1196 SCM_VALIDATE_THUNK (2, proc
);
1198 t
= SCM_I_THREAD_DATA (thread
);
1199 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1201 if (!(t
->exited
|| t
->canceled
))
1202 t
->cleanup_handler
= proc
;
1204 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1206 return SCM_UNSPECIFIED
;
1210 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1212 "Return the cleanup handler installed for the thread @var{thread}.")
1213 #define FUNC_NAME s_scm_thread_cleanup
1218 SCM_VALIDATE_THREAD (1, thread
);
1220 t
= SCM_I_THREAD_DATA (thread
);
1221 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1222 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1223 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1229 SCM
scm_join_thread (SCM thread
)
1231 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1234 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1235 (SCM thread
, SCM timeout
, SCM timeoutval
),
1236 "Suspend execution of the calling thread until the target @var{thread} "
1237 "terminates, unless the target @var{thread} has already terminated. ")
1238 #define FUNC_NAME s_scm_join_thread_timed
1241 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1242 SCM res
= SCM_BOOL_F
;
1244 if (! (SCM_UNBNDP (timeoutval
)))
1247 SCM_VALIDATE_THREAD (1, thread
);
1248 if (scm_is_eq (scm_current_thread (), thread
))
1249 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1251 t
= SCM_I_THREAD_DATA (thread
);
1252 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1254 if (! SCM_UNBNDP (timeout
))
1256 to_timespec (timeout
, &ctimeout
);
1257 timeout_ptr
= &ctimeout
;
1266 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1276 else if (err
== ETIMEDOUT
)
1279 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1281 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1283 /* Check for exit again, since we just released and
1284 reacquired the admin mutex, before the next block_self
1285 call (which would block forever if t has already
1295 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1301 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1303 "Return @code{#t} if @var{obj} is a thread.")
1304 #define FUNC_NAME s_scm_thread_p
1306 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1312 fat_mutex_free (SCM mx
)
1314 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1315 scm_i_pthread_mutex_destroy (&m
->lock
);
1320 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1322 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1323 scm_puts ("#<mutex ", port
);
1324 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1325 scm_puts (">", port
);
1330 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1335 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1336 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
1337 m
->owner
= SCM_BOOL_F
;
1340 m
->recursive
= recursive
;
1341 m
->unchecked_unlock
= unchecked_unlock
;
1342 m
->allow_external_unlock
= external_unlock
;
1344 m
->waiting
= SCM_EOL
;
1345 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1346 m
->waiting
= make_queue ();
1350 SCM
scm_make_mutex (void)
1352 return scm_make_mutex_with_flags (SCM_EOL
);
1355 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1356 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1357 SCM_SYMBOL (recursive_sym
, "recursive");
1359 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1361 "Create a new mutex. ")
1362 #define FUNC_NAME s_scm_make_mutex_with_flags
1364 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1367 while (! scm_is_null (ptr
))
1369 SCM flag
= SCM_CAR (ptr
);
1370 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1371 unchecked_unlock
= 1;
1372 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1373 external_unlock
= 1;
1374 else if (scm_is_eq (flag
, recursive_sym
))
1377 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1378 ptr
= SCM_CDR (ptr
);
1380 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1384 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1386 "Create a new recursive mutex. ")
1387 #define FUNC_NAME s_scm_make_recursive_mutex
1389 return make_fat_mutex (1, 0, 0);
1393 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1396 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1398 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1400 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1401 SCM err
= SCM_BOOL_F
;
1403 struct timeval current_time
;
1405 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1411 m
->owner
= new_owner
;
1414 if (SCM_I_IS_THREAD (new_owner
))
1416 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1418 /* FIXME: The order in which `t->admin_mutex' and
1419 `m->lock' are taken differs from that in
1420 `on_thread_exit', potentially leading to deadlocks. */
1421 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1423 /* Only keep a weak reference to MUTEX so that it's not
1424 retained when not referenced elsewhere (bug #27450).
1425 The weak pair itself is eventually removed when MUTEX
1426 is unlocked. Note that `t->mutexes' lists mutexes
1427 currently held by T, so it should be small. */
1428 t
->mutexes
= scm_weak_car_pair (mutex
, t
->mutexes
);
1430 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1435 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1437 m
->owner
= new_owner
;
1438 err
= scm_cons (scm_abandoned_mutex_error_key
,
1439 scm_from_locale_string ("lock obtained on abandoned "
1444 else if (scm_is_eq (m
->owner
, new_owner
))
1453 err
= scm_cons (scm_misc_error_key
,
1454 scm_from_locale_string ("mutex already locked "
1462 if (timeout
!= NULL
)
1464 gettimeofday (¤t_time
, NULL
);
1465 if (current_time
.tv_sec
> timeout
->tv_sec
||
1466 (current_time
.tv_sec
== timeout
->tv_sec
&&
1467 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1473 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1474 scm_i_pthread_mutex_unlock (&m
->lock
);
1476 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1479 scm_i_pthread_mutex_unlock (&m
->lock
);
/* Lock MX with no timeout and the current thread as owner.  */
SCM
scm_lock_mutex (SCM mx)
{
  return scm_lock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
	    (SCM m, SCM timeout, SCM owner),
	    "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
	    "thread blocks until the mutex becomes available. The function\n"
	    "returns when the calling thread owns the lock on @var{m}.\n"
	    "Locking a mutex that a thread already owns will succeed right\n"
	    "away and will not block the thread. That is, Guile's mutexes\n"
	    "are @emph{recursive}.")
#define FUNC_NAME s_scm_lock_mutex_timed
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, m);

  /* A #f or unbound TIMEOUT means "wait forever".  */
  if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
    {
      to_timespec (timeout, &cwaittime);
      waittime = &cwaittime;
    }

  if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
    SCM_VALIDATE_THREAD (3, owner);

  /* fat_mutex_lock reports errors as a (key . message) pair rather
     than throwing, so that it can be called with m->lock held.  */
  exception = fat_mutex_lock (m, waittime, owner, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
/* void-returning wrappers suitable as dynwind handlers.  */
static void
lock_mutex_return_void (SCM mx)
{
  (void) scm_lock_mutex (mx);
}

static void
unlock_mutex_return_void (SCM mx)
{
  (void) scm_unlock_mutex (mx);
}
/* Within the current dynwind context, lock MUTEX on (re)entry and
   unlock it on exit, including non-local exits.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
				       SCM_F_WIND_EXPLICITLY);
}
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
	    (SCM mutex),
	    "Try to lock @var{mutex}. If the mutex is already locked by someone "
	    "else, return @code{#f}.  Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
  SCM exception;
  int ret = 0;
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mutex);

  /* A timeout of 0 makes fat_mutex_lock fail immediately instead of
     blocking, which implements the "try" semantics.  */
  to_timespec (scm_from_int(0), &cwaittime);
  waittime = &cwaittime;

  exception = fat_mutex_lock (mutex, waittime, SCM_UNDEFINED, &ret);
  if (!scm_is_false (exception))
    scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
  return ret ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
/*** Fat condition variables */

/* A condition variable implemented on top of the block_self /
   unblock_from_queue machinery, wrapped in a smob.  */
typedef struct {
  scm_i_pthread_mutex_t lock;   /* protects `waiting' */
  SCM waiting;                  /* the threads waiting for this condition. */
} fat_cond;

#define SCM_CONDVARP(x)       SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x)   ((fat_cond *) SCM_SMOB_DATA (x))
/* Unlock MUTEX; when COND is bound, atomically wait on COND (with
   optional absolute WAITTIME) and, if RELOCK, re-acquire MUTEX before
   returning.  Returns non-zero on success / signal, zero on timeout.
   NOTE(review): interior lines of this function were lost in
   extraction; reconstructed from upstream — verify against the
   repository copy.  */
static int
fat_mutex_unlock (SCM mutex, SCM cond,
		  const scm_t_timespec *waittime, int relock)
{
  SCM owner;
  fat_mutex *m = SCM_MUTEX_DATA (mutex);
  fat_cond *c = NULL;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  int err = 0, ret = 0;

  scm_i_scm_pthread_mutex_lock (&m->lock);

  owner = m->owner;

  if (!scm_is_eq (owner, t->handle))
    {
      if (m->level == 0)
	{
	  /* Unlocking an unlocked mutex is only allowed when the
	     mutex was created with `unchecked-unlock'.  */
	  if (!m->unchecked_unlock)
	    {
	      scm_i_pthread_mutex_unlock (&m->lock);
	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
	    }
	  owner = t->handle;
	}
      else if (!m->allow_external_unlock)
	{
	  /* Unlocking someone else's mutex requires the
	     `allow-external-unlock' flag.  */
	  scm_i_pthread_mutex_unlock (&m->lock);
	  scm_misc_error (NULL, "mutex not locked by current thread", SCM_EOL);
	}
    }

  if (! (SCM_UNBNDP (cond)))
    {
      /* unlock-and-wait: used by wait-condition-variable.  */
      c = SCM_CONDVAR_DATA (cond);
      while (1)
	{
	  int brk = 0;

	  if (m->level > 0)
	    m->level--;
	  if (m->level == 0)
	    {
	      /* Change the owner of MUTEX.  */
	      t->mutexes = scm_delq_x (mutex, t->mutexes);
	      m->owner = unblock_from_queue (m->waiting);
	    }

	  t->block_asyncs++;

	  err = block_self (c->waiting, cond, &m->lock, waittime);
	  scm_i_pthread_mutex_unlock (&m->lock);

	  if (err == 0)
	    {
	      ret = 1;
	      brk = 1;
	    }
	  else if (err == ETIMEDOUT)
	    {
	      ret = 0;
	      brk = 1;
	    }
	  else if (err != EINTR)
	    {
	      errno = err;
	      scm_syserror (NULL);
	    }

	  if (brk)
	    {
	      if (relock)
		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
	      t->block_asyncs--;
	      break;
	    }

	  /* EINTR: run pending asyncs, then retry the wait.  */
	  t->block_asyncs--;
	  scm_async_click ();

	  scm_remember_upto_here_2 (cond, mutex);

	  scm_i_scm_pthread_mutex_lock (&m->lock);
	}
    }
  else
    {
      /* Plain unlock.  */
      if (m->level > 0)
	m->level--;
      if (m->level == 0)
	{
	  /* Change the owner of MUTEX.  */
	  t->mutexes = scm_delq_x (mutex, t->mutexes);
	  m->owner = unblock_from_queue (m->waiting);
	}

      scm_i_pthread_mutex_unlock (&m->lock);
      ret = 1;
    }

  return ret;
}
/* Unlock MX without waiting on any condition variable.  */
SCM
scm_unlock_mutex (SCM mx)
{
  return scm_unlock_mutex_timed (mx, SCM_UNDEFINED, SCM_UNDEFINED);
}
SCM_DEFINE (scm_unlock_mutex_timed, "unlock-mutex", 1, 2, 0,
	    (SCM mx, SCM cond, SCM timeout),
	    "Unlocks @var{mutex} if the calling thread owns the lock on "
	    "@var{mutex}.  Calling unlock-mutex on a mutex not owned by the current "
	    "thread results in undefined behaviour.  Once a mutex has been unlocked, "
	    "one thread blocked on @var{mutex} is awakened and grabs the mutex "
	    "lock.  Every call to @code{lock-mutex} by this thread must be matched "
	    "with a call to @code{unlock-mutex}.  Only the last call to "
	    "@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex_timed
{
  scm_t_timespec cwaittime, *waittime = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  if (! (SCM_UNBNDP (cond)))
    {
      SCM_VALIDATE_CONDVAR (2, cond);

      /* TIMEOUT is only meaningful together with COND.  */
      if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
	{
	  to_timespec (timeout, &cwaittime);
	  waittime = &cwaittime;
	}
    }

  /* relock = 0: a plain unlock does not re-acquire the mutex.  */
  return fat_mutex_unlock (mx, cond, waittime, 0) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
SCM_DEFINE (scm_mutex_p, "mutex?", 1, 0, 0,
	    (SCM obj),
	    "Return @code{#t} if @var{obj} is a mutex.")
#define FUNC_NAME s_scm_mutex_p
{
  return SCM_MUTEXP (obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
	    (SCM mx),
	    "Return the thread owning @var{mx}, or @code{#f}.")
#define FUNC_NAME s_scm_mutex_owner
{
  SCM owner;
  fat_mutex *m = NULL;

  SCM_VALIDATE_MUTEX (1, mx);
  m = SCM_MUTEX_DATA (mx);
  /* Read `owner' under the fat-mutex lock for a consistent snapshot.  */
  scm_i_pthread_mutex_lock (&m->lock);
  owner = m->owner;
  scm_i_pthread_mutex_unlock (&m->lock);

  return owner;
}
#undef FUNC_NAME
SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
	    (SCM mx),
	    "Return the lock level of mutex @var{mx}.")
#define FUNC_NAME s_scm_mutex_level
{
  SCM_VALIDATE_MUTEX (1, mx);
  /* `level' counts recursive acquisitions; 0 means unlocked.  */
  return scm_from_int (SCM_MUTEX_DATA(mx)->level);
}
#undef FUNC_NAME
SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
	    (SCM mx),
	    "Returns @code{#t} if the mutex @var{mx} is locked.")
#define FUNC_NAME s_scm_mutex_locked_p
{
  SCM_VALIDATE_MUTEX (1, mx);
  return SCM_MUTEX_DATA (mx)->level > 0 ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
/* Smob printer for condition variables: prints the data pointer in
   hex as a unique identifier.  */
static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
  fat_cond *c = SCM_CONDVAR_DATA (cv);
  scm_puts ("#<condition-variable ", port);
  scm_uintprint ((scm_t_bits)c, 16, port);
  scm_puts (">", port);
  return 1;
}
SCM_DEFINE (scm_make_condition_variable, "make-condition-variable", 0, 0, 0,
	    (void),
	    "Make a new condition variable.")
#define FUNC_NAME s_scm_make_condition_variable
{
  SCM cv;
  fat_cond *c;

  c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
  /* Initialize `waiting' to a GC-safe value before the smob exists;
     the real queue is allocated afterwards (allocation can trigger GC,
     which would otherwise scan an uninitialized field).  */
  c->waiting = SCM_EOL;
  SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
  c->waiting = make_queue ();

  return cv;
}
#undef FUNC_NAME
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
	    (SCM cv, SCM mx, SCM t),
	    "Wait until condition variable @var{cv} has been signalled.  While waiting, "
	    "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
	    "is locked again when this function returns.  When @var{t} is given, "
	    "it specifies a point in time where the waiting should be aborted.  It "
	    "can be either a integer as returned by @code{current-time} or a pair "
	    "as returned by @code{gettimeofday}.  When the waiting is aborted the "
	    "mutex is locked and @code{#f} is returned.  When the condition "
	    "variable is in fact signalled, the mutex is also locked and @code{#t} "
	    "is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
  scm_t_timespec waittime, *waitptr = NULL;

  SCM_VALIDATE_CONDVAR (1, cv);
  SCM_VALIDATE_MUTEX (2, mx);

  if (!SCM_UNBNDP (t))
    {
      to_timespec (t, &waittime);
      waitptr = &waittime;
    }

  /* relock = 1: the mutex is re-acquired before returning, whether
     the wait was signalled or timed out.  */
  return fat_mutex_unlock (mx, cv, waitptr, 1) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
/* Wake at most one thread waiting on C.  */
static void
fat_cond_signal (fat_cond *c)
{
  unblock_from_queue (c->waiting);
}
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up one thread that is waiting for @var{cv}")
#define FUNC_NAME s_scm_signal_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_signal (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME
/* Wake every thread waiting on C; unblock_from_queue returns #f once
   the queue is empty.  */
static void
fat_cond_broadcast (fat_cond *c)
{
  while (scm_is_true (unblock_from_queue (c->waiting)))
    ;
}
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
	    (SCM cv),
	    "Wake up all threads that are waiting for @var{cv}. ")
#define FUNC_NAME s_scm_broadcast_condition_variable
{
  SCM_VALIDATE_CONDVAR (1, cv);
  fat_cond_broadcast (SCM_CONDVAR_DATA (cv));
  return SCM_BOOL_T;
}
#undef FUNC_NAME
SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
	    (SCM obj),
	    "Return @code{#t} if @var{obj} is a condition variable.")
#define FUNC_NAME s_scm_condition_variable_p
{
  return SCM_CONDVARP(obj) ? SCM_BOOL_T : SCM_BOOL_F;
}
#undef FUNC_NAME
1865 struct timeval
*timeout
;
/* Thunk run outside of guile mode (via scm_without_guile): performs
   the actual select(2) call and stashes result and errno in ARGS so
   the caller can inspect them after re-entering guile mode.  */
static void *
do_std_select (void *args)
{
  struct select_args *select_args;

  select_args = (struct select_args *) args;

  select_args->result =
    select (select_args->nfds,
	    select_args->read_fds, select_args->write_fds,
	    select_args->except_fds, select_args->timeout);
  /* Save errno immediately; later calls may clobber it.  */
  select_args->errno_value = errno;

  return NULL;
}
/* select(2) replacement that stays responsive to Guile asyncs: the
   thread's sleep pipe is added to the read set so that another thread
   can interrupt the wait, and the actual select runs outside guile
   mode so GC is not blocked.
   NOTE(review): interior lines were lost in extraction; reconstructed
   from upstream — verify against the repository copy.  */
int
scm_std_select (int nfds,
		fd_set *readfds,
		fd_set *writefds,
		fd_set *exceptfds,
		struct timeval *timeout)
{
  fd_set my_readfds;
  int res, eno, wakeup_fd;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;
  struct select_args args;

  if (readfds == NULL)
    {
      /* We need a read set to register the wakeup fd in.  */
      FD_ZERO (&my_readfds);
      readfds = &my_readfds;
    }

  /* Register our sleep pipe; retry (serving asyncs) until no async is
     pending.  */
  while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
    SCM_TICK;

  wakeup_fd = t->sleep_pipe[0];
  FD_SET (wakeup_fd, readfds);
  if (wakeup_fd >= nfds)
    nfds = wakeup_fd+1;

  args.nfds = nfds;
  args.read_fds = readfds;
  args.write_fds = writefds;
  args.except_fds = exceptfds;
  args.timeout = timeout;

  /* Explicitly cooperate with the GC.  */
  scm_without_guile (do_std_select, &args);

  res = args.result;
  eno = args.errno_value;

  scm_i_reset_sleep (t);

  if (res > 0 && FD_ISSET (wakeup_fd, readfds))
    {
      /* We were woken via the sleep pipe: drain the byte and hide the
	 wakeup fd from the caller.  */
      char dummy;
      full_read (wakeup_fd, &dummy, 1);

      FD_CLR (wakeup_fd, readfds);
      res -= 1;
      if (res == 0)
	{
	  eno = EINTR;
	  res = -1;
	}
    }
  errno = eno;
  return res;
}
1945 /* Convenience API for blocking while in guile mode. */
1947 #if SCM_USE_PTHREAD_THREADS
1949 /* It seems reasonable to not run procedures related to mutex and condition
1950 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1951 without it, and (ii) the only potential gain would be GC latency. See
1952 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1953 for a discussion of the pros and cons. */
/* Public wrapper around the internal mutex-lock primitive; see the
   comment above about not running this within GC_do_blocking.  */
int
scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  int res = scm_i_pthread_mutex_lock (mutex);
  return res;
}
/* dynwind unwind handler: unlock the pthread mutex passed as DATA.  */
static void
do_unlock (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}

/* Lock MUTEX and arrange for it to be unlocked when the current
   dynwind context is left, including via non-local exit.  */
void
scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
/* Condition wait that records the held mutex in the thread structure,
   so that on_thread_exit / signal handling can release it if the
   thread is interrupted while blocked.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
/* Timed variant of scm_pthread_cond_wait; WT is an absolute time.
   Records the held mutex for the same reason as above.  */
int
scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
			    scm_i_pthread_mutex_t *mutex,
			    const scm_t_timespec *wt)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  t->held_mutex = mutex;
  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
  t->held_mutex = NULL;

  return res;
}
/* dynwind unwind handler: unlock the mutex in DATA and re-enable
   asyncs for the current thread.  */
static void
do_unlock_with_asyncs (void *data)
{
  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
  SCM_I_CURRENT_THREAD->block_asyncs--;
}

/* Lock MUTEX with asyncs blocked for as long as it is held; both the
   unlock and the async re-enable happen on dynwind exit.  */
void
scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t *mutex)
{
  SCM_I_CURRENT_THREAD->block_asyncs++;
  scm_i_scm_pthread_mutex_lock (mutex);
  scm_dynwind_unwind_handler (do_unlock_with_asyncs, mutex,
			      SCM_F_WIND_EXPLICITLY);
}
/* usleep(3) replacement built on scm_std_select, so it cooperates
   with asyncs/GC.  Returns the remaining time in microseconds as left
   in TV by select.  */
unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}
/* sleep(3) replacement built on scm_std_select; returns the number of
   whole seconds left unslept.  */
unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}
SCM_DEFINE (scm_current_thread, "current-thread", 0, 0, 0,
	    (void),
	    "Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
  return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
/* Build a list of N copies of FILL.  */
static SCM
scm_c_make_list (size_t n, SCM fill)
{
  SCM res = SCM_EOL;
  while (n-- > 0)
    res = scm_cons (fill, res);
  return res;
}
SCM_DEFINE (scm_all_threads, "all-threads", 0, 0, 0,
	    (void),
	    "Return a list of all threads.")
#define FUNC_NAME s_scm_all_threads
{
  /* We can not allocate while holding the thread_admin_mutex because
     of the way GC is done.
  */
  int n = thread_count;
  scm_i_thread *t;
  /* Pre-allocate the list, then fill its cars in place under the lock.  */
  SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;

  scm_i_pthread_mutex_lock (&thread_admin_mutex);
  l = &list;
  for (t = all_threads; t && n > 0; t = t->next_thread)
    {
      /* The internal signal-delivery thread is not user-visible.  */
      if (t != scm_i_signal_delivery_thread)
	{
	  SCM_SETCAR (*l, t->handle);
	  l = SCM_CDRLOC (*l);
	}
      n--;
    }
  *l = SCM_EOL;
  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
  return list;
}
#undef FUNC_NAME
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
	    (SCM thread),
	    "Return @code{#t} iff @var{thread} has exited.\n")
#define FUNC_NAME s_scm_thread_exited_p
{
  return scm_from_bool (scm_c_thread_exited_p (thread));
}
#undef FUNC_NAME
/* C-level predicate behind `thread-exited?'.  */
int
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
  scm_i_thread *t;
  SCM_VALIDATE_THREAD (1, thread);
  t = SCM_I_THREAD_DATA (thread);
  return t->exited;
}
#undef FUNC_NAME
SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
	    (void),
	    "Return the total number of processors of the machine, which\n"
	    "is guaranteed to be at least 1.  A ``processor'' here is a\n"
	    "thread execution unit, which can be either:\n\n"
	    "@itemize\n"
	    "@item an execution core in a (possibly multi-core) chip, in a\n"
	    "  (possibly multi- chip) module, in a single computer, or\n"
	    "@item a thread execution unit inside a core in the case of\n"
	    "  @dfn{hyper-threaded} CPUs.\n"
	    "@end itemize\n\n"
	    "Which of the two definitions is used, is unspecified.\n")
#define FUNC_NAME s_scm_total_processor_count
{
  /* num_processors comes from Gnulib's `nproc' module.  */
  return scm_from_ulong (num_processors (NPROC_ALL));
}
#undef FUNC_NAME
SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
	    (void),
	    "Like @code{total-processor-count}, but return the number of\n"
	    "processors available to the current process.  See\n"
	    "@code{setaffinity} and @code{getaffinity} for more\n"
	    "details.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
/* Condition used to wake sleeping threads (see scm_std_select et al.).  */
static scm_i_pthread_cond_t wake_up_cond;
/* Non-zero once scm_init_threads has run.  */
static int threads_initialized_p = 0;

/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Fallback mutex for scm_dynwind_critical_section when #f is passed.  */
static SCM dynwind_critical_section_mutex;

/* Enter a critical section for the current dynwind context: take
   MUTEX (or the shared recursive default when MUTEX is #f) and block
   asyncs until the context is left.  */
void
scm_dynwind_critical_section (SCM mutex)
{
  if (scm_is_false (mutex))
    mutex = dynwind_critical_section_mutex;
  scm_dynwind_lock_mutex (mutex);
  scm_dynwind_block_asyncs ();
}
/*** Initialization */

/* General-purpose internal mutex, initialized in scm_threads_prehistory.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Attribute used to create recursive pthread mutexes.  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
/* Earliest thread-system setup, run before the rest of libguile is
   initialized.  BASE is the stack base of the first thread.  */
void
scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
  pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
  pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
			     PTHREAD_MUTEX_RECURSIVE);
#endif

  /* The critical-section mutex must be recursive because critical
     sections can nest.  */
  scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
			    scm_i_pthread_mutexattr_recursive);
  scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
  scm_i_pthread_cond_init (&wake_up_cond, NULL);

  guilify_self_1 ((struct GC_stack_base *) base);
}
/* Smob type tags for the three thread-related types, assigned in
   scm_init_threads.  */
scm_t_bits scm_tc16_thread;
scm_t_bits scm_tc16_mutex;
scm_t_bits scm_tc16_condvar;
/* Register the thread/mutex/condvar smob types and finish setting up
   the first thread.  NOTE(review): the function header line was lost
   in extraction; reconstructed from upstream — verify signature.  */
void
scm_init_threads (void *base)
{
  scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
  scm_set_smob_print (scm_tc16_thread, thread_print);

  scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
  scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
  scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);

  scm_tc16_condvar = scm_make_smob_type ("condition-variable",
					 sizeof (fat_cond));
  scm_set_smob_print (scm_tc16_condvar, fat_cond_print);

  /* The default dynamic state is created later, in
     scm_init_threads_default_dynamic_state.  */
  scm_i_default_dynamic_state = SCM_BOOL_F;
  guilify_self_2 (SCM_BOOL_F);
  threads_initialized_p = 1;

  dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
/* Capture the current dynamic state as the default for new threads.  */
void
scm_init_threads_default_dynamic_state ()
{
  SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
  scm_i_default_dynamic_state = state;
}

/* Register the Scheme procedures defined via SCM_DEFINE in this file
   (the .x file is generated by snarfing).  */
void
scm_init_thread_procs ()
{
#include "libguile/threads.x"
}
/* IA64-specific things.  */

/* On ia64 the register backing store is a second, separately-growing
   stack that the GC must also scan; these helpers locate its base and
   current top (ar.bsp) per OS.  NOTE(review): the #ifdef structure was
   lost in extraction; reconstructed from upstream — verify the guards.  */
#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  /* Scan the process VM areas for the RSE stack region.  */
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  /* Fixed base address on FreeBSD/ia64.  */
  return (void *)0x8000000000000000;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  /* Top of backing store = bspstore plus the dirty registers not yet
     flushed to memory.  */
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
                  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */