1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
42 # include <pthread_np.h>
49 #include "libguile/validate.h"
50 #include "libguile/root.h"
51 #include "libguile/eval.h"
52 #include "libguile/async.h"
53 #include "libguile/ports.h"
54 #include "libguile/threads.h"
55 #include "libguile/dynwind.h"
56 #include "libguile/iselect.h"
57 #include "libguile/fluids.h"
58 #include "libguile/continuations.h"
59 #include "libguile/gc.h"
60 #include "libguile/init.h"
61 #include "libguile/scmsigs.h"
62 #include "libguile/strings.h"
63 #include "libguile/weaks.h"
65 #include <full-read.h>
70 /* First some libgc shims. */
72 /* Make sure GC_fn_type is defined; it is missing from the public
73 headers of GC 7.1 and earlier. */
#ifndef HAVE_GC_FN_TYPE
typedef void * (* GC_fn_type) (void *);
#endif

/* GC_UNIMPLEMENTED is one of the `GC_register_my_thread' return codes;
   provide it when the installed GC headers predate it.  */
#ifndef GC_UNIMPLEMENTED
#define GC_UNIMPLEMENTED 3
#endif
87 /* Likewise struct GC_stack_base is missing before 7.1. */
88 #ifndef HAVE_GC_STACK_BASE
89 struct GC_stack_base
{
90 void * mem_base
; /* Base of memory stack. */
92 void * reg_base
; /* Base of separate register stack. */
97 GC_register_my_thread (struct GC_stack_base
*stack_base
)
99 return GC_UNIMPLEMENTED
;
103 GC_unregister_my_thread ()
107 #if !SCM_USE_PTHREAD_THREADS
108 /* No threads; we can just use GC_stackbottom. */
110 get_thread_stack_base ()
112 return GC_stackbottom
;
115 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
116 && defined PTHREAD_ATTR_GETSTACK_WORKS
117 /* This method for GNU/Linux and perhaps some other systems.
118 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
119 available on them. */
121 get_thread_stack_base ()
127 pthread_getattr_np (pthread_self (), &attr
);
128 pthread_attr_getstack (&attr
, &start
, &size
);
129 end
= (char *)start
+ size
;
131 #if SCM_STACK_GROWS_UP
138 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
139 /* This method for MacOS X.
140 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
141 but as of 2006 there's nothing obvious at apple.com. */
143 get_thread_stack_base ()
145 return pthread_get_stackaddr_np (pthread_self ());
148 #elif HAVE_PTHREAD_ATTR_GET_NP
149 /* This one is for FreeBSD 9. */
151 get_thread_stack_base ()
157 pthread_attr_init (&attr
);
158 pthread_attr_get_np (pthread_self (), &attr
);
159 pthread_attr_getstack (&attr
, &start
, &size
);
160 pthread_attr_destroy (&attr
);
162 end
= (char *)start
+ size
;
164 #if SCM_STACK_GROWS_UP
172 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
176 GC_get_stack_base (struct GC_stack_base
*stack_base
)
178 stack_base
->mem_base
= get_thread_stack_base ();
180 /* Calculate and store off the base of this thread's register
181 backing store (RBS). Unfortunately our implementation(s) of
182 scm_ia64_register_backing_store_base are only reliable for the
183 main thread. For other threads, therefore, find out the current
184 top of the RBS, and use that as a maximum. */
185 stack_base
->reg_base
= scm_ia64_register_backing_store_base ();
190 bsp
= scm_ia64_ar_bsp (&ctx
);
191 if (stack_base
->reg_base
> bsp
)
192 stack_base
->reg_base
= bsp
;
199 GC_call_with_stack_base(void * (*fn
) (struct GC_stack_base
*, void*), void *arg
)
201 struct GC_stack_base stack_base
;
203 stack_base
.mem_base
= (void*)&stack_base
;
205 /* FIXME: Untested. */
209 stack_base
.reg_base
= scm_ia64_ar_bsp (&ctx
);
213 return fn (&stack_base
, arg
);
215 #endif /* HAVE_GC_STACK_BASE */
218 /* Now define with_gc_active and with_gc_inactive. */
220 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
222 /* We have a sufficiently new libgc (7.2 or newer). */
225 with_gc_inactive (GC_fn_type func
, void *data
)
227 return GC_do_blocking (func
, data
);
231 with_gc_active (GC_fn_type func
, void *data
)
233 return GC_call_with_gc_active (func
, data
);
238 /* libgc not new enough, so never actually deactivate GC.
240 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
241 GC_call_with_gc_active. */
244 with_gc_inactive (GC_fn_type func
, void *data
)
250 with_gc_active (GC_fn_type func
, void *data
)
255 #endif /* HAVE_GC_DO_BLOCKING */
260 to_timespec (SCM t
, scm_t_timespec
*waittime
)
264 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
265 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
269 double time
= scm_to_double (t
);
270 double sec
= scm_c_truncate (time
);
272 waittime
->tv_sec
= (long) sec
;
273 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
280 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
281 the risk of false references leading to unbounded retained space as
282 described in "Bounding Space Usage of Conservative Garbage Collectors",
285 /* Make an empty queue data structure.
290 return scm_cons (SCM_EOL
, SCM_EOL
);
293 /* Put T at the back of Q and return a handle that can be used with
294 remqueue to remove T from Q again.
297 enqueue (SCM q
, SCM t
)
299 SCM c
= scm_cons (t
, SCM_EOL
);
300 SCM_CRITICAL_SECTION_START
;
301 if (scm_is_null (SCM_CDR (q
)))
304 SCM_SETCDR (SCM_CAR (q
), c
);
306 SCM_CRITICAL_SECTION_END
;
310 /* Remove the element that the handle C refers to from the queue Q. C
311 must have been returned from a call to enqueue. The return value
312 is zero when the element referred to by C has already been removed.
313 Otherwise, 1 is returned.
316 remqueue (SCM q
, SCM c
)
319 SCM_CRITICAL_SECTION_START
;
320 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
322 if (scm_is_eq (p
, c
))
324 if (scm_is_eq (c
, SCM_CAR (q
)))
325 SCM_SETCAR (q
, SCM_CDR (c
));
326 SCM_SETCDR (prev
, SCM_CDR (c
));
329 SCM_SETCDR (c
, SCM_EOL
);
331 SCM_CRITICAL_SECTION_END
;
336 SCM_CRITICAL_SECTION_END
;
340 /* Remove the front-most element from the queue Q and return it.
341 Return SCM_BOOL_F when Q is empty.
347 SCM_CRITICAL_SECTION_START
;
351 SCM_CRITICAL_SECTION_END
;
356 SCM_SETCDR (q
, SCM_CDR (c
));
357 if (scm_is_null (SCM_CDR (q
)))
358 SCM_SETCAR (q
, SCM_EOL
);
359 SCM_CRITICAL_SECTION_END
;
362 SCM_SETCDR (c
, SCM_EOL
);
368 /*** Thread smob routines */
372 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
374 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
375 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
376 the struct case, hence we go via a union, and extract according to the
377 size of pthread_t. */
385 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
386 scm_i_pthread_t p
= t
->pthread
;
389 if (sizeof (p
) == sizeof (unsigned short))
391 else if (sizeof (p
) == sizeof (unsigned int))
393 else if (sizeof (p
) == sizeof (unsigned long))
398 scm_puts ("#<thread ", port
);
399 scm_uintprint (id
, 10, port
);
400 scm_puts (" (", port
);
401 scm_uintprint ((scm_t_bits
)t
, 16, port
);
402 scm_puts (")>", port
);
407 /*** Blocking on queues. */
409 /* See also scm_i_queue_async_cell for how such a block is
413 /* Put the current thread on QUEUE and go to sleep, waiting for it to
414 be woken up by a call to 'unblock_from_queue', or to be
415 interrupted. Upon return of this function, the current thread is
416 no longer on QUEUE, even when the sleep has been interrupted.
418 The caller of block_self must hold MUTEX. It will be atomically
419 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
421 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
424 When WAITTIME is not NULL, the sleep will be aborted at that time.
426 The return value of block_self is an errno value. It will be zero
427 when the sleep has been successfully completed by a call to
428 unblock_from_queue, EINTR when it has been interrupted by the
429 delivery of a system async, and ETIMEDOUT when the timeout has
432 The system asyncs themselves are not executed by block_self.
435 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
436 const scm_t_timespec
*waittime
)
438 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
442 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
447 q_handle
= enqueue (queue
, t
->handle
);
448 if (waittime
== NULL
)
449 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
451 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
453 /* When we are still on QUEUE, we have been interrupted. We
454 report this only when no other error (such as a timeout) has
457 if (remqueue (queue
, q_handle
) && err
== 0)
460 scm_i_reset_sleep (t
);
466 /* Wake up the first thread on QUEUE, if any. The awoken thread is
467 returned, or #f if the queue was empty.
470 unblock_from_queue (SCM queue
)
472 SCM thread
= dequeue (queue
);
473 if (scm_is_true (thread
))
474 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
479 /* Getting into and out of guile mode.
482 /* Key used to attach a cleanup handler to a given thread. Also, if
483 thread-local storage is unavailable, this key is used to retrieve the
484 current thread with `pthread_getspecific ()'. */
485 scm_i_pthread_key_t scm_i_thread_key
;
488 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
490 /* When thread-local storage (TLS) is available, a pointer to the
491 current-thread object is kept in TLS. Note that storing the thread-object
492 itself in TLS (rather than a pointer to some malloc'd memory) is not
493 possible since thread objects may live longer than the actual thread they
495 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
497 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
500 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
501 static scm_i_thread
*all_threads
= NULL
;
502 static int thread_count
;
504 static SCM scm_i_default_dynamic_state
;
506 /* Run when a fluid is collected. */
508 scm_i_reset_fluid (size_t n
)
512 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
513 for (t
= all_threads
; t
; t
= t
->next_thread
)
514 if (SCM_I_DYNAMIC_STATE_P (t
->dynamic_state
))
516 SCM v
= SCM_I_DYNAMIC_STATE_FLUIDS (t
->dynamic_state
);
518 if (n
< SCM_SIMPLE_VECTOR_LENGTH (v
))
519 SCM_SIMPLE_VECTOR_SET (v
, n
, SCM_UNDEFINED
);
521 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
524 /* Perform first stage of thread initialisation, in non-guile mode.
527 guilify_self_1 (struct GC_stack_base
*base
)
531 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
532 before allocating anything in this thread, because allocation could
533 cause GC to run, and GC could cause finalizers, which could invoke
534 Scheme functions, which need the current thread to be set. */
536 t
.pthread
= scm_i_pthread_self ();
537 t
.handle
= SCM_BOOL_F
;
538 t
.result
= SCM_BOOL_F
;
539 t
.cleanup_handler
= SCM_BOOL_F
;
542 t
.join_queue
= SCM_EOL
;
543 t
.dynamic_state
= SCM_BOOL_F
;
544 t
.dynwinds
= SCM_EOL
;
545 t
.active_asyncs
= SCM_EOL
;
547 t
.pending_asyncs
= 1;
548 t
.critical_section_level
= 0;
549 t
.base
= base
->mem_base
;
551 t
.register_backing_store_base
= base
->reg
-base
;
553 t
.continuation_root
= SCM_EOL
;
554 t
.continuation_base
= t
.base
;
555 scm_i_pthread_cond_init (&t
.sleep_cond
, NULL
);
556 t
.sleep_mutex
= NULL
;
557 t
.sleep_object
= SCM_BOOL_F
;
560 if (pipe2 (t
.sleep_pipe
, O_CLOEXEC
) != 0)
561 /* FIXME: Error conditions during the initialization phase are handled
562 gracelessly since public functions such as `scm_init_guile ()'
563 currently have type `void'. */
566 scm_i_pthread_mutex_init (&t
.admin_mutex
, NULL
);
567 t
.current_mark_stack_ptr
= NULL
;
568 t
.current_mark_stack_limit
= NULL
;
573 /* The switcheroo. */
575 scm_i_thread
*t_ptr
= &t
;
578 t_ptr
= GC_malloc (sizeof (scm_i_thread
));
579 memcpy (t_ptr
, &t
, sizeof t
);
581 scm_i_pthread_setspecific (scm_i_thread_key
, t_ptr
);
583 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
584 /* Cache the current thread in TLS for faster lookup. */
585 scm_i_current_thread
= t_ptr
;
588 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
589 t_ptr
->next_thread
= all_threads
;
592 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
598 /* Perform second stage of thread initialisation, in guile mode.
601 guilify_self_2 (SCM parent
)
603 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
607 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
609 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
610 t
->continuation_base
= t
->base
;
613 if (scm_is_true (parent
))
614 t
->dynamic_state
= scm_make_dynamic_state (parent
);
616 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
618 t
->join_queue
= make_queue ();
625 /* We implement our own mutex type since we want them to be 'fair', we
626 want to do fancy things while waiting for them (like running
627 asyncs) and we might want to add things that are nice for
632 scm_i_pthread_mutex_t lock
;
634 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
636 int recursive
; /* allow recursive locking? */
637 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
638 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
639 owned by the current thread? */
641 SCM waiting
; /* the threads waiting for this mutex. */
644 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
645 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
648 call_cleanup (void *data
)
651 return scm_call_0 (*proc_p
);
654 /* Perform thread tear-down, in guile mode.
657 do_thread_exit (void *v
)
659 scm_i_thread
*t
= (scm_i_thread
*) v
;
661 /* Ensure the signal handling thread has been launched, because we might be
662 shutting it down. This needs to be done in Guile mode. */
663 scm_i_ensure_signal_delivery_thread ();
665 if (!scm_is_false (t
->cleanup_handler
))
667 SCM ptr
= t
->cleanup_handler
;
669 t
->cleanup_handler
= SCM_BOOL_F
;
670 t
->result
= scm_internal_catch (SCM_BOOL_T
,
672 scm_handle_by_message_noexit
, NULL
);
675 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
678 close (t
->sleep_pipe
[0]);
679 close (t
->sleep_pipe
[1]);
680 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
683 while (!scm_is_null (t
->mutexes
))
685 SCM mutex
= SCM_WEAK_PAIR_CAR (t
->mutexes
);
687 if (!SCM_UNBNDP (mutex
))
689 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
691 scm_i_pthread_mutex_lock (&m
->lock
);
693 /* Since MUTEX is in `t->mutexes', T must be its owner. */
694 assert (scm_is_eq (m
->owner
, t
->handle
));
696 unblock_from_queue (m
->waiting
);
698 scm_i_pthread_mutex_unlock (&m
->lock
);
701 t
->mutexes
= SCM_WEAK_PAIR_CDR (t
->mutexes
);
704 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
710 do_thread_exit_trampoline (struct GC_stack_base
*sb
, void *v
)
712 /* Won't hurt if we are already registered. */
713 #if SCM_USE_PTHREAD_THREADS
714 GC_register_my_thread (sb
);
717 return scm_with_guile (do_thread_exit
, v
);
721 on_thread_exit (void *v
)
723 /* This handler is executed in non-guile mode. */
724 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
726 /* If we were canceled, we were unable to clear `t->guile_mode', so do
730 /* If this thread was cancelled while doing a cond wait, it will
731 still have a mutex locked, so we unlock it here. */
734 scm_i_pthread_mutex_unlock (t
->held_mutex
);
735 t
->held_mutex
= NULL
;
738 /* Reinstate the current thread for purposes of scm_with_guile
739 guile-mode cleanup handlers. Only really needed in the non-TLS
740 case but it doesn't hurt to be consistent. */
741 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
743 /* Scheme-level thread finalizers and other cleanup needs to happen in
745 GC_call_with_stack_base (do_thread_exit_trampoline
, t
);
747 /* Removing ourself from the list of all threads needs to happen in
748 non-guile mode since all SCM values on our stack become
749 unprotected once we are no longer in the list. */
750 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
751 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
754 *tp
= t
->next_thread
;
757 t
->next_thread
= NULL
;
763 /* If there's only one other thread, it could be the signal delivery
764 thread, so we need to notify it to shut down by closing its read pipe.
765 If it's not the signal delivery thread, then closing the read pipe isn't
767 if (thread_count
<= 1)
768 scm_i_close_signal_pipe ();
770 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
772 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
774 #if SCM_USE_PTHREAD_THREADS
775 GC_unregister_my_thread ();
779 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
782 init_thread_key (void)
784 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
787 /* Perform any initializations necessary to make the current thread
788 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
791 BASE is the stack base to use with GC.
793 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
794 which case the default dynamic state is used.
796 Returns zero when the thread was known to guile already; otherwise
799 Note that it could be the case that the thread was known
800 to Guile, but not in guile mode (because we are within a
801 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
802 be sure. New threads are put into guile mode implicitly. */
805 scm_i_init_thread_for_guile (struct GC_stack_base
*base
, SCM parent
)
807 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
809 if (SCM_I_CURRENT_THREAD
)
811 /* Thread is already known to Guile.
817 /* This thread has not been guilified yet.
820 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
821 if (scm_initialized_p
== 0)
823 /* First thread ever to enter Guile. Run the full
826 scm_i_init_guile (base
);
828 #if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
829 /* Allow other threads to come in later. */
830 GC_allow_register_threads ();
833 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
837 /* Guile is already initialized, but this thread enters it for
838 the first time. Only initialize this thread.
840 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
842 /* Register this thread with libgc. */
843 #if SCM_USE_PTHREAD_THREADS
844 GC_register_my_thread (base
);
847 guilify_self_1 (base
);
848 guilify_self_2 (parent
);
857 struct GC_stack_base stack_base
;
859 if (GC_get_stack_base (&stack_base
) == GC_SUCCESS
)
860 scm_i_init_thread_for_guile (&stack_base
,
861 scm_i_default_dynamic_state
);
864 fprintf (stderr
, "Failed to get stack base for current thread.\n");
869 struct with_guile_args
877 with_guile_trampoline (void *data
)
879 struct with_guile_args
*args
= data
;
881 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
885 with_guile_and_parent (struct GC_stack_base
*base
, void *data
)
890 struct with_guile_args
*args
= data
;
892 new_thread
= scm_i_init_thread_for_guile (base
, args
->parent
);
893 t
= SCM_I_CURRENT_THREAD
;
896 /* We are in Guile mode. */
897 assert (t
->guile_mode
);
899 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
901 /* Leave Guile mode. */
904 else if (t
->guile_mode
)
906 /* Already in Guile mode. */
907 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
911 /* We are not in Guile mode, either because we are not within a
912 scm_with_guile, or because we are within a scm_without_guile.
914 This call to scm_with_guile() could happen from anywhere on the
915 stack, and in particular lower on the stack than when it was
916 when this thread was first guilified. Thus, `base' must be
918 #if SCM_STACK_GROWS_UP
919 if (SCM_STACK_PTR (base
->mem_base
) < t
->base
)
920 t
->base
= SCM_STACK_PTR (base
->mem_base
);
922 if (SCM_STACK_PTR (base
->mem_base
) > t
->base
)
923 t
->base
= SCM_STACK_PTR (base
->mem_base
);
927 res
= with_gc_active (with_guile_trampoline
, args
);
934 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
936 struct with_guile_args args
;
940 args
.parent
= parent
;
942 return GC_call_with_stack_base (with_guile_and_parent
, &args
);
946 scm_with_guile (void *(*func
)(void *), void *data
)
948 return scm_i_with_guile_and_parent (func
, data
,
949 scm_i_default_dynamic_state
);
953 scm_without_guile (void *(*func
)(void *), void *data
)
956 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
960 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
961 result
= with_gc_inactive (func
, data
);
962 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
965 /* Otherwise we're not in guile mode, so nothing to do. */
966 result
= func (data
);
972 /*** Thread creation */
979 scm_i_pthread_mutex_t mutex
;
980 scm_i_pthread_cond_t cond
;
984 really_launch (void *d
)
986 launch_data
*data
= (launch_data
*)d
;
987 SCM thunk
= data
->thunk
, handler
= data
->handler
;
990 t
= SCM_I_CURRENT_THREAD
;
992 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
993 data
->thread
= scm_current_thread ();
994 scm_i_pthread_cond_signal (&data
->cond
);
995 scm_i_pthread_mutex_unlock (&data
->mutex
);
997 if (SCM_UNBNDP (handler
))
998 t
->result
= scm_call_0 (thunk
);
1000 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
1006 launch_thread (void *d
)
1008 launch_data
*data
= (launch_data
*)d
;
1009 scm_i_pthread_detach (scm_i_pthread_self ());
1010 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
1014 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
1015 (SCM thunk
, SCM handler
),
1016 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
1017 "returning a new thread object representing the thread. The procedure\n"
1018 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
1020 "When @var{handler} is specified, then @var{thunk} is called from\n"
1021 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
1022 "handler. This catch is established inside the continuation barrier.\n"
1024 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
1025 "the @emph{exit value} of the thread and the thread is terminated.")
1026 #define FUNC_NAME s_scm_call_with_new_thread
1032 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
1033 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
1034 handler
, SCM_ARG2
, FUNC_NAME
);
1036 GC_collect_a_little ();
1037 data
.parent
= scm_current_dynamic_state ();
1039 data
.handler
= handler
;
1040 data
.thread
= SCM_BOOL_F
;
1041 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1042 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1044 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1045 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
1048 scm_i_pthread_mutex_unlock (&data
.mutex
);
1050 scm_syserror (NULL
);
1052 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1053 scm_i_pthread_mutex_unlock (&data
.mutex
);
1061 scm_t_catch_body body
;
1063 scm_t_catch_handler handler
;
1066 scm_i_pthread_mutex_t mutex
;
1067 scm_i_pthread_cond_t cond
;
1071 really_spawn (void *d
)
1073 spawn_data
*data
= (spawn_data
*)d
;
1074 scm_t_catch_body body
= data
->body
;
1075 void *body_data
= data
->body_data
;
1076 scm_t_catch_handler handler
= data
->handler
;
1077 void *handler_data
= data
->handler_data
;
1078 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1080 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
1081 data
->thread
= scm_current_thread ();
1082 scm_i_pthread_cond_signal (&data
->cond
);
1083 scm_i_pthread_mutex_unlock (&data
->mutex
);
1085 if (handler
== NULL
)
1086 t
->result
= body (body_data
);
1088 t
->result
= scm_internal_catch (SCM_BOOL_T
,
1090 handler
, handler_data
);
1096 spawn_thread (void *d
)
1098 spawn_data
*data
= (spawn_data
*)d
;
1099 scm_i_pthread_detach (scm_i_pthread_self ());
1100 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
1105 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
1106 scm_t_catch_handler handler
, void *handler_data
)
1112 data
.parent
= scm_current_dynamic_state ();
1114 data
.body_data
= body_data
;
1115 data
.handler
= handler
;
1116 data
.handler_data
= handler_data
;
1117 data
.thread
= SCM_BOOL_F
;
1118 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1119 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1121 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1122 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1125 scm_i_pthread_mutex_unlock (&data
.mutex
);
1127 scm_syserror (NULL
);
1129 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1130 scm_i_pthread_mutex_unlock (&data
.mutex
);
1135 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1137 "Move the calling thread to the end of the scheduling queue.")
1138 #define FUNC_NAME s_scm_yield
1140 return scm_from_bool (scm_i_sched_yield ());
1144 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1146 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1147 "cannot be the current thread, and if @var{thread} has already terminated or "
1148 "been signaled to terminate, this function is a no-op.")
1149 #define FUNC_NAME s_scm_cancel_thread
1151 scm_i_thread
*t
= NULL
;
1153 SCM_VALIDATE_THREAD (1, thread
);
1154 t
= SCM_I_THREAD_DATA (thread
);
1155 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1159 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1160 scm_i_pthread_cancel (t
->pthread
);
1163 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1165 return SCM_UNSPECIFIED
;
1169 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1170 (SCM thread
, SCM proc
),
1171 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1172 "This handler will be called when the thread exits.")
1173 #define FUNC_NAME s_scm_set_thread_cleanup_x
1177 SCM_VALIDATE_THREAD (1, thread
);
1178 if (!scm_is_false (proc
))
1179 SCM_VALIDATE_THUNK (2, proc
);
1181 t
= SCM_I_THREAD_DATA (thread
);
1182 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1184 if (!(t
->exited
|| t
->canceled
))
1185 t
->cleanup_handler
= proc
;
1187 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1189 return SCM_UNSPECIFIED
;
1193 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1195 "Return the cleanup handler installed for the thread @var{thread}.")
1196 #define FUNC_NAME s_scm_thread_cleanup
1201 SCM_VALIDATE_THREAD (1, thread
);
1203 t
= SCM_I_THREAD_DATA (thread
);
1204 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1205 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1206 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1212 SCM
scm_join_thread (SCM thread
)
1214 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1217 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1218 (SCM thread
, SCM timeout
, SCM timeoutval
),
1219 "Suspend execution of the calling thread until the target @var{thread} "
1220 "terminates, unless the target @var{thread} has already terminated. ")
1221 #define FUNC_NAME s_scm_join_thread_timed
1224 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1225 SCM res
= SCM_BOOL_F
;
1227 if (! (SCM_UNBNDP (timeoutval
)))
1230 SCM_VALIDATE_THREAD (1, thread
);
1231 if (scm_is_eq (scm_current_thread (), thread
))
1232 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1234 t
= SCM_I_THREAD_DATA (thread
);
1235 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1237 if (! SCM_UNBNDP (timeout
))
1239 to_timespec (timeout
, &ctimeout
);
1240 timeout_ptr
= &ctimeout
;
1249 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1259 else if (err
== ETIMEDOUT
)
1262 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1264 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1266 /* Check for exit again, since we just released and
1267 reacquired the admin mutex, before the next block_self
1268 call (which would block forever if t has already
1278 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1284 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1286 "Return @code{#t} if @var{obj} is a thread.")
1287 #define FUNC_NAME s_scm_thread_p
1289 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1295 fat_mutex_free (SCM mx
)
1297 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1298 scm_i_pthread_mutex_destroy (&m
->lock
);
1303 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1305 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1306 scm_puts ("#<mutex ", port
);
1307 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1308 scm_puts (">", port
);
1313 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1318 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1319 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
1320 m
->owner
= SCM_BOOL_F
;
1323 m
->recursive
= recursive
;
1324 m
->unchecked_unlock
= unchecked_unlock
;
1325 m
->allow_external_unlock
= external_unlock
;
1327 m
->waiting
= SCM_EOL
;
1328 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1329 m
->waiting
= make_queue ();
1333 SCM
scm_make_mutex (void)
1335 return scm_make_mutex_with_flags (SCM_EOL
);
1338 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1339 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1340 SCM_SYMBOL (recursive_sym
, "recursive");
1342 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1344 "Create a new mutex. ")
1345 #define FUNC_NAME s_scm_make_mutex_with_flags
1347 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1350 while (! scm_is_null (ptr
))
1352 SCM flag
= SCM_CAR (ptr
);
1353 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1354 unchecked_unlock
= 1;
1355 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1356 external_unlock
= 1;
1357 else if (scm_is_eq (flag
, recursive_sym
))
1360 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1361 ptr
= SCM_CDR (ptr
);
1363 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1367 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1369 "Create a new recursive mutex. ")
1370 #define FUNC_NAME s_scm_make_recursive_mutex
1372 return make_fat_mutex (1, 0, 0);
1376 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1379 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1381 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1383 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1384 SCM err
= SCM_BOOL_F
;
1386 struct timeval current_time
;
1388 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1394 m
->owner
= new_owner
;
1397 if (SCM_I_IS_THREAD (new_owner
))
1399 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1401 /* FIXME: The order in which `t->admin_mutex' and
1402 `m->lock' are taken differs from that in
1403 `on_thread_exit', potentially leading to deadlocks. */
1404 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1406 /* Only keep a weak reference to MUTEX so that it's not
1407 retained when not referenced elsewhere (bug #27450).
1408 The weak pair itself is eventually removed when MUTEX
1409 is unlocked. Note that `t->mutexes' lists mutexes
1410 currently held by T, so it should be small. */
1411 t
->mutexes
= scm_weak_car_pair (mutex
, t
->mutexes
);
1413 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1418 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1420 m
->owner
= new_owner
;
1421 err
= scm_cons (scm_abandoned_mutex_error_key
,
1422 scm_from_locale_string ("lock obtained on abandoned "
1427 else if (scm_is_eq (m
->owner
, new_owner
))
1436 err
= scm_cons (scm_misc_error_key
,
1437 scm_from_locale_string ("mutex already locked "
1445 if (timeout
!= NULL
)
1447 gettimeofday (¤t_time
, NULL
);
1448 if (current_time
.tv_sec
> timeout
->tv_sec
||
1449 (current_time
.tv_sec
== timeout
->tv_sec
&&
1450 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1456 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1457 scm_i_pthread_mutex_unlock (&m
->lock
);
1459 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1462 scm_i_pthread_mutex_unlock (&m
->lock
);
1466 SCM
scm_lock_mutex (SCM mx
)
1468 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1471 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1472 (SCM m
, SCM timeout
, SCM owner
),
1473 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1474 "blocks until the mutex becomes available. The function returns when "
1475 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1476 "a thread already owns will succeed right away and will not block the "
1477 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1478 #define FUNC_NAME s_scm_lock_mutex_timed
1482 scm_t_timespec cwaittime
, *waittime
= NULL
;
1484 SCM_VALIDATE_MUTEX (1, m
);
1486 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1488 to_timespec (timeout
, &cwaittime
);
1489 waittime
= &cwaittime
;
1492 if (!SCM_UNBNDP (owner
) && !scm_is_false (owner
))
1493 SCM_VALIDATE_THREAD (3, owner
);
1495 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1496 if (!scm_is_false (exception
))
1497 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1498 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1503 lock_mutex_return_void (SCM mx
)
1505 (void) scm_lock_mutex (mx
);
1509 unlock_mutex_return_void (SCM mx
)
1511 (void) scm_unlock_mutex (mx
);
1515 scm_dynwind_lock_mutex (SCM mutex
)
1517 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void
, mutex
,
1518 SCM_F_WIND_EXPLICITLY
);
1519 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void
, mutex
,
1520 SCM_F_WIND_EXPLICITLY
);
1523 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1525 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1526 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1527 #define FUNC_NAME s_scm_try_mutex
1531 scm_t_timespec cwaittime
, *waittime
= NULL
;
1533 SCM_VALIDATE_MUTEX (1, mutex
);
1535 to_timespec (scm_from_int(0), &cwaittime
);
1536 waittime
= &cwaittime
;
1538 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1539 if (!scm_is_false (exception
))
1540 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1541 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1545 /*** Fat condition variables */
1548 scm_i_pthread_mutex_t lock
;
1549 SCM waiting
; /* the threads waiting for this condition. */
1552 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1553 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1556 fat_mutex_unlock (SCM mutex
, SCM cond
,
1557 const scm_t_timespec
*waittime
, int relock
)
1560 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1562 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1563 int err
= 0, ret
= 0;
1565 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1569 if (!scm_is_eq (owner
, t
->handle
))
1573 if (!m
->unchecked_unlock
)
1575 scm_i_pthread_mutex_unlock (&m
->lock
);
1576 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1580 else if (!m
->allow_external_unlock
)
1582 scm_i_pthread_mutex_unlock (&m
->lock
);
1583 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
1587 if (! (SCM_UNBNDP (cond
)))
1589 c
= SCM_CONDVAR_DATA (cond
);
1598 /* Change the owner of MUTEX. */
1599 t
->mutexes
= scm_delq_x (mutex
, t
->mutexes
);
1600 m
->owner
= unblock_from_queue (m
->waiting
);
1605 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1606 scm_i_pthread_mutex_unlock (&m
->lock
);
1613 else if (err
== ETIMEDOUT
)
1618 else if (err
!= EINTR
)
1621 scm_syserror (NULL
);
1627 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1635 scm_remember_upto_here_2 (cond
, mutex
);
1637 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1646 /* Change the owner of MUTEX. */
1647 t
->mutexes
= scm_delq_x (mutex
, t
->mutexes
);
1648 m
->owner
= unblock_from_queue (m
->waiting
);
1651 scm_i_pthread_mutex_unlock (&m
->lock
);
1658 SCM
scm_unlock_mutex (SCM mx
)
1660 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1663 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1664 (SCM mx
, SCM cond
, SCM timeout
),
1665 "Unlocks @var{mutex} if the calling thread owns the lock on "
1666 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1667 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1668 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1669 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1670 "with a call to @code{unlock-mutex}. Only the last call to "
1671 "@code{unlock-mutex} will actually unlock the mutex. ")
1672 #define FUNC_NAME s_scm_unlock_mutex_timed
1674 scm_t_timespec cwaittime
, *waittime
= NULL
;
1676 SCM_VALIDATE_MUTEX (1, mx
);
1677 if (! (SCM_UNBNDP (cond
)))
1679 SCM_VALIDATE_CONDVAR (2, cond
);
1681 if (! (SCM_UNBNDP (timeout
)))
1683 to_timespec (timeout
, &cwaittime
);
1684 waittime
= &cwaittime
;
1688 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1692 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1694 "Return @code{#t} if @var{obj} is a mutex.")
1695 #define FUNC_NAME s_scm_mutex_p
1697 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1701 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1703 "Return the thread owning @var{mx}, or @code{#f}.")
1704 #define FUNC_NAME s_scm_mutex_owner
1707 fat_mutex
*m
= NULL
;
1709 SCM_VALIDATE_MUTEX (1, mx
);
1710 m
= SCM_MUTEX_DATA (mx
);
1711 scm_i_pthread_mutex_lock (&m
->lock
);
1713 scm_i_pthread_mutex_unlock (&m
->lock
);
1719 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1721 "Return the lock level of mutex @var{mx}.")
1722 #define FUNC_NAME s_scm_mutex_level
1724 SCM_VALIDATE_MUTEX (1, mx
);
1725 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1729 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1731 "Returns @code{#t} if the mutex @var{mx} is locked.")
1732 #define FUNC_NAME s_scm_mutex_locked_p
1734 SCM_VALIDATE_MUTEX (1, mx
);
1735 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1740 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1742 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1743 scm_puts ("#<condition-variable ", port
);
1744 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1745 scm_puts (">", port
);
1749 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1751 "Make a new condition variable.")
1752 #define FUNC_NAME s_scm_make_condition_variable
1757 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1758 c
->waiting
= SCM_EOL
;
1759 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1760 c
->waiting
= make_queue ();
1765 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1766 (SCM cv
, SCM mx
, SCM t
),
1767 "Wait until @var{cond-var} has been signalled. While waiting, "
1768 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1769 "is locked again when this function returns. When @var{time} is given, "
1770 "it specifies a point in time where the waiting should be aborted. It "
1771 "can be either a integer as returned by @code{current-time} or a pair "
1772 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1773 "mutex is locked and @code{#f} is returned. When the condition "
1774 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1776 #define FUNC_NAME s_scm_timed_wait_condition_variable
1778 scm_t_timespec waittime
, *waitptr
= NULL
;
1780 SCM_VALIDATE_CONDVAR (1, cv
);
1781 SCM_VALIDATE_MUTEX (2, mx
);
1783 if (!SCM_UNBNDP (t
))
1785 to_timespec (t
, &waittime
);
1786 waitptr
= &waittime
;
1789 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1794 fat_cond_signal (fat_cond
*c
)
1796 unblock_from_queue (c
->waiting
);
1799 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1801 "Wake up one thread that is waiting for @var{cv}")
1802 #define FUNC_NAME s_scm_signal_condition_variable
1804 SCM_VALIDATE_CONDVAR (1, cv
);
1805 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1811 fat_cond_broadcast (fat_cond
*c
)
1813 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1817 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1819 "Wake up all threads that are waiting for @var{cv}. ")
1820 #define FUNC_NAME s_scm_broadcast_condition_variable
1822 SCM_VALIDATE_CONDVAR (1, cv
);
1823 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1828 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1830 "Return @code{#t} if @var{obj} is a condition variable.")
1831 #define FUNC_NAME s_scm_condition_variable_p
1833 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1844 SELECT_TYPE
*read_fds
;
1845 SELECT_TYPE
*write_fds
;
1846 SELECT_TYPE
*except_fds
;
1847 struct timeval
*timeout
;
1854 do_std_select (void *args
)
1856 struct select_args
*select_args
;
1858 select_args
= (struct select_args
*) args
;
1860 select_args
->result
=
1861 select (select_args
->nfds
,
1862 select_args
->read_fds
, select_args
->write_fds
,
1863 select_args
->except_fds
, select_args
->timeout
);
1864 select_args
->errno_value
= errno
;
1870 scm_std_select (int nfds
,
1871 SELECT_TYPE
*readfds
,
1872 SELECT_TYPE
*writefds
,
1873 SELECT_TYPE
*exceptfds
,
1874 struct timeval
*timeout
)
1877 int res
, eno
, wakeup_fd
;
1878 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1879 struct select_args args
;
1881 if (readfds
== NULL
)
1883 FD_ZERO (&my_readfds
);
1884 readfds
= &my_readfds
;
1887 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1890 wakeup_fd
= t
->sleep_pipe
[0];
1891 FD_SET (wakeup_fd
, readfds
);
1892 if (wakeup_fd
>= nfds
)
1896 args
.read_fds
= readfds
;
1897 args
.write_fds
= writefds
;
1898 args
.except_fds
= exceptfds
;
1899 args
.timeout
= timeout
;
1901 /* Explicitly cooperate with the GC. */
1902 scm_without_guile (do_std_select
, &args
);
1905 eno
= args
.errno_value
;
1908 scm_i_reset_sleep (t
);
1910 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1913 full_read (wakeup_fd
, &dummy
, 1);
1915 FD_CLR (wakeup_fd
, readfds
);
1927 /* Convenience API for blocking while in guile mode. */
1929 #if SCM_USE_PTHREAD_THREADS
1931 /* It seems reasonable to not run procedures related to mutex and condition
1932 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1933 without it, and (ii) the only potential gain would be GC latency. See
1934 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1935 for a discussion of the pros and cons. */
1938 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1940 int res
= scm_i_pthread_mutex_lock (mutex
);
1945 do_unlock (void *data
)
1947 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1951 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1953 scm_i_scm_pthread_mutex_lock (mutex
);
1954 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1958 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1961 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1963 t
->held_mutex
= mutex
;
1964 res
= scm_i_pthread_cond_wait (cond
, mutex
);
1965 t
->held_mutex
= NULL
;
1971 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1972 scm_i_pthread_mutex_t
*mutex
,
1973 const scm_t_timespec
*wt
)
1976 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1978 t
->held_mutex
= mutex
;
1979 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1980 t
->held_mutex
= NULL
;
1988 scm_std_usleep (unsigned long usecs
)
1991 tv
.tv_usec
= usecs
% 1000000;
1992 tv
.tv_sec
= usecs
/ 1000000;
1993 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1994 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
1998 scm_std_sleep (unsigned int secs
)
2003 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
2009 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
2011 "Return the thread that called this function.")
2012 #define FUNC_NAME s_scm_current_thread
2014 return SCM_I_CURRENT_THREAD
->handle
;
2019 scm_c_make_list (size_t n
, SCM fill
)
2023 res
= scm_cons (fill
, res
);
2027 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
2029 "Return a list of all threads.")
2030 #define FUNC_NAME s_scm_all_threads
2032 /* We can not allocate while holding the thread_admin_mutex because
2033 of the way GC is done.
2035 int n
= thread_count
;
2037 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
2039 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
2041 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
2043 if (t
!= scm_i_signal_delivery_thread
)
2045 SCM_SETCAR (*l
, t
->handle
);
2046 l
= SCM_CDRLOC (*l
);
2051 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
2056 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
2058 "Return @code{#t} iff @var{thread} has exited.\n")
2059 #define FUNC_NAME s_scm_thread_exited_p
2061 return scm_from_bool (scm_c_thread_exited_p (thread
));
2066 scm_c_thread_exited_p (SCM thread
)
2067 #define FUNC_NAME s_scm_thread_exited_p
2070 SCM_VALIDATE_THREAD (1, thread
);
2071 t
= SCM_I_THREAD_DATA (thread
);
2076 SCM_DEFINE (scm_total_processor_count
, "total-processor-count", 0, 0, 0,
2078 "Return the total number of processors of the machine, which\n"
2079 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2080 "thread execution unit, which can be either:\n\n"
2082 "@item an execution core in a (possibly multi-core) chip, in a\n"
2083 " (possibly multi- chip) module, in a single computer, or\n"
2084 "@item a thread execution unit inside a core in the case of\n"
2085 " @dfn{hyper-threaded} CPUs.\n"
2087 "Which of the two definitions is used, is unspecified.\n")
2088 #define FUNC_NAME s_scm_total_processor_count
2090 return scm_from_ulong (num_processors (NPROC_ALL
));
2094 SCM_DEFINE (scm_current_processor_count
, "current-processor-count", 0, 0, 0,
2096 "Like @code{total-processor-count}, but return the number of\n"
2097 "processors available to the current process. See\n"
2098 "@code{setaffinity} and @code{getaffinity} for more\n"
2100 #define FUNC_NAME s_scm_current_processor_count
2102 return scm_from_ulong (num_processors (NPROC_CURRENT
));
2109 static scm_i_pthread_cond_t wake_up_cond
;
2110 static int threads_initialized_p
= 0;
2113 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2115 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
2117 static SCM dynwind_critical_section_mutex
;
2120 scm_dynwind_critical_section (SCM mutex
)
2122 if (scm_is_false (mutex
))
2123 mutex
= dynwind_critical_section_mutex
;
2124 scm_dynwind_lock_mutex (mutex
);
2125 scm_dynwind_block_asyncs ();
2128 /*** Initialization */
2130 scm_i_pthread_mutex_t scm_i_misc_mutex
;
2132 #if SCM_USE_PTHREAD_THREADS
2133 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
2137 scm_threads_prehistory (void *base
)
2139 #if SCM_USE_PTHREAD_THREADS
2140 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2141 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2142 PTHREAD_MUTEX_RECURSIVE
);
2145 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2146 scm_i_pthread_mutexattr_recursive
);
2147 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2148 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
2150 guilify_self_1 ((struct GC_stack_base
*) base
);
2153 scm_t_bits scm_tc16_thread
;
2154 scm_t_bits scm_tc16_mutex
;
2155 scm_t_bits scm_tc16_condvar
;
2160 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2161 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2163 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2164 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2165 scm_set_smob_free (scm_tc16_mutex
, fat_mutex_free
);
2167 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2169 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
2171 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2172 guilify_self_2 (SCM_BOOL_F
);
2173 threads_initialized_p
= 1;
2175 dynwind_critical_section_mutex
= scm_make_recursive_mutex ();
2179 scm_init_threads_default_dynamic_state ()
2181 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2182 scm_i_default_dynamic_state
= state
;
2186 scm_init_thread_procs ()
2188 #include "libguile/threads.x"
/* IA64-specific things.  */

#ifdef __ia64__
# ifdef __hpux
# include <sys/param.h>
# include <sys/pstat.h>
/* Scan the process VM map for the RSE backing-store region.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
# include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
# include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
                  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */