1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
45 #include "libguile/validate.h"
46 #include "libguile/root.h"
47 #include "libguile/eval.h"
48 #include "libguile/async.h"
49 #include "libguile/ports.h"
50 #include "libguile/threads.h"
51 #include "libguile/dynwind.h"
52 #include "libguile/iselect.h"
53 #include "libguile/fluids.h"
54 #include "libguile/continuations.h"
55 #include "libguile/gc.h"
56 #include "libguile/init.h"
57 #include "libguile/scmsigs.h"
58 #include "libguile/strings.h"
59 #include "libguile/weaks.h"
61 #include <full-read.h>
66 /* First some libgc shims. */
68 /* Make sure GC_fn_type is defined; it is missing from the public
69 headers of GC 7.1 and earlier. */
70 #ifndef HAVE_GC_FN_TYPE
71 typedef void * (* GC_fn_type
) (void *);
79 #ifndef GC_UNIMPLEMENTED
80 #define GC_UNIMPLEMENTED 3
83 /* Likewise struct GC_stack_base is missing before 7.1. */
84 #ifndef HAVE_GC_STACK_BASE
85 struct GC_stack_base
{
86 void * mem_base
; /* Base of memory stack. */
88 void * reg_base
; /* Base of separate register stack. */
93 GC_register_my_thread (struct GC_stack_base
*stack_base
)
95 return GC_UNIMPLEMENTED
;
99 GC_unregister_my_thread ()
103 #if !SCM_USE_PTHREAD_THREADS
104 /* No threads; we can just use GC_stackbottom. */
106 get_thread_stack_base ()
108 return GC_stackbottom
;
111 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
112 && defined PTHREAD_ATTR_GETSTACK_WORKS
113 /* This method for GNU/Linux and perhaps some other systems.
114 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
115 available on them. */
117 get_thread_stack_base ()
123 pthread_getattr_np (pthread_self (), &attr
);
124 pthread_attr_getstack (&attr
, &start
, &size
);
125 end
= (char *)start
+ size
;
127 #if SCM_STACK_GROWS_UP
134 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
135 /* This method for MacOS X.
136 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
137 but as of 2006 there's nothing obvious at apple.com. */
139 get_thread_stack_base ()
141 return pthread_get_stackaddr_np (pthread_self ());
145 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
149 GC_get_stack_base (struct GC_stack_base
*stack_base
)
151 stack_base
->mem_base
= get_thread_stack_base ();
153 /* Calculate and store off the base of this thread's register
154 backing store (RBS). Unfortunately our implementation(s) of
155 scm_ia64_register_backing_store_base are only reliable for the
156 main thread. For other threads, therefore, find out the current
157 top of the RBS, and use that as a maximum. */
158 stack_base
->reg_base
= scm_ia64_register_backing_store_base ();
163 bsp
= scm_ia64_ar_bsp (&ctx
);
164 if (stack_base
->reg_base
> bsp
)
165 stack_base
->reg_base
= bsp
;
172 GC_call_with_stack_base(void * (*fn
) (struct GC_stack_base
*, void*), void *arg
)
174 struct GC_stack_base stack_base
;
176 stack_base
.mem_base
= (void*)&stack_base
;
178 /* FIXME: Untested. */
182 stack_base
.reg_base
= scm_ia64_ar_bsp (&ctx
);
186 return fn (&stack_base
, arg
);
188 #endif /* HAVE_GC_STACK_BASE */
191 /* Now define with_gc_active and with_gc_inactive. */
193 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
195 /* We have a sufficiently new libgc (7.2 or newer). */
198 with_gc_inactive (GC_fn_type func
, void *data
)
200 return GC_do_blocking (func
, data
);
204 with_gc_active (GC_fn_type func
, void *data
)
206 return GC_call_with_gc_active (func
, data
);
211 /* libgc not new enough, so never actually deactivate GC.
213 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
214 GC_call_with_gc_active. */
217 with_gc_inactive (GC_fn_type func
, void *data
)
223 with_gc_active (GC_fn_type func
, void *data
)
228 #endif /* HAVE_GC_DO_BLOCKING */
233 to_timespec (SCM t
, scm_t_timespec
*waittime
)
237 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
238 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
242 double time
= scm_to_double (t
);
243 double sec
= scm_c_truncate (time
);
245 waittime
->tv_sec
= (long) sec
;
246 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
253 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
254 the risk of false references leading to unbounded retained space as
255 described in "Bounding Space Usage of Conservative Garbage Collectors",
258 /* Make an empty queue data structure.
263 return scm_cons (SCM_EOL
, SCM_EOL
);
266 /* Put T at the back of Q and return a handle that can be used with
267 remqueue to remove T from Q again.
270 enqueue (SCM q
, SCM t
)
272 SCM c
= scm_cons (t
, SCM_EOL
);
273 SCM_CRITICAL_SECTION_START
;
274 if (scm_is_null (SCM_CDR (q
)))
277 SCM_SETCDR (SCM_CAR (q
), c
);
279 SCM_CRITICAL_SECTION_END
;
283 /* Remove the element that the handle C refers to from the queue Q. C
284 must have been returned from a call to enqueue. The return value
285 is zero when the element referred to by C has already been removed.
286 Otherwise, 1 is returned.
289 remqueue (SCM q
, SCM c
)
292 SCM_CRITICAL_SECTION_START
;
293 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
295 if (scm_is_eq (p
, c
))
297 if (scm_is_eq (c
, SCM_CAR (q
)))
298 SCM_SETCAR (q
, SCM_CDR (c
));
299 SCM_SETCDR (prev
, SCM_CDR (c
));
302 SCM_SETCDR (c
, SCM_EOL
);
304 SCM_CRITICAL_SECTION_END
;
309 SCM_CRITICAL_SECTION_END
;
313 /* Remove the front-most element from the queue Q and return it.
314 Return SCM_BOOL_F when Q is empty.
320 SCM_CRITICAL_SECTION_START
;
324 SCM_CRITICAL_SECTION_END
;
329 SCM_SETCDR (q
, SCM_CDR (c
));
330 if (scm_is_null (SCM_CDR (q
)))
331 SCM_SETCAR (q
, SCM_EOL
);
332 SCM_CRITICAL_SECTION_END
;
335 SCM_SETCDR (c
, SCM_EOL
);
341 /*** Thread smob routines */
345 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
347 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
348 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
349 the struct case, hence we go via a union, and extract according to the
350 size of pthread_t. */
358 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
359 scm_i_pthread_t p
= t
->pthread
;
362 if (sizeof (p
) == sizeof (unsigned short))
364 else if (sizeof (p
) == sizeof (unsigned int))
366 else if (sizeof (p
) == sizeof (unsigned long))
371 scm_puts ("#<thread ", port
);
372 scm_uintprint (id
, 10, port
);
373 scm_puts (" (", port
);
374 scm_uintprint ((scm_t_bits
)t
, 16, port
);
375 scm_puts (")>", port
);
380 /*** Blocking on queues. */
382 /* See also scm_i_queue_async_cell for how such a block is
386 /* Put the current thread on QUEUE and go to sleep, waiting for it to
387 be woken up by a call to 'unblock_from_queue', or to be
388 interrupted. Upon return of this function, the current thread is
389 no longer on QUEUE, even when the sleep has been interrupted.
391 The caller of block_self must hold MUTEX. It will be atomically
392 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
394 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
397 When WAITTIME is not NULL, the sleep will be aborted at that time.
399 The return value of block_self is an errno value. It will be zero
400 when the sleep has been successfully completed by a call to
401 unblock_from_queue, EINTR when it has been interrupted by the
402 delivery of a system async, and ETIMEDOUT when the timeout has
405 The system asyncs themselves are not executed by block_self.
408 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
409 const scm_t_timespec
*waittime
)
411 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
415 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
420 q_handle
= enqueue (queue
, t
->handle
);
421 if (waittime
== NULL
)
422 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
424 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
426 /* When we are still on QUEUE, we have been interrupted. We
427 report this only when no other error (such as a timeout) has
430 if (remqueue (queue
, q_handle
) && err
== 0)
433 scm_i_reset_sleep (t
);
439 /* Wake up the first thread on QUEUE, if any. The awoken thread is
440 returned, or #f if the queue was empty.
443 unblock_from_queue (SCM queue
)
445 SCM thread
= dequeue (queue
);
446 if (scm_is_true (thread
))
447 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
452 /* Getting into and out of guile mode.
455 /* Key used to attach a cleanup handler to a given thread. Also, if
456 thread-local storage is unavailable, this key is used to retrieve the
457 current thread with `pthread_getspecific ()'. */
458 scm_i_pthread_key_t scm_i_thread_key
;
461 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
463 /* When thread-local storage (TLS) is available, a pointer to the
464 current-thread object is kept in TLS. Note that storing the thread-object
465 itself in TLS (rather than a pointer to some malloc'd memory) is not
466 possible since thread objects may live longer than the actual thread they
468 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
470 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
473 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
474 static scm_i_thread
*all_threads
= NULL
;
475 static int thread_count
;
477 static SCM scm_i_default_dynamic_state
;
479 /* Run when a fluid is collected. */
481 scm_i_reset_fluid (size_t n
)
485 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
486 for (t
= all_threads
; t
; t
= t
->next_thread
)
487 if (SCM_I_DYNAMIC_STATE_P (t
->dynamic_state
))
489 SCM v
= SCM_I_DYNAMIC_STATE_FLUIDS (t
->dynamic_state
);
491 if (n
< SCM_SIMPLE_VECTOR_LENGTH (v
))
492 SCM_SIMPLE_VECTOR_SET (v
, n
, SCM_UNDEFINED
);
494 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
497 /* Perform first stage of thread initialisation, in non-guile mode.
500 guilify_self_1 (struct GC_stack_base
*base
)
504 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
505 before allocating anything in this thread, because allocation could
506 cause GC to run, and GC could cause finalizers, which could invoke
507 Scheme functions, which need the current thread to be set. */
509 t
.pthread
= scm_i_pthread_self ();
510 t
.handle
= SCM_BOOL_F
;
511 t
.result
= SCM_BOOL_F
;
512 t
.cleanup_handler
= SCM_BOOL_F
;
515 t
.join_queue
= SCM_EOL
;
516 t
.dynamic_state
= SCM_BOOL_F
;
517 t
.dynwinds
= SCM_EOL
;
518 t
.active_asyncs
= SCM_EOL
;
520 t
.pending_asyncs
= 1;
521 t
.critical_section_level
= 0;
522 t
.base
= base
->mem_base
;
524 t
.register_backing_store_base
= base
->reg
-base
;
526 t
.continuation_root
= SCM_EOL
;
527 t
.continuation_base
= t
.base
;
528 scm_i_pthread_cond_init (&t
.sleep_cond
, NULL
);
529 t
.sleep_mutex
= NULL
;
530 t
.sleep_object
= SCM_BOOL_F
;
533 if (pipe2 (t
.sleep_pipe
, O_CLOEXEC
) != 0)
534 /* FIXME: Error conditions during the initialization phase are handled
535 gracelessly since public functions such as `scm_init_guile ()'
536 currently have type `void'. */
539 scm_i_pthread_mutex_init (&t
.admin_mutex
, NULL
);
540 t
.current_mark_stack_ptr
= NULL
;
541 t
.current_mark_stack_limit
= NULL
;
546 /* The switcheroo. */
548 scm_i_thread
*t_ptr
= &t
;
551 t_ptr
= GC_malloc (sizeof (scm_i_thread
));
552 memcpy (t_ptr
, &t
, sizeof t
);
554 scm_i_pthread_setspecific (scm_i_thread_key
, t_ptr
);
556 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
557 /* Cache the current thread in TLS for faster lookup. */
558 scm_i_current_thread
= t_ptr
;
561 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
562 t_ptr
->next_thread
= all_threads
;
565 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
571 /* Perform second stage of thread initialisation, in guile mode.
574 guilify_self_2 (SCM parent
)
576 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
580 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
582 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
583 t
->continuation_base
= t
->base
;
586 if (scm_is_true (parent
))
587 t
->dynamic_state
= scm_make_dynamic_state (parent
);
589 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
591 t
->join_queue
= make_queue ();
598 /* We implement our own mutex type since we want them to be 'fair', we
599 want to do fancy things while waiting for them (like running
600 asyncs) and we might want to add things that are nice for
605 scm_i_pthread_mutex_t lock
;
607 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
609 int recursive
; /* allow recursive locking? */
610 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
611 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
612 owned by the current thread? */
614 SCM waiting
; /* the threads waiting for this mutex. */
617 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
618 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
621 call_cleanup (void *data
)
624 return scm_call_0 (*proc_p
);
627 /* Perform thread tear-down, in guile mode.
630 do_thread_exit (void *v
)
632 scm_i_thread
*t
= (scm_i_thread
*) v
;
634 /* Ensure the signal handling thread has been launched, because we might be
635 shutting it down. This needs to be done in Guile mode. */
636 scm_i_ensure_signal_delivery_thread ();
638 if (!scm_is_false (t
->cleanup_handler
))
640 SCM ptr
= t
->cleanup_handler
;
642 t
->cleanup_handler
= SCM_BOOL_F
;
643 t
->result
= scm_internal_catch (SCM_BOOL_T
,
645 scm_handle_by_message_noexit
, NULL
);
648 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
651 close (t
->sleep_pipe
[0]);
652 close (t
->sleep_pipe
[1]);
653 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
656 while (!scm_is_null (t
->mutexes
))
658 SCM mutex
= SCM_WEAK_PAIR_CAR (t
->mutexes
);
660 if (!SCM_UNBNDP (mutex
))
662 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
664 scm_i_pthread_mutex_lock (&m
->lock
);
666 /* Since MUTEX is in `t->mutexes', T must be its owner. */
667 assert (scm_is_eq (m
->owner
, t
->handle
));
669 unblock_from_queue (m
->waiting
);
671 scm_i_pthread_mutex_unlock (&m
->lock
);
674 t
->mutexes
= SCM_WEAK_PAIR_CDR (t
->mutexes
);
677 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
683 do_thread_exit_trampoline (struct GC_stack_base
*sb
, void *v
)
685 /* Won't hurt if we are already registered. */
686 #if SCM_USE_PTHREAD_THREADS
687 GC_register_my_thread (sb
);
690 return scm_with_guile (do_thread_exit
, v
);
694 on_thread_exit (void *v
)
696 /* This handler is executed in non-guile mode. */
697 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
699 /* If we were canceled, we were unable to clear `t->guile_mode', so do
703 /* If this thread was cancelled while doing a cond wait, it will
704 still have a mutex locked, so we unlock it here. */
707 scm_i_pthread_mutex_unlock (t
->held_mutex
);
708 t
->held_mutex
= NULL
;
711 /* Reinstate the current thread for purposes of scm_with_guile
712 guile-mode cleanup handlers. Only really needed in the non-TLS
713 case but it doesn't hurt to be consistent. */
714 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
716 /* Scheme-level thread finalizers and other cleanup needs to happen in
718 GC_call_with_stack_base (do_thread_exit_trampoline
, t
);
720 /* Removing ourself from the list of all threads needs to happen in
721 non-guile mode since all SCM values on our stack become
722 unprotected once we are no longer in the list. */
723 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
724 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
727 *tp
= t
->next_thread
;
730 t
->next_thread
= NULL
;
736 /* If there's only one other thread, it could be the signal delivery
737 thread, so we need to notify it to shut down by closing its read pipe.
738 If it's not the signal delivery thread, then closing the read pipe isn't
740 if (thread_count
<= 1)
741 scm_i_close_signal_pipe ();
743 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
745 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
747 #if SCM_USE_PTHREAD_THREADS
748 GC_unregister_my_thread ();
752 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
755 init_thread_key (void)
757 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
760 /* Perform any initializations necessary to make the current thread
761 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
764 BASE is the stack base to use with GC.
766 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
767 which case the default dynamic state is used.
769 Returns zero when the thread was known to guile already; otherwise
772 Note that it could be the case that the thread was known
773 to Guile, but not in guile mode (because we are within a
774 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
775 be sure. New threads are put into guile mode implicitly. */
778 scm_i_init_thread_for_guile (struct GC_stack_base
*base
, SCM parent
)
780 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
782 if (SCM_I_CURRENT_THREAD
)
784 /* Thread is already known to Guile.
790 /* This thread has not been guilified yet.
793 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
794 if (scm_initialized_p
== 0)
796 /* First thread ever to enter Guile. Run the full
799 scm_i_init_guile (base
);
801 #if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
802 /* Allow other threads to come in later. */
803 GC_allow_register_threads ();
806 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
810 /* Guile is already initialized, but this thread enters it for
811 the first time. Only initialize this thread.
813 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
815 /* Register this thread with libgc. */
816 #if SCM_USE_PTHREAD_THREADS
817 GC_register_my_thread (base
);
820 guilify_self_1 (base
);
821 guilify_self_2 (parent
);
830 struct GC_stack_base stack_base
;
832 if (GC_get_stack_base (&stack_base
) == GC_SUCCESS
)
833 scm_i_init_thread_for_guile (&stack_base
,
834 scm_i_default_dynamic_state
);
837 fprintf (stderr
, "Failed to get stack base for current thread.\n");
842 struct with_guile_args
850 with_guile_trampoline (void *data
)
852 struct with_guile_args
*args
= data
;
854 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
858 with_guile_and_parent (struct GC_stack_base
*base
, void *data
)
863 struct with_guile_args
*args
= data
;
865 new_thread
= scm_i_init_thread_for_guile (base
, args
->parent
);
866 t
= SCM_I_CURRENT_THREAD
;
869 /* We are in Guile mode. */
870 assert (t
->guile_mode
);
872 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
874 /* Leave Guile mode. */
877 else if (t
->guile_mode
)
879 /* Already in Guile mode. */
880 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
884 /* We are not in Guile mode, either because we are not within a
885 scm_with_guile, or because we are within a scm_without_guile.
887 This call to scm_with_guile() could happen from anywhere on the
888 stack, and in particular lower on the stack than when it was
889 when this thread was first guilified. Thus, `base' must be
891 #if SCM_STACK_GROWS_UP
892 if (SCM_STACK_PTR (base
->mem_base
) < t
->base
)
893 t
->base
= SCM_STACK_PTR (base
->mem_base
);
895 if (SCM_STACK_PTR (base
->mem_base
) > t
->base
)
896 t
->base
= SCM_STACK_PTR (base
->mem_base
);
900 res
= with_gc_active (with_guile_trampoline
, args
);
907 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
909 struct with_guile_args args
;
913 args
.parent
= parent
;
915 return GC_call_with_stack_base (with_guile_and_parent
, &args
);
919 scm_with_guile (void *(*func
)(void *), void *data
)
921 return scm_i_with_guile_and_parent (func
, data
,
922 scm_i_default_dynamic_state
);
926 scm_without_guile (void *(*func
)(void *), void *data
)
929 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
933 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
934 result
= with_gc_inactive (func
, data
);
935 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
938 /* Otherwise we're not in guile mode, so nothing to do. */
939 result
= func (data
);
945 /*** Thread creation */
952 scm_i_pthread_mutex_t mutex
;
953 scm_i_pthread_cond_t cond
;
957 really_launch (void *d
)
959 launch_data
*data
= (launch_data
*)d
;
960 SCM thunk
= data
->thunk
, handler
= data
->handler
;
963 t
= SCM_I_CURRENT_THREAD
;
965 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
966 data
->thread
= scm_current_thread ();
967 scm_i_pthread_cond_signal (&data
->cond
);
968 scm_i_pthread_mutex_unlock (&data
->mutex
);
970 if (SCM_UNBNDP (handler
))
971 t
->result
= scm_call_0 (thunk
);
973 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
979 launch_thread (void *d
)
981 launch_data
*data
= (launch_data
*)d
;
982 scm_i_pthread_detach (scm_i_pthread_self ());
983 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
987 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
988 (SCM thunk
, SCM handler
),
989 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
990 "returning a new thread object representing the thread. The procedure\n"
991 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
993 "When @var{handler} is specified, then @var{thunk} is called from\n"
994 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
995 "handler. This catch is established inside the continuation barrier.\n"
997 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
998 "the @emph{exit value} of the thread and the thread is terminated.")
999 #define FUNC_NAME s_scm_call_with_new_thread
1005 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
1006 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
1007 handler
, SCM_ARG2
, FUNC_NAME
);
1009 GC_collect_a_little ();
1010 data
.parent
= scm_current_dynamic_state ();
1012 data
.handler
= handler
;
1013 data
.thread
= SCM_BOOL_F
;
1014 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1015 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1017 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1018 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
1021 scm_i_pthread_mutex_unlock (&data
.mutex
);
1023 scm_syserror (NULL
);
1025 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1026 scm_i_pthread_mutex_unlock (&data
.mutex
);
1034 scm_t_catch_body body
;
1036 scm_t_catch_handler handler
;
1039 scm_i_pthread_mutex_t mutex
;
1040 scm_i_pthread_cond_t cond
;
1044 really_spawn (void *d
)
1046 spawn_data
*data
= (spawn_data
*)d
;
1047 scm_t_catch_body body
= data
->body
;
1048 void *body_data
= data
->body_data
;
1049 scm_t_catch_handler handler
= data
->handler
;
1050 void *handler_data
= data
->handler_data
;
1051 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1053 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
1054 data
->thread
= scm_current_thread ();
1055 scm_i_pthread_cond_signal (&data
->cond
);
1056 scm_i_pthread_mutex_unlock (&data
->mutex
);
1058 if (handler
== NULL
)
1059 t
->result
= body (body_data
);
1061 t
->result
= scm_internal_catch (SCM_BOOL_T
,
1063 handler
, handler_data
);
1069 spawn_thread (void *d
)
1071 spawn_data
*data
= (spawn_data
*)d
;
1072 scm_i_pthread_detach (scm_i_pthread_self ());
1073 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
1078 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
1079 scm_t_catch_handler handler
, void *handler_data
)
1085 data
.parent
= scm_current_dynamic_state ();
1087 data
.body_data
= body_data
;
1088 data
.handler
= handler
;
1089 data
.handler_data
= handler_data
;
1090 data
.thread
= SCM_BOOL_F
;
1091 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1092 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1094 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1095 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1098 scm_i_pthread_mutex_unlock (&data
.mutex
);
1100 scm_syserror (NULL
);
1102 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1103 scm_i_pthread_mutex_unlock (&data
.mutex
);
1108 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1110 "Move the calling thread to the end of the scheduling queue.")
1111 #define FUNC_NAME s_scm_yield
1113 return scm_from_bool (scm_i_sched_yield ());
1117 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1119 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1120 "cannot be the current thread, and if @var{thread} has already terminated or "
1121 "been signaled to terminate, this function is a no-op.")
1122 #define FUNC_NAME s_scm_cancel_thread
1124 scm_i_thread
*t
= NULL
;
1126 SCM_VALIDATE_THREAD (1, thread
);
1127 t
= SCM_I_THREAD_DATA (thread
);
1128 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1132 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1133 scm_i_pthread_cancel (t
->pthread
);
1136 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1138 return SCM_UNSPECIFIED
;
1142 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1143 (SCM thread
, SCM proc
),
1144 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1145 "This handler will be called when the thread exits.")
1146 #define FUNC_NAME s_scm_set_thread_cleanup_x
1150 SCM_VALIDATE_THREAD (1, thread
);
1151 if (!scm_is_false (proc
))
1152 SCM_VALIDATE_THUNK (2, proc
);
1154 t
= SCM_I_THREAD_DATA (thread
);
1155 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1157 if (!(t
->exited
|| t
->canceled
))
1158 t
->cleanup_handler
= proc
;
1160 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1162 return SCM_UNSPECIFIED
;
1166 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1168 "Return the cleanup handler installed for the thread @var{thread}.")
1169 #define FUNC_NAME s_scm_thread_cleanup
1174 SCM_VALIDATE_THREAD (1, thread
);
1176 t
= SCM_I_THREAD_DATA (thread
);
1177 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1178 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1179 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1185 SCM
scm_join_thread (SCM thread
)
1187 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1190 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1191 (SCM thread
, SCM timeout
, SCM timeoutval
),
1192 "Suspend execution of the calling thread until the target @var{thread} "
1193 "terminates, unless the target @var{thread} has already terminated. ")
1194 #define FUNC_NAME s_scm_join_thread_timed
1197 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1198 SCM res
= SCM_BOOL_F
;
1200 if (! (SCM_UNBNDP (timeoutval
)))
1203 SCM_VALIDATE_THREAD (1, thread
);
1204 if (scm_is_eq (scm_current_thread (), thread
))
1205 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1207 t
= SCM_I_THREAD_DATA (thread
);
1208 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1210 if (! SCM_UNBNDP (timeout
))
1212 to_timespec (timeout
, &ctimeout
);
1213 timeout_ptr
= &ctimeout
;
1222 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1232 else if (err
== ETIMEDOUT
)
1235 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1237 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1239 /* Check for exit again, since we just released and
1240 reacquired the admin mutex, before the next block_self
1241 call (which would block forever if t has already
1251 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1257 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1259 "Return @code{#t} if @var{obj} is a thread.")
1260 #define FUNC_NAME s_scm_thread_p
1262 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1268 fat_mutex_free (SCM mx
)
1270 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1271 scm_i_pthread_mutex_destroy (&m
->lock
);
1276 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1278 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1279 scm_puts ("#<mutex ", port
);
1280 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1281 scm_puts (">", port
);
1286 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1291 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1292 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
1293 m
->owner
= SCM_BOOL_F
;
1296 m
->recursive
= recursive
;
1297 m
->unchecked_unlock
= unchecked_unlock
;
1298 m
->allow_external_unlock
= external_unlock
;
1300 m
->waiting
= SCM_EOL
;
1301 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1302 m
->waiting
= make_queue ();
1306 SCM
scm_make_mutex (void)
1308 return scm_make_mutex_with_flags (SCM_EOL
);
1311 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1312 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1313 SCM_SYMBOL (recursive_sym
, "recursive");
1315 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1317 "Create a new mutex. ")
1318 #define FUNC_NAME s_scm_make_mutex_with_flags
1320 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1323 while (! scm_is_null (ptr
))
1325 SCM flag
= SCM_CAR (ptr
);
1326 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1327 unchecked_unlock
= 1;
1328 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1329 external_unlock
= 1;
1330 else if (scm_is_eq (flag
, recursive_sym
))
1333 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1334 ptr
= SCM_CDR (ptr
);
1336 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1340 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1342 "Create a new recursive mutex. ")
1343 #define FUNC_NAME s_scm_make_recursive_mutex
1345 return make_fat_mutex (1, 0, 0);
1349 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1352 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1354 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1356 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1357 SCM err
= SCM_BOOL_F
;
1359 struct timeval current_time
;
1361 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1367 m
->owner
= new_owner
;
1370 if (SCM_I_IS_THREAD (new_owner
))
1372 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1374 /* FIXME: The order in which `t->admin_mutex' and
1375 `m->lock' are taken differs from that in
1376 `on_thread_exit', potentially leading to deadlocks. */
1377 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1379 /* Only keep a weak reference to MUTEX so that it's not
1380 retained when not referenced elsewhere (bug #27450).
1381 The weak pair itself is eventually removed when MUTEX
1382 is unlocked. Note that `t->mutexes' lists mutexes
1383 currently held by T, so it should be small. */
1384 t
->mutexes
= scm_weak_car_pair (mutex
, t
->mutexes
);
1386 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1391 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1393 m
->owner
= new_owner
;
1394 err
= scm_cons (scm_abandoned_mutex_error_key
,
1395 scm_from_locale_string ("lock obtained on abandoned "
1400 else if (scm_is_eq (m
->owner
, new_owner
))
1409 err
= scm_cons (scm_misc_error_key
,
1410 scm_from_locale_string ("mutex already locked "
1418 if (timeout
!= NULL
)
1420 gettimeofday (¤t_time
, NULL
);
1421 if (current_time
.tv_sec
> timeout
->tv_sec
||
1422 (current_time
.tv_sec
== timeout
->tv_sec
&&
1423 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1429 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1430 scm_i_pthread_mutex_unlock (&m
->lock
);
1432 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1435 scm_i_pthread_mutex_unlock (&m
->lock
);
1439 SCM
scm_lock_mutex (SCM mx
)
1441 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1444 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1445 (SCM m
, SCM timeout
, SCM owner
),
1446 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1447 "blocks until the mutex becomes available. The function returns when "
1448 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1449 "a thread already owns will succeed right away and will not block the "
1450 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1451 #define FUNC_NAME s_scm_lock_mutex_timed
1455 scm_t_timespec cwaittime
, *waittime
= NULL
;
1457 SCM_VALIDATE_MUTEX (1, m
);
1459 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1461 to_timespec (timeout
, &cwaittime
);
1462 waittime
= &cwaittime
;
1465 if (!SCM_UNBNDP (owner
) && !scm_is_false (owner
))
1466 SCM_VALIDATE_THREAD (3, owner
);
1468 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1469 if (!scm_is_false (exception
))
1470 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1471 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1476 lock_mutex_return_void (SCM mx
)
1478 (void) scm_lock_mutex (mx
);
1482 unlock_mutex_return_void (SCM mx
)
1484 (void) scm_unlock_mutex (mx
);
1488 scm_dynwind_lock_mutex (SCM mutex
)
1490 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void
, mutex
,
1491 SCM_F_WIND_EXPLICITLY
);
1492 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void
, mutex
,
1493 SCM_F_WIND_EXPLICITLY
);
1496 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1498 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1499 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1500 #define FUNC_NAME s_scm_try_mutex
1504 scm_t_timespec cwaittime
, *waittime
= NULL
;
1506 SCM_VALIDATE_MUTEX (1, mutex
);
1508 to_timespec (scm_from_int(0), &cwaittime
);
1509 waittime
= &cwaittime
;
1511 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1512 if (!scm_is_false (exception
))
1513 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1514 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1518 /*** Fat condition variables */
1521 scm_i_pthread_mutex_t lock
;
1522 SCM waiting
; /* the threads waiting for this condition. */
1525 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1526 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1529 fat_mutex_unlock (SCM mutex
, SCM cond
,
1530 const scm_t_timespec
*waittime
, int relock
)
1533 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1535 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1536 int err
= 0, ret
= 0;
1538 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1542 if (!scm_is_eq (owner
, t
->handle
))
1546 if (!m
->unchecked_unlock
)
1548 scm_i_pthread_mutex_unlock (&m
->lock
);
1549 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1553 else if (!m
->allow_external_unlock
)
1555 scm_i_pthread_mutex_unlock (&m
->lock
);
1556 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
1560 if (! (SCM_UNBNDP (cond
)))
1562 c
= SCM_CONDVAR_DATA (cond
);
1571 /* Change the owner of MUTEX. */
1572 t
->mutexes
= scm_delq_x (mutex
, t
->mutexes
);
1573 m
->owner
= unblock_from_queue (m
->waiting
);
1578 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1579 scm_i_pthread_mutex_unlock (&m
->lock
);
1586 else if (err
== ETIMEDOUT
)
1591 else if (err
!= EINTR
)
1594 scm_syserror (NULL
);
1600 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1608 scm_remember_upto_here_2 (cond
, mutex
);
1610 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1619 /* Change the owner of MUTEX. */
1620 t
->mutexes
= scm_delq_x (mutex
, t
->mutexes
);
1621 m
->owner
= unblock_from_queue (m
->waiting
);
1624 scm_i_pthread_mutex_unlock (&m
->lock
);
1631 SCM
scm_unlock_mutex (SCM mx
)
1633 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1636 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1637 (SCM mx
, SCM cond
, SCM timeout
),
1638 "Unlocks @var{mutex} if the calling thread owns the lock on "
1639 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1640 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1641 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1642 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1643 "with a call to @code{unlock-mutex}. Only the last call to "
1644 "@code{unlock-mutex} will actually unlock the mutex. ")
1645 #define FUNC_NAME s_scm_unlock_mutex_timed
1647 scm_t_timespec cwaittime
, *waittime
= NULL
;
1649 SCM_VALIDATE_MUTEX (1, mx
);
1650 if (! (SCM_UNBNDP (cond
)))
1652 SCM_VALIDATE_CONDVAR (2, cond
);
1654 if (! (SCM_UNBNDP (timeout
)))
1656 to_timespec (timeout
, &cwaittime
);
1657 waittime
= &cwaittime
;
1661 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1665 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1667 "Return @code{#t} if @var{obj} is a mutex.")
1668 #define FUNC_NAME s_scm_mutex_p
1670 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1674 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1676 "Return the thread owning @var{mx}, or @code{#f}.")
1677 #define FUNC_NAME s_scm_mutex_owner
1680 fat_mutex
*m
= NULL
;
1682 SCM_VALIDATE_MUTEX (1, mx
);
1683 m
= SCM_MUTEX_DATA (mx
);
1684 scm_i_pthread_mutex_lock (&m
->lock
);
1686 scm_i_pthread_mutex_unlock (&m
->lock
);
1692 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1694 "Return the lock level of mutex @var{mx}.")
1695 #define FUNC_NAME s_scm_mutex_level
1697 SCM_VALIDATE_MUTEX (1, mx
);
1698 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1702 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1704 "Returns @code{#t} if the mutex @var{mx} is locked.")
1705 #define FUNC_NAME s_scm_mutex_locked_p
1707 SCM_VALIDATE_MUTEX (1, mx
);
1708 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1713 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1715 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1716 scm_puts ("#<condition-variable ", port
);
1717 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1718 scm_puts (">", port
);
1722 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1724 "Make a new condition variable.")
1725 #define FUNC_NAME s_scm_make_condition_variable
1730 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1731 c
->waiting
= SCM_EOL
;
1732 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1733 c
->waiting
= make_queue ();
1738 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1739 (SCM cv
, SCM mx
, SCM t
),
1740 "Wait until @var{cond-var} has been signalled. While waiting, "
1741 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1742 "is locked again when this function returns. When @var{time} is given, "
1743 "it specifies a point in time where the waiting should be aborted. It "
1744 "can be either a integer as returned by @code{current-time} or a pair "
1745 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1746 "mutex is locked and @code{#f} is returned. When the condition "
1747 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1749 #define FUNC_NAME s_scm_timed_wait_condition_variable
1751 scm_t_timespec waittime
, *waitptr
= NULL
;
1753 SCM_VALIDATE_CONDVAR (1, cv
);
1754 SCM_VALIDATE_MUTEX (2, mx
);
1756 if (!SCM_UNBNDP (t
))
1758 to_timespec (t
, &waittime
);
1759 waitptr
= &waittime
;
1762 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1767 fat_cond_signal (fat_cond
*c
)
1769 unblock_from_queue (c
->waiting
);
1772 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1774 "Wake up one thread that is waiting for @var{cv}")
1775 #define FUNC_NAME s_scm_signal_condition_variable
1777 SCM_VALIDATE_CONDVAR (1, cv
);
1778 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1784 fat_cond_broadcast (fat_cond
*c
)
1786 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1790 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1792 "Wake up all threads that are waiting for @var{cv}. ")
1793 #define FUNC_NAME s_scm_broadcast_condition_variable
1795 SCM_VALIDATE_CONDVAR (1, cv
);
1796 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1801 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1803 "Return @code{#t} if @var{obj} is a condition variable.")
1804 #define FUNC_NAME s_scm_condition_variable_p
1806 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1817 SELECT_TYPE
*read_fds
;
1818 SELECT_TYPE
*write_fds
;
1819 SELECT_TYPE
*except_fds
;
1820 struct timeval
*timeout
;
1827 do_std_select (void *args
)
1829 struct select_args
*select_args
;
1831 select_args
= (struct select_args
*) args
;
1833 select_args
->result
=
1834 select (select_args
->nfds
,
1835 select_args
->read_fds
, select_args
->write_fds
,
1836 select_args
->except_fds
, select_args
->timeout
);
1837 select_args
->errno_value
= errno
;
1843 scm_std_select (int nfds
,
1844 SELECT_TYPE
*readfds
,
1845 SELECT_TYPE
*writefds
,
1846 SELECT_TYPE
*exceptfds
,
1847 struct timeval
*timeout
)
1850 int res
, eno
, wakeup_fd
;
1851 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1852 struct select_args args
;
1854 if (readfds
== NULL
)
1856 FD_ZERO (&my_readfds
);
1857 readfds
= &my_readfds
;
1860 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1863 wakeup_fd
= t
->sleep_pipe
[0];
1864 FD_SET (wakeup_fd
, readfds
);
1865 if (wakeup_fd
>= nfds
)
1869 args
.read_fds
= readfds
;
1870 args
.write_fds
= writefds
;
1871 args
.except_fds
= exceptfds
;
1872 args
.timeout
= timeout
;
1874 /* Explicitly cooperate with the GC. */
1875 scm_without_guile (do_std_select
, &args
);
1878 eno
= args
.errno_value
;
1881 scm_i_reset_sleep (t
);
1883 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1886 full_read (wakeup_fd
, &dummy
, 1);
1888 FD_CLR (wakeup_fd
, readfds
);
1900 /* Convenience API for blocking while in guile mode. */
1902 #if SCM_USE_PTHREAD_THREADS
1904 /* It seems reasonable to not run procedures related to mutex and condition
1905 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1906 without it, and (ii) the only potential gain would be GC latency. See
1907 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1908 for a discussion of the pros and cons. */
1911 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1913 int res
= scm_i_pthread_mutex_lock (mutex
);
1918 do_unlock (void *data
)
1920 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1924 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1926 scm_i_scm_pthread_mutex_lock (mutex
);
1927 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1931 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1934 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1936 t
->held_mutex
= mutex
;
1937 res
= scm_i_pthread_cond_wait (cond
, mutex
);
1938 t
->held_mutex
= NULL
;
1944 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1945 scm_i_pthread_mutex_t
*mutex
,
1946 const scm_t_timespec
*wt
)
1949 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1951 t
->held_mutex
= mutex
;
1952 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1953 t
->held_mutex
= NULL
;
1961 scm_std_usleep (unsigned long usecs
)
1964 tv
.tv_usec
= usecs
% 1000000;
1965 tv
.tv_sec
= usecs
/ 1000000;
1966 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1967 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
1971 scm_std_sleep (unsigned int secs
)
1976 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1982 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1984 "Return the thread that called this function.")
1985 #define FUNC_NAME s_scm_current_thread
1987 return SCM_I_CURRENT_THREAD
->handle
;
1992 scm_c_make_list (size_t n
, SCM fill
)
1996 res
= scm_cons (fill
, res
);
2000 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
2002 "Return a list of all threads.")
2003 #define FUNC_NAME s_scm_all_threads
2005 /* We can not allocate while holding the thread_admin_mutex because
2006 of the way GC is done.
2008 int n
= thread_count
;
2010 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
2012 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
2014 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
2016 if (t
!= scm_i_signal_delivery_thread
)
2018 SCM_SETCAR (*l
, t
->handle
);
2019 l
= SCM_CDRLOC (*l
);
2024 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
2029 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
2031 "Return @code{#t} iff @var{thread} has exited.\n")
2032 #define FUNC_NAME s_scm_thread_exited_p
2034 return scm_from_bool (scm_c_thread_exited_p (thread
));
2039 scm_c_thread_exited_p (SCM thread
)
2040 #define FUNC_NAME s_scm_thread_exited_p
2043 SCM_VALIDATE_THREAD (1, thread
);
2044 t
= SCM_I_THREAD_DATA (thread
);
2049 SCM_DEFINE (scm_total_processor_count
, "total-processor-count", 0, 0, 0,
2051 "Return the total number of processors of the machine, which\n"
2052 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2053 "thread execution unit, which can be either:\n\n"
2055 "@item an execution core in a (possibly multi-core) chip, in a\n"
2056 " (possibly multi- chip) module, in a single computer, or\n"
2057 "@item a thread execution unit inside a core in the case of\n"
2058 " @dfn{hyper-threaded} CPUs.\n"
2060 "Which of the two definitions is used, is unspecified.\n")
2061 #define FUNC_NAME s_scm_total_processor_count
2063 return scm_from_ulong (num_processors (NPROC_ALL
));
2067 SCM_DEFINE (scm_current_processor_count
, "current-processor-count", 0, 0, 0,
2069 "Like @code{total-processor-count}, but return the number of\n"
2070 "processors available to the current process. See\n"
2071 "@code{setaffinity} and @code{getaffinity} for more\n"
2073 #define FUNC_NAME s_scm_current_processor_count
2075 return scm_from_ulong (num_processors (NPROC_CURRENT
));
2082 static scm_i_pthread_cond_t wake_up_cond
;
2083 static int threads_initialized_p
= 0;
2086 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2088 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
2090 static SCM dynwind_critical_section_mutex
;
2093 scm_dynwind_critical_section (SCM mutex
)
2095 if (scm_is_false (mutex
))
2096 mutex
= dynwind_critical_section_mutex
;
2097 scm_dynwind_lock_mutex (mutex
);
2098 scm_dynwind_block_asyncs ();
2101 /*** Initialization */
2103 scm_i_pthread_mutex_t scm_i_misc_mutex
;
2105 #if SCM_USE_PTHREAD_THREADS
2106 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
2110 scm_threads_prehistory (void *base
)
2112 #if SCM_USE_PTHREAD_THREADS
2113 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2114 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2115 PTHREAD_MUTEX_RECURSIVE
);
2118 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2119 scm_i_pthread_mutexattr_recursive
);
2120 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2121 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
2123 guilify_self_1 ((struct GC_stack_base
*) base
);
2126 scm_t_bits scm_tc16_thread
;
2127 scm_t_bits scm_tc16_mutex
;
2128 scm_t_bits scm_tc16_condvar
;
2133 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2134 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2136 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2137 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2138 scm_set_smob_free (scm_tc16_mutex
, fat_mutex_free
);
2140 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2142 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
2144 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2145 guilify_self_2 (SCM_BOOL_F
);
2146 threads_initialized_p
= 1;
2148 dynwind_critical_section_mutex
= scm_make_recursive_mutex ();
2152 scm_init_threads_default_dynamic_state ()
2154 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2155 scm_i_default_dynamic_state
= state
;
2159 scm_init_thread_procs ()
2161 #include "libguile/threads.x"
/* IA64-specific things.  */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* On HP-UX, find the base of the register backing store by scanning the
   process's memory regions for the RSE stack.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* On GNU/Linux, glibc exports the backing-store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */