1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
44 #include "libguile/validate.h"
45 #include "libguile/root.h"
46 #include "libguile/eval.h"
47 #include "libguile/async.h"
48 #include "libguile/ports.h"
49 #include "libguile/threads.h"
50 #include "libguile/dynwind.h"
51 #include "libguile/iselect.h"
52 #include "libguile/fluids.h"
53 #include "libguile/continuations.h"
54 #include "libguile/gc.h"
55 #include "libguile/init.h"
56 #include "libguile/scmsigs.h"
57 #include "libguile/strings.h"
58 #include "libguile/weaks.h"
62 # define ETIMEDOUT WSAETIMEDOUT
66 # define pipe(fd) _pipe (fd, 256, O_BINARY)
67 #endif /* __MINGW32__ */
69 #include <full-read.h>
74 /* First some libgc shims. */
76 /* Make sure GC_fn_type is defined; it is missing from the public
77 headers of GC 7.1 and earlier. */
78 #ifndef HAVE_GC_FN_TYPE
79 typedef void * (* GC_fn_type
) (void *);
87 #ifndef GC_UNIMPLEMENTED
88 #define GC_UNIMPLEMENTED 3
91 /* Likewise struct GC_stack_base is missing before 7.1. */
92 #ifndef HAVE_GC_STACK_BASE
93 struct GC_stack_base
{
94 void * mem_base
; /* Base of memory stack. */
96 void * reg_base
; /* Base of separate register stack. */
101 GC_register_my_thread (struct GC_stack_base
*stack_base
)
103 return GC_UNIMPLEMENTED
;
107 GC_unregister_my_thread ()
111 #if !SCM_USE_PTHREAD_THREADS
112 /* No threads; we can just use GC_stackbottom. */
114 get_thread_stack_base ()
116 return GC_stackbottom
;
119 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
120 && defined PTHREAD_ATTR_GETSTACK_WORKS
121 /* This method for GNU/Linux and perhaps some other systems.
122 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
123 available on them. */
125 get_thread_stack_base ()
131 pthread_getattr_np (pthread_self (), &attr
);
132 pthread_attr_getstack (&attr
, &start
, &size
);
133 end
= (char *)start
+ size
;
135 #if SCM_STACK_GROWS_UP
142 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
143 /* This method for MacOS X.
144 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
145 but as of 2006 there's nothing obvious at apple.com. */
147 get_thread_stack_base ()
149 return pthread_get_stackaddr_np (pthread_self ());
153 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
157 GC_get_stack_base (struct GC_stack_base
*stack_base
)
159 stack_base
->mem_base
= get_thread_stack_base ();
161 /* Calculate and store off the base of this thread's register
162 backing store (RBS). Unfortunately our implementation(s) of
163 scm_ia64_register_backing_store_base are only reliable for the
164 main thread. For other threads, therefore, find out the current
165 top of the RBS, and use that as a maximum. */
166 stack_base
->reg_base
= scm_ia64_register_backing_store_base ();
171 bsp
= scm_ia64_ar_bsp (&ctx
);
172 if (stack_base
->reg_base
> bsp
)
173 stack_base
->reg_base
= bsp
;
180 GC_call_with_stack_base(void * (*fn
) (struct GC_stack_base
*, void*), void *arg
)
182 struct GC_stack_base stack_base
;
184 stack_base
.mem_base
= (void*)&stack_base
;
186 /* FIXME: Untested. */
190 stack_base
.reg_base
= scm_ia64_ar_bsp (&ctx
);
194 return fn (&stack_base
, arg
);
196 #endif /* HAVE_GC_STACK_BASE */
199 /* Now define with_gc_active and with_gc_inactive. */
201 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
203 /* We have a sufficiently new libgc (7.2 or newer). */
206 with_gc_inactive (GC_fn_type func
, void *data
)
208 return GC_do_blocking (func
, data
);
212 with_gc_active (GC_fn_type func
, void *data
)
214 return GC_call_with_gc_active (func
, data
);
219 /* libgc not new enough, so never actually deactivate GC.
221 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
222 GC_call_with_gc_active. */
225 with_gc_inactive (GC_fn_type func
, void *data
)
231 with_gc_active (GC_fn_type func
, void *data
)
236 #endif /* HAVE_GC_DO_BLOCKING */
241 to_timespec (SCM t
, scm_t_timespec
*waittime
)
245 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
246 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
250 double time
= scm_to_double (t
);
251 double sec
= scm_c_truncate (time
);
253 waittime
->tv_sec
= (long) sec
;
254 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
261 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
262 the risk of false references leading to unbounded retained space as
263 described in "Bounding Space Usage of Conservative Garbage Collectors",
266 /* Make an empty queue data structure.
271 return scm_cons (SCM_EOL
, SCM_EOL
);
274 /* Put T at the back of Q and return a handle that can be used with
275 remqueue to remove T from Q again.
278 enqueue (SCM q
, SCM t
)
280 SCM c
= scm_cons (t
, SCM_EOL
);
281 SCM_CRITICAL_SECTION_START
;
282 if (scm_is_null (SCM_CDR (q
)))
285 SCM_SETCDR (SCM_CAR (q
), c
);
287 SCM_CRITICAL_SECTION_END
;
291 /* Remove the element that the handle C refers to from the queue Q. C
292 must have been returned from a call to enqueue. The return value
293 is zero when the element referred to by C has already been removed.
294 Otherwise, 1 is returned.
297 remqueue (SCM q
, SCM c
)
300 SCM_CRITICAL_SECTION_START
;
301 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
303 if (scm_is_eq (p
, c
))
305 if (scm_is_eq (c
, SCM_CAR (q
)))
306 SCM_SETCAR (q
, SCM_CDR (c
));
307 SCM_SETCDR (prev
, SCM_CDR (c
));
310 SCM_SETCDR (c
, SCM_EOL
);
312 SCM_CRITICAL_SECTION_END
;
317 SCM_CRITICAL_SECTION_END
;
321 /* Remove the front-most element from the queue Q and return it.
322 Return SCM_BOOL_F when Q is empty.
328 SCM_CRITICAL_SECTION_START
;
332 SCM_CRITICAL_SECTION_END
;
337 SCM_SETCDR (q
, SCM_CDR (c
));
338 if (scm_is_null (SCM_CDR (q
)))
339 SCM_SETCAR (q
, SCM_EOL
);
340 SCM_CRITICAL_SECTION_END
;
343 SCM_SETCDR (c
, SCM_EOL
);
349 /*** Thread smob routines */
353 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
355 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
356 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
357 the struct case, hence we go via a union, and extract according to the
358 size of pthread_t. */
366 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
367 scm_i_pthread_t p
= t
->pthread
;
370 if (sizeof (p
) == sizeof (unsigned short))
372 else if (sizeof (p
) == sizeof (unsigned int))
374 else if (sizeof (p
) == sizeof (unsigned long))
379 scm_puts ("#<thread ", port
);
380 scm_uintprint (id
, 10, port
);
381 scm_puts (" (", port
);
382 scm_uintprint ((scm_t_bits
)t
, 16, port
);
383 scm_puts (")>", port
);
388 /*** Blocking on queues. */
390 /* See also scm_i_queue_async_cell for how such a block is
394 /* Put the current thread on QUEUE and go to sleep, waiting for it to
395 be woken up by a call to 'unblock_from_queue', or to be
396 interrupted. Upon return of this function, the current thread is
397 no longer on QUEUE, even when the sleep has been interrupted.
399 The caller of block_self must hold MUTEX. It will be atomically
400 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
402 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
405 When WAITTIME is not NULL, the sleep will be aborted at that time.
407 The return value of block_self is an errno value. It will be zero
408 when the sleep has been successfully completed by a call to
409 unblock_from_queue, EINTR when it has been interrupted by the
410 delivery of a system async, and ETIMEDOUT when the timeout has
413 The system asyncs themselves are not executed by block_self.
416 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
417 const scm_t_timespec
*waittime
)
419 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
423 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
428 q_handle
= enqueue (queue
, t
->handle
);
429 if (waittime
== NULL
)
430 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
432 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
434 /* When we are still on QUEUE, we have been interrupted. We
435 report this only when no other error (such as a timeout) has
438 if (remqueue (queue
, q_handle
) && err
== 0)
441 scm_i_reset_sleep (t
);
447 /* Wake up the first thread on QUEUE, if any. The awoken thread is
448 returned, or #f if the queue was empty.
451 unblock_from_queue (SCM queue
)
453 SCM thread
= dequeue (queue
);
454 if (scm_is_true (thread
))
455 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
460 /* Getting into and out of guile mode.
463 /* Key used to attach a cleanup handler to a given thread. Also, if
464 thread-local storage is unavailable, this key is used to retrieve the
465 current thread with `pthread_getspecific ()'. */
466 scm_i_pthread_key_t scm_i_thread_key
;
469 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
471 /* When thread-local storage (TLS) is available, a pointer to the
472 current-thread object is kept in TLS. Note that storing the thread-object
473 itself in TLS (rather than a pointer to some malloc'd memory) is not
474 possible since thread objects may live longer than the actual thread they
476 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
478 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
481 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
482 static scm_i_thread
*all_threads
= NULL
;
483 static int thread_count
;
485 static SCM scm_i_default_dynamic_state
;
487 /* Perform first stage of thread initialisation, in non-guile mode.
490 guilify_self_1 (struct GC_stack_base
*base
)
494 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
495 before allocating anything in this thread, because allocation could
496 cause GC to run, and GC could cause finalizers, which could invoke
497 Scheme functions, which need the current thread to be set. */
499 t
.pthread
= scm_i_pthread_self ();
500 t
.handle
= SCM_BOOL_F
;
501 t
.result
= SCM_BOOL_F
;
502 t
.cleanup_handler
= SCM_BOOL_F
;
505 t
.join_queue
= SCM_EOL
;
506 t
.dynamic_state
= SCM_BOOL_F
;
507 t
.dynwinds
= SCM_EOL
;
508 t
.active_asyncs
= SCM_EOL
;
510 t
.pending_asyncs
= 1;
511 t
.critical_section_level
= 0;
512 t
.base
= base
->mem_base
;
514 t
.register_backing_store_base
= base
->reg
-base
;
516 t
.continuation_root
= SCM_EOL
;
517 t
.continuation_base
= t
.base
;
518 scm_i_pthread_cond_init (&t
.sleep_cond
, NULL
);
519 t
.sleep_mutex
= NULL
;
520 t
.sleep_object
= SCM_BOOL_F
;
523 if (pipe (t
.sleep_pipe
) != 0)
524 /* FIXME: Error conditions during the initialization phase are handled
525 gracelessly since public functions such as `scm_init_guile ()'
526 currently have type `void'. */
529 scm_i_pthread_mutex_init (&t
.admin_mutex
, NULL
);
530 t
.current_mark_stack_ptr
= NULL
;
531 t
.current_mark_stack_limit
= NULL
;
536 /* The switcheroo. */
538 scm_i_thread
*t_ptr
= &t
;
541 t_ptr
= GC_malloc (sizeof (scm_i_thread
));
542 memcpy (t_ptr
, &t
, sizeof t
);
544 scm_i_pthread_setspecific (scm_i_thread_key
, t_ptr
);
546 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
547 /* Cache the current thread in TLS for faster lookup. */
548 scm_i_current_thread
= t_ptr
;
551 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
552 t_ptr
->next_thread
= all_threads
;
555 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
561 /* Perform second stage of thread initialisation, in guile mode.
564 guilify_self_2 (SCM parent
)
566 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
570 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
572 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
573 t
->continuation_base
= t
->base
;
576 if (scm_is_true (parent
))
577 t
->dynamic_state
= scm_make_dynamic_state (parent
);
579 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
581 t
->join_queue
= make_queue ();
588 /* We implement our own mutex type since we want them to be 'fair', we
589 want to do fancy things while waiting for them (like running
590 asyncs) and we might want to add things that are nice for
595 scm_i_pthread_mutex_t lock
;
597 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
599 int recursive
; /* allow recursive locking? */
600 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
601 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
602 owned by the current thread? */
604 SCM waiting
; /* the threads waiting for this mutex. */
607 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
608 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
611 call_cleanup (void *data
)
614 return scm_call_0 (*proc_p
);
617 /* Perform thread tear-down, in guile mode.
620 do_thread_exit (void *v
)
622 scm_i_thread
*t
= (scm_i_thread
*) v
;
624 /* Ensure the signal handling thread has been launched, because we might be
625 shutting it down. This needs to be done in Guile mode. */
626 scm_i_ensure_signal_delivery_thread ();
628 if (!scm_is_false (t
->cleanup_handler
))
630 SCM ptr
= t
->cleanup_handler
;
632 t
->cleanup_handler
= SCM_BOOL_F
;
633 t
->result
= scm_internal_catch (SCM_BOOL_T
,
635 scm_handle_by_message_noexit
, NULL
);
638 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
641 close (t
->sleep_pipe
[0]);
642 close (t
->sleep_pipe
[1]);
643 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
646 while (!scm_is_null (t
->mutexes
))
648 SCM mutex
= SCM_WEAK_PAIR_CAR (t
->mutexes
);
650 if (!SCM_UNBNDP (mutex
))
652 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
654 scm_i_pthread_mutex_lock (&m
->lock
);
656 /* Since MUTEX is in `t->mutexes', T must be its owner. */
657 assert (scm_is_eq (m
->owner
, t
->handle
));
659 unblock_from_queue (m
->waiting
);
661 scm_i_pthread_mutex_unlock (&m
->lock
);
664 t
->mutexes
= SCM_WEAK_PAIR_CDR (t
->mutexes
);
667 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
673 do_thread_exit_trampoline (struct GC_stack_base
*sb
, void *v
)
675 /* Won't hurt if we are already registered. */
676 #if SCM_USE_PTHREAD_THREADS
677 GC_register_my_thread (sb
);
680 return scm_with_guile (do_thread_exit
, v
);
684 on_thread_exit (void *v
)
686 /* This handler is executed in non-guile mode. */
687 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
689 /* If this thread was cancelled while doing a cond wait, it will
690 still have a mutex locked, so we unlock it here. */
693 scm_i_pthread_mutex_unlock (t
->held_mutex
);
694 t
->held_mutex
= NULL
;
697 /* Reinstate the current thread for purposes of scm_with_guile
698 guile-mode cleanup handlers. Only really needed in the non-TLS
699 case but it doesn't hurt to be consistent. */
700 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
702 /* Scheme-level thread finalizers and other cleanup needs to happen in
704 GC_call_with_stack_base (do_thread_exit_trampoline
, t
);
706 /* Removing ourself from the list of all threads needs to happen in
707 non-guile mode since all SCM values on our stack become
708 unprotected once we are no longer in the list. */
709 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
710 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
713 *tp
= t
->next_thread
;
716 t
->next_thread
= NULL
;
722 /* If there's only one other thread, it could be the signal delivery
723 thread, so we need to notify it to shut down by closing its read pipe.
724 If it's not the signal delivery thread, then closing the read pipe isn't
726 if (thread_count
<= 1)
727 scm_i_close_signal_pipe ();
729 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
731 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
733 #if SCM_USE_PTHREAD_THREADS
734 GC_unregister_my_thread ();
738 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
741 init_thread_key (void)
743 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
746 /* Perform any initializations necessary to make the current thread
747 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
750 BASE is the stack base to use with GC.
752 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
753 which case the default dynamic state is used.
755 Returns zero when the thread was known to guile already; otherwise
758 Note that it could be the case that the thread was known
759 to Guile, but not in guile mode (because we are within a
760 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
761 be sure. New threads are put into guile mode implicitly. */
764 scm_i_init_thread_for_guile (struct GC_stack_base
*base
, SCM parent
)
766 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
768 if (SCM_I_CURRENT_THREAD
)
770 /* Thread is already known to Guile.
776 /* This thread has not been guilified yet.
779 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
780 if (scm_initialized_p
== 0)
782 /* First thread ever to enter Guile. Run the full
785 scm_i_init_guile (base
);
787 #if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
788 /* Allow other threads to come in later. */
789 GC_allow_register_threads ();
792 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
796 /* Guile is already initialized, but this thread enters it for
797 the first time. Only initialize this thread.
799 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
801 /* Register this thread with libgc. */
802 #if SCM_USE_PTHREAD_THREADS
803 GC_register_my_thread (base
);
806 guilify_self_1 (base
);
807 guilify_self_2 (parent
);
816 struct GC_stack_base stack_base
;
818 if (GC_get_stack_base (&stack_base
) == GC_SUCCESS
)
819 scm_i_init_thread_for_guile (&stack_base
,
820 scm_i_default_dynamic_state
);
823 fprintf (stderr
, "Failed to get stack base for current thread.\n");
828 SCM_UNUSED
static void
829 scm_leave_guile_cleanup (void *x
)
831 on_thread_exit (SCM_I_CURRENT_THREAD
);
834 struct with_guile_args
842 with_guile_trampoline (void *data
)
844 struct with_guile_args
*args
= data
;
846 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
850 with_guile_and_parent (struct GC_stack_base
*base
, void *data
)
855 struct with_guile_args
*args
= data
;
857 new_thread
= scm_i_init_thread_for_guile (base
, args
->parent
);
858 t
= SCM_I_CURRENT_THREAD
;
861 /* We are in Guile mode. */
862 assert (t
->guile_mode
);
864 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
866 /* Leave Guile mode. */
869 else if (t
->guile_mode
)
871 /* Already in Guile mode. */
872 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
876 /* We are not in Guile mode, either because we are not within a
877 scm_with_guile, or because we are within a scm_without_guile.
879 This call to scm_with_guile() could happen from anywhere on the
880 stack, and in particular lower on the stack than when it was
881 when this thread was first guilified. Thus, `base' must be
883 #if SCM_STACK_GROWS_UP
884 if (SCM_STACK_PTR (base
->mem_base
) < t
->base
)
885 t
->base
= SCM_STACK_PTR (base
->mem_base
);
887 if (SCM_STACK_PTR (base
->mem_base
) > t
->base
)
888 t
->base
= SCM_STACK_PTR (base
->mem_base
);
892 res
= with_gc_active (with_guile_trampoline
, args
);
899 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
901 struct with_guile_args args
;
905 args
.parent
= parent
;
907 return GC_call_with_stack_base (with_guile_and_parent
, &args
);
911 scm_with_guile (void *(*func
)(void *), void *data
)
913 return scm_i_with_guile_and_parent (func
, data
,
914 scm_i_default_dynamic_state
);
918 scm_without_guile (void *(*func
)(void *), void *data
)
921 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
925 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
926 result
= with_gc_inactive (func
, data
);
927 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
930 /* Otherwise we're not in guile mode, so nothing to do. */
931 result
= func (data
);
937 /*** Thread creation */
944 scm_i_pthread_mutex_t mutex
;
945 scm_i_pthread_cond_t cond
;
949 really_launch (void *d
)
951 launch_data
*data
= (launch_data
*)d
;
952 SCM thunk
= data
->thunk
, handler
= data
->handler
;
955 t
= SCM_I_CURRENT_THREAD
;
957 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
958 data
->thread
= scm_current_thread ();
959 scm_i_pthread_cond_signal (&data
->cond
);
960 scm_i_pthread_mutex_unlock (&data
->mutex
);
962 if (SCM_UNBNDP (handler
))
963 t
->result
= scm_call_0 (thunk
);
965 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
971 launch_thread (void *d
)
973 launch_data
*data
= (launch_data
*)d
;
974 scm_i_pthread_detach (scm_i_pthread_self ());
975 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
979 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
980 (SCM thunk
, SCM handler
),
981 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
982 "returning a new thread object representing the thread. The procedure\n"
983 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
985 "When @var{handler} is specified, then @var{thunk} is called from\n"
986 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
987 "handler. This catch is established inside the continuation barrier.\n"
989 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
990 "the @emph{exit value} of the thread and the thread is terminated.")
991 #define FUNC_NAME s_scm_call_with_new_thread
997 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
998 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
999 handler
, SCM_ARG2
, FUNC_NAME
);
1001 data
.parent
= scm_current_dynamic_state ();
1003 data
.handler
= handler
;
1004 data
.thread
= SCM_BOOL_F
;
1005 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1006 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1008 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1009 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
1012 scm_i_pthread_mutex_unlock (&data
.mutex
);
1014 scm_syserror (NULL
);
1016 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1017 scm_i_pthread_mutex_unlock (&data
.mutex
);
1025 scm_t_catch_body body
;
1027 scm_t_catch_handler handler
;
1030 scm_i_pthread_mutex_t mutex
;
1031 scm_i_pthread_cond_t cond
;
1035 really_spawn (void *d
)
1037 spawn_data
*data
= (spawn_data
*)d
;
1038 scm_t_catch_body body
= data
->body
;
1039 void *body_data
= data
->body_data
;
1040 scm_t_catch_handler handler
= data
->handler
;
1041 void *handler_data
= data
->handler_data
;
1042 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1044 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
1045 data
->thread
= scm_current_thread ();
1046 scm_i_pthread_cond_signal (&data
->cond
);
1047 scm_i_pthread_mutex_unlock (&data
->mutex
);
1049 if (handler
== NULL
)
1050 t
->result
= body (body_data
);
1052 t
->result
= scm_internal_catch (SCM_BOOL_T
,
1054 handler
, handler_data
);
1060 spawn_thread (void *d
)
1062 spawn_data
*data
= (spawn_data
*)d
;
1063 scm_i_pthread_detach (scm_i_pthread_self ());
1064 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
1069 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
1070 scm_t_catch_handler handler
, void *handler_data
)
1076 data
.parent
= scm_current_dynamic_state ();
1078 data
.body_data
= body_data
;
1079 data
.handler
= handler
;
1080 data
.handler_data
= handler_data
;
1081 data
.thread
= SCM_BOOL_F
;
1082 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1083 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1085 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1086 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1089 scm_i_pthread_mutex_unlock (&data
.mutex
);
1091 scm_syserror (NULL
);
1093 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1094 scm_i_pthread_mutex_unlock (&data
.mutex
);
1099 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1101 "Move the calling thread to the end of the scheduling queue.")
1102 #define FUNC_NAME s_scm_yield
1104 return scm_from_bool (scm_i_sched_yield ());
1108 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1110 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1111 "cannot be the current thread, and if @var{thread} has already terminated or "
1112 "been signaled to terminate, this function is a no-op.")
1113 #define FUNC_NAME s_scm_cancel_thread
1115 scm_i_thread
*t
= NULL
;
1117 SCM_VALIDATE_THREAD (1, thread
);
1118 t
= SCM_I_THREAD_DATA (thread
);
1119 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1123 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1124 scm_i_pthread_cancel (t
->pthread
);
1127 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1129 return SCM_UNSPECIFIED
;
1133 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1134 (SCM thread
, SCM proc
),
1135 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1136 "This handler will be called when the thread exits.")
1137 #define FUNC_NAME s_scm_set_thread_cleanup_x
1141 SCM_VALIDATE_THREAD (1, thread
);
1142 if (!scm_is_false (proc
))
1143 SCM_VALIDATE_THUNK (2, proc
);
1145 t
= SCM_I_THREAD_DATA (thread
);
1146 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1148 if (!(t
->exited
|| t
->canceled
))
1149 t
->cleanup_handler
= proc
;
1151 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1153 return SCM_UNSPECIFIED
;
1157 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1159 "Return the cleanup handler installed for the thread @var{thread}.")
1160 #define FUNC_NAME s_scm_thread_cleanup
1165 SCM_VALIDATE_THREAD (1, thread
);
1167 t
= SCM_I_THREAD_DATA (thread
);
1168 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1169 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1170 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1176 SCM
scm_join_thread (SCM thread
)
1178 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1181 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1182 (SCM thread
, SCM timeout
, SCM timeoutval
),
1183 "Suspend execution of the calling thread until the target @var{thread} "
1184 "terminates, unless the target @var{thread} has already terminated. ")
1185 #define FUNC_NAME s_scm_join_thread_timed
1188 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1189 SCM res
= SCM_BOOL_F
;
1191 if (! (SCM_UNBNDP (timeoutval
)))
1194 SCM_VALIDATE_THREAD (1, thread
);
1195 if (scm_is_eq (scm_current_thread (), thread
))
1196 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1198 t
= SCM_I_THREAD_DATA (thread
);
1199 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1201 if (! SCM_UNBNDP (timeout
))
1203 to_timespec (timeout
, &ctimeout
);
1204 timeout_ptr
= &ctimeout
;
1213 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1223 else if (err
== ETIMEDOUT
)
1226 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1228 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1230 /* Check for exit again, since we just released and
1231 reacquired the admin mutex, before the next block_self
1232 call (which would block forever if t has already
1242 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1248 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1250 "Return @code{#t} if @var{obj} is a thread.")
1251 #define FUNC_NAME s_scm_thread_p
1253 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1259 fat_mutex_free (SCM mx
)
1261 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1262 scm_i_pthread_mutex_destroy (&m
->lock
);
1267 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1269 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1270 scm_puts ("#<mutex ", port
);
1271 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1272 scm_puts (">", port
);
1277 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1282 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1283 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
1284 m
->owner
= SCM_BOOL_F
;
1287 m
->recursive
= recursive
;
1288 m
->unchecked_unlock
= unchecked_unlock
;
1289 m
->allow_external_unlock
= external_unlock
;
1291 m
->waiting
= SCM_EOL
;
1292 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1293 m
->waiting
= make_queue ();
1297 SCM
scm_make_mutex (void)
1299 return scm_make_mutex_with_flags (SCM_EOL
);
1302 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1303 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1304 SCM_SYMBOL (recursive_sym
, "recursive");
1306 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1308 "Create a new mutex. ")
1309 #define FUNC_NAME s_scm_make_mutex_with_flags
1311 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1314 while (! scm_is_null (ptr
))
1316 SCM flag
= SCM_CAR (ptr
);
1317 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1318 unchecked_unlock
= 1;
1319 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1320 external_unlock
= 1;
1321 else if (scm_is_eq (flag
, recursive_sym
))
1324 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1325 ptr
= SCM_CDR (ptr
);
1327 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1331 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1333 "Create a new recursive mutex. ")
1334 #define FUNC_NAME s_scm_make_recursive_mutex
1336 return make_fat_mutex (1, 0, 0);
1340 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1343 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1345 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1347 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1348 SCM err
= SCM_BOOL_F
;
1350 struct timeval current_time
;
1352 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1358 m
->owner
= new_owner
;
1361 if (SCM_I_IS_THREAD (new_owner
))
1363 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1364 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1366 /* Only keep a weak reference to MUTEX so that it's not
1367 retained when not referenced elsewhere (bug #27450).
1368 The weak pair itself is eventually removed when MUTEX
1369 is unlocked. Note that `t->mutexes' lists mutexes
1370 currently held by T, so it should be small. */
1371 t
->mutexes
= scm_weak_car_pair (mutex
, t
->mutexes
);
1373 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1378 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1380 m
->owner
= new_owner
;
1381 err
= scm_cons (scm_abandoned_mutex_error_key
,
1382 scm_from_locale_string ("lock obtained on abandoned "
1387 else if (scm_is_eq (m
->owner
, new_owner
))
1396 err
= scm_cons (scm_misc_error_key
,
1397 scm_from_locale_string ("mutex already locked "
1405 if (timeout
!= NULL
)
1407 gettimeofday (¤t_time
, NULL
);
1408 if (current_time
.tv_sec
> timeout
->tv_sec
||
1409 (current_time
.tv_sec
== timeout
->tv_sec
&&
1410 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1416 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1417 scm_i_pthread_mutex_unlock (&m
->lock
);
1419 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1422 scm_i_pthread_mutex_unlock (&m
->lock
);
1426 SCM
scm_lock_mutex (SCM mx
)
1428 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1431 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1432 (SCM m
, SCM timeout
, SCM owner
),
1433 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1434 "blocks until the mutex becomes available. The function returns when "
1435 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1436 "a thread already owns will succeed right away and will not block the "
1437 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1438 #define FUNC_NAME s_scm_lock_mutex_timed
1442 scm_t_timespec cwaittime
, *waittime
= NULL
;
1444 SCM_VALIDATE_MUTEX (1, m
);
1446 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1448 to_timespec (timeout
, &cwaittime
);
1449 waittime
= &cwaittime
;
1452 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1453 if (!scm_is_false (exception
))
1454 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1455 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1460 lock_mutex_return_void (SCM mx
)
1462 (void) scm_lock_mutex (mx
);
1466 unlock_mutex_return_void (SCM mx
)
1468 (void) scm_unlock_mutex (mx
);
1472 scm_dynwind_lock_mutex (SCM mutex
)
1474 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void
, mutex
,
1475 SCM_F_WIND_EXPLICITLY
);
1476 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void
, mutex
,
1477 SCM_F_WIND_EXPLICITLY
);
1480 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1482 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1483 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1484 #define FUNC_NAME s_scm_try_mutex
1488 scm_t_timespec cwaittime
, *waittime
= NULL
;
1490 SCM_VALIDATE_MUTEX (1, mutex
);
1492 to_timespec (scm_from_int(0), &cwaittime
);
1493 waittime
= &cwaittime
;
1495 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1496 if (!scm_is_false (exception
))
1497 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1498 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1502 /*** Fat condition variables */
/* Per-condvar pthread mutex; NOTE(review): no use of this field is
   visible in this excerpt — confirm against the full file.  */
1505 scm_i_pthread_mutex_t lock
;
/* Queue of threads blocked in wait-condition-variable on this condvar.  */
1506 SCM waiting
; /* the threads waiting for this condition. */
/* Smob predicate / payload accessors for condition variables.  */
1509 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1510 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
/* fat_mutex_unlock: release MUTEX; when COND is bound, additionally wait
   on COND (bounded by WAITTIME when non-NULL) and, if RELOCK is nonzero,
   re-acquire MUTEX before returning.  Shared backend of `unlock-mutex'
   and `wait-condition-variable'.  NOTE(review): several original lines
   are elided from this excerpt (loop control, level bookkeeping); the
   fragments below are kept verbatim.  */
1513 fat_mutex_unlock (SCM mutex
, SCM cond
,
1514 const scm_t_timespec
*waittime
, int relock
)
/* Payload of MUTEX and the calling thread.  */
1517 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1519 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1520 int err
= 0, ret
= 0;
1522 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Caller is not the owner: reject unless the mutex was created with
   unchecked-unlock or allow-external-unlock semantics.  */
1526 if (!scm_is_eq (owner
, t
->handle
))
1530 if (!m
->unchecked_unlock
)
1532 scm_i_pthread_mutex_unlock (&m
->lock
);
1533 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1537 else if (!m
->allow_external_unlock
)
1539 scm_i_pthread_mutex_unlock (&m
->lock
);
1540 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
/* COND bound: unlock, then block on the condvar's wait queue.  */
1544 if (! (SCM_UNBNDP (cond
)))
1546 c
= SCM_CONDVAR_DATA (cond
);
1555 /* Change the owner of MUTEX. */
1556 t
->mutexes
= scm_delq_x (mutex
, t
->mutexes
);
1557 m
->owner
= unblock_from_queue (m
->waiting
);
/* Wait on COND while m->lock is held; block_self returns an errno.  */
1562 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1563 scm_i_pthread_mutex_unlock (&m
->lock
);
1570 else if (err
== ETIMEDOUT
)
/* Any error other than a timeout or interruption is fatal.  */
1575 else if (err
!= EINTR
)
1578 scm_syserror (NULL
);
/* RELOCK path: re-acquire MUTEX on behalf of its previous owner.  */
1584 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1592 scm_remember_upto_here_2 (cond
, mutex
);
1594 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* No COND: plain unlock — drop our weak reference and hand the mutex
   to the next waiter (if any).  */
1603 /* Change the owner of MUTEX. */
1604 t
->mutexes
= scm_delq_x (mutex
, t
->mutexes
);
1605 m
->owner
= unblock_from_queue (m
->waiting
);
1608 scm_i_pthread_mutex_unlock (&m
->lock
);
1615 SCM
scm_unlock_mutex (SCM mx
)
1617 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1620 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1621 (SCM mx
, SCM cond
, SCM timeout
),
1622 "Unlocks @var{mutex} if the calling thread owns the lock on "
1623 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1624 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1625 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1626 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1627 "with a call to @code{unlock-mutex}. Only the last call to "
1628 "@code{unlock-mutex} will actually unlock the mutex. ")
1629 #define FUNC_NAME s_scm_unlock_mutex_timed
1631 scm_t_timespec cwaittime
, *waittime
= NULL
;
1633 SCM_VALIDATE_MUTEX (1, mx
);
1634 if (! (SCM_UNBNDP (cond
)))
1636 SCM_VALIDATE_CONDVAR (2, cond
);
1638 if (! (SCM_UNBNDP (timeout
)))
1640 to_timespec (timeout
, &cwaittime
);
1641 waittime
= &cwaittime
;
1645 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1649 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1651 "Return @code{#t} if @var{obj} is a mutex.")
1652 #define FUNC_NAME s_scm_mutex_p
1654 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1658 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1660 "Return the thread owning @var{mx}, or @code{#f}.")
1661 #define FUNC_NAME s_scm_mutex_owner
1664 fat_mutex
*m
= NULL
;
1666 SCM_VALIDATE_MUTEX (1, mx
);
1667 m
= SCM_MUTEX_DATA (mx
);
/* The owner field is read while holding m->lock; the read itself
   (original line 1669) is elided from this excerpt.  */
1668 scm_i_pthread_mutex_lock (&m
->lock
);
1670 scm_i_pthread_mutex_unlock (&m
->lock
);
1676 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1678 "Return the lock level of mutex @var{mx}.")
1679 #define FUNC_NAME s_scm_mutex_level
1681 SCM_VALIDATE_MUTEX (1, mx
);
1682 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1686 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1688 "Returns @code{#t} if the mutex @var{mx} is locked.")
1689 #define FUNC_NAME s_scm_mutex_locked_p
1691 SCM_VALIDATE_MUTEX (1, mx
);
1692 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1697 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1699 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1700 scm_puts ("#<condition-variable ", port
);
1701 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1702 scm_puts (">", port
);
1706 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1708 "Make a new condition variable.")
1709 #define FUNC_NAME s_scm_make_condition_variable
1714 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1715 c
->waiting
= SCM_EOL
;
1716 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1717 c
->waiting
= make_queue ();
1722 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1723 (SCM cv
, SCM mx
, SCM t
),
1724 "Wait until @var{cond-var} has been signalled. While waiting, "
1725 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1726 "is locked again when this function returns. When @var{time} is given, "
1727 "it specifies a point in time where the waiting should be aborted. It "
1728 "can be either a integer as returned by @code{current-time} or a pair "
1729 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1730 "mutex is locked and @code{#f} is returned. When the condition "
1731 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1733 #define FUNC_NAME s_scm_timed_wait_condition_variable
1735 scm_t_timespec waittime
, *waitptr
= NULL
;
1737 SCM_VALIDATE_CONDVAR (1, cv
);
1738 SCM_VALIDATE_MUTEX (2, mx
);
1740 if (!SCM_UNBNDP (t
))
1742 to_timespec (t
, &waittime
);
1743 waitptr
= &waittime
;
1746 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1751 fat_cond_signal (fat_cond
*c
)
1753 unblock_from_queue (c
->waiting
);
1756 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1758 "Wake up one thread that is waiting for @var{cv}")
1759 #define FUNC_NAME s_scm_signal_condition_variable
1761 SCM_VALIDATE_CONDVAR (1, cv
);
1762 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1768 fat_cond_broadcast (fat_cond
*c
)
1770 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1774 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1776 "Wake up all threads that are waiting for @var{cv}. ")
1777 #define FUNC_NAME s_scm_broadcast_condition_variable
1779 SCM_VALIDATE_CONDVAR (1, cv
);
1780 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1785 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1787 "Return @code{#t} if @var{obj} is a condition variable.")
1788 #define FUNC_NAME s_scm_condition_variable_p
1790 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
/* Argument bundle marshalled through the void* parameter of
   do_std_select (run via scm_without_guile).  NOTE(review): the struct
   header and its nfds/result/errno_value members are elided from this
   excerpt.  */
1801 SELECT_TYPE
*read_fds
;
1802 SELECT_TYPE
*write_fds
;
1803 SELECT_TYPE
*except_fds
;
1804 struct timeval
*timeout
;
/* Trampoline run outside guile mode (see scm_without_guile in
   scm_std_select): performs the real select(2) and stashes both the
   result and errno into the select_args bundle.  */
1811 do_std_select (void *args
)
1813 struct select_args
*select_args
;
1815 select_args
= (struct select_args
*) args
;
1817 select_args
->result
=
1818 select (select_args
->nfds
,
1819 select_args
->read_fds
, select_args
->write_fds
,
1820 select_args
->except_fds
, select_args
->timeout
);
/* Capture errno immediately — later calls may clobber it.  */
1821 select_args
->errno_value
= errno
;
/* scm_std_select: select(2) replacement that cooperates with Guile —
   it adds the calling thread's wakeup pipe to the read set so the
   thread can be interrupted, and runs the actual select outside guile
   mode.  NOTE(review): several original lines (declarations, braces,
   error/return paths) are elided from this excerpt; the fragments
   below are kept verbatim.  */
1827 scm_std_select (int nfds
,
1828 SELECT_TYPE
*readfds
,
1829 SELECT_TYPE
*writefds
,
1830 SELECT_TYPE
*exceptfds
,
1831 struct timeval
*timeout
)
1834 int res
, eno
, wakeup_fd
;
1835 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1836 struct select_args args
;
/* Caller passed no read set: substitute a local empty one so the
   wakeup fd can still be watched.  */
1838 if (readfds
== NULL
)
1840 FD_ZERO (&my_readfds
);
1841 readfds
= &my_readfds
;
/* Register the sleep pipe so other threads can interrupt us.  */
1844 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1847 wakeup_fd
= t
->sleep_pipe
[0];
1848 FD_SET (wakeup_fd
, readfds
);
1849 if (wakeup_fd
>= nfds
)
1853 args
.read_fds
= readfds
;
1854 args
.write_fds
= writefds
;
1855 args
.except_fds
= exceptfds
;
1856 args
.timeout
= timeout
;
1858 /* Explicitly cooperate with the GC. */
1859 scm_without_guile (do_std_select
, &args
);
1862 eno
= args
.errno_value
;
1865 scm_i_reset_sleep (t
);
/* The wakeup pipe fired: drain one byte and hide the wakeup fd from
   the caller's view of the read set.  */
1867 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1870 full_read (wakeup_fd
, &dummy
, 1);
1872 FD_CLR (wakeup_fd
, readfds
);
1884 /* Convenience API for blocking while in guile mode. */
1886 #if SCM_USE_PTHREAD_THREADS
1888 /* It seems reasonable to not run procedures related to mutex and condition
1889 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1890 without it, and (ii) the only potential gain would be GC latency. See
1891 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1892 for a discussion of the pros and cons. */
1895 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1897 int res
= scm_i_pthread_mutex_lock (mutex
);
1902 do_unlock (void *data
)
1904 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1908 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1910 scm_i_scm_pthread_mutex_lock (mutex
);
1911 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1915 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1918 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1920 t
->held_mutex
= mutex
;
1921 res
= scm_i_pthread_cond_wait (cond
, mutex
);
1922 t
->held_mutex
= NULL
;
1928 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1929 scm_i_pthread_mutex_t
*mutex
,
1930 const scm_t_timespec
*wt
)
1933 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1935 t
->held_mutex
= mutex
;
1936 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1937 t
->held_mutex
= NULL
;
1945 scm_std_usleep (unsigned long usecs
)
1948 tv
.tv_usec
= usecs
% 1000000;
1949 tv
.tv_sec
= usecs
/ 1000000;
1950 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1951 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
/* Interruptible sleep for SECS seconds, implemented on top of
   scm_std_select.  NOTE(review): the declaration/initialization of `tv'
   and the return statement are elided from this excerpt.  */
1955 scm_std_sleep (unsigned int secs
)
1960 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1966 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1968 "Return the thread that called this function.")
1969 #define FUNC_NAME s_scm_current_thread
1971 return SCM_I_CURRENT_THREAD
->handle
;
/* Build a fresh list of N copies of FILL by repeated consing.
   NOTE(review): the accumulator declaration, loop header and return are
   elided from this excerpt.  */
1976 scm_c_make_list (size_t n
, SCM fill
)
1980 res
= scm_cons (fill
, res
);
1984 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1986 "Return a list of all threads.")
1987 #define FUNC_NAME s_scm_all_threads
1989 /* We can not allocate while holding the thread_admin_mutex because
1990 of the way GC is done.
/* Snapshot the count, pre-allocate the result list, then fill it in
   under the admin mutex.  NOTE(review): the list-truncation and return
   lines are elided from this excerpt.  */
1992 int n
= thread_count
;
1994 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1996 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1998 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
/* The internal signal-delivery thread is hidden from user code.  */
2000 if (t
!= scm_i_signal_delivery_thread
)
2002 SCM_SETCAR (*l
, t
->handle
);
2003 l
= SCM_CDRLOC (*l
);
2008 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
2013 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
2015 "Return @code{#t} iff @var{thread} has exited.\n")
2016 #define FUNC_NAME s_scm_thread_exited_p
2018 return scm_from_bool (scm_c_thread_exited_p (thread
));
/* C-level predicate: has THREAD finished running?  Reuses the Scheme
   procedure's name for error reporting.  NOTE(review): the return type,
   the declaration of `t' and the return statement are elided from this
   excerpt.  */
2023 scm_c_thread_exited_p (SCM thread
)
2024 #define FUNC_NAME s_scm_thread_exited_p
2027 SCM_VALIDATE_THREAD (1, thread
);
2028 t
= SCM_I_THREAD_DATA (thread
);
/* Exposes gnulib's num_processors (NPROC_ALL) to Scheme.  */
2033 SCM_DEFINE (scm_total_processor_count
, "total-processor-count", 0, 0, 0,
2035 "Return the total number of processors of the machine, which\n"
2036 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2037 "thread execution unit, which can be either:\n\n"
2039 "@item an execution core in a (possibly multi-core) chip, in a\n"
2040 " (possibly multi- chip) module, in a single computer, or\n"
2041 "@item a thread execution unit inside a core in the case of\n"
2042 " @dfn{hyper-threaded} CPUs.\n"
2044 "Which of the two definitions is used, is unspecified.\n")
2045 #define FUNC_NAME s_scm_total_processor_count
2047 return scm_from_ulong (num_processors (NPROC_ALL
));
/* Like total-processor-count, but restricted to this process's
   affinity set (gnulib NPROC_CURRENT).  */
2051 SCM_DEFINE (scm_current_processor_count
, "current-processor-count", 0, 0, 0,
2053 "Like @code{total-processor-count}, but return the number of\n"
2054 "processors available to the current process. See\n"
2055 "@code{setaffinity} and @code{getaffinity} for more\n"
2057 #define FUNC_NAME s_scm_current_processor_count
2059 return scm_from_ulong (num_processors (NPROC_CURRENT
));
2066 static scm_i_pthread_cond_t wake_up_cond
;
2067 static int threads_initialized_p
= 0;
2070 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2072 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
2074 static SCM dynwind_critical_section_mutex
;
2077 scm_dynwind_critical_section (SCM mutex
)
2079 if (scm_is_false (mutex
))
2080 mutex
= dynwind_critical_section_mutex
;
2081 scm_dynwind_lock_mutex (mutex
);
2082 scm_dynwind_block_asyncs ();
2085 /*** Initialization */
2087 scm_i_pthread_mutex_t scm_i_misc_mutex
;
2089 #if SCM_USE_PTHREAD_THREADS
2090 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
/* Earliest thread-system setup, run before the rest of libguile is
   initialized: create the recursive mutex attribute, the critical
   section / misc mutexes and the wakeup condvar, then register the
   first thread.  NOTE(review): return type, braces and the matching
   #endif are elided from this excerpt.  */
2094 scm_threads_prehistory (void *base
)
2096 #if SCM_USE_PTHREAD_THREADS
2097 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2098 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2099 PTHREAD_MUTEX_RECURSIVE
);
/* The critical-section mutex must be recursive (nested sections).  */
2102 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2103 scm_i_pthread_mutexattr_recursive
);
2104 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2105 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
/* BASE is the GC-reported stack base of the primordial thread.  */
2107 guilify_self_1 ((struct GC_stack_base
*) base
);
2110 scm_t_bits scm_tc16_thread
;
2111 scm_t_bits scm_tc16_mutex
;
2112 scm_t_bits scm_tc16_condvar
;
/* Body of scm_init_threads (its header is elided from this excerpt):
   registers the thread/mutex/condvar smob types and finishes setting up
   the primordial thread.  */
2117 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2118 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2120 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2121 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2122 scm_set_smob_free (scm_tc16_mutex
, fat_mutex_free
);
2124 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2126 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
/* Default dynamic state starts out unset; guilify_self_2 completes the
   primordial thread's Scheme-level initialization.  */
2128 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2129 guilify_self_2 (SCM_BOOL_F
);
2130 threads_initialized_p
= 1;
/* Must come after the thread system is up: it allocates a mutex.  */
2132 dynwind_critical_section_mutex
= scm_make_recursive_mutex ();
2136 scm_init_threads_default_dynamic_state ()
2138 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2139 scm_i_default_dynamic_state
= state
;
2143 scm_init_thread_procs ()
2145 #include "libguile/threads.x"
2149 /* IA64-specific things. */
2153 # include <sys/param.h>
2154 # include <sys/pstat.h>
/* HP-UX/ia64: locate the base of the register backing store by walking
   the process VM regions via pstat until the RSE stack is found.
   NOTE(review): return type, the loop counter declaration and the
   fallthrough path are elided from this excerpt.  */
2156 scm_ia64_register_backing_store_base (void)
2158 struct pst_vm_status vm_status
;
2160 while (pstat_getprocvm (&vm_status
, sizeof (vm_status
), 0, i
++) == 1)
2161 if (vm_status
.pst_type
== PS_RSESTACK
)
2162 return (void *) vm_status
.pst_vaddr
;
/* HP-UX/ia64: extract the ar.bsp register from a ucontext via the
   system helper.  NOTE(review): the return type and the declaration of
   `bsp' are elided from this excerpt.  */
2166 scm_ia64_ar_bsp (const void *ctx
)
2169 __uc_get_ar_bsp (ctx
, &bsp
);
2170 return (void *) bsp
;
2174 # include <ucontext.h>
/* GNU/Linux ia64 variant: glibc exports the backing store base
   directly.  NOTE(review): the return type and braces are elided from
   this excerpt.  */
2176 scm_ia64_register_backing_store_base (void)
2178 extern void *__libc_ia64_register_backing_store_base
;
2179 return __libc_ia64_register_backing_store_base
;
/* GNU/Linux ia64 variant: read ar.bsp straight out of the signal
   context.  NOTE(review): the return type and braces are elided from
   this excerpt.  */
2182 scm_ia64_ar_bsp (const void *opaque
)
2184 const ucontext_t
*ctx
= opaque
;
2185 return (void *) ctx
->uc_mcontext
.sc_ar_bsp
;
2188 #endif /* __ia64__ */