/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
 *   2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
 *   Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
27 #include "libguile/bdw-gc.h"
28 #include "libguile/_scm.h"
37 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
45 # include <pthread_np.h>
48 #include <sys/select.h>
54 #include "libguile/validate.h"
55 #include "libguile/root.h"
56 #include "libguile/eval.h"
57 #include "libguile/async.h"
58 #include "libguile/ports.h"
59 #include "libguile/threads.h"
60 #include "libguile/dynwind.h"
61 #include "libguile/iselect.h"
62 #include "libguile/fluids.h"
63 #include "libguile/continuations.h"
64 #include "libguile/gc.h"
65 #include "libguile/init.h"
66 #include "libguile/scmsigs.h"
67 #include "libguile/strings.h"
69 #include <full-read.h>
74 /* First some libgc shims. */
76 /* Make sure GC_fn_type is defined; it is missing from the public
77 headers of GC 7.1 and earlier. */
78 #ifndef HAVE_GC_FN_TYPE
79 typedef void * (* GC_fn_type
) (void *);
87 #ifndef GC_UNIMPLEMENTED
88 #define GC_UNIMPLEMENTED 3
91 /* Likewise struct GC_stack_base is missing before 7.1. */
92 #ifndef HAVE_GC_STACK_BASE
/* Replacement for struct GC_stack_base, missing from the public
   headers of libgc before 7.1.  */
struct GC_stack_base {
  void * mem_base; /* Base of memory stack.  */
#ifdef __ia64__
  void * reg_base; /* Base of separate register stack.  */
#endif
};
101 GC_register_my_thread (struct GC_stack_base
*stack_base
)
103 return GC_UNIMPLEMENTED
;
/* Shim for libgc < 7.1: unregistering a thread is a no-op.  */
static void
GC_unregister_my_thread ()
{
}
111 #if !SCM_USE_PTHREAD_THREADS
112 /* No threads; we can just use GC_stackbottom. */
114 get_thread_stack_base ()
116 return GC_stackbottom
;
119 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
120 && defined PTHREAD_ATTR_GETSTACK_WORKS
121 /* This method for GNU/Linux and perhaps some other systems.
122 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
123 available on them. */
125 get_thread_stack_base ()
131 pthread_getattr_np (pthread_self (), &attr
);
132 pthread_attr_getstack (&attr
, &start
, &size
);
133 end
= (char *)start
+ size
;
135 #if SCM_STACK_GROWS_UP
142 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
143 /* This method for MacOS X.
144 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
145 but as of 2006 there's nothing obvious at apple.com. */
static void *
get_thread_stack_base ()
{
  return pthread_get_stackaddr_np (pthread_self ());
}
152 #elif HAVE_PTHREAD_ATTR_GET_NP
153 /* This one is for FreeBSD 9. */
155 get_thread_stack_base ()
161 pthread_attr_init (&attr
);
162 pthread_attr_get_np (pthread_self (), &attr
);
163 pthread_attr_getstack (&attr
, &start
, &size
);
164 pthread_attr_destroy (&attr
);
166 end
= (char *)start
+ size
;
168 #if SCM_STACK_GROWS_UP
176 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
180 GC_get_stack_base (struct GC_stack_base
*stack_base
)
182 stack_base
->mem_base
= get_thread_stack_base ();
184 /* Calculate and store off the base of this thread's register
185 backing store (RBS). Unfortunately our implementation(s) of
186 scm_ia64_register_backing_store_base are only reliable for the
187 main thread. For other threads, therefore, find out the current
188 top of the RBS, and use that as a maximum. */
189 stack_base
->reg_base
= scm_ia64_register_backing_store_base ();
194 bsp
= scm_ia64_ar_bsp (&ctx
);
195 if (stack_base
->reg_base
> bsp
)
196 stack_base
->reg_base
= bsp
;
203 GC_call_with_stack_base(void * (*fn
) (struct GC_stack_base
*, void*), void *arg
)
205 struct GC_stack_base stack_base
;
207 stack_base
.mem_base
= (void*)&stack_base
;
209 /* FIXME: Untested. */
213 stack_base
.reg_base
= scm_ia64_ar_bsp (&ctx
);
217 return fn (&stack_base
, arg
);
219 #endif /* HAVE_GC_STACK_BASE */
222 /* Now define with_gc_active and with_gc_inactive. */
224 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
226 /* We have a sufficiently new libgc (7.2 or newer). */
229 with_gc_inactive (GC_fn_type func
, void *data
)
231 return GC_do_blocking (func
, data
);
235 with_gc_active (GC_fn_type func
, void *data
)
237 return GC_call_with_gc_active (func
, data
);
242 /* libgc not new enough, so never actually deactivate GC.
244 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
245 GC_call_with_gc_active. */
248 with_gc_inactive (GC_fn_type func
, void *data
)
254 with_gc_active (GC_fn_type func
, void *data
)
259 #endif /* HAVE_GC_DO_BLOCKING */
264 to_timespec (SCM t
, scm_t_timespec
*waittime
)
268 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
269 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
273 double time
= scm_to_double (t
);
274 double sec
= scm_c_truncate (time
);
276 waittime
->tv_sec
= (long) sec
;
277 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
284 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
285 the risk of false references leading to unbounded retained space as
286 described in "Bounding Space Usage of Conservative Garbage Collectors",
289 /* Make an empty queue data structure.
294 return scm_cons (SCM_EOL
, SCM_EOL
);
297 /* Put T at the back of Q and return a handle that can be used with
298 remqueue to remove T from Q again.
301 enqueue (SCM q
, SCM t
)
303 SCM c
= scm_cons (t
, SCM_EOL
);
304 SCM_CRITICAL_SECTION_START
;
305 if (scm_is_null (SCM_CDR (q
)))
308 SCM_SETCDR (SCM_CAR (q
), c
);
310 SCM_CRITICAL_SECTION_END
;
314 /* Remove the element that the handle C refers to from the queue Q. C
315 must have been returned from a call to enqueue. The return value
316 is zero when the element referred to by C has already been removed.
317 Otherwise, 1 is returned.
320 remqueue (SCM q
, SCM c
)
323 SCM_CRITICAL_SECTION_START
;
324 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
326 if (scm_is_eq (p
, c
))
328 if (scm_is_eq (c
, SCM_CAR (q
)))
329 SCM_SETCAR (q
, SCM_CDR (c
));
330 SCM_SETCDR (prev
, SCM_CDR (c
));
333 SCM_SETCDR (c
, SCM_EOL
);
335 SCM_CRITICAL_SECTION_END
;
340 SCM_CRITICAL_SECTION_END
;
344 /* Remove the front-most element from the queue Q and return it.
345 Return SCM_BOOL_F when Q is empty.
351 SCM_CRITICAL_SECTION_START
;
355 SCM_CRITICAL_SECTION_END
;
360 SCM_SETCDR (q
, SCM_CDR (c
));
361 if (scm_is_null (SCM_CDR (q
)))
362 SCM_SETCAR (q
, SCM_EOL
);
363 SCM_CRITICAL_SECTION_END
;
366 SCM_SETCDR (c
, SCM_EOL
);
372 /*** Thread smob routines */
376 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
378 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
379 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
380 the struct case, hence we go via a union, and extract according to the
381 size of pthread_t. */
389 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
390 scm_i_pthread_t p
= t
->pthread
;
393 if (sizeof (p
) == sizeof (unsigned short))
395 else if (sizeof (p
) == sizeof (unsigned int))
397 else if (sizeof (p
) == sizeof (unsigned long))
402 scm_puts_unlocked ("#<thread ", port
);
403 scm_uintprint (id
, 10, port
);
404 scm_puts_unlocked (" (", port
);
405 scm_uintprint ((scm_t_bits
)t
, 16, port
);
406 scm_puts_unlocked (")>", port
);
411 /*** Blocking on queues. */
413 /* See also scm_i_queue_async_cell for how such a block is
417 /* Put the current thread on QUEUE and go to sleep, waiting for it to
418 be woken up by a call to 'unblock_from_queue', or to be
419 interrupted. Upon return of this function, the current thread is
420 no longer on QUEUE, even when the sleep has been interrupted.
422 The caller of block_self must hold MUTEX. It will be atomically
423 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
425 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
428 When WAITTIME is not NULL, the sleep will be aborted at that time.
430 The return value of block_self is an errno value. It will be zero
431 when the sleep has been successfully completed by a call to
432 unblock_from_queue, EINTR when it has been interrupted by the
433 delivery of a system async, and ETIMEDOUT when the timeout has
436 The system asyncs themselves are not executed by block_self.
439 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
440 const scm_t_timespec
*waittime
)
442 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
446 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
451 q_handle
= enqueue (queue
, t
->handle
);
452 if (waittime
== NULL
)
453 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
455 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
457 /* When we are still on QUEUE, we have been interrupted. We
458 report this only when no other error (such as a timeout) has
461 if (remqueue (queue
, q_handle
) && err
== 0)
464 scm_i_reset_sleep (t
);
470 /* Wake up the first thread on QUEUE, if any. The awoken thread is
471 returned, or #f if the queue was empty.
474 unblock_from_queue (SCM queue
)
476 SCM thread
= dequeue (queue
);
477 if (scm_is_true (thread
))
478 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
483 /* Getting into and out of guile mode.
486 /* Key used to attach a cleanup handler to a given thread. Also, if
487 thread-local storage is unavailable, this key is used to retrieve the
488 current thread with `pthread_getspecific ()'. */
489 scm_i_pthread_key_t scm_i_thread_key
;
492 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
494 /* When thread-local storage (TLS) is available, a pointer to the
495 current-thread object is kept in TLS. Note that storing the thread-object
496 itself in TLS (rather than a pointer to some malloc'd memory) is not
497 possible since thread objects may live longer than the actual thread they
499 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
501 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
504 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
505 static scm_i_thread
*all_threads
= NULL
;
506 static int thread_count
;
508 static SCM scm_i_default_dynamic_state
;
510 /* Run when a fluid is collected. */
512 scm_i_reset_fluid (size_t n
)
516 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
517 for (t
= all_threads
; t
; t
= t
->next_thread
)
518 if (SCM_I_DYNAMIC_STATE_P (t
->dynamic_state
))
520 SCM v
= SCM_I_DYNAMIC_STATE_FLUIDS (t
->dynamic_state
);
522 if (n
< SCM_SIMPLE_VECTOR_LENGTH (v
))
523 SCM_SIMPLE_VECTOR_SET (v
, n
, SCM_UNDEFINED
);
525 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
528 /* Perform first stage of thread initialisation, in non-guile mode.
531 guilify_self_1 (struct GC_stack_base
*base
)
535 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
536 before allocating anything in this thread, because allocation could
537 cause GC to run, and GC could cause finalizers, which could invoke
538 Scheme functions, which need the current thread to be set. */
540 t
.pthread
= scm_i_pthread_self ();
541 t
.handle
= SCM_BOOL_F
;
542 t
.result
= SCM_BOOL_F
;
543 t
.cleanup_handler
= SCM_BOOL_F
;
546 t
.join_queue
= SCM_EOL
;
547 t
.dynamic_state
= SCM_BOOL_F
;
548 t
.dynstack
.base
= NULL
;
549 t
.dynstack
.top
= NULL
;
550 t
.dynstack
.limit
= NULL
;
551 t
.active_asyncs
= SCM_EOL
;
553 t
.pending_asyncs
= 1;
554 t
.critical_section_level
= 0;
555 t
.base
= base
->mem_base
;
557 t
.register_backing_store_base
= base
->reg_base
;
559 t
.continuation_root
= SCM_EOL
;
560 t
.continuation_base
= t
.base
;
561 scm_i_pthread_cond_init (&t
.sleep_cond
, NULL
);
562 t
.sleep_mutex
= NULL
;
563 t
.sleep_object
= SCM_BOOL_F
;
566 if (pipe2 (t
.sleep_pipe
, O_CLOEXEC
) != 0)
567 /* FIXME: Error conditions during the initialization phase are handled
568 gracelessly since public functions such as `scm_init_guile ()'
569 currently have type `void'. */
572 scm_i_pthread_mutex_init (&t
.admin_mutex
, NULL
);
573 t
.current_mark_stack_ptr
= NULL
;
574 t
.current_mark_stack_limit
= NULL
;
579 /* The switcheroo. */
581 scm_i_thread
*t_ptr
= &t
;
584 t_ptr
= GC_malloc (sizeof (scm_i_thread
));
585 memcpy (t_ptr
, &t
, sizeof t
);
587 scm_i_pthread_setspecific (scm_i_thread_key
, t_ptr
);
589 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
590 /* Cache the current thread in TLS for faster lookup. */
591 scm_i_current_thread
= t_ptr
;
594 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
595 t_ptr
->next_thread
= all_threads
;
598 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
604 /* Perform second stage of thread initialisation, in guile mode.
607 guilify_self_2 (SCM parent
)
609 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
613 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
615 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
616 t
->continuation_base
= t
->base
;
619 if (scm_is_true (parent
))
620 t
->dynamic_state
= scm_make_dynamic_state (parent
);
622 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
624 t
->dynstack
.base
= scm_gc_malloc (16 * sizeof (scm_t_bits
), "dynstack");
625 t
->dynstack
.limit
= t
->dynstack
.base
+ 16;
626 t
->dynstack
.top
= t
->dynstack
.base
+ SCM_DYNSTACK_HEADER_LEN
;
628 t
->join_queue
= make_queue ();
631 /* See note in finalizers.c:queue_finalizer_async(). */
632 GC_invoke_finalizers ();
638 /* We implement our own mutex type since we want them to be 'fair', we
639 want to do fancy things while waiting for them (like running
640 asyncs) and we might want to add things that are nice for
645 scm_i_pthread_mutex_t lock
;
647 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
649 int recursive
; /* allow recursive locking? */
650 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
651 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
652 owned by the current thread? */
654 SCM waiting
; /* the threads waiting for this mutex. */
657 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
658 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
661 call_cleanup (void *data
)
664 return scm_call_0 (*proc_p
);
667 /* Perform thread tear-down, in guile mode.
670 do_thread_exit (void *v
)
672 scm_i_thread
*t
= (scm_i_thread
*) v
;
674 if (!scm_is_false (t
->cleanup_handler
))
676 SCM ptr
= t
->cleanup_handler
;
678 t
->cleanup_handler
= SCM_BOOL_F
;
679 t
->result
= scm_internal_catch (SCM_BOOL_T
,
681 scm_handle_by_message_noexit
, NULL
);
684 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
687 close (t
->sleep_pipe
[0]);
688 close (t
->sleep_pipe
[1]);
689 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
692 while (!scm_is_null (t
->mutexes
))
694 SCM mutex
= scm_c_weak_vector_ref (scm_car (t
->mutexes
), 0);
696 if (scm_is_true (mutex
))
698 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
700 scm_i_pthread_mutex_lock (&m
->lock
);
702 /* Check whether T owns MUTEX. This is usually the case, unless
703 T abandoned MUTEX; in that case, T is no longer its owner (see
704 `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
705 if (scm_is_eq (m
->owner
, t
->handle
))
706 unblock_from_queue (m
->waiting
);
708 scm_i_pthread_mutex_unlock (&m
->lock
);
711 t
->mutexes
= scm_cdr (t
->mutexes
);
714 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
720 do_thread_exit_trampoline (struct GC_stack_base
*sb
, void *v
)
722 /* Won't hurt if we are already registered. */
723 #if SCM_USE_PTHREAD_THREADS
724 GC_register_my_thread (sb
);
727 return scm_with_guile (do_thread_exit
, v
);
731 on_thread_exit (void *v
)
733 /* This handler is executed in non-guile mode. */
734 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
736 /* If we were canceled, we were unable to clear `t->guile_mode', so do
740 /* If this thread was cancelled while doing a cond wait, it will
741 still have a mutex locked, so we unlock it here. */
744 scm_i_pthread_mutex_unlock (t
->held_mutex
);
745 t
->held_mutex
= NULL
;
748 /* Reinstate the current thread for purposes of scm_with_guile
749 guile-mode cleanup handlers. Only really needed in the non-TLS
750 case but it doesn't hurt to be consistent. */
751 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
753 /* Scheme-level thread finalizers and other cleanup needs to happen in
755 GC_call_with_stack_base (do_thread_exit_trampoline
, t
);
757 /* Removing ourself from the list of all threads needs to happen in
758 non-guile mode since all SCM values on our stack become
759 unprotected once we are no longer in the list. */
760 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
761 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
764 *tp
= t
->next_thread
;
767 t
->next_thread
= NULL
;
773 /* If there's only one other thread, it could be the signal delivery
774 thread, so we need to notify it to shut down by closing its read pipe.
775 If it's not the signal delivery thread, then closing the read pipe isn't
777 if (thread_count
<= 1)
778 scm_i_close_signal_pipe ();
780 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
782 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
784 #if SCM_USE_PTHREAD_THREADS
785 GC_unregister_my_thread ();
789 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
792 init_thread_key (void)
794 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
797 /* Perform any initializations necessary to make the current thread
798 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
801 BASE is the stack base to use with GC.
803 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
804 which case the default dynamic state is used.
806 Returns zero when the thread was known to guile already; otherwise
809 Note that it could be the case that the thread was known
810 to Guile, but not in guile mode (because we are within a
811 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
812 be sure. New threads are put into guile mode implicitly. */
815 scm_i_init_thread_for_guile (struct GC_stack_base
*base
, SCM parent
)
817 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
819 if (SCM_I_CURRENT_THREAD
)
821 /* Thread is already known to Guile.
827 /* This thread has not been guilified yet.
830 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
831 if (scm_initialized_p
== 0)
833 /* First thread ever to enter Guile. Run the full
836 scm_i_init_guile (base
);
838 #if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
839 /* Allow other threads to come in later. */
840 GC_allow_register_threads ();
843 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
847 /* Guile is already initialized, but this thread enters it for
848 the first time. Only initialize this thread.
850 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
852 /* Register this thread with libgc. */
853 #if SCM_USE_PTHREAD_THREADS
854 GC_register_my_thread (base
);
857 guilify_self_1 (base
);
858 guilify_self_2 (parent
);
867 struct GC_stack_base stack_base
;
869 if (GC_get_stack_base (&stack_base
) == GC_SUCCESS
)
870 scm_i_init_thread_for_guile (&stack_base
,
871 scm_i_default_dynamic_state
);
874 fprintf (stderr
, "Failed to get stack base for current thread.\n");
879 struct with_guile_args
887 with_guile_trampoline (void *data
)
889 struct with_guile_args
*args
= data
;
891 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
895 with_guile_and_parent (struct GC_stack_base
*base
, void *data
)
900 struct with_guile_args
*args
= data
;
902 new_thread
= scm_i_init_thread_for_guile (base
, args
->parent
);
903 t
= SCM_I_CURRENT_THREAD
;
906 /* We are in Guile mode. */
907 assert (t
->guile_mode
);
909 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
911 /* Leave Guile mode. */
914 else if (t
->guile_mode
)
916 /* Already in Guile mode. */
917 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
921 /* We are not in Guile mode, either because we are not within a
922 scm_with_guile, or because we are within a scm_without_guile.
924 This call to scm_with_guile() could happen from anywhere on the
925 stack, and in particular lower on the stack than when it was
926 when this thread was first guilified. Thus, `base' must be
928 #if SCM_STACK_GROWS_UP
929 if (SCM_STACK_PTR (base
->mem_base
) < t
->base
)
930 t
->base
= SCM_STACK_PTR (base
->mem_base
);
932 if (SCM_STACK_PTR (base
->mem_base
) > t
->base
)
933 t
->base
= SCM_STACK_PTR (base
->mem_base
);
937 res
= with_gc_active (with_guile_trampoline
, args
);
944 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
946 struct with_guile_args args
;
950 args
.parent
= parent
;
952 return GC_call_with_stack_base (with_guile_and_parent
, &args
);
956 scm_with_guile (void *(*func
)(void *), void *data
)
958 return scm_i_with_guile_and_parent (func
, data
,
959 scm_i_default_dynamic_state
);
963 scm_without_guile (void *(*func
)(void *), void *data
)
966 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
970 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
971 result
= with_gc_inactive (func
, data
);
972 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
975 /* Otherwise we're not in guile mode, so nothing to do. */
976 result
= func (data
);
982 /*** Thread creation */
989 scm_i_pthread_mutex_t mutex
;
990 scm_i_pthread_cond_t cond
;
994 really_launch (void *d
)
996 launch_data
*data
= (launch_data
*)d
;
997 SCM thunk
= data
->thunk
, handler
= data
->handler
;
1000 t
= SCM_I_CURRENT_THREAD
;
1002 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
1003 data
->thread
= scm_current_thread ();
1004 scm_i_pthread_cond_signal (&data
->cond
);
1005 scm_i_pthread_mutex_unlock (&data
->mutex
);
1007 if (SCM_UNBNDP (handler
))
1008 t
->result
= scm_call_0 (thunk
);
1010 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
1016 launch_thread (void *d
)
1018 launch_data
*data
= (launch_data
*)d
;
1019 scm_i_pthread_detach (scm_i_pthread_self ());
1020 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
1024 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
1025 (SCM thunk
, SCM handler
),
1026 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
1027 "returning a new thread object representing the thread. The procedure\n"
1028 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
1030 "When @var{handler} is specified, then @var{thunk} is called from\n"
1031 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
1032 "handler. This catch is established inside the continuation barrier.\n"
1034 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
1035 "the @emph{exit value} of the thread and the thread is terminated.")
1036 #define FUNC_NAME s_scm_call_with_new_thread
1042 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
1043 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
1044 handler
, SCM_ARG2
, FUNC_NAME
);
1046 GC_collect_a_little ();
1047 data
.parent
= scm_current_dynamic_state ();
1049 data
.handler
= handler
;
1050 data
.thread
= SCM_BOOL_F
;
1051 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1052 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1054 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1055 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
1058 scm_i_pthread_mutex_unlock (&data
.mutex
);
1060 scm_syserror (NULL
);
1063 while (scm_is_false (data
.thread
))
1064 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1066 scm_i_pthread_mutex_unlock (&data
.mutex
);
1074 scm_t_catch_body body
;
1076 scm_t_catch_handler handler
;
1079 scm_i_pthread_mutex_t mutex
;
1080 scm_i_pthread_cond_t cond
;
1084 really_spawn (void *d
)
1086 spawn_data
*data
= (spawn_data
*)d
;
1087 scm_t_catch_body body
= data
->body
;
1088 void *body_data
= data
->body_data
;
1089 scm_t_catch_handler handler
= data
->handler
;
1090 void *handler_data
= data
->handler_data
;
1091 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1093 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
1094 data
->thread
= scm_current_thread ();
1095 scm_i_pthread_cond_signal (&data
->cond
);
1096 scm_i_pthread_mutex_unlock (&data
->mutex
);
1098 if (handler
== NULL
)
1099 t
->result
= body (body_data
);
1101 t
->result
= scm_internal_catch (SCM_BOOL_T
,
1103 handler
, handler_data
);
1109 spawn_thread (void *d
)
1111 spawn_data
*data
= (spawn_data
*)d
;
1112 scm_i_pthread_detach (scm_i_pthread_self ());
1113 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
1118 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
1119 scm_t_catch_handler handler
, void *handler_data
)
1125 data
.parent
= scm_current_dynamic_state ();
1127 data
.body_data
= body_data
;
1128 data
.handler
= handler
;
1129 data
.handler_data
= handler_data
;
1130 data
.thread
= SCM_BOOL_F
;
1131 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1132 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1134 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1135 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1138 scm_i_pthread_mutex_unlock (&data
.mutex
);
1140 scm_syserror (NULL
);
1143 while (scm_is_false (data
.thread
))
1144 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1146 scm_i_pthread_mutex_unlock (&data
.mutex
);
1148 assert (SCM_I_IS_THREAD (data
.thread
));
1153 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1155 "Move the calling thread to the end of the scheduling queue.")
1156 #define FUNC_NAME s_scm_yield
1158 return scm_from_bool (scm_i_sched_yield ());
1162 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1164 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1165 "cannot be the current thread, and if @var{thread} has already terminated or "
1166 "been signaled to terminate, this function is a no-op.")
1167 #define FUNC_NAME s_scm_cancel_thread
1169 scm_i_thread
*t
= NULL
;
1171 SCM_VALIDATE_THREAD (1, thread
);
1172 t
= SCM_I_THREAD_DATA (thread
);
1173 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1177 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1178 scm_i_pthread_cancel (t
->pthread
);
1181 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1183 return SCM_UNSPECIFIED
;
1187 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1188 (SCM thread
, SCM proc
),
1189 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1190 "This handler will be called when the thread exits.")
1191 #define FUNC_NAME s_scm_set_thread_cleanup_x
1195 SCM_VALIDATE_THREAD (1, thread
);
1196 if (!scm_is_false (proc
))
1197 SCM_VALIDATE_THUNK (2, proc
);
1199 t
= SCM_I_THREAD_DATA (thread
);
1200 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1202 if (!(t
->exited
|| t
->canceled
))
1203 t
->cleanup_handler
= proc
;
1205 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1207 return SCM_UNSPECIFIED
;
1211 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1213 "Return the cleanup handler installed for the thread @var{thread}.")
1214 #define FUNC_NAME s_scm_thread_cleanup
1219 SCM_VALIDATE_THREAD (1, thread
);
1221 t
= SCM_I_THREAD_DATA (thread
);
1222 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1223 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1224 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1230 SCM
scm_join_thread (SCM thread
)
1232 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1235 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1236 (SCM thread
, SCM timeout
, SCM timeoutval
),
1237 "Suspend execution of the calling thread until the target @var{thread} "
1238 "terminates, unless the target @var{thread} has already terminated. ")
1239 #define FUNC_NAME s_scm_join_thread_timed
1242 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1243 SCM res
= SCM_BOOL_F
;
1245 if (! (SCM_UNBNDP (timeoutval
)))
1248 SCM_VALIDATE_THREAD (1, thread
);
1249 if (scm_is_eq (scm_current_thread (), thread
))
1250 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1252 t
= SCM_I_THREAD_DATA (thread
);
1253 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1255 if (! SCM_UNBNDP (timeout
))
1257 to_timespec (timeout
, &ctimeout
);
1258 timeout_ptr
= &ctimeout
;
1267 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1277 else if (err
== ETIMEDOUT
)
1280 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1282 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1284 /* Check for exit again, since we just released and
1285 reacquired the admin mutex, before the next block_self
1286 call (which would block forever if t has already
1296 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1302 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1304 "Return @code{#t} if @var{obj} is a thread.")
1305 #define FUNC_NAME s_scm_thread_p
1307 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1313 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1315 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1316 scm_puts_unlocked ("#<mutex ", port
);
1317 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1318 scm_puts_unlocked (">", port
);
1323 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1327 scm_i_pthread_mutex_t lock
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
1329 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1330 /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
1331 and so we can just copy it. */
1332 memcpy (&m
->lock
, &lock
, sizeof (m
->lock
));
1333 m
->owner
= SCM_BOOL_F
;
1336 m
->recursive
= recursive
;
1337 m
->unchecked_unlock
= unchecked_unlock
;
1338 m
->allow_external_unlock
= external_unlock
;
1340 m
->waiting
= SCM_EOL
;
1341 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1342 m
->waiting
= make_queue ();
1346 SCM
scm_make_mutex (void)
1348 return scm_make_mutex_with_flags (SCM_EOL
);
1351 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1352 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1353 SCM_SYMBOL (recursive_sym
, "recursive");
1355 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1357 "Create a new mutex. ")
1358 #define FUNC_NAME s_scm_make_mutex_with_flags
1360 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1363 while (! scm_is_null (ptr
))
1365 SCM flag
= SCM_CAR (ptr
);
1366 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1367 unchecked_unlock
= 1;
1368 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1369 external_unlock
= 1;
1370 else if (scm_is_eq (flag
, recursive_sym
))
1373 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1374 ptr
= SCM_CDR (ptr
);
1376 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1380 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1382 "Create a new recursive mutex. ")
1383 #define FUNC_NAME s_scm_make_recursive_mutex
1385 return make_fat_mutex (1, 0, 0);
1389 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1392 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1394 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1396 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1397 SCM err
= SCM_BOOL_F
;
1399 struct timeval current_time
;
1401 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1407 m
->owner
= new_owner
;
1410 if (SCM_I_IS_THREAD (new_owner
))
1412 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1414 /* FIXME: The order in which `t->admin_mutex' and
1415 `m->lock' are taken differs from that in
1416 `on_thread_exit', potentially leading to deadlocks. */
1417 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1419 /* Only keep a weak reference to MUTEX so that it's not
1420 retained when not referenced elsewhere (bug #27450).
1421 The weak pair itself is eventually removed when MUTEX
1422 is unlocked. Note that `t->mutexes' lists mutexes
1423 currently held by T, so it should be small. */
1424 t
->mutexes
= scm_cons (scm_make_weak_vector (SCM_INUM1
, mutex
),
1427 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1432 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1434 m
->owner
= new_owner
;
1435 err
= scm_cons (scm_abandoned_mutex_error_key
,
1436 scm_from_locale_string ("lock obtained on abandoned "
1441 else if (scm_is_eq (m
->owner
, new_owner
))
1450 err
= scm_cons (scm_misc_error_key
,
1451 scm_from_locale_string ("mutex already locked "
1459 if (timeout
!= NULL
)
1461 gettimeofday (¤t_time
, NULL
);
1462 if (current_time
.tv_sec
> timeout
->tv_sec
||
1463 (current_time
.tv_sec
== timeout
->tv_sec
&&
1464 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1470 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1471 scm_i_pthread_mutex_unlock (&m
->lock
);
1473 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1476 scm_i_pthread_mutex_unlock (&m
->lock
);
1480 SCM
scm_lock_mutex (SCM mx
)
1482 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1485 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1486 (SCM m
, SCM timeout
, SCM owner
),
1487 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1488 "thread blocks until the mutex becomes available. The function\n"
1489 "returns when the calling thread owns the lock on @var{m}.\n"
1490 "Locking a mutex that a thread already owns will succeed right\n"
1491 "away and will not block the thread. That is, Guile's mutexes\n"
1492 "are @emph{recursive}.")
1493 #define FUNC_NAME s_scm_lock_mutex_timed
1497 scm_t_timespec cwaittime
, *waittime
= NULL
;
1499 SCM_VALIDATE_MUTEX (1, m
);
1501 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1503 to_timespec (timeout
, &cwaittime
);
1504 waittime
= &cwaittime
;
1507 if (!SCM_UNBNDP (owner
) && !scm_is_false (owner
))
1508 SCM_VALIDATE_THREAD (3, owner
);
1510 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1511 if (!scm_is_false (exception
))
1512 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1513 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1518 lock_mutex_return_void (SCM mx
)
1520 (void) scm_lock_mutex (mx
);
1524 unlock_mutex_return_void (SCM mx
)
1526 (void) scm_unlock_mutex (mx
);
1530 scm_dynwind_lock_mutex (SCM mutex
)
1532 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void
, mutex
,
1533 SCM_F_WIND_EXPLICITLY
);
1534 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void
, mutex
,
1535 SCM_F_WIND_EXPLICITLY
);
1538 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1540 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1541 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1542 #define FUNC_NAME s_scm_try_mutex
1546 scm_t_timespec cwaittime
, *waittime
= NULL
;
1548 SCM_VALIDATE_MUTEX (1, mutex
);
1550 to_timespec (scm_from_int(0), &cwaittime
);
1551 waittime
= &cwaittime
;
1553 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1554 if (!scm_is_false (exception
))
1555 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1556 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1560 /*** Fat condition variables */
1563 scm_i_pthread_mutex_t lock
;
1564 SCM waiting
; /* the threads waiting for this condition. */
1567 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1568 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1571 remove_mutex_from_thread (SCM mutex
, scm_i_thread
*t
)
1575 for (prev
= SCM_BOOL_F
, walk
= t
->mutexes
; scm_is_pair (walk
);
1576 walk
= SCM_CDR (walk
))
1578 if (scm_is_eq (mutex
, scm_c_weak_vector_ref (SCM_CAR (walk
), 0)))
1580 if (scm_is_pair (prev
))
1581 SCM_SETCDR (prev
, SCM_CDR (walk
));
1583 t
->mutexes
= SCM_CDR (walk
);
1590 fat_mutex_unlock (SCM mutex
, SCM cond
,
1591 const scm_t_timespec
*waittime
, int relock
)
1594 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1596 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1597 int err
= 0, ret
= 0;
1599 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1603 if (!scm_is_eq (owner
, t
->handle
))
1607 if (!m
->unchecked_unlock
)
1609 scm_i_pthread_mutex_unlock (&m
->lock
);
1610 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1614 else if (!m
->allow_external_unlock
)
1616 scm_i_pthread_mutex_unlock (&m
->lock
);
1617 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
1621 if (! (SCM_UNBNDP (cond
)))
1623 c
= SCM_CONDVAR_DATA (cond
);
1632 /* Change the owner of MUTEX. */
1633 remove_mutex_from_thread (mutex
, t
);
1634 m
->owner
= unblock_from_queue (m
->waiting
);
1639 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1640 scm_i_pthread_mutex_unlock (&m
->lock
);
1647 else if (err
== ETIMEDOUT
)
1652 else if (err
!= EINTR
)
1655 scm_syserror (NULL
);
1661 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1669 scm_remember_upto_here_2 (cond
, mutex
);
1671 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1680 /* Change the owner of MUTEX. */
1681 remove_mutex_from_thread (mutex
, t
);
1682 m
->owner
= unblock_from_queue (m
->waiting
);
1685 scm_i_pthread_mutex_unlock (&m
->lock
);
1692 SCM
scm_unlock_mutex (SCM mx
)
1694 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1697 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1698 (SCM mx
, SCM cond
, SCM timeout
),
1699 "Unlocks @var{mutex} if the calling thread owns the lock on "
1700 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1701 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1702 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1703 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1704 "with a call to @code{unlock-mutex}. Only the last call to "
1705 "@code{unlock-mutex} will actually unlock the mutex. ")
1706 #define FUNC_NAME s_scm_unlock_mutex_timed
1708 scm_t_timespec cwaittime
, *waittime
= NULL
;
1710 SCM_VALIDATE_MUTEX (1, mx
);
1711 if (! (SCM_UNBNDP (cond
)))
1713 SCM_VALIDATE_CONDVAR (2, cond
);
1715 if (! (SCM_UNBNDP (timeout
)))
1717 to_timespec (timeout
, &cwaittime
);
1718 waittime
= &cwaittime
;
1722 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1726 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1728 "Return @code{#t} if @var{obj} is a mutex.")
1729 #define FUNC_NAME s_scm_mutex_p
1731 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1735 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1737 "Return the thread owning @var{mx}, or @code{#f}.")
1738 #define FUNC_NAME s_scm_mutex_owner
1741 fat_mutex
*m
= NULL
;
1743 SCM_VALIDATE_MUTEX (1, mx
);
1744 m
= SCM_MUTEX_DATA (mx
);
1745 scm_i_pthread_mutex_lock (&m
->lock
);
1747 scm_i_pthread_mutex_unlock (&m
->lock
);
1753 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1755 "Return the lock level of mutex @var{mx}.")
1756 #define FUNC_NAME s_scm_mutex_level
1758 SCM_VALIDATE_MUTEX (1, mx
);
1759 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1763 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1765 "Returns @code{#t} if the mutex @var{mx} is locked.")
1766 #define FUNC_NAME s_scm_mutex_locked_p
1768 SCM_VALIDATE_MUTEX (1, mx
);
1769 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1774 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1776 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1777 scm_puts_unlocked ("#<condition-variable ", port
);
1778 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1779 scm_puts_unlocked (">", port
);
1783 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1785 "Make a new condition variable.")
1786 #define FUNC_NAME s_scm_make_condition_variable
1791 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1792 c
->waiting
= SCM_EOL
;
1793 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1794 c
->waiting
= make_queue ();
1799 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1800 (SCM cv
, SCM mx
, SCM t
),
1801 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1802 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1803 "is locked again when this function returns. When @var{t} is given, "
1804 "it specifies a point in time where the waiting should be aborted. It "
1805 "can be either a integer as returned by @code{current-time} or a pair "
1806 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1807 "mutex is locked and @code{#f} is returned. When the condition "
1808 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1810 #define FUNC_NAME s_scm_timed_wait_condition_variable
1812 scm_t_timespec waittime
, *waitptr
= NULL
;
1814 SCM_VALIDATE_CONDVAR (1, cv
);
1815 SCM_VALIDATE_MUTEX (2, mx
);
1817 if (!SCM_UNBNDP (t
))
1819 to_timespec (t
, &waittime
);
1820 waitptr
= &waittime
;
1823 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1828 fat_cond_signal (fat_cond
*c
)
1830 unblock_from_queue (c
->waiting
);
1833 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1835 "Wake up one thread that is waiting for @var{cv}")
1836 #define FUNC_NAME s_scm_signal_condition_variable
1838 SCM_VALIDATE_CONDVAR (1, cv
);
1839 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1845 fat_cond_broadcast (fat_cond
*c
)
1847 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1851 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1853 "Wake up all threads that are waiting for @var{cv}. ")
1854 #define FUNC_NAME s_scm_broadcast_condition_variable
1856 SCM_VALIDATE_CONDVAR (1, cv
);
1857 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1862 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1864 "Return @code{#t} if @var{obj} is a condition variable.")
1865 #define FUNC_NAME s_scm_condition_variable_p
1867 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1881 struct timeval
*timeout
;
1888 do_std_select (void *args
)
1890 struct select_args
*select_args
;
1892 select_args
= (struct select_args
*) args
;
1894 select_args
->result
=
1895 select (select_args
->nfds
,
1896 select_args
->read_fds
, select_args
->write_fds
,
1897 select_args
->except_fds
, select_args
->timeout
);
1898 select_args
->errno_value
= errno
;
1903 #if !SCM_HAVE_SYS_SELECT_H
1904 static int scm_std_select (int nfds
,
1908 struct timeval
*timeout
);
1912 scm_std_select (int nfds
,
1916 struct timeval
*timeout
)
1919 int res
, eno
, wakeup_fd
;
1920 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1921 struct select_args args
;
1923 if (readfds
== NULL
)
1925 FD_ZERO (&my_readfds
);
1926 readfds
= &my_readfds
;
1929 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1932 wakeup_fd
= t
->sleep_pipe
[0];
1933 FD_SET (wakeup_fd
, readfds
);
1934 if (wakeup_fd
>= nfds
)
1938 args
.read_fds
= readfds
;
1939 args
.write_fds
= writefds
;
1940 args
.except_fds
= exceptfds
;
1941 args
.timeout
= timeout
;
1943 /* Explicitly cooperate with the GC. */
1944 scm_without_guile (do_std_select
, &args
);
1947 eno
= args
.errno_value
;
1950 scm_i_reset_sleep (t
);
1952 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1955 full_read (wakeup_fd
, &dummy
, 1);
1957 FD_CLR (wakeup_fd
, readfds
);
1969 /* Convenience API for blocking while in guile mode. */
1971 #if SCM_USE_PTHREAD_THREADS
1973 /* It seems reasonable to not run procedures related to mutex and condition
1974 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1975 without it, and (ii) the only potential gain would be GC latency. See
1976 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1977 for a discussion of the pros and cons. */
1980 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1982 int res
= scm_i_pthread_mutex_lock (mutex
);
1987 do_unlock (void *data
)
1989 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1993 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1995 scm_i_scm_pthread_mutex_lock (mutex
);
1996 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
2000 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
2003 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
2005 t
->held_mutex
= mutex
;
2006 res
= scm_i_pthread_cond_wait (cond
, mutex
);
2007 t
->held_mutex
= NULL
;
2013 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
2014 scm_i_pthread_mutex_t
*mutex
,
2015 const scm_t_timespec
*wt
)
2018 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
2020 t
->held_mutex
= mutex
;
2021 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
2022 t
->held_mutex
= NULL
;
2030 scm_std_usleep (unsigned long usecs
)
2033 tv
.tv_usec
= usecs
% 1000000;
2034 tv
.tv_sec
= usecs
/ 1000000;
2035 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
2036 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
2040 scm_std_sleep (unsigned int secs
)
2045 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
2051 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
2053 "Return the thread that called this function.")
2054 #define FUNC_NAME s_scm_current_thread
2056 return SCM_I_CURRENT_THREAD
->handle
;
2061 scm_c_make_list (size_t n
, SCM fill
)
2065 res
= scm_cons (fill
, res
);
2069 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
2071 "Return a list of all threads.")
2072 #define FUNC_NAME s_scm_all_threads
2074 /* We can not allocate while holding the thread_admin_mutex because
2075 of the way GC is done.
2077 int n
= thread_count
;
2079 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
2081 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
2083 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
2085 if (t
!= scm_i_signal_delivery_thread
)
2087 SCM_SETCAR (*l
, t
->handle
);
2088 l
= SCM_CDRLOC (*l
);
2093 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
2098 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
2100 "Return @code{#t} iff @var{thread} has exited.\n")
2101 #define FUNC_NAME s_scm_thread_exited_p
2103 return scm_from_bool (scm_c_thread_exited_p (thread
));
2108 scm_c_thread_exited_p (SCM thread
)
2109 #define FUNC_NAME s_scm_thread_exited_p
2112 SCM_VALIDATE_THREAD (1, thread
);
2113 t
= SCM_I_THREAD_DATA (thread
);
2118 SCM_DEFINE (scm_total_processor_count
, "total-processor-count", 0, 0, 0,
2120 "Return the total number of processors of the machine, which\n"
2121 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2122 "thread execution unit, which can be either:\n\n"
2124 "@item an execution core in a (possibly multi-core) chip, in a\n"
2125 " (possibly multi- chip) module, in a single computer, or\n"
2126 "@item a thread execution unit inside a core in the case of\n"
2127 " @dfn{hyper-threaded} CPUs.\n"
2129 "Which of the two definitions is used, is unspecified.\n")
2130 #define FUNC_NAME s_scm_total_processor_count
2132 return scm_from_ulong (num_processors (NPROC_ALL
));
2136 SCM_DEFINE (scm_current_processor_count
, "current-processor-count", 0, 0, 0,
2138 "Like @code{total-processor-count}, but return the number of\n"
2139 "processors available to the current process. See\n"
2140 "@code{setaffinity} and @code{getaffinity} for more\n"
2142 #define FUNC_NAME s_scm_current_processor_count
2144 return scm_from_ulong (num_processors (NPROC_CURRENT
));
/* Condition used to wake sleeping threads; see scm_threads_prehistory.  */
static scm_i_pthread_cond_t wake_up_cond;
/* Non-zero once scm_init_threads has run.  */
static int threads_initialized_p = 0;

/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
 */
scm_i_pthread_mutex_t scm_i_critical_section_mutex;

/* Recursive mutex used by scm_dynwind_critical_section when no mutex
   is supplied; created in scm_init_threads.  */
static SCM dynwind_critical_section_mutex;
2162 scm_dynwind_critical_section (SCM mutex
)
2164 if (scm_is_false (mutex
))
2165 mutex
= dynwind_critical_section_mutex
;
2166 scm_dynwind_lock_mutex (mutex
);
2167 scm_dynwind_block_asyncs ();
/*** Initialization */

/* Protects miscellaneous small pieces of global state.  */
scm_i_pthread_mutex_t scm_i_misc_mutex;

#if SCM_USE_PTHREAD_THREADS
/* Attribute set yielding recursive mutexes (PTHREAD_MUTEX_RECURSIVE).  */
pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
#endif
2179 scm_threads_prehistory (void *base
)
2181 #if SCM_USE_PTHREAD_THREADS
2182 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2183 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2184 PTHREAD_MUTEX_RECURSIVE
);
2187 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2188 scm_i_pthread_mutexattr_recursive
);
2189 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2190 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
2192 guilify_self_1 ((struct GC_stack_base
*) base
);
2195 scm_t_bits scm_tc16_thread
;
2196 scm_t_bits scm_tc16_mutex
;
2197 scm_t_bits scm_tc16_condvar
;
2202 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2203 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2205 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2206 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2208 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2210 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
2212 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2213 guilify_self_2 (SCM_BOOL_F
);
2214 threads_initialized_p
= 1;
2216 dynwind_critical_section_mutex
= scm_make_recursive_mutex ();
2220 scm_init_threads_default_dynamic_state ()
2222 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2223 scm_i_default_dynamic_state
= state
;
2227 scm_init_thread_procs ()
2229 #include "libguile/threads.x"
/* IA64-specific things.  */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
/* Return the base of the IA64 register backing store by scanning the
   process's VM regions for the RSE stack.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Return the ar.bsp register saved in CTX.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
/* glibc exports the backing store base directly.  */
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Return the ar.bsp register saved in the ucontext OPAQUE.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
#  include <ucontext.h>
/* On FreeBSD the backing store base is a fixed address.  */
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
/* Return bspstore plus the dirty-register bytes from the ucontext.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
		  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */