1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 * Free Software Foundation, Inc.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
27 #include "libguile/bdw-gc.h"
28 #include "libguile/_scm.h"
37 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
45 # include <pthread_np.h>
52 #include "libguile/validate.h"
53 #include "libguile/root.h"
54 #include "libguile/eval.h"
55 #include "libguile/async.h"
56 #include "libguile/ports.h"
57 #include "libguile/threads.h"
58 #include "libguile/dynwind.h"
59 #include "libguile/iselect.h"
60 #include "libguile/fluids.h"
61 #include "libguile/continuations.h"
62 #include "libguile/gc.h"
63 #include "libguile/init.h"
64 #include "libguile/scmsigs.h"
65 #include "libguile/strings.h"
67 #include <full-read.h>
72 /* First some libgc shims. */
74 /* Make sure GC_fn_type is defined; it is missing from the public
75 headers of GC 7.1 and earlier. */
76 #ifndef HAVE_GC_FN_TYPE
77 typedef void * (* GC_fn_type
) (void *);
85 #ifndef GC_UNIMPLEMENTED
86 #define GC_UNIMPLEMENTED 3
89 /* Likewise struct GC_stack_base is missing before 7.1. */
90 #ifndef HAVE_GC_STACK_BASE
91 struct GC_stack_base
{
92 void * mem_base
; /* Base of memory stack. */
94 void * reg_base
; /* Base of separate register stack. */
99 GC_register_my_thread (struct GC_stack_base
*stack_base
)
101 return GC_UNIMPLEMENTED
;
105 GC_unregister_my_thread ()
109 #if !SCM_USE_PTHREAD_THREADS
110 /* No threads; we can just use GC_stackbottom. */
112 get_thread_stack_base ()
114 return GC_stackbottom
;
117 #elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
118 && defined PTHREAD_ATTR_GETSTACK_WORKS
119 /* This method for GNU/Linux and perhaps some other systems.
120 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
121 available on them. */
123 get_thread_stack_base ()
129 pthread_getattr_np (pthread_self (), &attr
);
130 pthread_attr_getstack (&attr
, &start
, &size
);
131 end
= (char *)start
+ size
;
133 #if SCM_STACK_GROWS_UP
140 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
141 /* This method for MacOS X.
142 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
143 but as of 2006 there's nothing obvious at apple.com. */
145 get_thread_stack_base ()
147 return pthread_get_stackaddr_np (pthread_self ());
150 #elif HAVE_PTHREAD_ATTR_GET_NP
151 /* This one is for FreeBSD 9. */
153 get_thread_stack_base ()
159 pthread_attr_init (&attr
);
160 pthread_attr_get_np (pthread_self (), &attr
);
161 pthread_attr_getstack (&attr
, &start
, &size
);
162 pthread_attr_destroy (&attr
);
164 end
= (char *)start
+ size
;
166 #if SCM_STACK_GROWS_UP
174 #error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
178 GC_get_stack_base (struct GC_stack_base
*stack_base
)
180 stack_base
->mem_base
= get_thread_stack_base ();
182 /* Calculate and store off the base of this thread's register
183 backing store (RBS). Unfortunately our implementation(s) of
184 scm_ia64_register_backing_store_base are only reliable for the
185 main thread. For other threads, therefore, find out the current
186 top of the RBS, and use that as a maximum. */
187 stack_base
->reg_base
= scm_ia64_register_backing_store_base ();
192 bsp
= scm_ia64_ar_bsp (&ctx
);
193 if (stack_base
->reg_base
> bsp
)
194 stack_base
->reg_base
= bsp
;
201 GC_call_with_stack_base(void * (*fn
) (struct GC_stack_base
*, void*), void *arg
)
203 struct GC_stack_base stack_base
;
205 stack_base
.mem_base
= (void*)&stack_base
;
207 /* FIXME: Untested. */
211 stack_base
.reg_base
= scm_ia64_ar_bsp (&ctx
);
215 return fn (&stack_base
, arg
);
217 #endif /* HAVE_GC_STACK_BASE */
220 /* Now define with_gc_active and with_gc_inactive. */
222 #if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
224 /* We have a sufficiently new libgc (7.2 or newer). */
227 with_gc_inactive (GC_fn_type func
, void *data
)
229 return GC_do_blocking (func
, data
);
233 with_gc_active (GC_fn_type func
, void *data
)
235 return GC_call_with_gc_active (func
, data
);
240 /* libgc not new enough, so never actually deactivate GC.
242 Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
243 GC_call_with_gc_active. */
246 with_gc_inactive (GC_fn_type func
, void *data
)
252 with_gc_active (GC_fn_type func
, void *data
)
257 #endif /* HAVE_GC_DO_BLOCKING */
262 to_timespec (SCM t
, scm_t_timespec
*waittime
)
266 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
267 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
271 double time
= scm_to_double (t
);
272 double sec
= scm_c_truncate (time
);
274 waittime
->tv_sec
= (long) sec
;
275 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
282 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
283 the risk of false references leading to unbounded retained space as
284 described in "Bounding Space Usage of Conservative Garbage Collectors",
287 /* Make an empty queue data structure.
292 return scm_cons (SCM_EOL
, SCM_EOL
);
295 /* Put T at the back of Q and return a handle that can be used with
296 remqueue to remove T from Q again.
299 enqueue (SCM q
, SCM t
)
301 SCM c
= scm_cons (t
, SCM_EOL
);
302 SCM_CRITICAL_SECTION_START
;
303 if (scm_is_null (SCM_CDR (q
)))
306 SCM_SETCDR (SCM_CAR (q
), c
);
308 SCM_CRITICAL_SECTION_END
;
312 /* Remove the element that the handle C refers to from the queue Q. C
313 must have been returned from a call to enqueue. The return value
314 is zero when the element referred to by C has already been removed.
315 Otherwise, 1 is returned.
318 remqueue (SCM q
, SCM c
)
321 SCM_CRITICAL_SECTION_START
;
322 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
324 if (scm_is_eq (p
, c
))
326 if (scm_is_eq (c
, SCM_CAR (q
)))
327 SCM_SETCAR (q
, SCM_CDR (c
));
328 SCM_SETCDR (prev
, SCM_CDR (c
));
331 SCM_SETCDR (c
, SCM_EOL
);
333 SCM_CRITICAL_SECTION_END
;
338 SCM_CRITICAL_SECTION_END
;
342 /* Remove the front-most element from the queue Q and return it.
343 Return SCM_BOOL_F when Q is empty.
349 SCM_CRITICAL_SECTION_START
;
353 SCM_CRITICAL_SECTION_END
;
358 SCM_SETCDR (q
, SCM_CDR (c
));
359 if (scm_is_null (SCM_CDR (q
)))
360 SCM_SETCAR (q
, SCM_EOL
);
361 SCM_CRITICAL_SECTION_END
;
364 SCM_SETCDR (c
, SCM_EOL
);
370 /*** Thread smob routines */
374 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
376 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
377 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
378 the struct case, hence we go via a union, and extract according to the
379 size of pthread_t. */
387 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
388 scm_i_pthread_t p
= t
->pthread
;
391 if (sizeof (p
) == sizeof (unsigned short))
393 else if (sizeof (p
) == sizeof (unsigned int))
395 else if (sizeof (p
) == sizeof (unsigned long))
400 scm_puts_unlocked ("#<thread ", port
);
401 scm_uintprint (id
, 10, port
);
402 scm_puts_unlocked (" (", port
);
403 scm_uintprint ((scm_t_bits
)t
, 16, port
);
404 scm_puts_unlocked (")>", port
);
409 /*** Blocking on queues. */
411 /* See also scm_i_queue_async_cell for how such a block is
415 /* Put the current thread on QUEUE and go to sleep, waiting for it to
416 be woken up by a call to 'unblock_from_queue', or to be
417 interrupted. Upon return of this function, the current thread is
418 no longer on QUEUE, even when the sleep has been interrupted.
420 The caller of block_self must hold MUTEX. It will be atomically
421 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
423 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
426 When WAITTIME is not NULL, the sleep will be aborted at that time.
428 The return value of block_self is an errno value. It will be zero
429 when the sleep has been successfully completed by a call to
430 unblock_from_queue, EINTR when it has been interrupted by the
431 delivery of a system async, and ETIMEDOUT when the timeout has
434 The system asyncs themselves are not executed by block_self.
437 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
438 const scm_t_timespec
*waittime
)
440 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
444 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
449 q_handle
= enqueue (queue
, t
->handle
);
450 if (waittime
== NULL
)
451 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
453 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
455 /* When we are still on QUEUE, we have been interrupted. We
456 report this only when no other error (such as a timeout) has
459 if (remqueue (queue
, q_handle
) && err
== 0)
462 scm_i_reset_sleep (t
);
468 /* Wake up the first thread on QUEUE, if any. The awoken thread is
469 returned, or #f if the queue was empty.
472 unblock_from_queue (SCM queue
)
474 SCM thread
= dequeue (queue
);
475 if (scm_is_true (thread
))
476 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
481 /* Getting into and out of guile mode.
484 /* Key used to attach a cleanup handler to a given thread. Also, if
485 thread-local storage is unavailable, this key is used to retrieve the
486 current thread with `pthread_getspecific ()'. */
487 scm_i_pthread_key_t scm_i_thread_key
;
490 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
492 /* When thread-local storage (TLS) is available, a pointer to the
493 current-thread object is kept in TLS. Note that storing the thread-object
494 itself in TLS (rather than a pointer to some malloc'd memory) is not
495 possible since thread objects may live longer than the actual thread they
497 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
499 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
502 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
503 static scm_i_thread
*all_threads
= NULL
;
504 static int thread_count
;
506 static SCM scm_i_default_dynamic_state
;
508 /* Run when a fluid is collected. */
510 scm_i_reset_fluid (size_t n
)
514 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
515 for (t
= all_threads
; t
; t
= t
->next_thread
)
516 if (SCM_I_DYNAMIC_STATE_P (t
->dynamic_state
))
518 SCM v
= SCM_I_DYNAMIC_STATE_FLUIDS (t
->dynamic_state
);
520 if (n
< SCM_SIMPLE_VECTOR_LENGTH (v
))
521 SCM_SIMPLE_VECTOR_SET (v
, n
, SCM_UNDEFINED
);
523 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
526 /* Perform first stage of thread initialisation, in non-guile mode.
529 guilify_self_1 (struct GC_stack_base
*base
)
533 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
534 before allocating anything in this thread, because allocation could
535 cause GC to run, and GC could cause finalizers, which could invoke
536 Scheme functions, which need the current thread to be set. */
538 t
.pthread
= scm_i_pthread_self ();
539 t
.handle
= SCM_BOOL_F
;
540 t
.result
= SCM_BOOL_F
;
541 t
.cleanup_handler
= SCM_BOOL_F
;
544 t
.join_queue
= SCM_EOL
;
545 t
.dynamic_state
= SCM_BOOL_F
;
546 t
.dynstack
.base
= NULL
;
547 t
.dynstack
.top
= NULL
;
548 t
.dynstack
.limit
= NULL
;
549 t
.active_asyncs
= SCM_EOL
;
551 t
.pending_asyncs
= 1;
552 t
.critical_section_level
= 0;
553 t
.base
= base
->mem_base
;
555 t
.register_backing_store_base
= base
->reg_base
;
557 t
.continuation_root
= SCM_EOL
;
558 t
.continuation_base
= t
.base
;
559 scm_i_pthread_cond_init (&t
.sleep_cond
, NULL
);
560 t
.sleep_mutex
= NULL
;
561 t
.sleep_object
= SCM_BOOL_F
;
564 if (pipe2 (t
.sleep_pipe
, O_CLOEXEC
) != 0)
565 /* FIXME: Error conditions during the initialization phase are handled
566 gracelessly since public functions such as `scm_init_guile ()'
567 currently have type `void'. */
570 scm_i_pthread_mutex_init (&t
.admin_mutex
, NULL
);
571 t
.current_mark_stack_ptr
= NULL
;
572 t
.current_mark_stack_limit
= NULL
;
577 /* The switcheroo. */
579 scm_i_thread
*t_ptr
= &t
;
582 t_ptr
= GC_malloc (sizeof (scm_i_thread
));
583 memcpy (t_ptr
, &t
, sizeof t
);
585 scm_i_pthread_setspecific (scm_i_thread_key
, t_ptr
);
587 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
588 /* Cache the current thread in TLS for faster lookup. */
589 scm_i_current_thread
= t_ptr
;
592 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
593 t_ptr
->next_thread
= all_threads
;
596 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
602 /* Perform second stage of thread initialisation, in guile mode.
605 guilify_self_2 (SCM parent
)
607 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
611 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
613 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
614 t
->continuation_base
= t
->base
;
617 if (scm_is_true (parent
))
618 t
->dynamic_state
= scm_make_dynamic_state (parent
);
620 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
622 t
->dynstack
.base
= scm_gc_malloc (16 * sizeof (scm_t_bits
), "dynstack");
623 t
->dynstack
.limit
= t
->dynstack
.base
+ 16;
624 t
->dynstack
.top
= t
->dynstack
.base
+ SCM_DYNSTACK_HEADER_LEN
;
626 t
->join_queue
= make_queue ();
629 /* See note in finalizers.c:queue_finalizer_async(). */
630 GC_invoke_finalizers ();
636 /* We implement our own mutex type since we want them to be 'fair', we
637 want to do fancy things while waiting for them (like running
638 asyncs) and we might want to add things that are nice for
643 scm_i_pthread_mutex_t lock
;
645 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
647 int recursive
; /* allow recursive locking? */
648 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
649 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
650 owned by the current thread? */
652 SCM waiting
; /* the threads waiting for this mutex. */
655 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
656 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
659 call_cleanup (void *data
)
662 return scm_call_0 (*proc_p
);
665 /* Perform thread tear-down, in guile mode.
668 do_thread_exit (void *v
)
670 scm_i_thread
*t
= (scm_i_thread
*) v
;
672 if (!scm_is_false (t
->cleanup_handler
))
674 SCM ptr
= t
->cleanup_handler
;
676 t
->cleanup_handler
= SCM_BOOL_F
;
677 t
->result
= scm_internal_catch (SCM_BOOL_T
,
679 scm_handle_by_message_noexit
, NULL
);
682 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
685 close (t
->sleep_pipe
[0]);
686 close (t
->sleep_pipe
[1]);
687 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
690 while (!scm_is_null (t
->mutexes
))
692 SCM mutex
= scm_c_weak_vector_ref (scm_car (t
->mutexes
), 0);
694 if (scm_is_true (mutex
))
696 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
698 scm_i_pthread_mutex_lock (&m
->lock
);
700 /* Check whether T owns MUTEX. This is usually the case, unless
701 T abandoned MUTEX; in that case, T is no longer its owner (see
702 `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
703 if (scm_is_eq (m
->owner
, t
->handle
))
704 unblock_from_queue (m
->waiting
);
706 scm_i_pthread_mutex_unlock (&m
->lock
);
709 t
->mutexes
= scm_cdr (t
->mutexes
);
712 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
718 do_thread_exit_trampoline (struct GC_stack_base
*sb
, void *v
)
720 /* Won't hurt if we are already registered. */
721 #if SCM_USE_PTHREAD_THREADS
722 GC_register_my_thread (sb
);
725 return scm_with_guile (do_thread_exit
, v
);
729 on_thread_exit (void *v
)
731 /* This handler is executed in non-guile mode. */
732 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
734 /* If we were canceled, we were unable to clear `t->guile_mode', so do
738 /* If this thread was cancelled while doing a cond wait, it will
739 still have a mutex locked, so we unlock it here. */
742 scm_i_pthread_mutex_unlock (t
->held_mutex
);
743 t
->held_mutex
= NULL
;
746 /* Reinstate the current thread for purposes of scm_with_guile
747 guile-mode cleanup handlers. Only really needed in the non-TLS
748 case but it doesn't hurt to be consistent. */
749 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
751 /* Scheme-level thread finalizers and other cleanup needs to happen in
753 GC_call_with_stack_base (do_thread_exit_trampoline
, t
);
755 /* Removing ourself from the list of all threads needs to happen in
756 non-guile mode since all SCM values on our stack become
757 unprotected once we are no longer in the list. */
758 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
759 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
762 *tp
= t
->next_thread
;
765 t
->next_thread
= NULL
;
771 /* If there's only one other thread, it could be the signal delivery
772 thread, so we need to notify it to shut down by closing its read pipe.
773 If it's not the signal delivery thread, then closing the read pipe isn't
775 if (thread_count
<= 1)
776 scm_i_close_signal_pipe ();
778 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
780 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
782 #if SCM_USE_PTHREAD_THREADS
783 GC_unregister_my_thread ();
787 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
790 init_thread_key (void)
792 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
795 /* Perform any initializations necessary to make the current thread
796 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
799 BASE is the stack base to use with GC.
801 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
802 which case the default dynamic state is used.
804 Returns zero when the thread was known to guile already; otherwise
807 Note that it could be the case that the thread was known
808 to Guile, but not in guile mode (because we are within a
809 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
810 be sure. New threads are put into guile mode implicitly. */
813 scm_i_init_thread_for_guile (struct GC_stack_base
*base
, SCM parent
)
815 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
817 if (SCM_I_CURRENT_THREAD
)
819 /* Thread is already known to Guile.
825 /* This thread has not been guilified yet.
828 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
829 if (scm_initialized_p
== 0)
831 /* First thread ever to enter Guile. Run the full
834 scm_i_init_guile (base
);
836 #if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
837 /* Allow other threads to come in later. */
838 GC_allow_register_threads ();
841 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
845 /* Guile is already initialized, but this thread enters it for
846 the first time. Only initialize this thread.
848 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
850 /* Register this thread with libgc. */
851 #if SCM_USE_PTHREAD_THREADS
852 GC_register_my_thread (base
);
855 guilify_self_1 (base
);
856 guilify_self_2 (parent
);
865 struct GC_stack_base stack_base
;
867 if (GC_get_stack_base (&stack_base
) == GC_SUCCESS
)
868 scm_i_init_thread_for_guile (&stack_base
,
869 scm_i_default_dynamic_state
);
872 fprintf (stderr
, "Failed to get stack base for current thread.\n");
877 struct with_guile_args
885 with_guile_trampoline (void *data
)
887 struct with_guile_args
*args
= data
;
889 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
893 with_guile_and_parent (struct GC_stack_base
*base
, void *data
)
898 struct with_guile_args
*args
= data
;
900 new_thread
= scm_i_init_thread_for_guile (base
, args
->parent
);
901 t
= SCM_I_CURRENT_THREAD
;
904 /* We are in Guile mode. */
905 assert (t
->guile_mode
);
907 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
909 /* Leave Guile mode. */
912 else if (t
->guile_mode
)
914 /* Already in Guile mode. */
915 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
919 /* We are not in Guile mode, either because we are not within a
920 scm_with_guile, or because we are within a scm_without_guile.
922 This call to scm_with_guile() could happen from anywhere on the
923 stack, and in particular lower on the stack than when it was
924 when this thread was first guilified. Thus, `base' must be
926 #if SCM_STACK_GROWS_UP
927 if (SCM_STACK_PTR (base
->mem_base
) < t
->base
)
928 t
->base
= SCM_STACK_PTR (base
->mem_base
);
930 if (SCM_STACK_PTR (base
->mem_base
) > t
->base
)
931 t
->base
= SCM_STACK_PTR (base
->mem_base
);
935 res
= with_gc_active (with_guile_trampoline
, args
);
942 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
944 struct with_guile_args args
;
948 args
.parent
= parent
;
950 return GC_call_with_stack_base (with_guile_and_parent
, &args
);
954 scm_with_guile (void *(*func
)(void *), void *data
)
956 return scm_i_with_guile_and_parent (func
, data
,
957 scm_i_default_dynamic_state
);
961 scm_without_guile (void *(*func
)(void *), void *data
)
964 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
968 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
969 result
= with_gc_inactive (func
, data
);
970 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
973 /* Otherwise we're not in guile mode, so nothing to do. */
974 result
= func (data
);
980 /*** Thread creation */
987 scm_i_pthread_mutex_t mutex
;
988 scm_i_pthread_cond_t cond
;
992 really_launch (void *d
)
994 launch_data
*data
= (launch_data
*)d
;
995 SCM thunk
= data
->thunk
, handler
= data
->handler
;
998 t
= SCM_I_CURRENT_THREAD
;
1000 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
1001 data
->thread
= scm_current_thread ();
1002 scm_i_pthread_cond_signal (&data
->cond
);
1003 scm_i_pthread_mutex_unlock (&data
->mutex
);
1005 if (SCM_UNBNDP (handler
))
1006 t
->result
= scm_call_0 (thunk
);
1008 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
1014 launch_thread (void *d
)
1016 launch_data
*data
= (launch_data
*)d
;
1017 scm_i_pthread_detach (scm_i_pthread_self ());
1018 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
1022 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
1023 (SCM thunk
, SCM handler
),
1024 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
1025 "returning a new thread object representing the thread. The procedure\n"
1026 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
1028 "When @var{handler} is specified, then @var{thunk} is called from\n"
1029 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
1030 "handler. This catch is established inside the continuation barrier.\n"
1032 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
1033 "the @emph{exit value} of the thread and the thread is terminated.")
1034 #define FUNC_NAME s_scm_call_with_new_thread
1040 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
1041 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
1042 handler
, SCM_ARG2
, FUNC_NAME
);
1044 GC_collect_a_little ();
1045 data
.parent
= scm_current_dynamic_state ();
1047 data
.handler
= handler
;
1048 data
.thread
= SCM_BOOL_F
;
1049 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1050 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1052 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1053 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
1056 scm_i_pthread_mutex_unlock (&data
.mutex
);
1058 scm_syserror (NULL
);
1060 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1061 scm_i_pthread_mutex_unlock (&data
.mutex
);
1069 scm_t_catch_body body
;
1071 scm_t_catch_handler handler
;
1074 scm_i_pthread_mutex_t mutex
;
1075 scm_i_pthread_cond_t cond
;
1079 really_spawn (void *d
)
1081 spawn_data
*data
= (spawn_data
*)d
;
1082 scm_t_catch_body body
= data
->body
;
1083 void *body_data
= data
->body_data
;
1084 scm_t_catch_handler handler
= data
->handler
;
1085 void *handler_data
= data
->handler_data
;
1086 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1088 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
1089 data
->thread
= scm_current_thread ();
1090 scm_i_pthread_cond_signal (&data
->cond
);
1091 scm_i_pthread_mutex_unlock (&data
->mutex
);
1093 if (handler
== NULL
)
1094 t
->result
= body (body_data
);
1096 t
->result
= scm_internal_catch (SCM_BOOL_T
,
1098 handler
, handler_data
);
1104 spawn_thread (void *d
)
1106 spawn_data
*data
= (spawn_data
*)d
;
1107 scm_i_pthread_detach (scm_i_pthread_self ());
1108 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
1113 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
1114 scm_t_catch_handler handler
, void *handler_data
)
1120 data
.parent
= scm_current_dynamic_state ();
1122 data
.body_data
= body_data
;
1123 data
.handler
= handler
;
1124 data
.handler_data
= handler_data
;
1125 data
.thread
= SCM_BOOL_F
;
1126 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1127 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1129 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1130 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1133 scm_i_pthread_mutex_unlock (&data
.mutex
);
1135 scm_syserror (NULL
);
1137 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1138 scm_i_pthread_mutex_unlock (&data
.mutex
);
1140 assert (SCM_I_IS_THREAD (data
.thread
));
1145 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1147 "Move the calling thread to the end of the scheduling queue.")
1148 #define FUNC_NAME s_scm_yield
1150 return scm_from_bool (scm_i_sched_yield ());
1154 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1156 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1157 "cannot be the current thread, and if @var{thread} has already terminated or "
1158 "been signaled to terminate, this function is a no-op.")
1159 #define FUNC_NAME s_scm_cancel_thread
1161 scm_i_thread
*t
= NULL
;
1163 SCM_VALIDATE_THREAD (1, thread
);
1164 t
= SCM_I_THREAD_DATA (thread
);
1165 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1169 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1170 scm_i_pthread_cancel (t
->pthread
);
1173 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1175 return SCM_UNSPECIFIED
;
1179 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1180 (SCM thread
, SCM proc
),
1181 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1182 "This handler will be called when the thread exits.")
1183 #define FUNC_NAME s_scm_set_thread_cleanup_x
1187 SCM_VALIDATE_THREAD (1, thread
);
1188 if (!scm_is_false (proc
))
1189 SCM_VALIDATE_THUNK (2, proc
);
1191 t
= SCM_I_THREAD_DATA (thread
);
1192 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1194 if (!(t
->exited
|| t
->canceled
))
1195 t
->cleanup_handler
= proc
;
1197 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1199 return SCM_UNSPECIFIED
;
1203 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1205 "Return the cleanup handler installed for the thread @var{thread}.")
1206 #define FUNC_NAME s_scm_thread_cleanup
1211 SCM_VALIDATE_THREAD (1, thread
);
1213 t
= SCM_I_THREAD_DATA (thread
);
1214 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1215 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1216 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1222 SCM
scm_join_thread (SCM thread
)
1224 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1227 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1228 (SCM thread
, SCM timeout
, SCM timeoutval
),
1229 "Suspend execution of the calling thread until the target @var{thread} "
1230 "terminates, unless the target @var{thread} has already terminated. ")
1231 #define FUNC_NAME s_scm_join_thread_timed
1234 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1235 SCM res
= SCM_BOOL_F
;
1237 if (! (SCM_UNBNDP (timeoutval
)))
1240 SCM_VALIDATE_THREAD (1, thread
);
1241 if (scm_is_eq (scm_current_thread (), thread
))
1242 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1244 t
= SCM_I_THREAD_DATA (thread
);
1245 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1247 if (! SCM_UNBNDP (timeout
))
1249 to_timespec (timeout
, &ctimeout
);
1250 timeout_ptr
= &ctimeout
;
1259 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1269 else if (err
== ETIMEDOUT
)
1272 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1274 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1276 /* Check for exit again, since we just released and
1277 reacquired the admin mutex, before the next block_self
1278 call (which would block forever if t has already
1288 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1294 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1296 "Return @code{#t} if @var{obj} is a thread.")
1297 #define FUNC_NAME s_scm_thread_p
1299 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1305 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1307 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1308 scm_puts_unlocked ("#<mutex ", port
);
1309 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1310 scm_puts_unlocked (">", port
);
1315 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1319 scm_i_pthread_mutex_t lock
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
1321 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1322 /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
1323 and so we can just copy it. */
1324 memcpy (&m
->lock
, &lock
, sizeof (m
->lock
));
1325 m
->owner
= SCM_BOOL_F
;
1328 m
->recursive
= recursive
;
1329 m
->unchecked_unlock
= unchecked_unlock
;
1330 m
->allow_external_unlock
= external_unlock
;
1332 m
->waiting
= SCM_EOL
;
1333 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1334 m
->waiting
= make_queue ();
1338 SCM
scm_make_mutex (void)
1340 return scm_make_mutex_with_flags (SCM_EOL
);
1343 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1344 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1345 SCM_SYMBOL (recursive_sym
, "recursive");
1347 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1349 "Create a new mutex. ")
1350 #define FUNC_NAME s_scm_make_mutex_with_flags
1352 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1355 while (! scm_is_null (ptr
))
1357 SCM flag
= SCM_CAR (ptr
);
1358 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1359 unchecked_unlock
= 1;
1360 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1361 external_unlock
= 1;
1362 else if (scm_is_eq (flag
, recursive_sym
))
1365 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1366 ptr
= SCM_CDR (ptr
);
1368 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1372 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1374 "Create a new recursive mutex. ")
1375 #define FUNC_NAME s_scm_make_recursive_mutex
1377 return make_fat_mutex (1, 0, 0);
1381 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
/* fat_mutex_lock: acquire MUTEX on behalf of OWNER (defaulting to the
   current thread), optionally bounded by absolute TIMEOUT.  On success
   *ret is set non-zero; the return value is #f or an (key . message)
   pair the caller throws.  NOTE(review): this region was damaged in
   extraction -- several original lines are missing -- so only comments
   were added; the surviving bytes are untouched.  */
1384 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1386 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1388 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1389 SCM err
= SCM_BOOL_F
;
1391 struct timeval current_time
;
1393 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Uncontended case: take ownership directly.  */
1399 m
->owner
= new_owner
;
1402 if (SCM_I_IS_THREAD (new_owner
))
1404 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1406 /* FIXME: The order in which `t->admin_mutex' and
1407 `m->lock' are taken differs from that in
1408 `on_thread_exit', potentially leading to deadlocks. */
1409 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1411 /* Only keep a weak reference to MUTEX so that it's not
1412 retained when not referenced elsewhere (bug #27450).
1413 The weak pair itself is eventually removed when MUTEX
1414 is unlocked. Note that `t->mutexes' lists mutexes
1415 currently held by T, so it should be small. */
1416 t
->mutexes
= scm_cons (scm_make_weak_vector (SCM_INUM1
, mutex
),
1419 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
/* Previous owner exited without unlocking: steal the mutex and report
   an abandoned-mutex-error to the caller.  */
1424 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1426 m
->owner
= new_owner
;
1427 err
= scm_cons (scm_abandoned_mutex_error_key
,
1428 scm_from_locale_string ("lock obtained on abandoned "
/* Re-lock by the current owner: recursion check (error unless the
   mutex was created recursive).  */
1433 else if (scm_is_eq (m
->owner
, new_owner
))
1442 err
= scm_cons (scm_misc_error_key
,
1443 scm_from_locale_string ("mutex already locked "
/* Contended path: fail immediately if the absolute deadline already
   passed, comparing seconds then sub-second parts (usec vs nsec).  */
1451 if (timeout
!= NULL
)
/* NOTE(review): "¤t_time" below is mojibake for "&current_time"
   (an HTML "&curren;" entity artifact from extraction).  */
1453 gettimeofday (¤t_time
, NULL
);
1454 if (current_time
.tv_sec
> timeout
->tv_sec
||
1455 (current_time
.tv_sec
== timeout
->tv_sec
&&
1456 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
/* Otherwise park on the mutex's wait queue until woken or timed out;
   block_self releases and re-takes m->lock internally.  */
1462 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1463 scm_i_pthread_mutex_unlock (&m
->lock
);
1465 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1468 scm_i_pthread_mutex_unlock (&m
->lock
);
1472 SCM
scm_lock_mutex (SCM mx
)
1474 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1477 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1478 (SCM m
, SCM timeout
, SCM owner
),
1479 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1480 "thread blocks until the mutex becomes available. The function\n"
1481 "returns when the calling thread owns the lock on @var{m}.\n"
1482 "Locking a mutex that a thread already owns will succeed right\n"
1483 "away and will not block the thread. That is, Guile's mutexes\n"
1484 "are @emph{recursive}.")
1485 #define FUNC_NAME s_scm_lock_mutex_timed
1489 scm_t_timespec cwaittime
, *waittime
= NULL
;
1491 SCM_VALIDATE_MUTEX (1, m
);
1493 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1495 to_timespec (timeout
, &cwaittime
);
1496 waittime
= &cwaittime
;
1499 if (!SCM_UNBNDP (owner
) && !scm_is_false (owner
))
1500 SCM_VALIDATE_THREAD (3, owner
);
1502 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1503 if (!scm_is_false (exception
))
1504 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1505 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1510 lock_mutex_return_void (SCM mx
)
1512 (void) scm_lock_mutex (mx
);
1516 unlock_mutex_return_void (SCM mx
)
1518 (void) scm_unlock_mutex (mx
);
1522 scm_dynwind_lock_mutex (SCM mutex
)
1524 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void
, mutex
,
1525 SCM_F_WIND_EXPLICITLY
);
1526 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void
, mutex
,
1527 SCM_F_WIND_EXPLICITLY
);
1530 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1532 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1533 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1534 #define FUNC_NAME s_scm_try_mutex
1538 scm_t_timespec cwaittime
, *waittime
= NULL
;
1540 SCM_VALIDATE_MUTEX (1, mutex
);
1542 to_timespec (scm_from_int(0), &cwaittime
);
1543 waittime
= &cwaittime
;
1545 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1546 if (!scm_is_false (exception
))
1547 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1548 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1552 /*** Fat condition variables */
1555 scm_i_pthread_mutex_t lock
;
1556 SCM waiting
; /* the threads waiting for this condition. */
1559 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1560 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1563 remove_mutex_from_thread (SCM mutex
, scm_i_thread
*t
)
1567 for (prev
= SCM_BOOL_F
, walk
= t
->mutexes
; scm_is_pair (walk
);
1568 walk
= SCM_CDR (walk
))
1570 if (scm_is_eq (mutex
, scm_c_weak_vector_ref (SCM_CAR (walk
), 0)))
1572 if (scm_is_pair (prev
))
1573 SCM_SETCDR (prev
, SCM_CDR (walk
));
1575 t
->mutexes
= SCM_CDR (walk
);
/* fat_mutex_unlock: release MUTEX; when COND is given, additionally
   wait on COND (with optional WAITTIME deadline) and, if RELOCK is
   non-zero, re-acquire MUTEX before returning -- this implements
   wait-condition-variable.  NOTE(review): this region was damaged in
   extraction -- several original lines are missing -- so only comments
   were added; the surviving bytes are untouched.  */
1582 fat_mutex_unlock (SCM mutex
, SCM cond
,
1583 const scm_t_timespec
*waittime
, int relock
)
1586 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1588 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1589 int err
= 0, ret
= 0;
1591 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Unlocking from a thread that does not own the mutex is only allowed
   when the mutex was created with the corresponding flags.  */
1595 if (!scm_is_eq (owner
, t
->handle
))
1599 if (!m
->unchecked_unlock
)
1601 scm_i_pthread_mutex_unlock (&m
->lock
);
1602 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1606 else if (!m
->allow_external_unlock
)
1608 scm_i_pthread_mutex_unlock (&m
->lock
);
1609 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
/* Condition-variable path: release the mutex, wake a waiter, then
   block on the condvar's queue.  */
1613 if (! (SCM_UNBNDP (cond
)))
1615 c
= SCM_CONDVAR_DATA (cond
);
1624 /* Change the owner of MUTEX. */
1625 remove_mutex_from_thread (mutex
, t
);
1626 m
->owner
= unblock_from_queue (m
->waiting
);
1631 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1632 scm_i_pthread_mutex_unlock (&m
->lock
);
/* block_self result: 0 = signalled, ETIMEDOUT = deadline passed,
   EINTR = spurious wakeup; anything else is a system error.  */
1639 else if (err
== ETIMEDOUT
)
1644 else if (err
!= EINTR
)
1647 scm_syserror (NULL
);
/* RELOCK: re-acquire the mutex before returning to the caller.  */
1653 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1661 scm_remember_upto_here_2 (cond
, mutex
);
1663 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Plain unlock (no condvar): hand the mutex to the next waiter.  */
1672 /* Change the owner of MUTEX. */
1673 remove_mutex_from_thread (mutex
, t
);
1674 m
->owner
= unblock_from_queue (m
->waiting
);
1677 scm_i_pthread_mutex_unlock (&m
->lock
);
1684 SCM
scm_unlock_mutex (SCM mx
)
1686 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1689 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1690 (SCM mx
, SCM cond
, SCM timeout
),
1691 "Unlocks @var{mutex} if the calling thread owns the lock on "
1692 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1693 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1694 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1695 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1696 "with a call to @code{unlock-mutex}. Only the last call to "
1697 "@code{unlock-mutex} will actually unlock the mutex. ")
1698 #define FUNC_NAME s_scm_unlock_mutex_timed
1700 scm_t_timespec cwaittime
, *waittime
= NULL
;
1702 SCM_VALIDATE_MUTEX (1, mx
);
1703 if (! (SCM_UNBNDP (cond
)))
1705 SCM_VALIDATE_CONDVAR (2, cond
);
1707 if (! (SCM_UNBNDP (timeout
)))
1709 to_timespec (timeout
, &cwaittime
);
1710 waittime
= &cwaittime
;
1714 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1718 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1720 "Return @code{#t} if @var{obj} is a mutex.")
1721 #define FUNC_NAME s_scm_mutex_p
1723 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1727 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1729 "Return the thread owning @var{mx}, or @code{#f}.")
1730 #define FUNC_NAME s_scm_mutex_owner
1733 fat_mutex
*m
= NULL
;
1735 SCM_VALIDATE_MUTEX (1, mx
);
1736 m
= SCM_MUTEX_DATA (mx
);
1737 scm_i_pthread_mutex_lock (&m
->lock
);
1739 scm_i_pthread_mutex_unlock (&m
->lock
);
1745 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1747 "Return the lock level of mutex @var{mx}.")
1748 #define FUNC_NAME s_scm_mutex_level
1750 SCM_VALIDATE_MUTEX (1, mx
);
1751 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1755 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1757 "Returns @code{#t} if the mutex @var{mx} is locked.")
1758 #define FUNC_NAME s_scm_mutex_locked_p
1760 SCM_VALIDATE_MUTEX (1, mx
);
1761 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1766 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1768 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1769 scm_puts_unlocked ("#<condition-variable ", port
);
1770 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1771 scm_puts_unlocked (">", port
);
1775 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1777 "Make a new condition variable.")
1778 #define FUNC_NAME s_scm_make_condition_variable
1783 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1784 c
->waiting
= SCM_EOL
;
1785 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1786 c
->waiting
= make_queue ();
/* wait-condition-variable: implemented by fat_mutex_unlock with
   relock = 1 (atomically unlock MX, wait on CV, re-lock MX).
   NOTE(review): one docstring line of the original is missing here
   (extraction damage), so the bytes are left untouched.  */
1791 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1792 (SCM cv
, SCM mx
, SCM t
),
1793 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1794 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1795 "is locked again when this function returns. When @var{t} is given, "
1796 "it specifies a point in time where the waiting should be aborted. It "
1797 "can be either a integer as returned by @code{current-time} or a pair "
1798 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1799 "mutex is locked and @code{#f} is returned. When the condition "
1800 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1802 #define FUNC_NAME s_scm_timed_wait_condition_variable
1804 scm_t_timespec waittime
, *waitptr
= NULL
;
1806 SCM_VALIDATE_CONDVAR (1, cv
);
1807 SCM_VALIDATE_MUTEX (2, mx
);
/* Optional absolute deadline for the wait.  */
1809 if (!SCM_UNBNDP (t
))
1811 to_timespec (t
, &waittime
);
1812 waitptr
= &waittime
;
1815 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1820 fat_cond_signal (fat_cond
*c
)
1822 unblock_from_queue (c
->waiting
);
1825 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1827 "Wake up one thread that is waiting for @var{cv}")
1828 #define FUNC_NAME s_scm_signal_condition_variable
1830 SCM_VALIDATE_CONDVAR (1, cv
);
1831 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1837 fat_cond_broadcast (fat_cond
*c
)
1839 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1843 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1845 "Wake up all threads that are waiting for @var{cv}. ")
1846 #define FUNC_NAME s_scm_broadcast_condition_variable
1848 SCM_VALIDATE_CONDVAR (1, cv
);
1849 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1854 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1856 "Return @code{#t} if @var{obj} is a condition variable.")
1857 #define FUNC_NAME s_scm_condition_variable_p
1859 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1870 SELECT_TYPE
*read_fds
;
1871 SELECT_TYPE
*write_fds
;
1872 SELECT_TYPE
*except_fds
;
1873 struct timeval
*timeout
;
1880 do_std_select (void *args
)
1882 struct select_args
*select_args
;
1884 select_args
= (struct select_args
*) args
;
1886 select_args
->result
=
1887 select (select_args
->nfds
,
1888 select_args
->read_fds
, select_args
->write_fds
,
1889 select_args
->except_fds
, select_args
->timeout
);
1890 select_args
->errno_value
= errno
;
1896 scm_std_select (int nfds
,
1897 SELECT_TYPE
*readfds
,
1898 SELECT_TYPE
*writefds
,
1899 SELECT_TYPE
*exceptfds
,
1900 struct timeval
*timeout
)
1903 int res
, eno
, wakeup_fd
;
1904 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1905 struct select_args args
;
1907 if (readfds
== NULL
)
1909 FD_ZERO (&my_readfds
);
1910 readfds
= &my_readfds
;
1913 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1916 wakeup_fd
= t
->sleep_pipe
[0];
1917 FD_SET (wakeup_fd
, readfds
);
1918 if (wakeup_fd
>= nfds
)
1922 args
.read_fds
= readfds
;
1923 args
.write_fds
= writefds
;
1924 args
.except_fds
= exceptfds
;
1925 args
.timeout
= timeout
;
1927 /* Explicitly cooperate with the GC. */
1928 scm_without_guile (do_std_select
, &args
);
1931 eno
= args
.errno_value
;
1934 scm_i_reset_sleep (t
);
1936 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1939 full_read (wakeup_fd
, &dummy
, 1);
1941 FD_CLR (wakeup_fd
, readfds
);
1953 /* Convenience API for blocking while in guile mode. */
1955 #if SCM_USE_PTHREAD_THREADS
1957 /* It seems reasonable to not run procedures related to mutex and condition
1958 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1959 without it, and (ii) the only potential gain would be GC latency. See
1960 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1961 for a discussion of the pros and cons. */
1964 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1966 int res
= scm_i_pthread_mutex_lock (mutex
);
1971 do_unlock (void *data
)
1973 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1977 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1979 scm_i_scm_pthread_mutex_lock (mutex
);
1980 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1984 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1987 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1989 t
->held_mutex
= mutex
;
1990 res
= scm_i_pthread_cond_wait (cond
, mutex
);
1991 t
->held_mutex
= NULL
;
1997 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1998 scm_i_pthread_mutex_t
*mutex
,
1999 const scm_t_timespec
*wt
)
2002 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
2004 t
->held_mutex
= mutex
;
2005 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
2006 t
->held_mutex
= NULL
;
2014 scm_std_usleep (unsigned long usecs
)
2017 tv
.tv_usec
= usecs
% 1000000;
2018 tv
.tv_sec
= usecs
/ 1000000;
2019 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
2020 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
2024 scm_std_sleep (unsigned int secs
)
2029 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
2035 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
2037 "Return the thread that called this function.")
2038 #define FUNC_NAME s_scm_current_thread
2040 return SCM_I_CURRENT_THREAD
->handle
;
2045 scm_c_make_list (size_t n
, SCM fill
)
2049 res
= scm_cons (fill
, res
);
2053 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
2055 "Return a list of all threads.")
2056 #define FUNC_NAME s_scm_all_threads
2058 /* We can not allocate while holding the thread_admin_mutex because
2059 of the way GC is done.
2061 int n
= thread_count
;
2063 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
2065 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
2067 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
2069 if (t
!= scm_i_signal_delivery_thread
)
2071 SCM_SETCAR (*l
, t
->handle
);
2072 l
= SCM_CDRLOC (*l
);
2077 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
2082 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
2084 "Return @code{#t} iff @var{thread} has exited.\n")
2085 #define FUNC_NAME s_scm_thread_exited_p
2087 return scm_from_bool (scm_c_thread_exited_p (thread
));
2092 scm_c_thread_exited_p (SCM thread
)
2093 #define FUNC_NAME s_scm_thread_exited_p
2096 SCM_VALIDATE_THREAD (1, thread
);
2097 t
= SCM_I_THREAD_DATA (thread
);
/* Delegates to Gnulib's num_processors (nproc module).  NOTE(review):
   some docstring lines of the original were lost in extraction, so the
   bytes are left untouched.  */
2102 SCM_DEFINE (scm_total_processor_count
, "total-processor-count", 0, 0, 0,
2104 "Return the total number of processors of the machine, which\n"
2105 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2106 "thread execution unit, which can be either:\n\n"
2108 "@item an execution core in a (possibly multi-core) chip, in a\n"
2109 " (possibly multi- chip) module, in a single computer, or\n"
2110 "@item a thread execution unit inside a core in the case of\n"
2111 " @dfn{hyper-threaded} CPUs.\n"
2113 "Which of the two definitions is used, is unspecified.\n")
2114 #define FUNC_NAME s_scm_total_processor_count
2116 return scm_from_ulong (num_processors (NPROC_ALL
));
/* Like the above but restricted to the current process's affinity set
   (Gnulib NPROC_CURRENT).  NOTE(review): a docstring line was lost in
   extraction; bytes left untouched.  */
2120 SCM_DEFINE (scm_current_processor_count
, "current-processor-count", 0, 0, 0,
2122 "Like @code{total-processor-count}, but return the number of\n"
2123 "processors available to the current process. See\n"
2124 "@code{setaffinity} and @code{getaffinity} for more\n"
2126 #define FUNC_NAME s_scm_current_processor_count
2128 return scm_from_ulong (num_processors (NPROC_CURRENT
));
/* Condition variable used to wake sleeping threads (see scm_std_select
   machinery above).  */
2135 static scm_i_pthread_cond_t wake_up_cond
;
/* Set once scm_init_threads has run.  */
2136 static int threads_initialized_p
= 0;
2139 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2141 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
/* Default mutex for scm_dynwind_critical_section when #f is passed.  */
2143 static SCM dynwind_critical_section_mutex
;
2146 scm_dynwind_critical_section (SCM mutex
)
2148 if (scm_is_false (mutex
))
2149 mutex
= dynwind_critical_section_mutex
;
2150 scm_dynwind_lock_mutex (mutex
);
2151 scm_dynwind_block_asyncs ();
2154 /*** Initialization */
/* Protects miscellaneous one-off global state.  */
2156 scm_i_pthread_mutex_t scm_i_misc_mutex
;
2158 #if SCM_USE_PTHREAD_THREADS
/* Recursive mutex attribute, initialized in scm_threads_prehistory;
   declared as a one-element array so it can be passed as a pointer.  */
2159 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
2163 scm_threads_prehistory (void *base
)
2165 #if SCM_USE_PTHREAD_THREADS
2166 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2167 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2168 PTHREAD_MUTEX_RECURSIVE
);
2171 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2172 scm_i_pthread_mutexattr_recursive
);
2173 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2174 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
2176 guilify_self_1 ((struct GC_stack_base
*) base
);
/* SMOB type tags for the three thread-related Scheme types; assigned
   in scm_init_threads.  */
2179 scm_t_bits scm_tc16_thread
;
2180 scm_t_bits scm_tc16_mutex
;
2181 scm_t_bits scm_tc16_condvar
;
2186 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2187 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2189 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2190 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2192 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2194 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
2196 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2197 guilify_self_2 (SCM_BOOL_F
);
2198 threads_initialized_p
= 1;
2200 dynwind_critical_section_mutex
= scm_make_recursive_mutex ();
2204 scm_init_threads_default_dynamic_state ()
2206 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2207 scm_i_default_dynamic_state
= state
;
2211 scm_init_thread_procs ()
2213 #include "libguile/threads.x"
/* IA64-specific things.  The IA64 keeps a second, register backing
   store stack that the GC must also scan; these helpers locate its
   base and current top per platform.  NOTE(review): the preprocessor
   skeleton and a few lines were lost in extraction and are
   reconstructed from upstream threads.c -- confirm before relying on
   the HP-UX error path.  */

#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  /* Walk the process's VM regions looking for the RSE stack.  */
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  return 0;
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  /* Top of the dirty register area: store base plus unflushed bytes.  */
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
		  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */