/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
 *   2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
 *   Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
29 #include "libguile/_scm.h"
38 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
46 # include <pthread_np.h>
49 #include <sys/select.h>
55 #include "libguile/validate.h"
56 #include "libguile/root.h"
57 #include "libguile/eval.h"
58 #include "libguile/async.h"
59 #include "libguile/ports.h"
60 #include "libguile/threads.h"
61 #include "libguile/dynwind.h"
62 #include "libguile/iselect.h"
63 #include "libguile/fluids.h"
64 #include "libguile/continuations.h"
65 #include "libguile/gc.h"
66 #include "libguile/init.h"
67 #include "libguile/scmsigs.h"
68 #include "libguile/strings.h"
69 #include "libguile/vm.h"
71 #include <full-read.h>
/* The GC "kind" for threads that allow them to mark their VM
   stacks.  Assigned in scm_threads_prehistory (outside this view) via
   GC_new_kind; TODO confirm against the rest of the file.  */
static int thread_gc_kind;
80 static struct GC_ms_entry
*
81 thread_mark (GC_word
*addr
, struct GC_ms_entry
*mark_stack_ptr
,
82 struct GC_ms_entry
*mark_stack_limit
, GC_word env
)
85 const struct scm_i_thread
*t
= (struct scm_i_thread
*) addr
;
87 if (SCM_UNPACK (t
->handle
) == 0)
88 /* T must be on the free-list; ignore. (See warning in
90 return mark_stack_ptr
;
92 /* Mark T. We could be more precise, but it doesn't matter. */
93 for (word
= 0; word
* sizeof (*addr
) < sizeof (*t
); word
++)
94 mark_stack_ptr
= GC_MARK_AND_PUSH ((void *) addr
[word
],
95 mark_stack_ptr
, mark_stack_limit
,
99 mark_stack_ptr
= scm_i_vm_mark_stack (t
->vp
, mark_stack_ptr
,
102 return mark_stack_ptr
;
108 to_timespec (SCM t
, scm_t_timespec
*waittime
)
112 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
113 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
117 double time
= scm_to_double (t
);
118 double sec
= scm_c_truncate (time
);
120 waittime
->tv_sec
= (long) sec
;
121 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
   the risk of false references leading to unbounded retained space as
   described in "Bounding Space Usage of Conservative Garbage Collectors",
   Hans-J. Boehm, 2001.  */
134 /* Make an empty queue data structure.
139 return scm_cons (SCM_EOL
, SCM_EOL
);
142 /* Put T at the back of Q and return a handle that can be used with
143 remqueue to remove T from Q again.
146 enqueue (SCM q
, SCM t
)
148 SCM c
= scm_cons (t
, SCM_EOL
);
149 SCM_CRITICAL_SECTION_START
;
150 if (scm_is_null (SCM_CDR (q
)))
153 SCM_SETCDR (SCM_CAR (q
), c
);
155 SCM_CRITICAL_SECTION_END
;
159 /* Remove the element that the handle C refers to from the queue Q. C
160 must have been returned from a call to enqueue. The return value
161 is zero when the element referred to by C has already been removed.
162 Otherwise, 1 is returned.
165 remqueue (SCM q
, SCM c
)
168 SCM_CRITICAL_SECTION_START
;
169 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
171 if (scm_is_eq (p
, c
))
173 if (scm_is_eq (c
, SCM_CAR (q
)))
174 SCM_SETCAR (q
, scm_is_eq (prev
, q
) ? SCM_EOL
: prev
);
175 SCM_SETCDR (prev
, SCM_CDR (c
));
178 SCM_SETCDR (c
, SCM_EOL
);
180 SCM_CRITICAL_SECTION_END
;
185 SCM_CRITICAL_SECTION_END
;
189 /* Remove the front-most element from the queue Q and return it.
190 Return SCM_BOOL_F when Q is empty.
196 SCM_CRITICAL_SECTION_START
;
200 SCM_CRITICAL_SECTION_END
;
205 SCM_SETCDR (q
, SCM_CDR (c
));
206 if (scm_is_null (SCM_CDR (q
)))
207 SCM_SETCAR (q
, SCM_EOL
);
208 SCM_CRITICAL_SECTION_END
;
211 SCM_SETCDR (c
, SCM_EOL
);
217 /*** Thread smob routines */
221 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
223 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
224 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
225 the struct case, hence we go via a union, and extract according to the
226 size of pthread_t. */
234 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
235 scm_i_pthread_t p
= t
->pthread
;
238 if (sizeof (p
) == sizeof (unsigned short))
240 else if (sizeof (p
) == sizeof (unsigned int))
242 else if (sizeof (p
) == sizeof (unsigned long))
247 scm_puts_unlocked ("#<thread ", port
);
248 scm_uintprint (id
, 10, port
);
249 scm_puts_unlocked (" (", port
);
250 scm_uintprint ((scm_t_bits
)t
, 16, port
);
251 scm_puts_unlocked (")>", port
);
256 /*** Blocking on queues. */
258 /* See also scm_i_queue_async_cell for how such a block is
262 /* Put the current thread on QUEUE and go to sleep, waiting for it to
263 be woken up by a call to 'unblock_from_queue', or to be
264 interrupted. Upon return of this function, the current thread is
265 no longer on QUEUE, even when the sleep has been interrupted.
267 The caller of block_self must hold MUTEX. It will be atomically
268 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
270 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
273 When WAITTIME is not NULL, the sleep will be aborted at that time.
275 The return value of block_self is an errno value. It will be zero
276 when the sleep has been successfully completed by a call to
277 unblock_from_queue, EINTR when it has been interrupted by the
278 delivery of a system async, and ETIMEDOUT when the timeout has
281 The system asyncs themselves are not executed by block_self.
284 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
285 const scm_t_timespec
*waittime
)
287 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
291 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
296 q_handle
= enqueue (queue
, t
->handle
);
297 if (waittime
== NULL
)
298 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
300 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
302 /* When we are still on QUEUE, we have been interrupted. We
303 report this only when no other error (such as a timeout) has
306 if (remqueue (queue
, q_handle
) && err
== 0)
309 scm_i_reset_sleep (t
);
315 /* Wake up the first thread on QUEUE, if any. The awoken thread is
316 returned, or #f if the queue was empty.
319 unblock_from_queue (SCM queue
)
321 SCM thread
= dequeue (queue
);
322 if (scm_is_true (thread
))
323 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
328 /* Getting into and out of guile mode.
331 /* Key used to attach a cleanup handler to a given thread. Also, if
332 thread-local storage is unavailable, this key is used to retrieve the
333 current thread with `pthread_getspecific ()'. */
334 scm_i_pthread_key_t scm_i_thread_key
;
337 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
339 /* When thread-local storage (TLS) is available, a pointer to the
340 current-thread object is kept in TLS. Note that storing the thread-object
341 itself in TLS (rather than a pointer to some malloc'd memory) is not
342 possible since thread objects may live longer than the actual thread they
344 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
346 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
349 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
350 static scm_i_thread
*all_threads
= NULL
;
351 static int thread_count
;
353 static SCM scm_i_default_dynamic_state
;
355 /* Run when a fluid is collected. */
357 scm_i_reset_fluid (size_t n
)
361 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
362 for (t
= all_threads
; t
; t
= t
->next_thread
)
363 if (SCM_I_DYNAMIC_STATE_P (t
->dynamic_state
))
365 SCM v
= SCM_I_DYNAMIC_STATE_FLUIDS (t
->dynamic_state
);
367 if (n
< SCM_SIMPLE_VECTOR_LENGTH (v
))
368 SCM_SIMPLE_VECTOR_SET (v
, n
, SCM_UNDEFINED
);
370 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
373 /* Perform first stage of thread initialisation, in non-guile mode.
376 guilify_self_1 (struct GC_stack_base
*base
)
380 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
381 before allocating anything in this thread, because allocation could
382 cause GC to run, and GC could cause finalizers, which could invoke
383 Scheme functions, which need the current thread to be set. */
385 t
.pthread
= scm_i_pthread_self ();
386 t
.handle
= SCM_BOOL_F
;
387 t
.result
= SCM_BOOL_F
;
388 t
.cleanup_handler
= SCM_BOOL_F
;
391 t
.join_queue
= SCM_EOL
;
392 t
.dynamic_state
= SCM_BOOL_F
;
393 t
.dynstack
.base
= NULL
;
394 t
.dynstack
.top
= NULL
;
395 t
.dynstack
.limit
= NULL
;
396 t
.active_asyncs
= SCM_EOL
;
398 t
.pending_asyncs
= 1;
399 t
.critical_section_level
= 0;
400 t
.base
= base
->mem_base
;
402 t
.register_backing_store_base
= base
->reg_base
;
404 t
.continuation_root
= SCM_EOL
;
405 t
.continuation_base
= t
.base
;
406 scm_i_pthread_cond_init (&t
.sleep_cond
, NULL
);
407 t
.sleep_mutex
= NULL
;
408 t
.sleep_object
= SCM_BOOL_F
;
412 if (pipe2 (t
.sleep_pipe
, O_CLOEXEC
) != 0)
413 /* FIXME: Error conditions during the initialization phase are handled
414 gracelessly since public functions such as `scm_init_guile ()'
415 currently have type `void'. */
418 scm_i_pthread_mutex_init (&t
.admin_mutex
, NULL
);
423 /* The switcheroo. */
425 scm_i_thread
*t_ptr
= &t
;
428 t_ptr
= GC_generic_malloc (sizeof (*t_ptr
), thread_gc_kind
);
429 memcpy (t_ptr
, &t
, sizeof t
);
431 scm_i_pthread_setspecific (scm_i_thread_key
, t_ptr
);
433 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
434 /* Cache the current thread in TLS for faster lookup. */
435 scm_i_current_thread
= t_ptr
;
438 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
439 t_ptr
->next_thread
= all_threads
;
442 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
448 /* Perform second stage of thread initialisation, in guile mode.
451 guilify_self_2 (SCM parent
)
453 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
457 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
459 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
460 t
->continuation_base
= t
->base
;
462 if (scm_is_true (parent
))
463 t
->dynamic_state
= scm_make_dynamic_state (parent
);
465 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
467 t
->dynstack
.base
= scm_gc_malloc (16 * sizeof (scm_t_bits
), "dynstack");
468 t
->dynstack
.limit
= t
->dynstack
.base
+ 16;
469 t
->dynstack
.top
= t
->dynstack
.base
+ SCM_DYNSTACK_HEADER_LEN
;
471 t
->join_queue
= make_queue ();
474 /* See note in finalizers.c:queue_finalizer_async(). */
475 GC_invoke_finalizers ();
481 /* We implement our own mutex type since we want them to be 'fair', we
482 want to do fancy things while waiting for them (like running
483 asyncs) and we might want to add things that are nice for
488 scm_i_pthread_mutex_t lock
;
490 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
492 int recursive
; /* allow recursive locking? */
493 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
494 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
495 owned by the current thread? */
497 SCM waiting
; /* the threads waiting for this mutex. */
500 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
501 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
504 call_cleanup (void *data
)
507 return scm_call_0 (*proc_p
);
510 /* Perform thread tear-down, in guile mode.
513 do_thread_exit (void *v
)
515 scm_i_thread
*t
= (scm_i_thread
*) v
;
517 if (!scm_is_false (t
->cleanup_handler
))
519 SCM ptr
= t
->cleanup_handler
;
521 t
->cleanup_handler
= SCM_BOOL_F
;
522 t
->result
= scm_internal_catch (SCM_BOOL_T
,
524 scm_handle_by_message_noexit
, NULL
);
527 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
530 close (t
->sleep_pipe
[0]);
531 close (t
->sleep_pipe
[1]);
532 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
535 while (!scm_is_null (t
->mutexes
))
537 SCM mutex
= scm_c_weak_vector_ref (scm_car (t
->mutexes
), 0);
539 if (scm_is_true (mutex
))
541 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
543 scm_i_pthread_mutex_lock (&m
->lock
);
545 /* Check whether T owns MUTEX. This is usually the case, unless
546 T abandoned MUTEX; in that case, T is no longer its owner (see
547 `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
548 if (scm_is_eq (m
->owner
, t
->handle
))
549 unblock_from_queue (m
->waiting
);
551 scm_i_pthread_mutex_unlock (&m
->lock
);
554 t
->mutexes
= scm_cdr (t
->mutexes
);
557 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
563 do_thread_exit_trampoline (struct GC_stack_base
*sb
, void *v
)
565 /* Won't hurt if we are already registered. */
566 #if SCM_USE_PTHREAD_THREADS
567 GC_register_my_thread (sb
);
570 return scm_with_guile (do_thread_exit
, v
);
574 on_thread_exit (void *v
)
576 /* This handler is executed in non-guile mode. */
577 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
579 /* If we were canceled, we were unable to clear `t->guile_mode', so do
583 /* If this thread was cancelled while doing a cond wait, it will
584 still have a mutex locked, so we unlock it here. */
587 scm_i_pthread_mutex_unlock (t
->held_mutex
);
588 t
->held_mutex
= NULL
;
591 /* Reinstate the current thread for purposes of scm_with_guile
592 guile-mode cleanup handlers. Only really needed in the non-TLS
593 case but it doesn't hurt to be consistent. */
594 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
596 /* Scheme-level thread finalizers and other cleanup needs to happen in
598 GC_call_with_stack_base (do_thread_exit_trampoline
, t
);
600 /* Removing ourself from the list of all threads needs to happen in
601 non-guile mode since all SCM values on our stack become
602 unprotected once we are no longer in the list. */
603 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
604 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
607 *tp
= t
->next_thread
;
610 t
->next_thread
= NULL
;
616 /* If there's only one other thread, it could be the signal delivery
617 thread, so we need to notify it to shut down by closing its read pipe.
618 If it's not the signal delivery thread, then closing the read pipe isn't
620 if (thread_count
<= 1)
621 scm_i_close_signal_pipe ();
623 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
625 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
629 scm_i_vm_free_stack (t
->vp
);
633 #if SCM_USE_PTHREAD_THREADS
634 GC_unregister_my_thread ();
638 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
641 init_thread_key (void)
643 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
646 /* Perform any initializations necessary to make the current thread
647 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
650 BASE is the stack base to use with GC.
652 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
653 which case the default dynamic state is used.
655 Returns zero when the thread was known to guile already; otherwise
658 Note that it could be the case that the thread was known
659 to Guile, but not in guile mode (because we are within a
660 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
661 be sure. New threads are put into guile mode implicitly. */
664 scm_i_init_thread_for_guile (struct GC_stack_base
*base
, SCM parent
)
666 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
668 if (SCM_I_CURRENT_THREAD
)
670 /* Thread is already known to Guile.
676 /* This thread has not been guilified yet.
679 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
680 if (scm_initialized_p
== 0)
682 /* First thread ever to enter Guile. Run the full
685 scm_i_init_guile (base
);
687 #if SCM_USE_PTHREAD_THREADS
688 /* Allow other threads to come in later. */
689 GC_allow_register_threads ();
692 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
696 /* Guile is already initialized, but this thread enters it for
697 the first time. Only initialize this thread.
699 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
701 /* Register this thread with libgc. */
702 #if SCM_USE_PTHREAD_THREADS
703 GC_register_my_thread (base
);
706 guilify_self_1 (base
);
707 guilify_self_2 (parent
);
716 struct GC_stack_base stack_base
;
718 if (GC_get_stack_base (&stack_base
) == GC_SUCCESS
)
719 scm_i_init_thread_for_guile (&stack_base
,
720 scm_i_default_dynamic_state
);
723 fprintf (stderr
, "Failed to get stack base for current thread.\n");
728 struct with_guile_args
736 with_guile_trampoline (void *data
)
738 struct with_guile_args
*args
= data
;
740 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
744 with_guile_and_parent (struct GC_stack_base
*base
, void *data
)
749 struct with_guile_args
*args
= data
;
751 new_thread
= scm_i_init_thread_for_guile (base
, args
->parent
);
752 t
= SCM_I_CURRENT_THREAD
;
755 /* We are in Guile mode. */
756 assert (t
->guile_mode
);
758 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
760 /* Leave Guile mode. */
763 else if (t
->guile_mode
)
765 /* Already in Guile mode. */
766 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
770 /* We are not in Guile mode, either because we are not within a
771 scm_with_guile, or because we are within a scm_without_guile.
773 This call to scm_with_guile() could happen from anywhere on the
774 stack, and in particular lower on the stack than when it was
775 when this thread was first guilified. Thus, `base' must be
777 #if SCM_STACK_GROWS_UP
778 if (SCM_STACK_PTR (base
->mem_base
) < t
->base
)
779 t
->base
= SCM_STACK_PTR (base
->mem_base
);
781 if (SCM_STACK_PTR (base
->mem_base
) > t
->base
)
782 t
->base
= SCM_STACK_PTR (base
->mem_base
);
786 res
= GC_call_with_gc_active (with_guile_trampoline
, args
);
793 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
795 struct with_guile_args args
;
799 args
.parent
= parent
;
801 return GC_call_with_stack_base (with_guile_and_parent
, &args
);
805 scm_with_guile (void *(*func
)(void *), void *data
)
807 return scm_i_with_guile_and_parent (func
, data
,
808 scm_i_default_dynamic_state
);
812 scm_without_guile (void *(*func
)(void *), void *data
)
815 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
819 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
820 result
= GC_do_blocking (func
, data
);
821 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
824 /* Otherwise we're not in guile mode, so nothing to do. */
825 result
= func (data
);
831 /*** Thread creation */
838 scm_i_pthread_mutex_t mutex
;
839 scm_i_pthread_cond_t cond
;
843 really_launch (void *d
)
845 launch_data
*data
= (launch_data
*)d
;
846 SCM thunk
= data
->thunk
, handler
= data
->handler
;
849 t
= SCM_I_CURRENT_THREAD
;
851 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
852 data
->thread
= scm_current_thread ();
853 scm_i_pthread_cond_signal (&data
->cond
);
854 scm_i_pthread_mutex_unlock (&data
->mutex
);
856 if (SCM_UNBNDP (handler
))
857 t
->result
= scm_call_0 (thunk
);
859 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
865 launch_thread (void *d
)
867 launch_data
*data
= (launch_data
*)d
;
868 scm_i_pthread_detach (scm_i_pthread_self ());
869 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
873 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
874 (SCM thunk
, SCM handler
),
875 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
876 "returning a new thread object representing the thread. The procedure\n"
877 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
879 "When @var{handler} is specified, then @var{thunk} is called from\n"
880 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
881 "handler. This catch is established inside the continuation barrier.\n"
883 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
884 "the @emph{exit value} of the thread and the thread is terminated.")
885 #define FUNC_NAME s_scm_call_with_new_thread
891 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
892 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
893 handler
, SCM_ARG2
, FUNC_NAME
);
895 GC_collect_a_little ();
896 data
.parent
= scm_current_dynamic_state ();
898 data
.handler
= handler
;
899 data
.thread
= SCM_BOOL_F
;
900 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
901 scm_i_pthread_cond_init (&data
.cond
, NULL
);
903 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
904 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
907 scm_i_pthread_mutex_unlock (&data
.mutex
);
912 while (scm_is_false (data
.thread
))
913 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
915 scm_i_pthread_mutex_unlock (&data
.mutex
);
923 scm_t_catch_body body
;
925 scm_t_catch_handler handler
;
928 scm_i_pthread_mutex_t mutex
;
929 scm_i_pthread_cond_t cond
;
933 really_spawn (void *d
)
935 spawn_data
*data
= (spawn_data
*)d
;
936 scm_t_catch_body body
= data
->body
;
937 void *body_data
= data
->body_data
;
938 scm_t_catch_handler handler
= data
->handler
;
939 void *handler_data
= data
->handler_data
;
940 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
942 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
943 data
->thread
= scm_current_thread ();
944 scm_i_pthread_cond_signal (&data
->cond
);
945 scm_i_pthread_mutex_unlock (&data
->mutex
);
948 t
->result
= body (body_data
);
950 t
->result
= scm_internal_catch (SCM_BOOL_T
,
952 handler
, handler_data
);
958 spawn_thread (void *d
)
960 spawn_data
*data
= (spawn_data
*)d
;
961 scm_i_pthread_detach (scm_i_pthread_self ());
962 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
967 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
968 scm_t_catch_handler handler
, void *handler_data
)
974 data
.parent
= scm_current_dynamic_state ();
976 data
.body_data
= body_data
;
977 data
.handler
= handler
;
978 data
.handler_data
= handler_data
;
979 data
.thread
= SCM_BOOL_F
;
980 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
981 scm_i_pthread_cond_init (&data
.cond
, NULL
);
983 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
984 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
987 scm_i_pthread_mutex_unlock (&data
.mutex
);
992 while (scm_is_false (data
.thread
))
993 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
995 scm_i_pthread_mutex_unlock (&data
.mutex
);
997 assert (SCM_I_IS_THREAD (data
.thread
));
1002 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1004 "Move the calling thread to the end of the scheduling queue.")
1005 #define FUNC_NAME s_scm_yield
1007 return scm_from_bool (scm_i_sched_yield ());
1011 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1013 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1014 "cannot be the current thread, and if @var{thread} has already terminated or "
1015 "been signaled to terminate, this function is a no-op.")
1016 #define FUNC_NAME s_scm_cancel_thread
1018 scm_i_thread
*t
= NULL
;
1020 SCM_VALIDATE_THREAD (1, thread
);
1021 t
= SCM_I_THREAD_DATA (thread
);
1022 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1026 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1027 scm_i_pthread_cancel (t
->pthread
);
1030 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1032 return SCM_UNSPECIFIED
;
1036 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1037 (SCM thread
, SCM proc
),
1038 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1039 "This handler will be called when the thread exits.")
1040 #define FUNC_NAME s_scm_set_thread_cleanup_x
1044 SCM_VALIDATE_THREAD (1, thread
);
1045 if (!scm_is_false (proc
))
1046 SCM_VALIDATE_THUNK (2, proc
);
1048 t
= SCM_I_THREAD_DATA (thread
);
1049 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1051 if (!(t
->exited
|| t
->canceled
))
1052 t
->cleanup_handler
= proc
;
1054 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1056 return SCM_UNSPECIFIED
;
1060 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1062 "Return the cleanup handler installed for the thread @var{thread}.")
1063 #define FUNC_NAME s_scm_thread_cleanup
1068 SCM_VALIDATE_THREAD (1, thread
);
1070 t
= SCM_I_THREAD_DATA (thread
);
1071 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1072 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1073 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1079 SCM
scm_join_thread (SCM thread
)
1081 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1084 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1085 (SCM thread
, SCM timeout
, SCM timeoutval
),
1086 "Suspend execution of the calling thread until the target @var{thread} "
1087 "terminates, unless the target @var{thread} has already terminated. ")
1088 #define FUNC_NAME s_scm_join_thread_timed
1091 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1092 SCM res
= SCM_BOOL_F
;
1094 if (! (SCM_UNBNDP (timeoutval
)))
1097 SCM_VALIDATE_THREAD (1, thread
);
1098 if (scm_is_eq (scm_current_thread (), thread
))
1099 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1101 t
= SCM_I_THREAD_DATA (thread
);
1102 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1104 if (! SCM_UNBNDP (timeout
))
1106 to_timespec (timeout
, &ctimeout
);
1107 timeout_ptr
= &ctimeout
;
1116 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1126 else if (err
== ETIMEDOUT
)
1129 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1131 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1133 /* Check for exit again, since we just released and
1134 reacquired the admin mutex, before the next block_self
1135 call (which would block forever if t has already
1145 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1151 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1153 "Return @code{#t} if @var{obj} is a thread.")
1154 #define FUNC_NAME s_scm_thread_p
1156 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1162 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1164 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1165 scm_puts_unlocked ("#<mutex ", port
);
1166 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1167 scm_puts_unlocked (">", port
);
1172 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1176 scm_i_pthread_mutex_t lock
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
1178 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1179 /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
1180 and so we can just copy it. */
1181 memcpy (&m
->lock
, &lock
, sizeof (m
->lock
));
1182 m
->owner
= SCM_BOOL_F
;
1185 m
->recursive
= recursive
;
1186 m
->unchecked_unlock
= unchecked_unlock
;
1187 m
->allow_external_unlock
= external_unlock
;
1189 m
->waiting
= SCM_EOL
;
1190 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1191 m
->waiting
= make_queue ();
1195 SCM
scm_make_mutex (void)
1197 return scm_make_mutex_with_flags (SCM_EOL
);
1200 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1201 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1202 SCM_SYMBOL (recursive_sym
, "recursive");
1204 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1206 "Create a new mutex. ")
1207 #define FUNC_NAME s_scm_make_mutex_with_flags
1209 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1212 while (! scm_is_null (ptr
))
1214 SCM flag
= SCM_CAR (ptr
);
1215 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1216 unchecked_unlock
= 1;
1217 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1218 external_unlock
= 1;
1219 else if (scm_is_eq (flag
, recursive_sym
))
1222 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1223 ptr
= SCM_CDR (ptr
);
1225 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1229 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1231 "Create a new recursive mutex. ")
1232 #define FUNC_NAME s_scm_make_recursive_mutex
1234 return make_fat_mutex (1, 0, 0);
1238 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1241 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1243 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1245 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1246 SCM err
= SCM_BOOL_F
;
1248 struct timeval current_time
;
1250 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1256 m
->owner
= new_owner
;
1259 if (SCM_I_IS_THREAD (new_owner
))
1261 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1263 /* FIXME: The order in which `t->admin_mutex' and
1264 `m->lock' are taken differs from that in
1265 `on_thread_exit', potentially leading to deadlocks. */
1266 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1268 /* Only keep a weak reference to MUTEX so that it's not
1269 retained when not referenced elsewhere (bug #27450).
1270 The weak pair itself is eventually removed when MUTEX
1271 is unlocked. Note that `t->mutexes' lists mutexes
1272 currently held by T, so it should be small. */
1273 t
->mutexes
= scm_cons (scm_make_weak_vector (SCM_INUM1
, mutex
),
1276 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1281 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1283 m
->owner
= new_owner
;
1284 err
= scm_cons (scm_abandoned_mutex_error_key
,
1285 scm_from_locale_string ("lock obtained on abandoned "
1290 else if (scm_is_eq (m
->owner
, new_owner
))
1299 err
= scm_cons (scm_misc_error_key
,
1300 scm_from_locale_string ("mutex already locked "
1308 if (timeout
!= NULL
)
1310 gettimeofday (¤t_time
, NULL
);
1311 if (current_time
.tv_sec
> timeout
->tv_sec
||
1312 (current_time
.tv_sec
== timeout
->tv_sec
&&
1313 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1319 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1320 scm_i_pthread_mutex_unlock (&m
->lock
);
1322 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1325 scm_i_pthread_mutex_unlock (&m
->lock
);
1329 SCM
scm_lock_mutex (SCM mx
)
1331 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1334 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1335 (SCM m
, SCM timeout
, SCM owner
),
1336 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1337 "thread blocks until the mutex becomes available. The function\n"
1338 "returns when the calling thread owns the lock on @var{m}.\n"
1339 "Locking a mutex that a thread already owns will succeed right\n"
1340 "away and will not block the thread. That is, Guile's mutexes\n"
1341 "are @emph{recursive}.")
1342 #define FUNC_NAME s_scm_lock_mutex_timed
1346 scm_t_timespec cwaittime
, *waittime
= NULL
;
1348 SCM_VALIDATE_MUTEX (1, m
);
1350 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1352 to_timespec (timeout
, &cwaittime
);
1353 waittime
= &cwaittime
;
1356 if (!SCM_UNBNDP (owner
) && !scm_is_false (owner
))
1357 SCM_VALIDATE_THREAD (3, owner
);
1359 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1360 if (!scm_is_false (exception
))
1361 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1362 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1367 lock_mutex_return_void (SCM mx
)
1369 (void) scm_lock_mutex (mx
);
1373 unlock_mutex_return_void (SCM mx
)
1375 (void) scm_unlock_mutex (mx
);
1379 scm_dynwind_lock_mutex (SCM mutex
)
1381 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void
, mutex
,
1382 SCM_F_WIND_EXPLICITLY
);
1383 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void
, mutex
,
1384 SCM_F_WIND_EXPLICITLY
);
1387 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1389 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1390 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1391 #define FUNC_NAME s_scm_try_mutex
1395 scm_t_timespec cwaittime
, *waittime
= NULL
;
1397 SCM_VALIDATE_MUTEX (1, mutex
);
1399 to_timespec (scm_from_int(0), &cwaittime
);
1400 waittime
= &cwaittime
;
1402 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1403 if (!scm_is_false (exception
))
1404 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1405 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1409 /*** Fat condition variables */
/* NOTE(review): extraction artifact -- the enclosing `typedef struct fat_cond
   { ... } fat_cond;` frame is missing from this chunk.  The two members
   below are the condvar's own pthread lock and its queue of waiting
   threads; confirm against the unmangled source.  */
1412 scm_i_pthread_mutex_t lock
;
1413 SCM waiting
; /* the threads waiting for this condition. */
/* SMOB predicate and data accessor for condition variables.  */
1416 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1417 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
/* Remove MUTEX from thread T's list of owned mutexes (t->mutexes, a list
   of weak vectors whose slot 0 holds the mutex).
   NOTE(review): extraction artifact -- braces, the `else`, and the
   loop-advance/`break` statements are elided here; only the visible
   comparisons and list splicing survive.  */
1420 remove_mutex_from_thread (SCM mutex
, scm_i_thread
*t
)
/* Walk the list keeping PREV one cell behind WALK so a match can be
   spliced out.  */
1424 for (prev
= SCM_BOOL_F
, walk
= t
->mutexes
; scm_is_pair (walk
);
1425 walk
= SCM_CDR (walk
))
/* Slot 0 of each weak vector is the mutex handle being tracked.  */
1427 if (scm_is_eq (mutex
, scm_c_weak_vector_ref (SCM_CAR (walk
), 0)))
1429 if (scm_is_pair (prev
))
1430 SCM_SETCDR (prev
, SCM_CDR (walk
));
/* Presumably the `else` branch: the match was the list head.  */
1432 t
->mutexes
= SCM_CDR (walk
);
/* Unlock MUTEX; when COND is bound, atomically wait on COND (with optional
   WAITTIME) and, if RELOCK, re-acquire MUTEX before returning.
   NOTE(review): extraction artifact -- many interior lines (braces,
   level bookkeeping, the wait loop structure, return statements) are
   elided from this chunk; comments below describe only what is visible.  */
1439 fat_mutex_unlock (SCM mutex
, SCM cond
,
1440 const scm_t_timespec
*waittime
, int relock
)
1443 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1445 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1446 int err
= 0, ret
= 0;
/* All owner/level inspection happens under the mutex's own lock.  */
1448 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Caller is not the owner: error out unless the mutex was created with
   the corresponding permissive flag.  */
1452 if (!scm_is_eq (owner
, t
->handle
))
1456 if (!m
->unchecked_unlock
)
1458 scm_i_pthread_mutex_unlock (&m
->lock
);
1459 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1463 else if (!m
->allow_external_unlock
)
1465 scm_i_pthread_mutex_unlock (&m
->lock
);
1466 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
/* COND bound: this is wait-condition-variable's unlock-and-wait path.  */
1470 if (! (SCM_UNBNDP (cond
)))
1472 c
= SCM_CONDVAR_DATA (cond
);
1481 /* Change the owner of MUTEX. */
1482 remove_mutex_from_thread (mutex
, t
);
1483 m
->owner
= unblock_from_queue (m
->waiting
);
/* Sleep on the condvar's queue; block_self releases m->lock while
   blocked and reports timeouts/interrupts via ERR.  */
1488 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1489 scm_i_pthread_mutex_unlock (&m
->lock
);
1496 else if (err
== ETIMEDOUT
)
1501 else if (err
!= EINTR
)
1504 scm_syserror (NULL
);
/* RELOCK path: re-acquire the mutex on behalf of OWNER after waking.  */
1510 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1518 scm_remember_upto_here_2 (cond
, mutex
);
1520 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Plain-unlock path: hand the mutex to the next queued waiter.  */
1529 /* Change the owner of MUTEX. */
1530 remove_mutex_from_thread (mutex
, t
);
1531 m
->owner
= unblock_from_queue (m
->waiting
);
1534 scm_i_pthread_mutex_unlock (&m
->lock
);
1541 SCM
scm_unlock_mutex (SCM mx
)
1543 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1546 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1547 (SCM mx
, SCM cond
, SCM timeout
),
1548 "Unlocks @var{mutex} if the calling thread owns the lock on "
1549 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1550 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1551 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1552 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1553 "with a call to @code{unlock-mutex}. Only the last call to "
1554 "@code{unlock-mutex} will actually unlock the mutex. ")
1555 #define FUNC_NAME s_scm_unlock_mutex_timed
1557 scm_t_timespec cwaittime
, *waittime
= NULL
;
1559 SCM_VALIDATE_MUTEX (1, mx
);
1560 if (! (SCM_UNBNDP (cond
)))
1562 SCM_VALIDATE_CONDVAR (2, cond
);
1564 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1566 to_timespec (timeout
, &cwaittime
);
1567 waittime
= &cwaittime
;
1571 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1575 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1577 "Return @code{#t} if @var{obj} is a mutex.")
1578 #define FUNC_NAME s_scm_mutex_p
1580 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1584 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1586 "Return the thread owning @var{mx}, or @code{#f}.")
1587 #define FUNC_NAME s_scm_mutex_owner
/* NOTE(review): extraction artifact -- the declaration of the `owner`
   result variable, the read of m->owner under the lock, and the return
   statement are elided from this chunk.  */
1590 fat_mutex
*m
= NULL
;
1592 SCM_VALIDATE_MUTEX (1, mx
);
1593 m
= SCM_MUTEX_DATA (mx
);
/* Take the mutex's internal lock so the owner field is read atomically.  */
1594 scm_i_pthread_mutex_lock (&m
->lock
);
1596 scm_i_pthread_mutex_unlock (&m
->lock
);
1602 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1604 "Return the lock level of mutex @var{mx}.")
1605 #define FUNC_NAME s_scm_mutex_level
1607 SCM_VALIDATE_MUTEX (1, mx
);
1608 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1612 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1614 "Returns @code{#t} if the mutex @var{mx} is locked.")
1615 #define FUNC_NAME s_scm_mutex_locked_p
1617 SCM_VALIDATE_MUTEX (1, mx
);
1618 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
/* SMOB printer for condition variables: prints
   "#<condition-variable HEXADDR>".
   NOTE(review): extraction artifact -- the `static int` return type,
   braces, and the final `return` of the printer's status are elided.  */
1623 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1625 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1626 scm_puts_unlocked ("#<condition-variable ", port
);
/* Print the fat_cond's address in hexadecimal as a unique id.  */
1627 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1628 scm_puts_unlocked (">", port
);
1632 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1634 "Make a new condition variable.")
1635 #define FUNC_NAME s_scm_make_condition_variable
1640 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1641 c
->waiting
= SCM_EOL
;
1642 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1643 c
->waiting
= make_queue ();
/* NOTE(review): extraction artifact -- braces and one trailing docstring
   line (after the "@code{#t}" fragment) are elided from this chunk.  */
1648 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1649 (SCM cv
, SCM mx
, SCM t
),
1650 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1651 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1652 "is locked again when this function returns. When @var{t} is given, "
1653 "it specifies a point in time where the waiting should be aborted. It "
1654 "can be either a integer as returned by @code{current-time} or a pair "
1655 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1656 "mutex is locked and @code{#f} is returned. When the condition "
1657 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1659 #define FUNC_NAME s_scm_timed_wait_condition_variable
1661 scm_t_timespec waittime
, *waitptr
= NULL
;
1663 SCM_VALIDATE_CONDVAR (1, cv
);
1664 SCM_VALIDATE_MUTEX (2, mx
);
/* Optional absolute timeout T is converted to a timespec.  */
1666 if (!SCM_UNBNDP (t
))
1668 to_timespec (t
, &waittime
);
1669 waitptr
= &waittime
;
/* relock=1: fat_mutex_unlock re-acquires MX after the wait, so the
   caller always returns with the mutex held.  */
1672 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1677 fat_cond_signal (fat_cond
*c
)
1679 unblock_from_queue (c
->waiting
);
1682 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1684 "Wake up one thread that is waiting for @var{cv}")
1685 #define FUNC_NAME s_scm_signal_condition_variable
1687 SCM_VALIDATE_CONDVAR (1, cv
);
1688 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1694 fat_cond_broadcast (fat_cond
*c
)
1696 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1700 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1702 "Wake up all threads that are waiting for @var{cv}. ")
1703 #define FUNC_NAME s_scm_broadcast_condition_variable
1705 SCM_VALIDATE_CONDVAR (1, cv
);
1706 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1711 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1713 "Return @code{#t} if @var{obj} is a condition variable.")
1714 #define FUNC_NAME s_scm_condition_variable_p
1716 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
/* NOTE(review): extraction artifact -- this is the tail of the
   `struct select_args` definition used to ship select(2) arguments and
   results through scm_without_guile; the other members (nfds, the three
   fd sets, result, errno_value) are elided from this chunk.  */
1730 struct timeval
*timeout
;
1737 do_std_select (void *args
)
1739 struct select_args
*select_args
;
1741 select_args
= (struct select_args
*) args
;
1743 select_args
->result
=
1744 select (select_args
->nfds
,
1745 select_args
->read_fds
, select_args
->write_fds
,
1746 select_args
->except_fds
, select_args
->timeout
);
1747 select_args
->errno_value
= errno
;
1752 #if !SCM_HAVE_SYS_SELECT_H
/* Forward declaration when <sys/select.h> is unavailable.  */
1753 static int scm_std_select (int nfds
,
1757 struct timeval
*timeout
);
/* select(2) wrapper that cooperates with Guile: it adds the thread's
   sleep pipe to the read set so other threads can interrupt the wait,
   and runs the actual select outside guile mode.
   NOTE(review): extraction artifact -- the fd-set parameters, several
   declarations (my_readfds, dummy), the nfds adjustment, result capture,
   and the final errno restore / return are elided from this chunk.  */
1761 scm_std_select (int nfds
,
1765 struct timeval
*timeout
)
1768 int res
, eno
, wakeup_fd
;
1769 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1770 struct select_args args
;
/* Ensure we always have a read set to put the wakeup fd into.  */
1772 if (readfds
== NULL
)
1774 FD_ZERO (&my_readfds
);
1775 readfds
= &my_readfds
;
/* Register intent to sleep so scm_i_... wakeups write the sleep pipe.  */
1778 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
/* Watch the read end of the sleep pipe alongside the caller's fds.  */
1781 wakeup_fd
= t
->sleep_pipe
[0];
1782 FD_SET (wakeup_fd
, readfds
);
1783 if (wakeup_fd
>= nfds
)
1787 args
.read_fds
= readfds
;
1788 args
.write_fds
= writefds
;
1789 args
.except_fds
= exceptfds
;
1790 args
.timeout
= timeout
;
1792 /* Explicitly cooperate with the GC. */
1793 scm_without_guile (do_std_select
, &args
);
1796 eno
= args
.errno_value
;
1799 scm_i_reset_sleep (t
);
/* If only the wakeup pipe fired, drain it and drop it from the set so
   the caller never sees our internal fd.  */
1801 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1804 full_read (wakeup_fd
, &dummy
, 1);
1806 FD_CLR (wakeup_fd
, readfds
);
1818 /* Convenience API for blocking while in guile mode. */
1820 #if SCM_USE_PTHREAD_THREADS
1822 /* It seems reasonable to not run procedures related to mutex and condition
1823 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1824 without it, and (ii) the only potential gain would be GC latency. See
1825 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1826 for a discussion of the pros and cons. */
1829 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1831 int res
= scm_i_pthread_mutex_lock (mutex
);
1836 do_unlock (void *data
)
1838 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1842 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1844 scm_i_scm_pthread_mutex_lock (mutex
);
1845 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1849 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1852 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1854 t
->held_mutex
= mutex
;
1855 res
= scm_i_pthread_cond_wait (cond
, mutex
);
1856 t
->held_mutex
= NULL
;
1862 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1863 scm_i_pthread_mutex_t
*mutex
,
1864 const scm_t_timespec
*wt
)
1867 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1869 t
->held_mutex
= mutex
;
1870 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1871 t
->held_mutex
= NULL
;
/* Sleep for USECS microseconds via scm_std_select so the sleep is
   interruptible by other threads; returns the time left computed from
   TV (NOTE(review): assumes select updates TV with the remaining time,
   which is Linux behavior -- confirm for other platforms).  */
unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;

  tv.tv_sec = usecs / 1000000;
  tv.tv_usec = usecs % 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}
/* Interruptible sleep for SECS seconds, implemented on scm_std_select.
   NOTE(review): extraction artifact -- the `struct timeval tv`
   declaration, its initialization from SECS, and the return statement
   are elided from this chunk.  */
1889 scm_std_sleep (unsigned int secs
)
1894 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1900 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1902 "Return the thread that called this function.")
1903 #define FUNC_NAME s_scm_current_thread
1905 return SCM_I_CURRENT_THREAD
->handle
;
/* Build a fresh list of N elements, each being FILL.
   NOTE(review): extraction artifact -- the result declaration, the loop
   header consuming N, and the return statement are elided from this
   chunk; only the cons step is visible.  */
1910 scm_c_make_list (size_t n
, SCM fill
)
1914 res
= scm_cons (fill
, res
);
1918 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1920 "Return a list of all threads.")
1921 #define FUNC_NAME s_scm_all_threads
1923 /* We can not allocate while holding the thread_admin_mutex because
1924 of the way GC is done.
/* NOTE(review): extraction artifact -- several lines (the scm_i_thread
   declaration, loop braces, the counter decrement, list termination and
   the return) are elided from this chunk.  */
1926 int n
= thread_count
;
/* Pre-allocate the result list BEFORE taking the admin mutex (see the
   comment above), then overwrite its cars in place while walking the
   thread list.  */
1928 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1930 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1932 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
/* The internal signal-delivery thread is hidden from user code.  */
1934 if (t
!= scm_i_signal_delivery_thread
)
1936 SCM_SETCAR (*l
, t
->handle
);
1937 l
= SCM_CDRLOC (*l
);
1942 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1947 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1949 "Return @code{#t} iff @var{thread} has exited.\n")
1950 #define FUNC_NAME s_scm_thread_exited_p
1952 return scm_from_bool (scm_c_thread_exited_p (thread
));
/* C-level predicate: has THREAD terminated?
   NOTE(review): extraction artifact -- the return type, the
   scm_i_thread declaration, and the return statement (presumably
   reading an `exited' flag from T) are elided from this chunk.  */
1957 scm_c_thread_exited_p (SCM thread
)
1958 #define FUNC_NAME s_scm_thread_exited_p
1961 SCM_VALIDATE_THREAD (1, thread
);
1962 t
= SCM_I_THREAD_DATA (thread
);
/* NOTE(review): extraction artifact -- the parameter list and the
   "@itemize"/"@end itemize" docstring lines are elided from this chunk.
   Uses gnulib's num_processors with NPROC_ALL (all installed CPUs).  */
1967 SCM_DEFINE (scm_total_processor_count
, "total-processor-count", 0, 0, 0,
1969 "Return the total number of processors of the machine, which\n"
1970 "is guaranteed to be at least 1. A ``processor'' here is a\n"
1971 "thread execution unit, which can be either:\n\n"
1973 "@item an execution core in a (possibly multi-core) chip, in a\n"
1974 " (possibly multi- chip) module, in a single computer, or\n"
1975 "@item a thread execution unit inside a core in the case of\n"
1976 " @dfn{hyper-threaded} CPUs.\n"
1978 "Which of the two definitions is used, is unspecified.\n")
1979 #define FUNC_NAME s_scm_total_processor_count
1981 return scm_from_ulong (num_processors (NPROC_ALL
));
/* NOTE(review): extraction artifact -- the parameter list and the final
   docstring line are elided from this chunk.  Uses gnulib's
   num_processors with NPROC_CURRENT (CPUs available to this process).  */
1985 SCM_DEFINE (scm_current_processor_count
, "current-processor-count", 0, 0, 0,
1987 "Like @code{total-processor-count}, but return the number of\n"
1988 "processors available to the current process. See\n"
1989 "@code{setaffinity} and @code{getaffinity} for more\n"
1991 #define FUNC_NAME s_scm_current_processor_count
1993 return scm_from_ulong (num_processors (NPROC_CURRENT
));
/* Condvar used to wake sleeping threads (initialized in
   scm_threads_prehistory below).  */
2000 static scm_i_pthread_cond_t wake_up_cond
;
/* Set to 1 by scm_init_threads once the thread subsystem is up.  */
2001 static int threads_initialized_p
= 0;
2004 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2006 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
/* Default mutex for scm_dynwind_critical_section when passed #f;
   created recursive in scm_init_threads.  */
2008 static SCM dynwind_critical_section_mutex
;
2011 scm_dynwind_critical_section (SCM mutex
)
2013 if (scm_is_false (mutex
))
2014 mutex
= dynwind_critical_section_mutex
;
2015 scm_dynwind_lock_mutex (mutex
);
2016 scm_dynwind_block_asyncs ();
2019 /*** Initialization */
/* General-purpose mutex for miscellaneous internal serialization.  */
2021 scm_i_pthread_mutex_t scm_i_misc_mutex
;
2023 #if SCM_USE_PTHREAD_THREADS
/* Shared recursive-mutex attribute, initialized in
   scm_threads_prehistory; declared as a 1-element array so it can be
   passed directly where a pointer is expected.  */
2024 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
/* Very early thread-subsystem setup, run before the rest of Guile:
   initializes pthread primitives, registers the custom GC mark kind for
   threads, and bootstraps the first thread structure.
   NOTE(review): extraction artifact -- braces, an #endif, and the
   assignment of GC_new_kind's result to thread_gc_kind (plus its
   trailing arguments) are elided from this chunk.  */
2028 scm_threads_prehistory (void *base
)
2030 #if SCM_USE_PTHREAD_THREADS
/* Build a recursive-mutex attribute used for the critical-section lock.  */
2031 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2032 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2033 PTHREAD_MUTEX_RECURSIVE
);
2036 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2037 scm_i_pthread_mutexattr_recursive
);
2038 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2039 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
/* Register the custom GC kind whose marker is thread_mark (see top of
   file), so thread structs get their VM stacks marked.  */
2042 GC_new_kind (GC_new_free_list (),
2043 GC_MAKE_PROC (GC_new_proc (thread_mark
), 0),
/* Set up the scm_i_thread for this very first thread.  */
2046 guilify_self_1 ((struct GC_stack_base
*) base
);
/* SMOB type tags for the three thread-related object types.  */
2049 scm_t_bits scm_tc16_thread
;
2050 scm_t_bits scm_tc16_mutex
;
2051 scm_t_bits scm_tc16_condvar
;
/* NOTE(review): extraction artifact -- the `scm_init_threads (void *base)'
   function header, braces, and the sizeof argument of the
   condition-variable smob registration are elided from this chunk.  */
2056 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2057 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2059 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2060 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2062 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2064 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
/* Finish bootstrapping the first thread; the default dynamic state is
   installed later by scm_init_threads_default_dynamic_state.  */
2066 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2067 guilify_self_2 (SCM_BOOL_F
);
2068 threads_initialized_p
= 1;
2070 dynwind_critical_section_mutex
= scm_make_recursive_mutex ();
2074 scm_init_threads_default_dynamic_state ()
2076 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2077 scm_i_default_dynamic_state
= state
;
/* Register all SCM_DEFINE'd thread procedures with the interpreter;
   the generated glue lives in threads.x.
   NOTE(review): extraction artifact -- the return type and braces are
   elided from this chunk.  */
2081 scm_init_thread_procs ()
2083 #include "libguile/threads.x"
2087 /* IA64-specific things. */
/* Each platform branch below provides two helpers: the base address of
   the ia64 register backing store, and the AR.BSP value recovered from
   a ucontext.  NOTE(review): extraction artifact throughout this region
   -- return types, braces, local declarations (loop counter, bsp
   temporary), and #if/#endif framing are partially elided.  */
2091 # include <sys/param.h>
2092 # include <sys/pstat.h>
/* HP-UX: scan the process VM areas via pstat for the RSE stack.  */
2094 scm_ia64_register_backing_store_base (void)
2096 struct pst_vm_status vm_status
;
2098 while (pstat_getprocvm (&vm_status
, sizeof (vm_status
), 0, i
++) == 1)
2099 if (vm_status
.pst_type
== PS_RSESTACK
)
2100 return (void *) vm_status
.pst_vaddr
;
/* HP-UX: AR.BSP is fetched with the dedicated libc accessor.  */
2104 scm_ia64_ar_bsp (const void *ctx
)
2107 __uc_get_ar_bsp (ctx
, &bsp
);
2108 return (void *) bsp
;
2112 # include <ucontext.h>
/* GNU/Linux: glibc exports the backing store base directly.  */
2114 scm_ia64_register_backing_store_base (void)
2116 extern void *__libc_ia64_register_backing_store_base
;
2117 return __libc_ia64_register_backing_store_base
;
/* GNU/Linux: AR.BSP is stored in the machine context.  */
2120 scm_ia64_ar_bsp (const void *opaque
)
2122 const ucontext_t
*ctx
= opaque
;
2123 return (void *) ctx
->uc_mcontext
.sc_ar_bsp
;
2127 # include <ucontext.h>
/* FreeBSD: the backing store base is a fixed address.  */
2129 scm_ia64_register_backing_store_base (void)
2131 return (void *)0x8000000000000000;
/* FreeBSD: AR.BSP is bspstore plus the count of dirty registers.  */
2134 scm_ia64_ar_bsp (const void *opaque
)
2136 const ucontext_t
*ctx
= opaque
;
2137 return (void *)(ctx
->uc_mcontext
.mc_special
.bspstore
2138 + ctx
->uc_mcontext
.mc_special
.ndirty
);
2140 # endif /* __FreeBSD__ */
2141 #endif /* __ia64__ */