1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
3 * Free Software Foundation, Inc.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
29 #include "libguile/_scm.h"
38 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
46 # include <pthread_np.h>
49 #include <sys/select.h>
55 #include "libguile/validate.h"
56 #include "libguile/root.h"
57 #include "libguile/eval.h"
58 #include "libguile/async.h"
59 #include "libguile/ports.h"
60 #include "libguile/threads.h"
61 #include "libguile/dynwind.h"
62 #include "libguile/iselect.h"
63 #include "libguile/fluids.h"
64 #include "libguile/continuations.h"
65 #include "libguile/gc.h"
66 #include "libguile/init.h"
67 #include "libguile/scmsigs.h"
68 #include "libguile/strings.h"
70 #include <full-read.h>
75 /* The GC "kind" for threads that allow them to mark their VM
77 static int thread_gc_kind
;
79 static struct GC_ms_entry
*
80 thread_mark (GC_word
*addr
, struct GC_ms_entry
*mark_stack_ptr
,
81 struct GC_ms_entry
*mark_stack_limit
, GC_word env
)
84 const struct scm_i_thread
*t
= (struct scm_i_thread
*) addr
;
86 if (SCM_UNPACK (t
->handle
) == 0)
87 /* T must be on the free-list; ignore. (See warning in
89 return mark_stack_ptr
;
91 /* Mark T. We could be more precise, but it doesn't matte. */
92 for (word
= 0; word
* sizeof (*addr
) < sizeof (*t
); word
++)
93 mark_stack_ptr
= GC_MARK_AND_PUSH ((void *) addr
[word
],
94 mark_stack_ptr
, mark_stack_limit
,
97 return mark_stack_ptr
;
103 to_timespec (SCM t
, scm_t_timespec
*waittime
)
107 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
108 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
112 double time
= scm_to_double (t
);
113 double sec
= scm_c_truncate (time
);
115 waittime
->tv_sec
= (long) sec
;
116 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
   the risk of false references leading to unbounded retained space as
   described in "Bounding Space Usage of Conservative Garbage Collectors",
   Hans-J. Boehm, 2002.  */
129 /* Make an empty queue data structure.
134 return scm_cons (SCM_EOL
, SCM_EOL
);
137 /* Put T at the back of Q and return a handle that can be used with
138 remqueue to remove T from Q again.
141 enqueue (SCM q
, SCM t
)
143 SCM c
= scm_cons (t
, SCM_EOL
);
144 SCM_CRITICAL_SECTION_START
;
145 if (scm_is_null (SCM_CDR (q
)))
148 SCM_SETCDR (SCM_CAR (q
), c
);
150 SCM_CRITICAL_SECTION_END
;
154 /* Remove the element that the handle C refers to from the queue Q. C
155 must have been returned from a call to enqueue. The return value
156 is zero when the element referred to by C has already been removed.
157 Otherwise, 1 is returned.
160 remqueue (SCM q
, SCM c
)
163 SCM_CRITICAL_SECTION_START
;
164 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
166 if (scm_is_eq (p
, c
))
168 if (scm_is_eq (c
, SCM_CAR (q
)))
169 SCM_SETCAR (q
, scm_is_eq (prev
, q
) ? SCM_EOL
: prev
);
170 SCM_SETCDR (prev
, SCM_CDR (c
));
173 SCM_SETCDR (c
, SCM_EOL
);
175 SCM_CRITICAL_SECTION_END
;
180 SCM_CRITICAL_SECTION_END
;
184 /* Remove the front-most element from the queue Q and return it.
185 Return SCM_BOOL_F when Q is empty.
191 SCM_CRITICAL_SECTION_START
;
195 SCM_CRITICAL_SECTION_END
;
200 SCM_SETCDR (q
, SCM_CDR (c
));
201 if (scm_is_null (SCM_CDR (q
)))
202 SCM_SETCAR (q
, SCM_EOL
);
203 SCM_CRITICAL_SECTION_END
;
206 SCM_SETCDR (c
, SCM_EOL
);
212 /*** Thread smob routines */
216 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
218 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
219 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
220 the struct case, hence we go via a union, and extract according to the
221 size of pthread_t. */
229 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
230 scm_i_pthread_t p
= t
->pthread
;
233 if (sizeof (p
) == sizeof (unsigned short))
235 else if (sizeof (p
) == sizeof (unsigned int))
237 else if (sizeof (p
) == sizeof (unsigned long))
242 scm_puts_unlocked ("#<thread ", port
);
243 scm_uintprint (id
, 10, port
);
244 scm_puts_unlocked (" (", port
);
245 scm_uintprint ((scm_t_bits
)t
, 16, port
);
246 scm_puts_unlocked (")>", port
);
251 /*** Blocking on queues. */
253 /* See also scm_i_queue_async_cell for how such a block is
257 /* Put the current thread on QUEUE and go to sleep, waiting for it to
258 be woken up by a call to 'unblock_from_queue', or to be
259 interrupted. Upon return of this function, the current thread is
260 no longer on QUEUE, even when the sleep has been interrupted.
262 The caller of block_self must hold MUTEX. It will be atomically
263 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
265 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
268 When WAITTIME is not NULL, the sleep will be aborted at that time.
270 The return value of block_self is an errno value. It will be zero
271 when the sleep has been successfully completed by a call to
272 unblock_from_queue, EINTR when it has been interrupted by the
273 delivery of a system async, and ETIMEDOUT when the timeout has
276 The system asyncs themselves are not executed by block_self.
279 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
280 const scm_t_timespec
*waittime
)
282 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
286 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
291 q_handle
= enqueue (queue
, t
->handle
);
292 if (waittime
== NULL
)
293 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
295 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
297 /* When we are still on QUEUE, we have been interrupted. We
298 report this only when no other error (such as a timeout) has
301 if (remqueue (queue
, q_handle
) && err
== 0)
304 scm_i_reset_sleep (t
);
310 /* Wake up the first thread on QUEUE, if any. The awoken thread is
311 returned, or #f if the queue was empty.
314 unblock_from_queue (SCM queue
)
316 SCM thread
= dequeue (queue
);
317 if (scm_is_true (thread
))
318 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
323 /* Getting into and out of guile mode.
326 /* Key used to attach a cleanup handler to a given thread. Also, if
327 thread-local storage is unavailable, this key is used to retrieve the
328 current thread with `pthread_getspecific ()'. */
329 scm_i_pthread_key_t scm_i_thread_key
;
332 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
334 /* When thread-local storage (TLS) is available, a pointer to the
335 current-thread object is kept in TLS. Note that storing the thread-object
336 itself in TLS (rather than a pointer to some malloc'd memory) is not
337 possible since thread objects may live longer than the actual thread they
339 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
341 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
344 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
345 static scm_i_thread
*all_threads
= NULL
;
346 static int thread_count
;
348 static SCM scm_i_default_dynamic_state
;
350 /* Run when a fluid is collected. */
352 scm_i_reset_fluid (size_t n
)
356 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
357 for (t
= all_threads
; t
; t
= t
->next_thread
)
358 if (SCM_I_DYNAMIC_STATE_P (t
->dynamic_state
))
360 SCM v
= SCM_I_DYNAMIC_STATE_FLUIDS (t
->dynamic_state
);
362 if (n
< SCM_SIMPLE_VECTOR_LENGTH (v
))
363 SCM_SIMPLE_VECTOR_SET (v
, n
, SCM_UNDEFINED
);
365 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
368 /* Perform first stage of thread initialisation, in non-guile mode.
371 guilify_self_1 (struct GC_stack_base
*base
)
375 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
376 before allocating anything in this thread, because allocation could
377 cause GC to run, and GC could cause finalizers, which could invoke
378 Scheme functions, which need the current thread to be set. */
380 t
.pthread
= scm_i_pthread_self ();
381 t
.handle
= SCM_BOOL_F
;
382 t
.result
= SCM_BOOL_F
;
383 t
.cleanup_handler
= SCM_BOOL_F
;
386 t
.join_queue
= SCM_EOL
;
387 t
.dynamic_state
= SCM_BOOL_F
;
388 t
.dynstack
.base
= NULL
;
389 t
.dynstack
.top
= NULL
;
390 t
.dynstack
.limit
= NULL
;
391 t
.active_asyncs
= SCM_EOL
;
393 t
.pending_asyncs
= 1;
394 t
.critical_section_level
= 0;
395 t
.base
= base
->mem_base
;
397 t
.register_backing_store_base
= base
->reg_base
;
399 t
.continuation_root
= SCM_EOL
;
400 t
.continuation_base
= t
.base
;
401 scm_i_pthread_cond_init (&t
.sleep_cond
, NULL
);
402 t
.sleep_mutex
= NULL
;
403 t
.sleep_object
= SCM_BOOL_F
;
407 if (pipe2 (t
.sleep_pipe
, O_CLOEXEC
) != 0)
408 /* FIXME: Error conditions during the initialization phase are handled
409 gracelessly since public functions such as `scm_init_guile ()'
410 currently have type `void'. */
413 scm_i_pthread_mutex_init (&t
.admin_mutex
, NULL
);
418 /* The switcheroo. */
420 scm_i_thread
*t_ptr
= &t
;
423 t_ptr
= GC_generic_malloc (sizeof (*t_ptr
), thread_gc_kind
);
424 memcpy (t_ptr
, &t
, sizeof t
);
426 scm_i_pthread_setspecific (scm_i_thread_key
, t_ptr
);
428 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
429 /* Cache the current thread in TLS for faster lookup. */
430 scm_i_current_thread
= t_ptr
;
433 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
434 t_ptr
->next_thread
= all_threads
;
437 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
443 /* Perform second stage of thread initialisation, in guile mode.
446 guilify_self_2 (SCM parent
)
448 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
452 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
454 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
455 t
->continuation_base
= t
->base
;
457 if (scm_is_true (parent
))
458 t
->dynamic_state
= scm_make_dynamic_state (parent
);
460 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
462 t
->dynstack
.base
= scm_gc_malloc (16 * sizeof (scm_t_bits
), "dynstack");
463 t
->dynstack
.limit
= t
->dynstack
.base
+ 16;
464 t
->dynstack
.top
= t
->dynstack
.base
+ SCM_DYNSTACK_HEADER_LEN
;
466 t
->join_queue
= make_queue ();
469 /* See note in finalizers.c:queue_finalizer_async(). */
470 GC_invoke_finalizers ();
476 /* We implement our own mutex type since we want them to be 'fair', we
477 want to do fancy things while waiting for them (like running
478 asyncs) and we might want to add things that are nice for
483 scm_i_pthread_mutex_t lock
;
485 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
487 int recursive
; /* allow recursive locking? */
488 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
489 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
490 owned by the current thread? */
492 SCM waiting
; /* the threads waiting for this mutex. */
495 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
496 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
499 call_cleanup (void *data
)
502 return scm_call_0 (*proc_p
);
505 /* Perform thread tear-down, in guile mode.
508 do_thread_exit (void *v
)
510 scm_i_thread
*t
= (scm_i_thread
*) v
;
512 if (!scm_is_false (t
->cleanup_handler
))
514 SCM ptr
= t
->cleanup_handler
;
516 t
->cleanup_handler
= SCM_BOOL_F
;
517 t
->result
= scm_internal_catch (SCM_BOOL_T
,
519 scm_handle_by_message_noexit
, NULL
);
522 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
525 close (t
->sleep_pipe
[0]);
526 close (t
->sleep_pipe
[1]);
527 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
530 while (!scm_is_null (t
->mutexes
))
532 SCM mutex
= scm_c_weak_vector_ref (scm_car (t
->mutexes
), 0);
534 if (scm_is_true (mutex
))
536 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
538 scm_i_pthread_mutex_lock (&m
->lock
);
540 /* Check whether T owns MUTEX. This is usually the case, unless
541 T abandoned MUTEX; in that case, T is no longer its owner (see
542 `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
543 if (scm_is_eq (m
->owner
, t
->handle
))
544 unblock_from_queue (m
->waiting
);
546 scm_i_pthread_mutex_unlock (&m
->lock
);
549 t
->mutexes
= scm_cdr (t
->mutexes
);
552 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
558 do_thread_exit_trampoline (struct GC_stack_base
*sb
, void *v
)
560 /* Won't hurt if we are already registered. */
561 #if SCM_USE_PTHREAD_THREADS
562 GC_register_my_thread (sb
);
565 return scm_with_guile (do_thread_exit
, v
);
569 on_thread_exit (void *v
)
571 /* This handler is executed in non-guile mode. */
572 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
574 /* If we were canceled, we were unable to clear `t->guile_mode', so do
578 /* If this thread was cancelled while doing a cond wait, it will
579 still have a mutex locked, so we unlock it here. */
582 scm_i_pthread_mutex_unlock (t
->held_mutex
);
583 t
->held_mutex
= NULL
;
586 /* Reinstate the current thread for purposes of scm_with_guile
587 guile-mode cleanup handlers. Only really needed in the non-TLS
588 case but it doesn't hurt to be consistent. */
589 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
591 /* Scheme-level thread finalizers and other cleanup needs to happen in
593 GC_call_with_stack_base (do_thread_exit_trampoline
, t
);
595 /* Removing ourself from the list of all threads needs to happen in
596 non-guile mode since all SCM values on our stack become
597 unprotected once we are no longer in the list. */
598 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
599 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
602 *tp
= t
->next_thread
;
605 t
->next_thread
= NULL
;
611 /* If there's only one other thread, it could be the signal delivery
612 thread, so we need to notify it to shut down by closing its read pipe.
613 If it's not the signal delivery thread, then closing the read pipe isn't
615 if (thread_count
<= 1)
616 scm_i_close_signal_pipe ();
618 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
620 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
622 #if SCM_USE_PTHREAD_THREADS
623 GC_unregister_my_thread ();
627 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
630 init_thread_key (void)
632 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
635 /* Perform any initializations necessary to make the current thread
636 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
639 BASE is the stack base to use with GC.
641 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
642 which case the default dynamic state is used.
644 Returns zero when the thread was known to guile already; otherwise
647 Note that it could be the case that the thread was known
648 to Guile, but not in guile mode (because we are within a
649 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
650 be sure. New threads are put into guile mode implicitly. */
653 scm_i_init_thread_for_guile (struct GC_stack_base
*base
, SCM parent
)
655 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
657 if (SCM_I_CURRENT_THREAD
)
659 /* Thread is already known to Guile.
665 /* This thread has not been guilified yet.
668 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
669 if (scm_initialized_p
== 0)
671 /* First thread ever to enter Guile. Run the full
674 scm_i_init_guile (base
);
676 #if SCM_USE_PTHREAD_THREADS
677 /* Allow other threads to come in later. */
678 GC_allow_register_threads ();
681 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
685 /* Guile is already initialized, but this thread enters it for
686 the first time. Only initialize this thread.
688 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
690 /* Register this thread with libgc. */
691 #if SCM_USE_PTHREAD_THREADS
692 GC_register_my_thread (base
);
695 guilify_self_1 (base
);
696 guilify_self_2 (parent
);
705 struct GC_stack_base stack_base
;
707 if (GC_get_stack_base (&stack_base
) == GC_SUCCESS
)
708 scm_i_init_thread_for_guile (&stack_base
,
709 scm_i_default_dynamic_state
);
712 fprintf (stderr
, "Failed to get stack base for current thread.\n");
717 struct with_guile_args
725 with_guile_trampoline (void *data
)
727 struct with_guile_args
*args
= data
;
729 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
733 with_guile_and_parent (struct GC_stack_base
*base
, void *data
)
738 struct with_guile_args
*args
= data
;
740 new_thread
= scm_i_init_thread_for_guile (base
, args
->parent
);
741 t
= SCM_I_CURRENT_THREAD
;
744 /* We are in Guile mode. */
745 assert (t
->guile_mode
);
747 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
749 /* Leave Guile mode. */
752 else if (t
->guile_mode
)
754 /* Already in Guile mode. */
755 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
759 /* We are not in Guile mode, either because we are not within a
760 scm_with_guile, or because we are within a scm_without_guile.
762 This call to scm_with_guile() could happen from anywhere on the
763 stack, and in particular lower on the stack than when it was
764 when this thread was first guilified. Thus, `base' must be
766 #if SCM_STACK_GROWS_UP
767 if (SCM_STACK_PTR (base
->mem_base
) < t
->base
)
768 t
->base
= SCM_STACK_PTR (base
->mem_base
);
770 if (SCM_STACK_PTR (base
->mem_base
) > t
->base
)
771 t
->base
= SCM_STACK_PTR (base
->mem_base
);
775 res
= GC_call_with_gc_active (with_guile_trampoline
, args
);
782 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
784 struct with_guile_args args
;
788 args
.parent
= parent
;
790 return GC_call_with_stack_base (with_guile_and_parent
, &args
);
794 scm_with_guile (void *(*func
)(void *), void *data
)
796 return scm_i_with_guile_and_parent (func
, data
,
797 scm_i_default_dynamic_state
);
801 scm_without_guile (void *(*func
)(void *), void *data
)
804 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
808 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
809 result
= GC_do_blocking (func
, data
);
810 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
813 /* Otherwise we're not in guile mode, so nothing to do. */
814 result
= func (data
);
820 /*** Thread creation */
827 scm_i_pthread_mutex_t mutex
;
828 scm_i_pthread_cond_t cond
;
832 really_launch (void *d
)
834 launch_data
*data
= (launch_data
*)d
;
835 SCM thunk
= data
->thunk
, handler
= data
->handler
;
838 t
= SCM_I_CURRENT_THREAD
;
840 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
841 data
->thread
= scm_current_thread ();
842 scm_i_pthread_cond_signal (&data
->cond
);
843 scm_i_pthread_mutex_unlock (&data
->mutex
);
845 if (SCM_UNBNDP (handler
))
846 t
->result
= scm_call_0 (thunk
);
848 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
854 launch_thread (void *d
)
856 launch_data
*data
= (launch_data
*)d
;
857 scm_i_pthread_detach (scm_i_pthread_self ());
858 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
862 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
863 (SCM thunk
, SCM handler
),
864 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
865 "returning a new thread object representing the thread. The procedure\n"
866 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
868 "When @var{handler} is specified, then @var{thunk} is called from\n"
869 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
870 "handler. This catch is established inside the continuation barrier.\n"
872 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
873 "the @emph{exit value} of the thread and the thread is terminated.")
874 #define FUNC_NAME s_scm_call_with_new_thread
880 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
881 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
882 handler
, SCM_ARG2
, FUNC_NAME
);
884 GC_collect_a_little ();
885 data
.parent
= scm_current_dynamic_state ();
887 data
.handler
= handler
;
888 data
.thread
= SCM_BOOL_F
;
889 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
890 scm_i_pthread_cond_init (&data
.cond
, NULL
);
892 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
893 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
896 scm_i_pthread_mutex_unlock (&data
.mutex
);
901 while (scm_is_false (data
.thread
))
902 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
904 scm_i_pthread_mutex_unlock (&data
.mutex
);
912 scm_t_catch_body body
;
914 scm_t_catch_handler handler
;
917 scm_i_pthread_mutex_t mutex
;
918 scm_i_pthread_cond_t cond
;
922 really_spawn (void *d
)
924 spawn_data
*data
= (spawn_data
*)d
;
925 scm_t_catch_body body
= data
->body
;
926 void *body_data
= data
->body_data
;
927 scm_t_catch_handler handler
= data
->handler
;
928 void *handler_data
= data
->handler_data
;
929 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
931 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
932 data
->thread
= scm_current_thread ();
933 scm_i_pthread_cond_signal (&data
->cond
);
934 scm_i_pthread_mutex_unlock (&data
->mutex
);
937 t
->result
= body (body_data
);
939 t
->result
= scm_internal_catch (SCM_BOOL_T
,
941 handler
, handler_data
);
947 spawn_thread (void *d
)
949 spawn_data
*data
= (spawn_data
*)d
;
950 scm_i_pthread_detach (scm_i_pthread_self ());
951 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
956 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
957 scm_t_catch_handler handler
, void *handler_data
)
963 data
.parent
= scm_current_dynamic_state ();
965 data
.body_data
= body_data
;
966 data
.handler
= handler
;
967 data
.handler_data
= handler_data
;
968 data
.thread
= SCM_BOOL_F
;
969 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
970 scm_i_pthread_cond_init (&data
.cond
, NULL
);
972 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
973 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
976 scm_i_pthread_mutex_unlock (&data
.mutex
);
981 while (scm_is_false (data
.thread
))
982 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
984 scm_i_pthread_mutex_unlock (&data
.mutex
);
986 assert (SCM_I_IS_THREAD (data
.thread
));
991 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
993 "Move the calling thread to the end of the scheduling queue.")
994 #define FUNC_NAME s_scm_yield
996 return scm_from_bool (scm_i_sched_yield ());
1000 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1002 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1003 "cannot be the current thread, and if @var{thread} has already terminated or "
1004 "been signaled to terminate, this function is a no-op.")
1005 #define FUNC_NAME s_scm_cancel_thread
1007 scm_i_thread
*t
= NULL
;
1009 SCM_VALIDATE_THREAD (1, thread
);
1010 t
= SCM_I_THREAD_DATA (thread
);
1011 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1015 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1016 scm_i_pthread_cancel (t
->pthread
);
1019 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1021 return SCM_UNSPECIFIED
;
1025 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1026 (SCM thread
, SCM proc
),
1027 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1028 "This handler will be called when the thread exits.")
1029 #define FUNC_NAME s_scm_set_thread_cleanup_x
1033 SCM_VALIDATE_THREAD (1, thread
);
1034 if (!scm_is_false (proc
))
1035 SCM_VALIDATE_THUNK (2, proc
);
1037 t
= SCM_I_THREAD_DATA (thread
);
1038 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1040 if (!(t
->exited
|| t
->canceled
))
1041 t
->cleanup_handler
= proc
;
1043 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1045 return SCM_UNSPECIFIED
;
1049 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1051 "Return the cleanup handler installed for the thread @var{thread}.")
1052 #define FUNC_NAME s_scm_thread_cleanup
1057 SCM_VALIDATE_THREAD (1, thread
);
1059 t
= SCM_I_THREAD_DATA (thread
);
1060 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1061 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1062 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1068 SCM
scm_join_thread (SCM thread
)
1070 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1073 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1074 (SCM thread
, SCM timeout
, SCM timeoutval
),
1075 "Suspend execution of the calling thread until the target @var{thread} "
1076 "terminates, unless the target @var{thread} has already terminated. ")
1077 #define FUNC_NAME s_scm_join_thread_timed
1080 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1081 SCM res
= SCM_BOOL_F
;
1083 if (! (SCM_UNBNDP (timeoutval
)))
1086 SCM_VALIDATE_THREAD (1, thread
);
1087 if (scm_is_eq (scm_current_thread (), thread
))
1088 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1090 t
= SCM_I_THREAD_DATA (thread
);
1091 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1093 if (! SCM_UNBNDP (timeout
))
1095 to_timespec (timeout
, &ctimeout
);
1096 timeout_ptr
= &ctimeout
;
1105 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1115 else if (err
== ETIMEDOUT
)
1118 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1120 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1122 /* Check for exit again, since we just released and
1123 reacquired the admin mutex, before the next block_self
1124 call (which would block forever if t has already
1134 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1140 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1142 "Return @code{#t} if @var{obj} is a thread.")
1143 #define FUNC_NAME s_scm_thread_p
1145 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1151 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1153 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1154 scm_puts_unlocked ("#<mutex ", port
);
1155 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1156 scm_puts_unlocked (">", port
);
1161 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1165 scm_i_pthread_mutex_t lock
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
1167 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1168 /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
1169 and so we can just copy it. */
1170 memcpy (&m
->lock
, &lock
, sizeof (m
->lock
));
1171 m
->owner
= SCM_BOOL_F
;
1174 m
->recursive
= recursive
;
1175 m
->unchecked_unlock
= unchecked_unlock
;
1176 m
->allow_external_unlock
= external_unlock
;
1178 m
->waiting
= SCM_EOL
;
1179 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1180 m
->waiting
= make_queue ();
1184 SCM
scm_make_mutex (void)
1186 return scm_make_mutex_with_flags (SCM_EOL
);
1189 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1190 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1191 SCM_SYMBOL (recursive_sym
, "recursive");
1193 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1195 "Create a new mutex. ")
1196 #define FUNC_NAME s_scm_make_mutex_with_flags
1198 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1201 while (! scm_is_null (ptr
))
1203 SCM flag
= SCM_CAR (ptr
);
1204 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1205 unchecked_unlock
= 1;
1206 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1207 external_unlock
= 1;
1208 else if (scm_is_eq (flag
, recursive_sym
))
1211 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1212 ptr
= SCM_CDR (ptr
);
1214 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1218 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1220 "Create a new recursive mutex. ")
1221 #define FUNC_NAME s_scm_make_recursive_mutex
1223 return make_fat_mutex (1, 0, 0);
1227 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1230 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1232 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1234 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1235 SCM err
= SCM_BOOL_F
;
1237 struct timeval current_time
;
1239 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1245 m
->owner
= new_owner
;
1248 if (SCM_I_IS_THREAD (new_owner
))
1250 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1252 /* FIXME: The order in which `t->admin_mutex' and
1253 `m->lock' are taken differs from that in
1254 `on_thread_exit', potentially leading to deadlocks. */
1255 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1257 /* Only keep a weak reference to MUTEX so that it's not
1258 retained when not referenced elsewhere (bug #27450).
1259 The weak pair itself is eventually removed when MUTEX
1260 is unlocked. Note that `t->mutexes' lists mutexes
1261 currently held by T, so it should be small. */
1262 t
->mutexes
= scm_cons (scm_make_weak_vector (SCM_INUM1
, mutex
),
1265 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1270 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1272 m
->owner
= new_owner
;
1273 err
= scm_cons (scm_abandoned_mutex_error_key
,
1274 scm_from_locale_string ("lock obtained on abandoned "
1279 else if (scm_is_eq (m
->owner
, new_owner
))
1288 err
= scm_cons (scm_misc_error_key
,
1289 scm_from_locale_string ("mutex already locked "
1297 if (timeout
!= NULL
)
1299 gettimeofday (¤t_time
, NULL
);
1300 if (current_time
.tv_sec
> timeout
->tv_sec
||
1301 (current_time
.tv_sec
== timeout
->tv_sec
&&
1302 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1308 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1309 scm_i_pthread_mutex_unlock (&m
->lock
);
1311 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1314 scm_i_pthread_mutex_unlock (&m
->lock
);
1318 SCM
scm_lock_mutex (SCM mx
)
1320 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1323 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1324 (SCM m
, SCM timeout
, SCM owner
),
1325 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1326 "thread blocks until the mutex becomes available. The function\n"
1327 "returns when the calling thread owns the lock on @var{m}.\n"
1328 "Locking a mutex that a thread already owns will succeed right\n"
1329 "away and will not block the thread. That is, Guile's mutexes\n"
1330 "are @emph{recursive}.")
1331 #define FUNC_NAME s_scm_lock_mutex_timed
1335 scm_t_timespec cwaittime
, *waittime
= NULL
;
1337 SCM_VALIDATE_MUTEX (1, m
);
1339 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1341 to_timespec (timeout
, &cwaittime
);
1342 waittime
= &cwaittime
;
1345 if (!SCM_UNBNDP (owner
) && !scm_is_false (owner
))
1346 SCM_VALIDATE_THREAD (3, owner
);
1348 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1349 if (!scm_is_false (exception
))
1350 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1351 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1356 lock_mutex_return_void (SCM mx
)
1358 (void) scm_lock_mutex (mx
);
1362 unlock_mutex_return_void (SCM mx
)
1364 (void) scm_unlock_mutex (mx
);
1368 scm_dynwind_lock_mutex (SCM mutex
)
1370 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void
, mutex
,
1371 SCM_F_WIND_EXPLICITLY
);
1372 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void
, mutex
,
1373 SCM_F_WIND_EXPLICITLY
);
1376 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1378 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1379 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1380 #define FUNC_NAME s_scm_try_mutex
1384 scm_t_timespec cwaittime
, *waittime
= NULL
;
1386 SCM_VALIDATE_MUTEX (1, mutex
);
1388 to_timespec (scm_from_int(0), &cwaittime
);
1389 waittime
= &cwaittime
;
1391 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1392 if (!scm_is_false (exception
))
1393 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1394 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1398 /*** Fat condition variables */
1401 scm_i_pthread_mutex_t lock
;
1402 SCM waiting
; /* the threads waiting for this condition. */
1405 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1406 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
/* Drop MUTEX from thread T's list of owned mutexes (t->mutexes, a list
   of weak vectors whose slot 0 holds the mutex).  NOTE(review): several
   loop-body lines are missing from this excerpt (presumably the `prev'
   update and an early exit) — confirm against the full source.  */
1409 remove_mutex_from_thread (SCM mutex
, scm_i_thread
*t
)
/* Walk the list, remembering the previous cell so the match can be
   spliced out.  */
1413 for (prev
= SCM_BOOL_F
, walk
= t
->mutexes
; scm_is_pair (walk
);
1414 walk
= SCM_CDR (walk
))
1416 if (scm_is_eq (mutex
, scm_c_weak_vector_ref (SCM_CAR (walk
), 0)))
/* Unlink: either patch the previous pair, or move the list head.  */
1418 if (scm_is_pair (prev
))
1419 SCM_SETCDR (prev
, SCM_CDR (walk
));
1421 t
->mutexes
= SCM_CDR (walk
);
/* Unlock MUTEX; if COND is given, atomically wait on it (optionally up
   to WAITTIME) and, when RELOCK is set, reacquire MUTEX before
   returning.  NOTE(review): this excerpt is missing many original lines
   (owner fetch, level handling, the wait loop structure) — the comments
   below describe only what the visible statements establish.  */
1428 fat_mutex_unlock (SCM mutex
, SCM cond
,
1429 const scm_t_timespec
*waittime
, int relock
)
1432 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1434 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1435 int err
= 0, ret
= 0;
1437 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Caller is not the owner: error out unless the mutex's flags permit
   unchecked or external unlocking.  */
1441 if (!scm_is_eq (owner
, t
->handle
))
1445 if (!m
->unchecked_unlock
)
/* Release the internal lock before throwing.  */
1447 scm_i_pthread_mutex_unlock (&m
->lock
);
1448 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1452 else if (!m
->allow_external_unlock
)
1454 scm_i_pthread_mutex_unlock (&m
->lock
);
1455 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
/* A condition variable was supplied: wait on it after unlocking.  */
1459 if (! (SCM_UNBNDP (cond
)))
1461 c
= SCM_CONDVAR_DATA (cond
);
1470 /* Change the owner of MUTEX. */
1471 remove_mutex_from_thread (mutex
, t
);
1472 m
->owner
= unblock_from_queue (m
->waiting
);
/* Sleep on the condvar's queue; m->lock is released while blocked.  */
1477 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1478 scm_i_pthread_mutex_unlock (&m
->lock
);
1485 else if (err
== ETIMEDOUT
)
/* Any error other than timeout or interruption is fatal.  */
1490 else if (err
!= EINTR
)
1493 scm_syserror (NULL
);
/* Reacquire MUTEX on behalf of OWNER before returning (relock path).  */
1499 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1507 scm_remember_upto_here_2 (cond
, mutex
);
1509 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Plain unlock path (no condvar): hand the mutex to a waiter.  */
1518 /* Change the owner of MUTEX. */
1519 remove_mutex_from_thread (mutex
, t
);
1520 m
->owner
= unblock_from_queue (m
->waiting
);
1523 scm_i_pthread_mutex_unlock (&m
->lock
);
1530 SCM
scm_unlock_mutex (SCM mx
)
1532 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1535 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1536 (SCM mx
, SCM cond
, SCM timeout
),
1537 "Unlocks @var{mutex} if the calling thread owns the lock on "
1538 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1539 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1540 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1541 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1542 "with a call to @code{unlock-mutex}. Only the last call to "
1543 "@code{unlock-mutex} will actually unlock the mutex. ")
1544 #define FUNC_NAME s_scm_unlock_mutex_timed
1546 scm_t_timespec cwaittime
, *waittime
= NULL
;
1548 SCM_VALIDATE_MUTEX (1, mx
);
1549 if (! (SCM_UNBNDP (cond
)))
1551 SCM_VALIDATE_CONDVAR (2, cond
);
1553 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1555 to_timespec (timeout
, &cwaittime
);
1556 waittime
= &cwaittime
;
1560 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1564 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1566 "Return @code{#t} if @var{obj} is a mutex.")
1567 #define FUNC_NAME s_scm_mutex_p
1569 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1573 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1575 "Return the thread owning @var{mx}, or @code{#f}.")
1576 #define FUNC_NAME s_scm_mutex_owner
1579 fat_mutex
*m
= NULL
;
1581 SCM_VALIDATE_MUTEX (1, mx
);
1582 m
= SCM_MUTEX_DATA (mx
);
1583 scm_i_pthread_mutex_lock (&m
->lock
);
1585 scm_i_pthread_mutex_unlock (&m
->lock
);
1591 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1593 "Return the lock level of mutex @var{mx}.")
1594 #define FUNC_NAME s_scm_mutex_level
1596 SCM_VALIDATE_MUTEX (1, mx
);
1597 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1601 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1603 "Returns @code{#t} if the mutex @var{mx} is locked.")
1604 #define FUNC_NAME s_scm_mutex_locked_p
1606 SCM_VALIDATE_MUTEX (1, mx
);
1607 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1612 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1614 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1615 scm_puts_unlocked ("#<condition-variable ", port
);
1616 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1617 scm_puts_unlocked (">", port
);
1621 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1623 "Make a new condition variable.")
1624 #define FUNC_NAME s_scm_make_condition_variable
1629 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1630 c
->waiting
= SCM_EOL
;
1631 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1632 c
->waiting
= make_queue ();
1637 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1638 (SCM cv
, SCM mx
, SCM t
),
1639 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1640 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1641 "is locked again when this function returns. When @var{t} is given, "
1642 "it specifies a point in time where the waiting should be aborted. It "
1643 "can be either a integer as returned by @code{current-time} or a pair "
1644 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1645 "mutex is locked and @code{#f} is returned. When the condition "
1646 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1648 #define FUNC_NAME s_scm_timed_wait_condition_variable
1650 scm_t_timespec waittime
, *waitptr
= NULL
;
1652 SCM_VALIDATE_CONDVAR (1, cv
);
1653 SCM_VALIDATE_MUTEX (2, mx
);
1655 if (!SCM_UNBNDP (t
))
1657 to_timespec (t
, &waittime
);
1658 waitptr
= &waittime
;
1661 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1666 fat_cond_signal (fat_cond
*c
)
1668 unblock_from_queue (c
->waiting
);
1671 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1673 "Wake up one thread that is waiting for @var{cv}")
1674 #define FUNC_NAME s_scm_signal_condition_variable
1676 SCM_VALIDATE_CONDVAR (1, cv
);
1677 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1683 fat_cond_broadcast (fat_cond
*c
)
1685 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1689 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1691 "Wake up all threads that are waiting for @var{cv}. ")
1692 #define FUNC_NAME s_scm_broadcast_condition_variable
1694 SCM_VALIDATE_CONDVAR (1, cv
);
1695 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1700 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1702 "Return @code{#t} if @var{obj} is a condition variable.")
1703 #define FUNC_NAME s_scm_condition_variable_p
1705 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1719 struct timeval
*timeout
;
1726 do_std_select (void *args
)
1728 struct select_args
*select_args
;
1730 select_args
= (struct select_args
*) args
;
1732 select_args
->result
=
1733 select (select_args
->nfds
,
1734 select_args
->read_fds
, select_args
->write_fds
,
1735 select_args
->except_fds
, select_args
->timeout
);
1736 select_args
->errno_value
= errno
;
/* select(2) wrapper that cooperates with Guile: the thread's sleep
   pipe is added to the read set so asyncs can interrupt the wait, and
   the actual select runs outside guile mode.  NOTE(review): several
   original lines (declarations of the fd-set copies, loop braces, the
   final return) are missing from this excerpt.  */
1741 #if !SCM_HAVE_SYS_SELECT_H
1742 static int scm_std_select (int nfds
,
1746 struct timeval
*timeout
);
1750 scm_std_select (int nfds
,
1754 struct timeval
*timeout
)
1757 int res
, eno
, wakeup_fd
;
1758 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1759 struct select_args args
;
/* Ensure there is a read set to put the wakeup fd into.  */
1761 if (readfds
== NULL
)
1763 FD_ZERO (&my_readfds
);
1764 readfds
= &my_readfds
;
/* Register this thread as sleeping; the pipe's write end is handed to
   the async machinery so it can wake us.  */
1767 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1770 wakeup_fd
= t
->sleep_pipe
[0];
1771 FD_SET (wakeup_fd
, readfds
);
/* nfds must cover the wakeup fd as well.  */
1772 if (wakeup_fd
>= nfds
)
1776 args
.read_fds
= readfds
;
1777 args
.write_fds
= writefds
;
1778 args
.except_fds
= exceptfds
;
1779 args
.timeout
= timeout
;
1781 /* Explicitly cooperate with the GC. */
1782 scm_without_guile (do_std_select
, &args
);
/* errno was captured inside do_std_select; recover it here.  */
1785 eno
= args
.errno_value
;
1788 scm_i_reset_sleep (t
);
/* If we were woken via the sleep pipe, drain it and hide the wakeup
   fd from the caller's result set.  */
1790 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1793 full_read (wakeup_fd
, &dummy
, 1);
1795 FD_CLR (wakeup_fd
, readfds
);
1807 /* Convenience API for blocking while in guile mode. */
1809 #if SCM_USE_PTHREAD_THREADS
1811 /* It seems reasonable to not run procedures related to mutex and condition
1812 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1813 without it, and (ii) the only potential gain would be GC latency. See
1814 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1815 for a discussion of the pros and cons. */
1818 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1820 int res
= scm_i_pthread_mutex_lock (mutex
);
1825 do_unlock (void *data
)
1827 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1831 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1833 scm_i_scm_pthread_mutex_lock (mutex
);
1834 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1838 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1841 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1843 t
->held_mutex
= mutex
;
1844 res
= scm_i_pthread_cond_wait (cond
, mutex
);
1845 t
->held_mutex
= NULL
;
1851 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1852 scm_i_pthread_mutex_t
*mutex
,
1853 const scm_t_timespec
*wt
)
1856 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1858 t
->held_mutex
= mutex
;
1859 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1860 t
->held_mutex
= NULL
;
/* Sleep for USECS microseconds via scm_std_select so asyncs can
   interrupt; returns the (possibly updated) remaining time folded
   back into microseconds.  */
unsigned long
scm_std_usleep (unsigned long usecs)
{
  struct timeval tv;
  tv.tv_usec = usecs % 1000000;
  tv.tv_sec = usecs / 1000000;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec * 1000000 + tv.tv_usec;
}
/* Sleep for SECS seconds via scm_std_select; returns the seconds left
   in the timeval after the select call.  NOTE(review): the timeval
   setup lines were missing from the excerpt and were reconstructed.  */
unsigned int
scm_std_sleep (unsigned int secs)
{
  struct timeval tv;
  tv.tv_usec = 0;
  tv.tv_sec = secs;
  scm_std_select (0, NULL, NULL, NULL, &tv);
  return tv.tv_sec;
}
1889 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1891 "Return the thread that called this function.")
1892 #define FUNC_NAME s_scm_current_thread
1894 return SCM_I_CURRENT_THREAD
->handle
;
1899 scm_c_make_list (size_t n
, SCM fill
)
1903 res
= scm_cons (fill
, res
);
/* NOTE(review): lines terminating the result list and returning it are
   missing from this excerpt; comments describe visible code only.  */
1907 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1909 "Return a list of all threads.")
1910 #define FUNC_NAME s_scm_all_threads
1912 /* We can not allocate while holding the thread_admin_mutex because
1913 of the way GC is done.
/* Pre-allocate a list of thread_count cells before taking the admin
   mutex, then fill it in under the lock.  */
1915 int n
= thread_count
;
1917 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1919 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
/* Walk the global thread list, but never hand out the internal signal
   delivery thread.  */
1921 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
1923 if (t
!= scm_i_signal_delivery_thread
)
1925 SCM_SETCAR (*l
, t
->handle
);
1926 l
= SCM_CDRLOC (*l
);
1931 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1936 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1938 "Return @code{#t} iff @var{thread} has exited.\n")
1939 #define FUNC_NAME s_scm_thread_exited_p
1941 return scm_from_bool (scm_c_thread_exited_p (thread
));
1946 scm_c_thread_exited_p (SCM thread
)
1947 #define FUNC_NAME s_scm_thread_exited_p
1950 SCM_VALIDATE_THREAD (1, thread
);
1951 t
= SCM_I_THREAD_DATA (thread
);
1956 SCM_DEFINE (scm_total_processor_count
, "total-processor-count", 0, 0, 0,
1958 "Return the total number of processors of the machine, which\n"
1959 "is guaranteed to be at least 1. A ``processor'' here is a\n"
1960 "thread execution unit, which can be either:\n\n"
1962 "@item an execution core in a (possibly multi-core) chip, in a\n"
1963 " (possibly multi- chip) module, in a single computer, or\n"
1964 "@item a thread execution unit inside a core in the case of\n"
1965 " @dfn{hyper-threaded} CPUs.\n"
1967 "Which of the two definitions is used, is unspecified.\n")
1968 #define FUNC_NAME s_scm_total_processor_count
1970 return scm_from_ulong (num_processors (NPROC_ALL
));
1974 SCM_DEFINE (scm_current_processor_count
, "current-processor-count", 0, 0, 0,
1976 "Like @code{total-processor-count}, but return the number of\n"
1977 "processors available to the current process. See\n"
1978 "@code{setaffinity} and @code{getaffinity} for more\n"
1980 #define FUNC_NAME s_scm_current_processor_count
1982 return scm_from_ulong (num_processors (NPROC_CURRENT
));
/* Condition used to wake sleeping threads (see scm_std_select).  */
1989 static scm_i_pthread_cond_t wake_up_cond
;
/* Set once scm_init_threads has run; guards early-boot code paths.  */
1990 static int threads_initialized_p
= 0;
1993 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
1995 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
/* Default mutex for scm_dynwind_critical_section when passed #f.  */
1997 static SCM dynwind_critical_section_mutex
;
2000 scm_dynwind_critical_section (SCM mutex
)
2002 if (scm_is_false (mutex
))
2003 mutex
= dynwind_critical_section_mutex
;
2004 scm_dynwind_lock_mutex (mutex
);
2005 scm_dynwind_block_asyncs ();
2008 /*** Initialization */
/* Miscellaneous internal lock shared across libguile.  */
2010 scm_i_pthread_mutex_t scm_i_misc_mutex
;
2012 #if SCM_USE_PTHREAD_THREADS
/* Recursive mutex attribute, initialized in scm_threads_prehistory.  */
2013 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
/* Very early thread-system setup, run before the rest of libguile:
   initializes the global locks and registers the custom GC mark kind
   for thread objects.  NOTE(review): the assignment of the GC_new_kind
   result (presumably to thread_gc_kind) is missing from this excerpt.  */
2017 scm_threads_prehistory (void *base
)
2019 #if SCM_USE_PTHREAD_THREADS
/* Build a recursive mutex attribute for the critical-section mutex.  */
2020 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2021 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2022 PTHREAD_MUTEX_RECURSIVE
);
2025 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2026 scm_i_pthread_mutexattr_recursive
);
2027 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2028 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
/* Register thread_mark as the marking procedure for thread smobs.  */
2031 GC_new_kind (GC_new_free_list (),
2032 GC_MAKE_PROC (GC_new_proc (thread_mark
), 0),
2035 guilify_self_1 ((struct GC_stack_base
*) base
);
/* smob type tags, assigned in scm_init_threads.  */
2038 scm_t_bits scm_tc16_thread
;
2039 scm_t_bits scm_tc16_mutex
;
2040 scm_t_bits scm_tc16_condvar
;
/* Body of scm_init_threads (the function header line is missing from
   this excerpt): registers the thread/mutex/condvar smob types and
   finishes bootstrapping the first thread.  */
2045 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2046 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2048 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2049 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2051 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2053 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
/* Default dynamic state is filled in later by
   scm_init_threads_default_dynamic_state.  */
2055 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2056 guilify_self_2 (SCM_BOOL_F
);
2057 threads_initialized_p
= 1;
2059 dynwind_critical_section_mutex
= scm_make_recursive_mutex ();
2063 scm_init_threads_default_dynamic_state ()
2065 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2066 scm_i_default_dynamic_state
= state
;
2070 scm_init_thread_procs ()
2072 #include "libguile/threads.x"
/* Per-platform helpers for locating the IA64 register backing store,
   needed so the GC can scan the register stack.  NOTE(review): the
   surrounding #if/#ifdef lines selecting HP-UX vs. Linux vs. FreeBSD
   are missing from this excerpt.  */
2076 /* IA64-specific things. */
/* HP-UX variant: query the kernel via pstat for the RSE stack.  */
2080 # include <sys/param.h>
2081 # include <sys/pstat.h>
2083 scm_ia64_register_backing_store_base (void)
2085 struct pst_vm_status vm_status
;
2087 while (pstat_getprocvm (&vm_status
, sizeof (vm_status
), 0, i
++) == 1)
2088 if (vm_status
.pst_type
== PS_RSESTACK
)
2089 return (void *) vm_status
.pst_vaddr
;
2093 scm_ia64_ar_bsp (const void *ctx
)
2096 __uc_get_ar_bsp (ctx
, &bsp
);
2097 return (void *) bsp
;
/* Linux/glibc variant: libc exports the backing-store base.  */
2101 # include <ucontext.h>
2103 scm_ia64_register_backing_store_base (void)
2105 extern void *__libc_ia64_register_backing_store_base
;
2106 return __libc_ia64_register_backing_store_base
;
2109 scm_ia64_ar_bsp (const void *opaque
)
2111 const ucontext_t
*ctx
= opaque
;
2112 return (void *) ctx
->uc_mcontext
.sc_ar_bsp
;
/* FreeBSD variant: fixed base address plus mcontext bookkeeping.  */
2116 # include <ucontext.h>
2118 scm_ia64_register_backing_store_base (void)
2120 return (void *)0x8000000000000000;
2123 scm_ia64_ar_bsp (const void *opaque
)
2125 const ucontext_t
*ctx
= opaque
;
2126 return (void *)(ctx
->uc_mcontext
.mc_special
.bspstore
2127 + ctx
->uc_mcontext
.mc_special
.ndirty
);
2129 # endif /* __FreeBSD__ */
2130 #endif /* __ia64__ */