1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013,
3 * 2014 Free Software Foundation, Inc.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
29 #include "libguile/_scm.h"
36 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
44 # include <pthread_np.h>
47 #include <sys/select.h>
53 #include "libguile/validate.h"
54 #include "libguile/root.h"
55 #include "libguile/eval.h"
56 #include "libguile/async.h"
57 #include "libguile/ports.h"
58 #include "libguile/threads.h"
59 #include "libguile/dynwind.h"
60 #include "libguile/iselect.h"
61 #include "libguile/fluids.h"
62 #include "libguile/continuations.h"
63 #include "libguile/gc.h"
64 #include "libguile/gc-inline.h"
65 #include "libguile/init.h"
66 #include "libguile/scmsigs.h"
67 #include "libguile/strings.h"
68 #include "libguile/vm.h"
70 #include <full-read.h>
75 /* The GC "kind" for threads that allow them to mark their VM
/* NOTE(review): this listing is fragmented and interior lines are elided
   (e.g. the declarations of the loop counters `word' and `n' are not
   visible here); the comments below describe only what the visible
   fragments show. */
77 static int thread_gc_kind
;
/* Custom mark procedure for objects allocated with `thread_gc_kind'.
   Receives the object's address plus the current mark-stack window and
   must return the (possibly advanced) mark-stack pointer. */
79 static struct GC_ms_entry
*
80 thread_mark (GC_word
*addr
, struct GC_ms_entry
*mark_stack_ptr
,
81 struct GC_ms_entry
*mark_stack_limit
, GC_word env
)
84 const struct scm_i_thread
*t
= (struct scm_i_thread
*) addr
;
/* A zero `handle' word identifies a thread struct that is not live. */
86 if (SCM_UNPACK (t
->handle
) == 0)
87 /* T must be on the free-list; ignore. (See warning in
89 return mark_stack_ptr
;
91 /* Mark T. We could be more precise, but it doesn't matter. */
/* Conservatively push every word of the thread struct onto the mark
   stack. */
92 for (word
= 0; word
* sizeof (*addr
) < sizeof (*t
); word
++)
93 mark_stack_ptr
= GC_MARK_AND_PUSH ((void *) addr
[word
],
94 mark_stack_ptr
, mark_stack_limit
,
97 /* The pointerless freelists are threaded through their first word,
98 but GC doesn't know to trace them (as they are pointerless), so we
99 need to do that here. See the comments at the top of libgc's
101 if (t
->pointerless_freelists
)
104 for (n
= 0; n
< SCM_INLINE_GC_FREELIST_COUNT
; n
++)
106 void *chain
= t
->pointerless_freelists
[n
];
109 /* The first link is already marked by the freelist vector,
110 so we just have to mark the tail. */
111 while ((chain
= *(void **)chain
))
112 mark_stack_ptr
= GC_mark_and_push (chain
, mark_stack_ptr
,
113 mark_stack_limit
, NULL
);
/* Delegate marking of the thread's VM stack to the VM's own marker. */
119 mark_stack_ptr
= scm_i_vm_mark_stack (t
->vp
, mark_stack_ptr
,
122 return mark_stack_ptr
;
/* Convert the Scheme time value T into a C timespec in *WAITTIME.  Two
   paths are visible: a pair whose CAR is seconds and whose CDR is scaled
   by 1000, and a real number split into whole seconds plus fractional
   nanoseconds.  NOTE(review): the `if (scm_is_pair ...)'/`else' lines are
   elided from this listing — TODO confirm against the full source. */
128 to_timespec (SCM t
, scm_t_timespec
*waittime
)
132 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
/* CDR is multiplied by 1000, i.e. treated as microseconds here. */
133 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
137 double time
= scm_to_double (t
);
138 double sec
= scm_c_truncate (time
);
140 waittime
->tv_sec
= (long) sec
;
/* Fractional part of the double, scaled to nanoseconds. */
141 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
149 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
150 the risk of false references leading to unbounded retained space as
151 described in "Bounding Space Usage of Conservative Garbage Collectors",
154 /* Make an empty queue data structure.
159 return scm_cons (SCM_EOL
, SCM_EOL
);
162 /* Put T at the back of Q and return a handle that can be used with
163 remqueue to remove T from Q again.
/* NOTE(review): the branch bodies at original lines 171-174 and the
   return statement are elided from this listing. */
166 enqueue (SCM q
, SCM t
)
168 SCM c
= scm_cons (t
, SCM_EOL
);
169 SCM_CRITICAL_SECTION_START
;
170 if (scm_is_null (SCM_CDR (q
)))
173 SCM_SETCDR (SCM_CAR (q
), c
);
175 SCM_CRITICAL_SECTION_END
;
179 /* Remove the element that the handle C refers to from the queue Q. C
180 must have been returned from a call to enqueue. The return value
181 is zero when the element referred to by C has already been removed.
182 Otherwise, 1 is returned.
/* NOTE(review): the declarations of the cursor `p' and trailing pointer
   `prev', plus both return statements, are elided from this listing. */
185 remqueue (SCM q
, SCM c
)
188 SCM_CRITICAL_SECTION_START
;
189 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
191 if (scm_is_eq (p
, c
))
/* If C is the tail cell, repair the queue's tail pointer (CAR of Q). */
193 if (scm_is_eq (c
, SCM_CAR (q
)))
194 SCM_SETCAR (q
, scm_is_eq (prev
, q
) ? SCM_EOL
: prev
);
/* Splice C out of the list. */
195 SCM_SETCDR (prev
, SCM_CDR (c
));
198 SCM_SETCDR (c
, SCM_EOL
);
200 SCM_CRITICAL_SECTION_END
;
205 SCM_CRITICAL_SECTION_END
;
209 /* Remove the front-most element from the queue Q and return it.
210 Return SCM_BOOL_F when Q is empty.
216 SCM_CRITICAL_SECTION_START
;
220 SCM_CRITICAL_SECTION_END
;
225 SCM_SETCDR (q
, SCM_CDR (c
));
226 if (scm_is_null (SCM_CDR (q
)))
227 SCM_SETCAR (q
, SCM_EOL
);
228 SCM_CRITICAL_SECTION_END
;
231 SCM_SETCDR (c
, SCM_EOL
);
237 /*** Thread smob routines */
/* Smob printer for thread objects: writes "#<thread ID (ADDR)>" to PORT.
   NOTE(review): the union declaration and the bodies of the sizeof
   branches (which extract `id' from the union) are elided from this
   listing. */
241 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
243 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
244 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
245 the struct case, hence we go via a union, and extract according to the
246 size of pthread_t. */
254 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
255 scm_i_pthread_t p
= t
->pthread
;
/* Select the integer width matching pthread_t on this platform. */
258 if (sizeof (p
) == sizeof (unsigned short))
260 else if (sizeof (p
) == sizeof (unsigned int))
262 else if (sizeof (p
) == sizeof (unsigned long))
267 scm_puts_unlocked ("#<thread ", port
);
268 scm_uintprint (id
, 10, port
);
269 scm_puts_unlocked (" (", port
);
/* Print the thread struct's address in hex. */
270 scm_uintprint ((scm_t_bits
)t
, 16, port
);
271 scm_puts_unlocked (")>", port
);
276 /*** Blocking on queues. */
278 /* See also scm_i_queue_async_cell for how such a block is
282 /* Put the current thread on QUEUE and go to sleep, waiting for it to
283 be woken up by a call to 'unblock_from_queue', or to be
284 interrupted. Upon return of this function, the current thread is
285 no longer on QUEUE, even when the sleep has been interrupted.
287 The caller of block_self must hold MUTEX. It will be atomically
288 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
290 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
293 When WAITTIME is not NULL, the sleep will be aborted at that time.
295 The return value of block_self is an errno value. It will be zero
296 when the sleep has been successfully completed by a call to
297 unblock_from_queue, EINTR when it has been interrupted by the
298 delivery of a system async, and ETIMEDOUT when the timeout has
301 The system asyncs themselves are not executed by block_self.
/* NOTE(review): the declarations of `err' and `q_handle', the EINTR
   branch, and the final return are elided from this listing. */
304 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
305 const scm_t_timespec
*waittime
)
307 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
/* Register this sleep so an async can interrupt it. */
311 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
316 q_handle
= enqueue (queue
, t
->handle
);
/* Plain wait when no deadline was given, timed wait otherwise. */
317 if (waittime
== NULL
)
318 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
320 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
322 /* When we are still on QUEUE, we have been interrupted. We
323 report this only when no other error (such as a timeout) has
326 if (remqueue (queue
, q_handle
) && err
== 0)
329 scm_i_reset_sleep (t
);
335 /* Wake up the first thread on QUEUE, if any. The awoken thread is
336 returned, or #f if the queue was empty.
/* NOTE(review): the `return thread;' line is elided from this listing. */
339 unblock_from_queue (SCM queue
)
341 SCM thread
= dequeue (queue
);
/* dequeue returns #f for an empty queue; only signal a real thread. */
342 if (scm_is_true (thread
))
343 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
348 /* Getting into and out of guile mode.
351 /* Key used to attach a cleanup handler to a given thread. Also, if
352 thread-local storage is unavailable, this key is used to retrieve the
353 current thread with `pthread_getspecific ()'. */
354 scm_i_pthread_key_t scm_i_thread_key
;
357 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
359 /* When thread-local storage (TLS) is available, a pointer to the
360 current-thread object is kept in TLS. Note that storing the thread-object
361 itself in TLS (rather than a pointer to some malloc'd memory) is not
362 possible since thread objects may live longer than the actual thread they
364 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
366 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
369 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
370 static scm_i_thread
*all_threads
= NULL
;
371 static int thread_count
;
373 static SCM scm_i_default_dynamic_state
;
375 /* Run when a fluid is collected. */
/* Clear slot N of every live thread's dynamic-state fluid vector so the
   collected fluid's value cannot be resurrected.  Walks `all_threads'
   under `thread_admin_mutex'.  NOTE(review): the declaration of the
   cursor `t' is elided from this listing. */
377 scm_i_reset_fluid (size_t n
)
381 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
382 for (t
= all_threads
; t
; t
= t
->next_thread
)
383 if (SCM_I_DYNAMIC_STATE_P (t
->dynamic_state
))
385 SCM v
= SCM_I_DYNAMIC_STATE_FLUIDS (t
->dynamic_state
);
/* Guard against vectors shorter than N. */
387 if (n
< SCM_SIMPLE_VECTOR_LENGTH (v
))
388 SCM_SIMPLE_VECTOR_SET (v
, n
, SCM_UNDEFINED
);
390 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
393 /* Perform first stage of thread initialisation, in non-guile mode.
396 guilify_self_1 (struct GC_stack_base
*base
)
400 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
401 before allocating anything in this thread, because allocation could
402 cause GC to run, and GC could cause finalizers, which could invoke
403 Scheme functions, which need the current thread to be set. */
405 t
.pthread
= scm_i_pthread_self ();
406 t
.handle
= SCM_BOOL_F
;
407 t
.result
= SCM_BOOL_F
;
408 t
.cleanup_handler
= SCM_BOOL_F
;
411 t
.join_queue
= SCM_EOL
;
413 t
.pointerless_freelists
= NULL
;
414 t
.dynamic_state
= SCM_BOOL_F
;
415 t
.dynstack
.base
= NULL
;
416 t
.dynstack
.top
= NULL
;
417 t
.dynstack
.limit
= NULL
;
418 t
.active_asyncs
= SCM_EOL
;
420 t
.pending_asyncs
= 1;
421 t
.critical_section_level
= 0;
422 t
.base
= base
->mem_base
;
424 t
.register_backing_store_base
= base
->reg_base
;
426 t
.continuation_root
= SCM_EOL
;
427 t
.continuation_base
= t
.base
;
428 scm_i_pthread_cond_init (&t
.sleep_cond
, NULL
);
429 t
.sleep_mutex
= NULL
;
430 t
.sleep_object
= SCM_BOOL_F
;
434 if (pipe2 (t
.sleep_pipe
, O_CLOEXEC
) != 0)
435 /* FIXME: Error conditions during the initialization phase are handled
436 gracelessly since public functions such as `scm_init_guile ()'
437 currently have type `void'. */
440 scm_i_pthread_mutex_init (&t
.admin_mutex
, NULL
);
445 /* The switcheroo. */
447 scm_i_thread
*t_ptr
= &t
;
450 t_ptr
= GC_generic_malloc (sizeof (*t_ptr
), thread_gc_kind
);
451 memcpy (t_ptr
, &t
, sizeof t
);
453 scm_i_pthread_setspecific (scm_i_thread_key
, t_ptr
);
455 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
456 /* Cache the current thread in TLS for faster lookup. */
457 scm_i_current_thread
= t_ptr
;
460 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
461 t_ptr
->next_thread
= all_threads
;
464 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
470 /* Perform second stage of thread initialisation, in guile mode.
473 guilify_self_2 (SCM parent
)
475 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
479 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
481 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
482 t
->continuation_base
= t
->base
;
485 size_t size
= SCM_INLINE_GC_FREELIST_COUNT
* sizeof (void *);
486 t
->freelists
= scm_gc_malloc (size
, "freelists");
487 t
->pointerless_freelists
= scm_gc_malloc (size
, "atomic freelists");
490 if (scm_is_true (parent
))
491 t
->dynamic_state
= scm_make_dynamic_state (parent
);
493 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
495 t
->dynstack
.base
= scm_gc_malloc (16 * sizeof (scm_t_bits
), "dynstack");
496 t
->dynstack
.limit
= t
->dynstack
.base
+ 16;
497 t
->dynstack
.top
= t
->dynstack
.base
+ SCM_DYNSTACK_HEADER_LEN
;
499 t
->join_queue
= make_queue ();
502 /* See note in finalizers.c:queue_finalizer_async(). */
503 GC_invoke_finalizers ();
509 /* We implement our own mutex type since we want them to be 'fair', we
510 want to do fancy things while waiting for them (like running
511 asyncs) and we might want to add things that are nice for
516 scm_i_pthread_mutex_t lock
;
518 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
520 int recursive
; /* allow recursive locking? */
521 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
522 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
523 owned by the current thread? */
525 SCM waiting
; /* the threads waiting for this mutex. */
528 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
529 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
532 call_cleanup (void *data
)
535 return scm_call_0 (*proc_p
);
538 /* Perform thread tear-down, in guile mode.
541 do_thread_exit (void *v
)
543 scm_i_thread
*t
= (scm_i_thread
*) v
;
545 if (!scm_is_false (t
->cleanup_handler
))
547 SCM ptr
= t
->cleanup_handler
;
549 t
->cleanup_handler
= SCM_BOOL_F
;
550 t
->result
= scm_internal_catch (SCM_BOOL_T
,
552 scm_handle_by_message_noexit
, NULL
);
555 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
558 close (t
->sleep_pipe
[0]);
559 close (t
->sleep_pipe
[1]);
560 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
563 while (!scm_is_null (t
->mutexes
))
565 SCM mutex
= scm_c_weak_vector_ref (scm_car (t
->mutexes
), 0);
567 if (scm_is_true (mutex
))
569 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
571 scm_i_pthread_mutex_lock (&m
->lock
);
573 /* Check whether T owns MUTEX. This is usually the case, unless
574 T abandoned MUTEX; in that case, T is no longer its owner (see
575 `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
576 if (scm_is_eq (m
->owner
, t
->handle
))
577 unblock_from_queue (m
->waiting
);
579 scm_i_pthread_mutex_unlock (&m
->lock
);
582 t
->mutexes
= scm_cdr (t
->mutexes
);
585 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
591 do_thread_exit_trampoline (struct GC_stack_base
*sb
, void *v
)
593 /* Won't hurt if we are already registered. */
594 #if SCM_USE_PTHREAD_THREADS
595 GC_register_my_thread (sb
);
598 return scm_with_guile (do_thread_exit
, v
);
602 on_thread_exit (void *v
)
604 /* This handler is executed in non-guile mode. */
605 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
607 /* If we were canceled, we were unable to clear `t->guile_mode', so do
611 /* If this thread was cancelled while doing a cond wait, it will
612 still have a mutex locked, so we unlock it here. */
615 scm_i_pthread_mutex_unlock (t
->held_mutex
);
616 t
->held_mutex
= NULL
;
619 /* Reinstate the current thread for purposes of scm_with_guile
620 guile-mode cleanup handlers. Only really needed in the non-TLS
621 case but it doesn't hurt to be consistent. */
622 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
624 /* Scheme-level thread finalizers and other cleanup needs to happen in
626 GC_call_with_stack_base (do_thread_exit_trampoline
, t
);
628 /* Removing ourself from the list of all threads needs to happen in
629 non-guile mode since all SCM values on our stack become
630 unprotected once we are no longer in the list. */
631 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
632 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
635 *tp
= t
->next_thread
;
638 t
->next_thread
= NULL
;
644 /* If there's only one other thread, it could be the signal delivery
645 thread, so we need to notify it to shut down by closing its read pipe.
646 If it's not the signal delivery thread, then closing the read pipe isn't
648 if (thread_count
<= 1)
649 scm_i_close_signal_pipe ();
651 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
653 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
657 scm_i_vm_free_stack (t
->vp
);
661 #if SCM_USE_PTHREAD_THREADS
662 GC_unregister_my_thread ();
666 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
669 init_thread_key (void)
671 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
674 /* Perform any initializations necessary to make the current thread
675 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
678 BASE is the stack base to use with GC.
680 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
681 which case the default dynamic state is used.
683 Returns zero when the thread was known to guile already; otherwise
686 Note that it could be the case that the thread was known
687 to Guile, but not in guile mode (because we are within a
688 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
689 be sure. New threads are put into guile mode implicitly. */
692 scm_i_init_thread_for_guile (struct GC_stack_base
*base
, SCM parent
)
694 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
696 if (SCM_I_CURRENT_THREAD
)
698 /* Thread is already known to Guile.
704 /* This thread has not been guilified yet.
707 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
708 if (scm_initialized_p
== 0)
710 /* First thread ever to enter Guile. Run the full
713 scm_i_init_guile (base
);
715 #if SCM_USE_PTHREAD_THREADS
716 /* Allow other threads to come in later. */
717 GC_allow_register_threads ();
720 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
724 /* Guile is already initialized, but this thread enters it for
725 the first time. Only initialize this thread.
727 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
729 /* Register this thread with libgc. */
730 #if SCM_USE_PTHREAD_THREADS
731 GC_register_my_thread (base
);
734 guilify_self_1 (base
);
735 guilify_self_2 (parent
);
744 struct GC_stack_base stack_base
;
746 if (GC_get_stack_base (&stack_base
) == GC_SUCCESS
)
747 scm_i_init_thread_for_guile (&stack_base
,
748 scm_i_default_dynamic_state
);
751 fprintf (stderr
, "Failed to get stack base for current thread.\n");
756 struct with_guile_args
/* Adapter passed to GC_call_with_gc_active: unpacks the with_guile_args
   struct and runs the user function inside a continuation barrier. */
764 with_guile_trampoline (void *data
)
766 struct with_guile_args
*args
= data
;
768 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
772 with_guile_and_parent (struct GC_stack_base
*base
, void *data
)
777 struct with_guile_args
*args
= data
;
779 new_thread
= scm_i_init_thread_for_guile (base
, args
->parent
);
780 t
= SCM_I_CURRENT_THREAD
;
783 /* We are in Guile mode. */
784 assert (t
->guile_mode
);
786 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
788 /* Leave Guile mode. */
791 else if (t
->guile_mode
)
793 /* Already in Guile mode. */
794 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
798 /* We are not in Guile mode, either because we are not within a
799 scm_with_guile, or because we are within a scm_without_guile.
801 This call to scm_with_guile() could happen from anywhere on the
802 stack, and in particular lower on the stack than when it was
803 when this thread was first guilified. Thus, `base' must be
805 #if SCM_STACK_GROWS_UP
806 if (SCM_STACK_PTR (base
->mem_base
) < t
->base
)
807 t
->base
= SCM_STACK_PTR (base
->mem_base
);
809 if (SCM_STACK_PTR (base
->mem_base
) > t
->base
)
810 t
->base
= SCM_STACK_PTR (base
->mem_base
);
814 res
= GC_call_with_gc_active (with_guile_trampoline
, args
);
821 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
823 struct with_guile_args args
;
827 args
.parent
= parent
;
829 return GC_call_with_stack_base (with_guile_and_parent
, &args
);
/* Public entry point: run FUNC(DATA) in guile mode, using the default
   dynamic state as the parent. */
833 scm_with_guile (void *(*func
)(void *), void *data
)
835 return scm_i_with_guile_and_parent (func
, data
,
836 scm_i_default_dynamic_state
);
/* Run FUNC(DATA) outside guile mode.  If the current thread is in guile
   mode, the visible code drops the `guile_mode' flag around a
   GC_do_blocking call; otherwise FUNC is called directly.
   NOTE(review): the enclosing `if' condition, the `result' declaration
   and the final return are elided from this listing. */
840 scm_without_guile (void *(*func
)(void *), void *data
)
843 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
847 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
/* GC_do_blocking tells libgc this thread may block outside managed
   code while FUNC runs. */
848 result
= GC_do_blocking (func
, data
);
849 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
852 /* Otherwise we're not in guile mode, so nothing to do. */
853 result
= func (data
);
859 /*** Thread creation */
866 scm_i_pthread_mutex_t mutex
;
867 scm_i_pthread_cond_t cond
;
871 really_launch (void *d
)
873 launch_data
*data
= (launch_data
*)d
;
874 SCM thunk
= data
->thunk
, handler
= data
->handler
;
877 t
= SCM_I_CURRENT_THREAD
;
879 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
880 data
->thread
= scm_current_thread ();
881 scm_i_pthread_cond_signal (&data
->cond
);
882 scm_i_pthread_mutex_unlock (&data
->mutex
);
884 if (SCM_UNBNDP (handler
))
885 t
->result
= scm_call_0 (thunk
);
887 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
/* pthread entry point for call-with-new-thread: detach the new thread,
   then enter guile mode (with the creator's dynamic state as parent) and
   run really_launch on the shared launch_data. */
893 launch_thread (void *d
)
895 launch_data
*data
= (launch_data
*)d
;
896 scm_i_pthread_detach (scm_i_pthread_self ());
897 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
901 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
902 (SCM thunk
, SCM handler
),
903 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
904 "returning a new thread object representing the thread. The procedure\n"
905 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
907 "When @var{handler} is specified, then @var{thunk} is called from\n"
908 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
909 "handler. This catch is established inside the continuation barrier.\n"
911 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
912 "the @emph{exit value} of the thread and the thread is terminated.")
913 #define FUNC_NAME s_scm_call_with_new_thread
919 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
920 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
921 handler
, SCM_ARG2
, FUNC_NAME
);
923 GC_collect_a_little ();
924 data
.parent
= scm_current_dynamic_state ();
926 data
.handler
= handler
;
927 data
.thread
= SCM_BOOL_F
;
928 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
929 scm_i_pthread_cond_init (&data
.cond
, NULL
);
931 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
932 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
935 scm_i_pthread_mutex_unlock (&data
.mutex
);
940 while (scm_is_false (data
.thread
))
941 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
943 scm_i_pthread_mutex_unlock (&data
.mutex
);
951 scm_t_catch_body body
;
953 scm_t_catch_handler handler
;
956 scm_i_pthread_mutex_t mutex
;
957 scm_i_pthread_cond_t cond
;
961 really_spawn (void *d
)
963 spawn_data
*data
= (spawn_data
*)d
;
964 scm_t_catch_body body
= data
->body
;
965 void *body_data
= data
->body_data
;
966 scm_t_catch_handler handler
= data
->handler
;
967 void *handler_data
= data
->handler_data
;
968 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
970 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
971 data
->thread
= scm_current_thread ();
972 scm_i_pthread_cond_signal (&data
->cond
);
973 scm_i_pthread_mutex_unlock (&data
->mutex
);
976 t
->result
= body (body_data
);
978 t
->result
= scm_internal_catch (SCM_BOOL_T
,
980 handler
, handler_data
);
/* pthread entry point for scm_spawn_thread: detach the new thread, then
   enter guile mode (with the creator's dynamic state as parent) and run
   really_spawn on the shared spawn_data.  Parallels launch_thread. */
986 spawn_thread (void *d
)
988 spawn_data
*data
= (spawn_data
*)d
;
989 scm_i_pthread_detach (scm_i_pthread_self ());
990 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
995 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
996 scm_t_catch_handler handler
, void *handler_data
)
1002 data
.parent
= scm_current_dynamic_state ();
1004 data
.body_data
= body_data
;
1005 data
.handler
= handler
;
1006 data
.handler_data
= handler_data
;
1007 data
.thread
= SCM_BOOL_F
;
1008 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1009 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1011 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1012 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1015 scm_i_pthread_mutex_unlock (&data
.mutex
);
1017 scm_syserror (NULL
);
1020 while (scm_is_false (data
.thread
))
1021 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1023 scm_i_pthread_mutex_unlock (&data
.mutex
);
1025 assert (SCM_I_IS_THREAD (data
.thread
));
1030 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1032 "Move the calling thread to the end of the scheduling queue.")
1033 #define FUNC_NAME s_scm_yield
1035 return scm_from_bool (scm_i_sched_yield ());
1039 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1041 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1042 "cannot be the current thread, and if @var{thread} has already terminated or "
1043 "been signaled to terminate, this function is a no-op.")
1044 #define FUNC_NAME s_scm_cancel_thread
1046 scm_i_thread
*t
= NULL
;
1048 SCM_VALIDATE_THREAD (1, thread
);
1049 t
= SCM_I_THREAD_DATA (thread
);
1050 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1054 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1055 scm_i_pthread_cancel (t
->pthread
);
1058 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1060 return SCM_UNSPECIFIED
;
1064 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1065 (SCM thread
, SCM proc
),
1066 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1067 "This handler will be called when the thread exits.")
1068 #define FUNC_NAME s_scm_set_thread_cleanup_x
1072 SCM_VALIDATE_THREAD (1, thread
);
1073 if (!scm_is_false (proc
))
1074 SCM_VALIDATE_THUNK (2, proc
);
1076 t
= SCM_I_THREAD_DATA (thread
);
1077 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1079 if (!(t
->exited
|| t
->canceled
))
1080 t
->cleanup_handler
= proc
;
1082 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1084 return SCM_UNSPECIFIED
;
1088 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1090 "Return the cleanup handler installed for the thread @var{thread}.")
1091 #define FUNC_NAME s_scm_thread_cleanup
1096 SCM_VALIDATE_THREAD (1, thread
);
1098 t
= SCM_I_THREAD_DATA (thread
);
1099 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1100 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1101 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
/* Join THREAD with no timeout and no timeout value: thin wrapper over
   scm_join_thread_timed. */
1107 SCM
scm_join_thread (SCM thread
)
1109 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1112 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1113 (SCM thread
, SCM timeout
, SCM timeoutval
),
1114 "Suspend execution of the calling thread until the target @var{thread} "
1115 "terminates, unless the target @var{thread} has already terminated. ")
1116 #define FUNC_NAME s_scm_join_thread_timed
1119 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1120 SCM res
= SCM_BOOL_F
;
1122 if (! (SCM_UNBNDP (timeoutval
)))
1125 SCM_VALIDATE_THREAD (1, thread
);
1126 if (scm_is_eq (scm_current_thread (), thread
))
1127 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1129 t
= SCM_I_THREAD_DATA (thread
);
1130 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1132 if (! SCM_UNBNDP (timeout
))
1134 to_timespec (timeout
, &ctimeout
);
1135 timeout_ptr
= &ctimeout
;
1144 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1154 else if (err
== ETIMEDOUT
)
1157 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1159 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1161 /* Check for exit again, since we just released and
1162 reacquired the admin mutex, before the next block_self
1163 call (which would block forever if t has already
1173 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1179 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1181 "Return @code{#t} if @var{obj} is a thread.")
1182 #define FUNC_NAME s_scm_thread_p
1184 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
/* Smob printer for fat mutexes: writes "#<mutex ADDR>" to PORT, with the
   fat_mutex struct's address in hex. */
1190 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1192 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1193 scm_puts_unlocked ("#<mutex ", port
);
1194 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1195 scm_puts_unlocked (">", port
);
/* Allocate and initialize a fat_mutex with the given behavior flags and
   wrap it in a mutex smob.  NOTE(review): the declarations of `m' and
   `mx', some field initializations, and the final return are elided from
   this listing. */
1200 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1204 scm_i_pthread_mutex_t lock
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
1206 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1207 /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
1208 and so we can just copy it. */
1209 memcpy (&m
->lock
, &lock
, sizeof (m
->lock
));
/* No owner yet. */
1210 m
->owner
= SCM_BOOL_F
;
1213 m
->recursive
= recursive
;
1214 m
->unchecked_unlock
= unchecked_unlock
;
1215 m
->allow_external_unlock
= external_unlock
;
/* `waiting' is set to SCM_EOL before the smob is created so the GC never
   sees an uninitialized field, then replaced by a real queue. */
1217 m
->waiting
= SCM_EOL
;
1218 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1219 m
->waiting
= make_queue ();
/* Create a standard (non-recursive, checked) mutex: wrapper over
   scm_make_mutex_with_flags with an empty flag list. */
1223 SCM
scm_make_mutex (void)
1225 return scm_make_mutex_with_flags (SCM_EOL
);
1228 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1229 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1230 SCM_SYMBOL (recursive_sym
, "recursive");
1232 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1234 "Create a new mutex. ")
1235 #define FUNC_NAME s_scm_make_mutex_with_flags
1237 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1240 while (! scm_is_null (ptr
))
1242 SCM flag
= SCM_CAR (ptr
);
1243 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1244 unchecked_unlock
= 1;
1245 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1246 external_unlock
= 1;
1247 else if (scm_is_eq (flag
, recursive_sym
))
1250 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1251 ptr
= SCM_CDR (ptr
);
1253 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1257 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1259 "Create a new recursive mutex. ")
1260 #define FUNC_NAME s_scm_make_recursive_mutex
1262 return make_fat_mutex (1, 0, 0);
1266 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1269 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1271 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1273 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1274 SCM err
= SCM_BOOL_F
;
1276 struct timeval current_time
;
1278 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1284 m
->owner
= new_owner
;
1287 if (SCM_I_IS_THREAD (new_owner
))
1289 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1291 /* FIXME: The order in which `t->admin_mutex' and
1292 `m->lock' are taken differs from that in
1293 `on_thread_exit', potentially leading to deadlocks. */
1294 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1296 /* Only keep a weak reference to MUTEX so that it's not
1297 retained when not referenced elsewhere (bug #27450).
1298 The weak pair itself is eventually removed when MUTEX
1299 is unlocked. Note that `t->mutexes' lists mutexes
1300 currently held by T, so it should be small. */
1301 t
->mutexes
= scm_cons (scm_make_weak_vector (SCM_INUM1
, mutex
),
1304 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1309 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1311 m
->owner
= new_owner
;
1312 err
= scm_cons (scm_abandoned_mutex_error_key
,
1313 scm_from_locale_string ("lock obtained on abandoned "
1318 else if (scm_is_eq (m
->owner
, new_owner
))
1327 err
= scm_cons (scm_misc_error_key
,
1328 scm_from_locale_string ("mutex already locked "
1336 if (timeout
!= NULL
)
1338 gettimeofday (¤t_time
, NULL
);
1339 if (current_time
.tv_sec
> timeout
->tv_sec
||
1340 (current_time
.tv_sec
== timeout
->tv_sec
&&
1341 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1347 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1348 scm_i_pthread_mutex_unlock (&m
->lock
);
1350 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1353 scm_i_pthread_mutex_unlock (&m
->lock
);
/* Lock MX with no timeout and no explicit owner: thin wrapper over
   scm_lock_mutex_timed. */
1357 SCM
scm_lock_mutex (SCM mx
)
1359 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
/* NOTE(review): extraction artifact — the numeric prefixes are original
   file line numbers fused into the text, and gaps in that numbering show
   dropped lines (here the opening brace and the declarations of
   `exception' and `ret').  Code left byte-identical; comments only.  */
1362 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1363 (SCM m
, SCM timeout
, SCM owner
),
1364 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1365 "thread blocks until the mutex becomes available. The function\n"
1366 "returns when the calling thread owns the lock on @var{m}.\n"
1367 "Locking a mutex that a thread already owns will succeed right\n"
1368 "away and will not block the thread. That is, Guile's mutexes\n"
1369 "are @emph{recursive}.")
1370 #define FUNC_NAME s_scm_lock_mutex_timed
1374 scm_t_timespec cwaittime
, *waittime
= NULL
;
1376 SCM_VALIDATE_MUTEX (1, m
);
/* Convert a Scheme timeout (unless #f/unbound) to a timespec.  */
1378 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1380 to_timespec (timeout
, &cwaittime
);
1381 waittime
= &cwaittime
;
1384 if (!SCM_UNBNDP (owner
) && !scm_is_false (owner
))
1385 SCM_VALIDATE_THREAD (3, owner
);
/* fat_mutex_lock reports errors as a (key . args) pair; rethrow it.  */
1387 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1388 if (!scm_is_false (exception
))
1389 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1390 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1395 lock_mutex_return_void (SCM mx
)
1397 (void) scm_lock_mutex (mx
);
1401 unlock_mutex_return_void (SCM mx
)
1403 (void) scm_unlock_mutex (mx
);
1407 scm_dynwind_lock_mutex (SCM mutex
)
1409 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void
, mutex
,
1410 SCM_F_WIND_EXPLICITLY
);
1411 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void
, mutex
,
1412 SCM_F_WIND_EXPLICITLY
);
/* NOTE(review): extraction dropped the parameter list line, the opening
   brace, and the declarations of `exception' and `ret'.  Bytes kept.
   Non-blocking lock: a zero timeout makes fat_mutex_lock return at once.  */
1415 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1417 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1418 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1419 #define FUNC_NAME s_scm_try_mutex
1423 scm_t_timespec cwaittime
, *waittime
= NULL
;
1425 SCM_VALIDATE_MUTEX (1, mutex
);
/* Zero-length wait == poll.  */
1427 to_timespec (scm_from_int(0), &cwaittime
);
1428 waittime
= &cwaittime
;
1430 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1431 if (!scm_is_false (exception
))
1432 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1433 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1437 /*** Fat condition variables */
/* NOTE(review): the `typedef struct { ... } fat_cond;' wrapper lines were
   dropped by extraction; only the two members survive below.  */
1440 scm_i_pthread_mutex_t lock
;
1441 SCM waiting
; /* the threads waiting for this condition. */
1444 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1445 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
/* Unlink the weak-vector entry holding MUTEX from T's `mutexes' list.
   NOTE(review): extraction dropped the return type, local declarations of
   `walk'/`prev', braces, and presumably a trailing `break;' — confirm
   against the original file.  Bytes kept.  */
1448 remove_mutex_from_thread (SCM mutex
, scm_i_thread
*t
)
1452 for (prev
= SCM_BOOL_F
, walk
= t
->mutexes
; scm_is_pair (walk
);
1453 walk
= SCM_CDR (walk
))
/* Each list cell holds a one-element weak vector wrapping a mutex.  */
1455 if (scm_is_eq (mutex
, scm_c_weak_vector_ref (SCM_CAR (walk
), 0)))
1457 if (scm_is_pair (prev
))
1458 SCM_SETCDR (prev
, SCM_CDR (walk
));
1460 t
->mutexes
= SCM_CDR (walk
);
/* Unlock MUTEX; when COND is given, atomically wait on COND (with
   optional WAITTIME) and, if RELOCK, re-acquire the mutex afterwards.
   NOTE(review): many original lines (return type, control-flow braces,
   level bookkeeping) were dropped by extraction — bytes kept as-is.  */
1467 fat_mutex_unlock (SCM mutex
, SCM cond
,
1468 const scm_t_timespec
*waittime
, int relock
)
1471 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1473 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1474 int err
= 0, ret
= 0;
1476 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Unlocking a mutex we do not own is an error unless the mutex was
   created unchecked or allows external unlock.  */
1480 if (!scm_is_eq (owner
, t
->handle
))
1484 if (!m
->unchecked_unlock
)
1486 scm_i_pthread_mutex_unlock (&m
->lock
);
1487 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1491 else if (!m
->allow_external_unlock
)
1493 scm_i_pthread_mutex_unlock (&m
->lock
);
1494 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
/* wait-condition-variable path: release the mutex, then block on C.  */
1498 if (! (SCM_UNBNDP (cond
)))
1500 c
= SCM_CONDVAR_DATA (cond
);
1509 /* Change the owner of MUTEX. */
1510 remove_mutex_from_thread (mutex
, t
);
1511 m
->owner
= unblock_from_queue (m
->waiting
);
1516 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1517 scm_i_pthread_mutex_unlock (&m
->lock
);
1524 else if (err
== ETIMEDOUT
)
1529 else if (err
!= EINTR
)
1532 scm_syserror (NULL
);
/* Re-acquire the mutex on behalf of the original owner when RELOCK.  */
1538 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1546 scm_remember_upto_here_2 (cond
, mutex
);
1548 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1557 /* Change the owner of MUTEX. */
1558 remove_mutex_from_thread (mutex
, t
);
1559 m
->owner
= unblock_from_queue (m
->waiting
);
1562 scm_i_pthread_mutex_unlock (&m
->lock
);
1569 SCM
scm_unlock_mutex (SCM mx
)
1571 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1574 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1575 (SCM mx
, SCM cond
, SCM timeout
),
1576 "Unlocks @var{mutex} if the calling thread owns the lock on "
1577 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1578 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1579 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1580 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1581 "with a call to @code{unlock-mutex}. Only the last call to "
1582 "@code{unlock-mutex} will actually unlock the mutex. ")
1583 #define FUNC_NAME s_scm_unlock_mutex_timed
1585 scm_t_timespec cwaittime
, *waittime
= NULL
;
1587 SCM_VALIDATE_MUTEX (1, mx
);
1588 if (! (SCM_UNBNDP (cond
)))
1590 SCM_VALIDATE_CONDVAR (2, cond
);
1592 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1594 to_timespec (timeout
, &cwaittime
);
1595 waittime
= &cwaittime
;
1599 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1603 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1605 "Return @code{#t} if @var{obj} is a mutex.")
1606 #define FUNC_NAME s_scm_mutex_p
1608 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
/* NOTE(review): extraction dropped the `SCM owner' declaration, the
   `owner = m->owner;' read under the lock, and the final return —
   confirm against the original file.  Bytes kept; comments only.  */
1612 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1614 "Return the thread owning @var{mx}, or @code{#f}.")
1615 #define FUNC_NAME s_scm_mutex_owner
1618 fat_mutex
*m
= NULL
;
1620 SCM_VALIDATE_MUTEX (1, mx
);
1621 m
= SCM_MUTEX_DATA (mx
);
/* The owner field is read under the mutex's internal lock.  */
1622 scm_i_pthread_mutex_lock (&m
->lock
);
1624 scm_i_pthread_mutex_unlock (&m
->lock
);
1630 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1632 "Return the lock level of mutex @var{mx}.")
1633 #define FUNC_NAME s_scm_mutex_level
1635 SCM_VALIDATE_MUTEX (1, mx
);
1636 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1640 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1642 "Returns @code{#t} if the mutex @var{mx} is locked.")
1643 #define FUNC_NAME s_scm_mutex_locked_p
1645 SCM_VALIDATE_MUTEX (1, mx
);
1646 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
/* Smob printer for condition variables: writes
   "#<condition-variable HEXADDR>" to PORT.
   NOTE(review): extraction dropped the return type (smob printers
   return int) and the final `return 1;' — confirm.  Bytes kept.  */
1651 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1653 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1654 scm_puts_unlocked ("#<condition-variable ", port
);
/* Print the payload address in hex as the identity.  */
1655 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1656 scm_puts_unlocked (">", port
);
/* NOTE(review): extraction dropped the declarations of `cv'/`c' and the
   final `return cv;' — confirm against the original.  Bytes kept.  */
1660 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1662 "Make a new condition variable.")
1663 #define FUNC_NAME s_scm_make_condition_variable
1668 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
/* Initialize `waiting' to a GC-safe value before the smob exists, then
   replace it with a real queue once CV can be traced.  */
1669 c
->waiting
= SCM_EOL
;
1670 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1671 c
->waiting
= make_queue ();
/* NOTE(review): extraction dropped the docstring's closing line and the
   body's braces.  Bytes kept; comments only.  */
1676 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1677 (SCM cv
, SCM mx
, SCM t
),
1678 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1679 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1680 "is locked again when this function returns. When @var{t} is given, "
1681 "it specifies a point in time where the waiting should be aborted. It "
1682 "can be either a integer as returned by @code{current-time} or a pair "
1683 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1684 "mutex is locked and @code{#f} is returned. When the condition "
1685 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1687 #define FUNC_NAME s_scm_timed_wait_condition_variable
1689 scm_t_timespec waittime
, *waitptr
= NULL
;
1691 SCM_VALIDATE_CONDVAR (1, cv
);
1692 SCM_VALIDATE_MUTEX (2, mx
);
1694 if (!SCM_UNBNDP (t
))
1696 to_timespec (t
, &waittime
);
1697 waitptr
= &waittime
;
/* relock=1: the mutex is re-acquired before returning.  */
1700 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1705 fat_cond_signal (fat_cond
*c
)
1707 unblock_from_queue (c
->waiting
);
/* NOTE(review): extraction dropped the parameter list, braces, and the
   trailing `return SCM_BOOL_T;' — confirm.  Bytes kept.  */
1710 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1712 "Wake up one thread that is waiting for @var{cv}")
1713 #define FUNC_NAME s_scm_signal_condition_variable
1715 SCM_VALIDATE_CONDVAR (1, cv
);
1716 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1722 fat_cond_broadcast (fat_cond
*c
)
1724 while (scm_is_true (unblock_from_queue (c
->waiting
)))
/* NOTE(review): extraction dropped the parameter list, braces, and the
   trailing `return SCM_BOOL_T;' — confirm.  Bytes kept.  */
1728 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1730 "Wake up all threads that are waiting for @var{cv}. ")
1731 #define FUNC_NAME s_scm_broadcast_condition_variable
1733 SCM_VALIDATE_CONDVAR (1, cv
);
1734 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1739 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1741 "Return @code{#t} if @var{obj} is a condition variable.")
1742 #define FUNC_NAME s_scm_condition_variable_p
1744 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
/* Fragment of `struct select_args' (other members — nfds, fd sets,
   result, errno_value — were dropped by extraction) followed by the
   worker that runs select(2) outside guile mode.  Bytes kept.  */
1758 struct timeval
*timeout
;
/* Runs with the thread in "without guile" mode (see scm_std_select):
   performs the blocking select and stashes result + errno.  */
1765 do_std_select (void *args
)
1767 struct select_args
*select_args
;
1769 select_args
= (struct select_args
*) args
;
1771 select_args
->result
=
1772 select (select_args
->nfds
,
1773 select_args
->read_fds
, select_args
->write_fds
,
1774 select_args
->except_fds
, select_args
->timeout
);
/* Capture errno immediately; later calls may overwrite it.  */
1775 select_args
->errno_value
= errno
;
/* Interruptible select for guile mode: adds the thread's sleep-pipe read
   end to the read set so asyncs can wake the thread, then delegates the
   blocking call to do_std_select via scm_without_guile.
   NOTE(review): extraction dropped several parameter lines, the local
   `my_readfds' declaration, error branches, and the final return —
   confirm against the original file.  Bytes kept; comments only.  */
1781 scm_std_select (int nfds
,
1785 struct timeval
*timeout
)
1788 int res
, eno
, wakeup_fd
;
1789 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1790 struct select_args args
;
/* Ensure we always have a read set to add the wakeup fd to.  */
1792 if (readfds
== NULL
)
1794 FD_ZERO (&my_readfds
);
1795 readfds
= &my_readfds
;
1798 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
/* Watch the sleep pipe's read end so a pending async interrupts us.  */
1801 wakeup_fd
= t
->sleep_pipe
[0];
1802 FD_SET (wakeup_fd
, readfds
);
1803 if (wakeup_fd
>= nfds
)
1807 args
.read_fds
= readfds
;
1808 args
.write_fds
= writefds
;
1809 args
.except_fds
= exceptfds
;
1810 args
.timeout
= timeout
;
1812 /* Explicitly cooperate with the GC. */
1813 scm_without_guile (do_std_select
, &args
);
1816 eno
= args
.errno_value
;
1819 scm_i_reset_sleep (t
);
/* If only the wakeup pipe fired, drain one byte and hide it from the
   caller's view of the read set.  */
1821 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1824 full_read (wakeup_fd
, &dummy
, 1);
1826 FD_CLR (wakeup_fd
, readfds
);
1838 /* Convenience API for blocking while in guile mode. */
1840 #if SCM_USE_PTHREAD_THREADS
1842 /* It seems reasonable to not run procedures related to mutex and condition
1843 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1844 without it, and (ii) the only potential gain would be GC latency. See
1845 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1846 for a discussion of the pros and cons. */
/* Plain pthread mutex lock without leaving guile mode (see the comment
   block above on why GC_do_blocking is not used here).
   NOTE(review): extraction dropped the `int' return type and the
   trailing `return res;' — confirm.  Bytes kept.  */
1849 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1851 int res
= scm_i_pthread_mutex_lock (mutex
);
1856 do_unlock (void *data
)
1858 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1862 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1864 scm_i_scm_pthread_mutex_lock (mutex
);
1865 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
/* Condition wait that records MUTEX in t->held_mutex so the mutex can
   be released if the thread is canceled while blocked.
   NOTE(review): extraction dropped the `int res;' declaration and the
   trailing `return res;' — confirm.  Bytes kept.  */
1869 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1872 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1874 t
->held_mutex
= mutex
;
1875 res
= scm_i_pthread_cond_wait (cond
, mutex
);
1876 t
->held_mutex
= NULL
;
/* Timed variant of scm_pthread_cond_wait; same held_mutex bookkeeping.
   NOTE(review): extraction dropped `int res;' and `return res;' —
   confirm.  Bytes kept.  */
1882 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1883 scm_i_pthread_mutex_t
*mutex
,
1884 const scm_t_timespec
*wt
)
1887 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1889 t
->held_mutex
= mutex
;
1890 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1891 t
->held_mutex
= NULL
;
1899 do_unlock_with_asyncs (void *data
)
1901 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1902 SCM_I_CURRENT_THREAD
->block_asyncs
--;
1906 scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t
*mutex
)
1908 SCM_I_CURRENT_THREAD
->block_asyncs
++;
1909 scm_i_scm_pthread_mutex_lock (mutex
);
1910 scm_dynwind_unwind_handler (do_unlock_with_asyncs
, mutex
,
1911 SCM_F_WIND_EXPLICITLY
);
/* Sleep for USECS microseconds via scm_std_select (so asyncs can
   interrupt); returns the unslept remainder in microseconds.
   NOTE(review): extraction dropped the return type and the
   `struct timeval tv;' declaration — confirm.  Bytes kept.  */
1915 scm_std_usleep (unsigned long usecs
)
1918 tv
.tv_usec
= usecs
% 1000000;
1919 tv
.tv_sec
= usecs
/ 1000000;
/* select updates TV with the time left when interrupted.  */
1920 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1921 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
/* Sleep for SECS seconds via scm_std_select.
   NOTE(review): extraction dropped the return type, the timeval setup
   (tv.tv_sec = secs etc.) and the return of the unslept remainder —
   confirm against the original file.  Bytes kept.  */
1925 scm_std_sleep (unsigned int secs
)
1930 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1936 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1938 "Return the thread that called this function.")
1939 #define FUNC_NAME s_scm_current_thread
1941 return SCM_I_CURRENT_THREAD
->handle
;
/* Build a list of N copies of FILL.
   NOTE(review): extraction dropped the return type, the `SCM res'
   initialization, the loop header, and the return — confirm.  */
1946 scm_c_make_list (size_t n
, SCM fill
)
1950 res
= scm_cons (fill
, res
);
/* NOTE(review): extraction dropped the list-truncation logic after the
   loop and the final return — confirm.  Bytes kept; comments only.  */
1954 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1956 "Return a list of all threads.")
1957 #define FUNC_NAME s_scm_all_threads
1959 /* We can not allocate while holding the thread_admin_mutex because
1960 of the way GC is done.
/* Pre-allocate the result list before taking the admin mutex, then fill
   it in place while walking the global thread list.  */
1962 int n
= thread_count
;
1964 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1966 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1968 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
/* The internal signal-delivery thread is not exposed to Scheme.  */
1970 if (t
!= scm_i_signal_delivery_thread
)
1972 SCM_SETCAR (*l
, t
->handle
);
1973 l
= SCM_CDRLOC (*l
);
1978 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1983 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1985 "Return @code{#t} iff @var{thread} has exited.\n")
1986 #define FUNC_NAME s_scm_thread_exited_p
1988 return scm_from_bool (scm_c_thread_exited_p (thread
));
/* C-level predicate behind thread-exited?.
   NOTE(review): extraction dropped the return type, the `scm_i_thread *t;'
   declaration, and the `return t->exited;' — confirm.  Bytes kept.  */
1993 scm_c_thread_exited_p (SCM thread
)
1994 #define FUNC_NAME s_scm_thread_exited_p
1997 SCM_VALIDATE_THREAD (1, thread
);
1998 t
= SCM_I_THREAD_DATA (thread
);
/* NOTE(review): docstring's @itemize/@end itemize wrapper lines and the
   body braces were dropped by extraction.  Bytes kept.  */
2003 SCM_DEFINE (scm_total_processor_count
, "total-processor-count", 0, 0, 0,
2005 "Return the total number of processors of the machine, which\n"
2006 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2007 "thread execution unit, which can be either:\n\n"
2009 "@item an execution core in a (possibly multi-core) chip, in a\n"
2010 " (possibly multi- chip) module, in a single computer, or\n"
2011 "@item a thread execution unit inside a core in the case of\n"
2012 " @dfn{hyper-threaded} CPUs.\n"
2014 "Which of the two definitions is used, is unspecified.\n")
2015 #define FUNC_NAME s_scm_total_processor_count
/* Delegates to gnulib's num_processors.  */
2017 return scm_from_ulong (num_processors (NPROC_ALL
));
/* NOTE(review): docstring's closing line and body braces were dropped by
   extraction.  Bytes kept.  */
2021 SCM_DEFINE (scm_current_processor_count
, "current-processor-count", 0, 0, 0,
2023 "Like @code{total-processor-count}, but return the number of\n"
2024 "processors available to the current process. See\n"
2025 "@code{setaffinity} and @code{getaffinity} for more\n"
2027 #define FUNC_NAME s_scm_current_processor_count
2029 return scm_from_ulong (num_processors (NPROC_CURRENT
));
/* File-scope synchronization state for thread bookkeeping and the
   SCM_CRITICAL_SECTION_START/END machinery.  */
2036 static scm_i_pthread_cond_t wake_up_cond
;
2037 static int threads_initialized_p
= 0;
2040 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2042 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
/* Fallback mutex used by scm_dynwind_critical_section when #f is passed.  */
2044 static SCM dynwind_critical_section_mutex
;
2047 scm_dynwind_critical_section (SCM mutex
)
2049 if (scm_is_false (mutex
))
2050 mutex
= dynwind_critical_section_mutex
;
2051 scm_dynwind_lock_mutex (mutex
);
2052 scm_dynwind_block_asyncs ();
2055 /*** Initialization */
2057 scm_i_pthread_mutex_t scm_i_misc_mutex
;
/* Shared recursive mutex attribute, initialized in scm_threads_prehistory.  */
2059 #if SCM_USE_PTHREAD_THREADS
2060 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
/* Very early thread-subsystem setup, run before the rest of libguile.
   NOTE(review): extraction dropped the return type, `#endif', the
   `thread_gc_kind =' assignment target for GC_new_kind, and its trailing
   arguments — confirm against the original file.  Bytes kept.  */
2064 scm_threads_prehistory (void *base
)
2066 #if SCM_USE_PTHREAD_THREADS
/* Guile mutexes default to the recursive pthread kind.  */
2067 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2068 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2069 PTHREAD_MUTEX_RECURSIVE
);
2072 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2073 scm_i_pthread_mutexattr_recursive
);
2074 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2075 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
/* Register a custom GC kind whose mark procedure (thread_mark, see file
   head) traces each thread's VM stack.  */
2078 GC_new_kind (GC_new_free_list (),
2079 GC_MAKE_PROC (GC_new_proc (thread_mark
), 0),
2082 guilify_self_1 ((struct GC_stack_base
*) base
);
/* Smob type tags for threads, mutexes and condition variables, followed
   by scm_init_threads.  NOTE(review): extraction dropped the
   `void scm_init_threads (...)' header, braces, and the sizeof argument
   on the condvar smob registration — confirm.  Bytes kept.  */
2085 scm_t_bits scm_tc16_thread
;
2086 scm_t_bits scm_tc16_mutex
;
2087 scm_t_bits scm_tc16_condvar
;
2092 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2093 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2095 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2096 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2098 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2100 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
2102 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2103 guilify_self_2 (SCM_BOOL_F
);
2104 threads_initialized_p
= 1;
/* Must come after threads_initialized_p is set: creating a mutex uses
   the now-live machinery.  */
2106 dynwind_critical_section_mutex
= scm_make_recursive_mutex ();
/* Late init helpers.  NOTE(review): extraction dropped return types and
   braces; the snarfed procedure registrations live in threads.x.  */
2110 scm_init_threads_default_dynamic_state ()
2112 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2113 scm_i_default_dynamic_state
= state
;
2117 scm_init_thread_procs ()
2119 #include "libguile/threads.x"
2123 /* IA64-specific things. */
/* NOTE(review): the #ifdef __ia64__ / per-OS #ifdef lines selecting the
   HP-UX, Linux, and FreeBSD variants below were dropped by extraction —
   each pair of functions targets one platform.  Bytes kept.  */
2127 # include <sys/param.h>
2128 # include <sys/pstat.h>
/* HP-UX: locate the RSE backing-store region via pstat.  */
2130 scm_ia64_register_backing_store_base (void)
2132 struct pst_vm_status vm_status
;
2134 while (pstat_getprocvm (&vm_status
, sizeof (vm_status
), 0, i
++) == 1)
2135 if (vm_status
.pst_type
== PS_RSESTACK
)
2136 return (void *) vm_status
.pst_vaddr
;
2140 scm_ia64_ar_bsp (const void *ctx
)
2143 __uc_get_ar_bsp (ctx
, &bsp
);
2144 return (void *) bsp
;
2148 # include <ucontext.h>
/* Linux/glibc variant.  */
2150 scm_ia64_register_backing_store_base (void)
2152 extern void *__libc_ia64_register_backing_store_base
;
2153 return __libc_ia64_register_backing_store_base
;
2156 scm_ia64_ar_bsp (const void *opaque
)
2158 const ucontext_t
*ctx
= opaque
;
2159 return (void *) ctx
->uc_mcontext
.sc_ar_bsp
;
2163 # include <ucontext.h>
/* FreeBSD variant: backing store base is a fixed address.  */
2165 scm_ia64_register_backing_store_base (void)
2167 return (void *)0x8000000000000000;
2170 scm_ia64_ar_bsp (const void *opaque
)
2172 const ucontext_t
*ctx
= opaque
;
2173 return (void *)(ctx
->uc_mcontext
.mc_special
.bspstore
2174 + ctx
->uc_mcontext
.mc_special
.ndirty
);
2176 # endif /* __FreeBSD__ */
2177 #endif /* __ia64__ */