1 /* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
2 * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014
3 * Free Software Foundation, Inc.
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public License
7 * as published by the Free Software Foundation; either version 3 of
8 * the License, or (at your option) any later version.
10 * This library is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
27 #include "libguile/bdw-gc.h"
28 #include <gc/gc_mark.h>
29 #include "libguile/_scm.h"
38 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
46 # include <pthread_np.h>
49 #include <sys/select.h>
55 #include "libguile/validate.h"
56 #include "libguile/root.h"
57 #include "libguile/eval.h"
58 #include "libguile/async.h"
59 #include "libguile/ports.h"
60 #include "libguile/threads.h"
61 #include "libguile/dynwind.h"
62 #include "libguile/iselect.h"
63 #include "libguile/fluids.h"
64 #include "libguile/continuations.h"
65 #include "libguile/gc.h"
66 #include "libguile/gc-inline.h"
67 #include "libguile/init.h"
68 #include "libguile/scmsigs.h"
69 #include "libguile/strings.h"
70 #include "libguile/vm.h"
72 #include <full-read.h>
77 /* The GC "kind" for threads that allow them to mark their VM
79 static int thread_gc_kind
;
81 static struct GC_ms_entry
*
82 thread_mark (GC_word
*addr
, struct GC_ms_entry
*mark_stack_ptr
,
83 struct GC_ms_entry
*mark_stack_limit
, GC_word env
)
86 const struct scm_i_thread
*t
= (struct scm_i_thread
*) addr
;
88 if (SCM_UNPACK (t
->handle
) == 0)
89 /* T must be on the free-list; ignore. (See warning in
91 return mark_stack_ptr
;
93 /* Mark T. We could be more precise, but it doesn't matter. */
94 for (word
= 0; word
* sizeof (*addr
) < sizeof (*t
); word
++)
95 mark_stack_ptr
= GC_MARK_AND_PUSH ((void *) addr
[word
],
96 mark_stack_ptr
, mark_stack_limit
,
99 /* The pointerless freelists are threaded through their first word,
100 but GC doesn't know to trace them (as they are pointerless), so we
101 need to do that here. See the comments at the top of libgc's
105 for (n
= 0; n
< SCM_INLINE_GC_FREELIST_COUNT
; n
++)
107 void *chain
= t
->pointerless_freelists
[n
];
110 /* The first link is already marked by the freelist vector,
111 so we just have to mark the tail. */
112 while ((chain
= *(void **)chain
))
113 mark_stack_ptr
= GC_mark_and_push (chain
, mark_stack_ptr
,
114 mark_stack_limit
, NULL
);
120 mark_stack_ptr
= scm_i_vm_mark_stack (t
->vp
, mark_stack_ptr
,
123 return mark_stack_ptr
;
129 to_timespec (SCM t
, scm_t_timespec
*waittime
)
133 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
134 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
138 double time
= scm_to_double (t
);
139 double sec
= scm_c_truncate (time
);
141 waittime
->tv_sec
= (long) sec
;
142 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
150 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
151 the risk of false references leading to unbounded retained space as
152 described in "Bounding Space Usage of Conservative Garbage Collectors",
155 /* Make an empty queue data structure.
160 return scm_cons (SCM_EOL
, SCM_EOL
);
163 /* Put T at the back of Q and return a handle that can be used with
164 remqueue to remove T from Q again.
167 enqueue (SCM q
, SCM t
)
169 SCM c
= scm_cons (t
, SCM_EOL
);
170 SCM_CRITICAL_SECTION_START
;
171 if (scm_is_null (SCM_CDR (q
)))
174 SCM_SETCDR (SCM_CAR (q
), c
);
176 SCM_CRITICAL_SECTION_END
;
180 /* Remove the element that the handle C refers to from the queue Q. C
181 must have been returned from a call to enqueue. The return value
182 is zero when the element referred to by C has already been removed.
183 Otherwise, 1 is returned.
186 remqueue (SCM q
, SCM c
)
189 SCM_CRITICAL_SECTION_START
;
190 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
192 if (scm_is_eq (p
, c
))
194 if (scm_is_eq (c
, SCM_CAR (q
)))
195 SCM_SETCAR (q
, scm_is_eq (prev
, q
) ? SCM_EOL
: prev
);
196 SCM_SETCDR (prev
, SCM_CDR (c
));
199 SCM_SETCDR (c
, SCM_EOL
);
201 SCM_CRITICAL_SECTION_END
;
206 SCM_CRITICAL_SECTION_END
;
210 /* Remove the front-most element from the queue Q and return it.
211 Return SCM_BOOL_F when Q is empty.
217 SCM_CRITICAL_SECTION_START
;
221 SCM_CRITICAL_SECTION_END
;
226 SCM_SETCDR (q
, SCM_CDR (c
));
227 if (scm_is_null (SCM_CDR (q
)))
228 SCM_SETCAR (q
, SCM_EOL
);
229 SCM_CRITICAL_SECTION_END
;
232 SCM_SETCDR (c
, SCM_EOL
);
238 /*** Thread smob routines */
242 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
244 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
245 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
246 the struct case, hence we go via a union, and extract according to the
247 size of pthread_t. */
255 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
256 scm_i_pthread_t p
= t
->pthread
;
259 if (sizeof (p
) == sizeof (unsigned short))
261 else if (sizeof (p
) == sizeof (unsigned int))
263 else if (sizeof (p
) == sizeof (unsigned long))
268 scm_puts_unlocked ("#<thread ", port
);
269 scm_uintprint (id
, 10, port
);
270 scm_puts_unlocked (" (", port
);
271 scm_uintprint ((scm_t_bits
)t
, 16, port
);
272 scm_puts_unlocked (")>", port
);
277 /*** Blocking on queues. */
279 /* See also scm_i_queue_async_cell for how such a block is
283 /* Put the current thread on QUEUE and go to sleep, waiting for it to
284 be woken up by a call to 'unblock_from_queue', or to be
285 interrupted. Upon return of this function, the current thread is
286 no longer on QUEUE, even when the sleep has been interrupted.
288 The caller of block_self must hold MUTEX. It will be atomically
289 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
291 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
294 When WAITTIME is not NULL, the sleep will be aborted at that time.
296 The return value of block_self is an errno value. It will be zero
297 when the sleep has been successfully completed by a call to
298 unblock_from_queue, EINTR when it has been interrupted by the
299 delivery of a system async, and ETIMEDOUT when the timeout has
302 The system asyncs themselves are not executed by block_self.
305 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
306 const scm_t_timespec
*waittime
)
308 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
312 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
317 q_handle
= enqueue (queue
, t
->handle
);
318 if (waittime
== NULL
)
319 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
321 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
323 /* When we are still on QUEUE, we have been interrupted. We
324 report this only when no other error (such as a timeout) has
327 if (remqueue (queue
, q_handle
) && err
== 0)
330 scm_i_reset_sleep (t
);
336 /* Wake up the first thread on QUEUE, if any. The awoken thread is
337 returned, or #f if the queue was empty.
340 unblock_from_queue (SCM queue
)
342 SCM thread
= dequeue (queue
);
343 if (scm_is_true (thread
))
344 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
349 /* Getting into and out of guile mode.
352 /* Key used to attach a cleanup handler to a given thread. Also, if
353 thread-local storage is unavailable, this key is used to retrieve the
354 current thread with `pthread_getspecific ()'. */
355 scm_i_pthread_key_t scm_i_thread_key
;
358 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
360 /* When thread-local storage (TLS) is available, a pointer to the
361 current-thread object is kept in TLS. Note that storing the thread-object
362 itself in TLS (rather than a pointer to some malloc'd memory) is not
363 possible since thread objects may live longer than the actual thread they
365 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
367 #endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
370 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
371 static scm_i_thread
*all_threads
= NULL
;
372 static int thread_count
;
374 static SCM scm_i_default_dynamic_state
;
376 /* Run when a fluid is collected. */
378 scm_i_reset_fluid (size_t n
)
382 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
383 for (t
= all_threads
; t
; t
= t
->next_thread
)
384 if (SCM_I_DYNAMIC_STATE_P (t
->dynamic_state
))
386 SCM v
= SCM_I_DYNAMIC_STATE_FLUIDS (t
->dynamic_state
);
388 if (n
< SCM_SIMPLE_VECTOR_LENGTH (v
))
389 SCM_SIMPLE_VECTOR_SET (v
, n
, SCM_UNDEFINED
);
391 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
394 /* Perform first stage of thread initialisation, in non-guile mode.
397 guilify_self_1 (struct GC_stack_base
*base
)
401 /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
402 before allocating anything in this thread, because allocation could
403 cause GC to run, and GC could cause finalizers, which could invoke
404 Scheme functions, which need the current thread to be set. */
406 t
.pthread
= scm_i_pthread_self ();
407 t
.handle
= SCM_BOOL_F
;
408 t
.result
= SCM_BOOL_F
;
409 t
.cleanup_handler
= SCM_BOOL_F
;
412 t
.join_queue
= SCM_EOL
;
414 t
.pointerless_freelists
= NULL
;
415 t
.dynamic_state
= SCM_BOOL_F
;
416 t
.dynstack
.base
= NULL
;
417 t
.dynstack
.top
= NULL
;
418 t
.dynstack
.limit
= NULL
;
419 t
.active_asyncs
= SCM_EOL
;
421 t
.pending_asyncs
= 1;
422 t
.critical_section_level
= 0;
423 t
.base
= base
->mem_base
;
425 t
.register_backing_store_base
= base
->reg_base
;
427 t
.continuation_root
= SCM_EOL
;
428 t
.continuation_base
= t
.base
;
429 scm_i_pthread_cond_init (&t
.sleep_cond
, NULL
);
430 t
.sleep_mutex
= NULL
;
431 t
.sleep_object
= SCM_BOOL_F
;
435 if (pipe2 (t
.sleep_pipe
, O_CLOEXEC
) != 0)
436 /* FIXME: Error conditions during the initialization phase are handled
437 gracelessly since public functions such as `scm_init_guile ()'
438 currently have type `void'. */
441 scm_i_pthread_mutex_init (&t
.admin_mutex
, NULL
);
446 /* The switcheroo. */
448 scm_i_thread
*t_ptr
= &t
;
451 t_ptr
= GC_generic_malloc (sizeof (*t_ptr
), thread_gc_kind
);
452 memcpy (t_ptr
, &t
, sizeof t
);
454 scm_i_pthread_setspecific (scm_i_thread_key
, t_ptr
);
456 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
457 /* Cache the current thread in TLS for faster lookup. */
458 scm_i_current_thread
= t_ptr
;
461 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
462 t_ptr
->next_thread
= all_threads
;
465 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
471 /* Perform second stage of thread initialisation, in guile mode.
474 guilify_self_2 (SCM parent
)
476 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
480 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
482 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
483 t
->continuation_base
= t
->base
;
486 size_t size
= SCM_INLINE_GC_FREELIST_COUNT
* sizeof (void *);
487 t
->freelists
= scm_gc_malloc (size
, "freelists");
488 t
->pointerless_freelists
= scm_gc_malloc (size
, "atomic freelists");
491 if (scm_is_true (parent
))
492 t
->dynamic_state
= scm_make_dynamic_state (parent
);
494 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
496 t
->dynstack
.base
= scm_gc_malloc (16 * sizeof (scm_t_bits
), "dynstack");
497 t
->dynstack
.limit
= t
->dynstack
.base
+ 16;
498 t
->dynstack
.top
= t
->dynstack
.base
+ SCM_DYNSTACK_HEADER_LEN
;
500 t
->join_queue
= make_queue ();
503 /* See note in finalizers.c:queue_finalizer_async(). */
504 GC_invoke_finalizers ();
510 /* We implement our own mutex type since we want them to be 'fair', we
511 want to do fancy things while waiting for them (like running
512 asyncs) and we might want to add things that are nice for
517 scm_i_pthread_mutex_t lock
;
519 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
521 int recursive
; /* allow recursive locking? */
522 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
523 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
524 owned by the current thread? */
526 SCM waiting
; /* the threads waiting for this mutex. */
529 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
530 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
533 call_cleanup (void *data
)
536 return scm_call_0 (*proc_p
);
539 /* Perform thread tear-down, in guile mode.
542 do_thread_exit (void *v
)
544 scm_i_thread
*t
= (scm_i_thread
*) v
;
546 if (!scm_is_false (t
->cleanup_handler
))
548 SCM ptr
= t
->cleanup_handler
;
550 t
->cleanup_handler
= SCM_BOOL_F
;
551 t
->result
= scm_internal_catch (SCM_BOOL_T
,
553 scm_handle_by_message_noexit
, NULL
);
556 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
559 close (t
->sleep_pipe
[0]);
560 close (t
->sleep_pipe
[1]);
561 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
564 while (!scm_is_null (t
->mutexes
))
566 SCM mutex
= scm_c_weak_vector_ref (scm_car (t
->mutexes
), 0);
568 if (scm_is_true (mutex
))
570 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
572 scm_i_pthread_mutex_lock (&m
->lock
);
574 /* Check whether T owns MUTEX. This is usually the case, unless
575 T abandoned MUTEX; in that case, T is no longer its owner (see
576 `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
577 if (scm_is_eq (m
->owner
, t
->handle
))
578 unblock_from_queue (m
->waiting
);
580 scm_i_pthread_mutex_unlock (&m
->lock
);
583 t
->mutexes
= scm_cdr (t
->mutexes
);
586 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
592 do_thread_exit_trampoline (struct GC_stack_base
*sb
, void *v
)
594 /* Won't hurt if we are already registered. */
595 #if SCM_USE_PTHREAD_THREADS
596 GC_register_my_thread (sb
);
599 return scm_with_guile (do_thread_exit
, v
);
603 on_thread_exit (void *v
)
605 /* This handler is executed in non-guile mode. */
606 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
608 /* If we were canceled, we were unable to clear `t->guile_mode', so do
612 /* If this thread was cancelled while doing a cond wait, it will
613 still have a mutex locked, so we unlock it here. */
616 scm_i_pthread_mutex_unlock (t
->held_mutex
);
617 t
->held_mutex
= NULL
;
620 /* Reinstate the current thread for purposes of scm_with_guile
621 guile-mode cleanup handlers. Only really needed in the non-TLS
622 case but it doesn't hurt to be consistent. */
623 scm_i_pthread_setspecific (scm_i_thread_key
, t
);
625 /* Scheme-level thread finalizers and other cleanup needs to happen in
627 GC_call_with_stack_base (do_thread_exit_trampoline
, t
);
629 /* Removing ourself from the list of all threads needs to happen in
630 non-guile mode since all SCM values on our stack become
631 unprotected once we are no longer in the list. */
632 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
633 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
636 *tp
= t
->next_thread
;
639 t
->next_thread
= NULL
;
645 /* If there's only one other thread, it could be the signal delivery
646 thread, so we need to notify it to shut down by closing its read pipe.
647 If it's not the signal delivery thread, then closing the read pipe isn't
649 if (thread_count
<= 1)
650 scm_i_close_signal_pipe ();
652 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
654 scm_i_pthread_setspecific (scm_i_thread_key
, NULL
);
658 scm_i_vm_free_stack (t
->vp
);
662 #if SCM_USE_PTHREAD_THREADS
663 GC_unregister_my_thread ();
667 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
670 init_thread_key (void)
672 scm_i_pthread_key_create (&scm_i_thread_key
, on_thread_exit
);
675 /* Perform any initializations necessary to make the current thread
676 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
679 BASE is the stack base to use with GC.
681 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
682 which case the default dynamic state is used.
684 Returns zero when the thread was known to guile already; otherwise
687 Note that it could be the case that the thread was known
688 to Guile, but not in guile mode (because we are within a
689 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
690 be sure. New threads are put into guile mode implicitly. */
693 scm_i_init_thread_for_guile (struct GC_stack_base
*base
, SCM parent
)
695 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
697 if (SCM_I_CURRENT_THREAD
)
699 /* Thread is already known to Guile.
705 /* This thread has not been guilified yet.
708 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
709 if (scm_initialized_p
== 0)
711 /* First thread ever to enter Guile. Run the full
714 scm_i_init_guile (base
);
716 #if SCM_USE_PTHREAD_THREADS
717 /* Allow other threads to come in later. */
718 GC_allow_register_threads ();
721 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
725 /* Guile is already initialized, but this thread enters it for
726 the first time. Only initialize this thread.
728 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
730 /* Register this thread with libgc. */
731 #if SCM_USE_PTHREAD_THREADS
732 GC_register_my_thread (base
);
735 guilify_self_1 (base
);
736 guilify_self_2 (parent
);
745 struct GC_stack_base stack_base
;
747 if (GC_get_stack_base (&stack_base
) == GC_SUCCESS
)
748 scm_i_init_thread_for_guile (&stack_base
,
749 scm_i_default_dynamic_state
);
752 fprintf (stderr
, "Failed to get stack base for current thread.\n");
757 struct with_guile_args
765 with_guile_trampoline (void *data
)
767 struct with_guile_args
*args
= data
;
769 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
773 with_guile_and_parent (struct GC_stack_base
*base
, void *data
)
778 struct with_guile_args
*args
= data
;
780 new_thread
= scm_i_init_thread_for_guile (base
, args
->parent
);
781 t
= SCM_I_CURRENT_THREAD
;
784 /* We are in Guile mode. */
785 assert (t
->guile_mode
);
787 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
789 /* Leave Guile mode. */
792 else if (t
->guile_mode
)
794 /* Already in Guile mode. */
795 res
= scm_c_with_continuation_barrier (args
->func
, args
->data
);
799 /* We are not in Guile mode, either because we are not within a
800 scm_with_guile, or because we are within a scm_without_guile.
802 This call to scm_with_guile() could happen from anywhere on the
803 stack, and in particular lower on the stack than when it was
804 when this thread was first guilified. Thus, `base' must be
806 #if SCM_STACK_GROWS_UP
807 if (SCM_STACK_PTR (base
->mem_base
) < t
->base
)
808 t
->base
= SCM_STACK_PTR (base
->mem_base
);
810 if (SCM_STACK_PTR (base
->mem_base
) > t
->base
)
811 t
->base
= SCM_STACK_PTR (base
->mem_base
);
815 res
= GC_call_with_gc_active (with_guile_trampoline
, args
);
822 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
824 struct with_guile_args args
;
828 args
.parent
= parent
;
830 return GC_call_with_stack_base (with_guile_and_parent
, &args
);
834 scm_with_guile (void *(*func
)(void *), void *data
)
836 return scm_i_with_guile_and_parent (func
, data
,
837 scm_i_default_dynamic_state
);
841 scm_without_guile (void *(*func
)(void *), void *data
)
844 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
848 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
849 result
= GC_do_blocking (func
, data
);
850 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
853 /* Otherwise we're not in guile mode, so nothing to do. */
854 result
= func (data
);
860 /*** Thread creation */
867 scm_i_pthread_mutex_t mutex
;
868 scm_i_pthread_cond_t cond
;
872 really_launch (void *d
)
874 launch_data
*data
= (launch_data
*)d
;
875 SCM thunk
= data
->thunk
, handler
= data
->handler
;
878 t
= SCM_I_CURRENT_THREAD
;
880 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
881 data
->thread
= scm_current_thread ();
882 scm_i_pthread_cond_signal (&data
->cond
);
883 scm_i_pthread_mutex_unlock (&data
->mutex
);
885 if (SCM_UNBNDP (handler
))
886 t
->result
= scm_call_0 (thunk
);
888 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
894 launch_thread (void *d
)
896 launch_data
*data
= (launch_data
*)d
;
897 scm_i_pthread_detach (scm_i_pthread_self ());
898 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
902 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
903 (SCM thunk
, SCM handler
),
904 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
905 "returning a new thread object representing the thread. The procedure\n"
906 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
908 "When @var{handler} is specified, then @var{thunk} is called from\n"
909 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
910 "handler. This catch is established inside the continuation barrier.\n"
912 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
913 "the @emph{exit value} of the thread and the thread is terminated.")
914 #define FUNC_NAME s_scm_call_with_new_thread
920 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
921 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
922 handler
, SCM_ARG2
, FUNC_NAME
);
924 GC_collect_a_little ();
925 data
.parent
= scm_current_dynamic_state ();
927 data
.handler
= handler
;
928 data
.thread
= SCM_BOOL_F
;
929 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
930 scm_i_pthread_cond_init (&data
.cond
, NULL
);
932 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
933 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
936 scm_i_pthread_mutex_unlock (&data
.mutex
);
941 while (scm_is_false (data
.thread
))
942 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
944 scm_i_pthread_mutex_unlock (&data
.mutex
);
952 scm_t_catch_body body
;
954 scm_t_catch_handler handler
;
957 scm_i_pthread_mutex_t mutex
;
958 scm_i_pthread_cond_t cond
;
962 really_spawn (void *d
)
964 spawn_data
*data
= (spawn_data
*)d
;
965 scm_t_catch_body body
= data
->body
;
966 void *body_data
= data
->body_data
;
967 scm_t_catch_handler handler
= data
->handler
;
968 void *handler_data
= data
->handler_data
;
969 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
971 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
972 data
->thread
= scm_current_thread ();
973 scm_i_pthread_cond_signal (&data
->cond
);
974 scm_i_pthread_mutex_unlock (&data
->mutex
);
977 t
->result
= body (body_data
);
979 t
->result
= scm_internal_catch (SCM_BOOL_T
,
981 handler
, handler_data
);
987 spawn_thread (void *d
)
989 spawn_data
*data
= (spawn_data
*)d
;
990 scm_i_pthread_detach (scm_i_pthread_self ());
991 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
996 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
997 scm_t_catch_handler handler
, void *handler_data
)
1003 data
.parent
= scm_current_dynamic_state ();
1005 data
.body_data
= body_data
;
1006 data
.handler
= handler
;
1007 data
.handler_data
= handler_data
;
1008 data
.thread
= SCM_BOOL_F
;
1009 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1010 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1012 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1013 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1016 scm_i_pthread_mutex_unlock (&data
.mutex
);
1018 scm_syserror (NULL
);
1021 while (scm_is_false (data
.thread
))
1022 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1024 scm_i_pthread_mutex_unlock (&data
.mutex
);
1026 assert (SCM_I_IS_THREAD (data
.thread
));
1031 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1033 "Move the calling thread to the end of the scheduling queue.")
1034 #define FUNC_NAME s_scm_yield
1036 return scm_from_bool (scm_i_sched_yield ());
1040 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1042 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1043 "cannot be the current thread, and if @var{thread} has already terminated or "
1044 "been signaled to terminate, this function is a no-op.")
1045 #define FUNC_NAME s_scm_cancel_thread
1047 scm_i_thread
*t
= NULL
;
1049 SCM_VALIDATE_THREAD (1, thread
);
1050 t
= SCM_I_THREAD_DATA (thread
);
1051 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1055 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1056 scm_i_pthread_cancel (t
->pthread
);
1059 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1061 return SCM_UNSPECIFIED
;
1065 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1066 (SCM thread
, SCM proc
),
1067 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1068 "This handler will be called when the thread exits.")
1069 #define FUNC_NAME s_scm_set_thread_cleanup_x
1073 SCM_VALIDATE_THREAD (1, thread
);
1074 if (!scm_is_false (proc
))
1075 SCM_VALIDATE_THUNK (2, proc
);
1077 t
= SCM_I_THREAD_DATA (thread
);
1078 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1080 if (!(t
->exited
|| t
->canceled
))
1081 t
->cleanup_handler
= proc
;
1083 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1085 return SCM_UNSPECIFIED
;
1089 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1091 "Return the cleanup handler installed for the thread @var{thread}.")
1092 #define FUNC_NAME s_scm_thread_cleanup
1097 SCM_VALIDATE_THREAD (1, thread
);
1099 t
= SCM_I_THREAD_DATA (thread
);
1100 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1101 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1102 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1108 SCM
scm_join_thread (SCM thread
)
1110 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1113 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1114 (SCM thread
, SCM timeout
, SCM timeoutval
),
1115 "Suspend execution of the calling thread until the target @var{thread} "
1116 "terminates, unless the target @var{thread} has already terminated. ")
1117 #define FUNC_NAME s_scm_join_thread_timed
1120 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1121 SCM res
= SCM_BOOL_F
;
1123 if (! (SCM_UNBNDP (timeoutval
)))
1126 SCM_VALIDATE_THREAD (1, thread
);
1127 if (scm_is_eq (scm_current_thread (), thread
))
1128 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1130 t
= SCM_I_THREAD_DATA (thread
);
1131 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1133 if (! SCM_UNBNDP (timeout
))
1135 to_timespec (timeout
, &ctimeout
);
1136 timeout_ptr
= &ctimeout
;
1145 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1155 else if (err
== ETIMEDOUT
)
1158 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1160 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1162 /* Check for exit again, since we just released and
1163 reacquired the admin mutex, before the next block_self
1164 call (which would block forever if t has already
1174 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1180 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1182 "Return @code{#t} if @var{obj} is a thread.")
1183 #define FUNC_NAME s_scm_thread_p
1185 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1191 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1193 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1194 scm_puts_unlocked ("#<mutex ", port
);
1195 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1196 scm_puts_unlocked (">", port
);
1201 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1205 scm_i_pthread_mutex_t lock
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
1207 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1208 /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
1209 and so we can just copy it. */
1210 memcpy (&m
->lock
, &lock
, sizeof (m
->lock
));
1211 m
->owner
= SCM_BOOL_F
;
1214 m
->recursive
= recursive
;
1215 m
->unchecked_unlock
= unchecked_unlock
;
1216 m
->allow_external_unlock
= external_unlock
;
1218 m
->waiting
= SCM_EOL
;
1219 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1220 m
->waiting
= make_queue ();
1224 SCM
scm_make_mutex (void)
1226 return scm_make_mutex_with_flags (SCM_EOL
);
1229 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1230 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1231 SCM_SYMBOL (recursive_sym
, "recursive");
1233 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1235 "Create a new mutex. ")
1236 #define FUNC_NAME s_scm_make_mutex_with_flags
1238 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1241 while (! scm_is_null (ptr
))
1243 SCM flag
= SCM_CAR (ptr
);
1244 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1245 unchecked_unlock
= 1;
1246 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1247 external_unlock
= 1;
1248 else if (scm_is_eq (flag
, recursive_sym
))
1251 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1252 ptr
= SCM_CDR (ptr
);
1254 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1258 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1260 "Create a new recursive mutex. ")
1261 #define FUNC_NAME s_scm_make_recursive_mutex
1263 return make_fat_mutex (1, 0, 0);
1267 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1270 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1272 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1274 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1275 SCM err
= SCM_BOOL_F
;
1277 struct timeval current_time
;
1279 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1285 m
->owner
= new_owner
;
1288 if (SCM_I_IS_THREAD (new_owner
))
1290 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1292 /* FIXME: The order in which `t->admin_mutex' and
1293 `m->lock' are taken differs from that in
1294 `on_thread_exit', potentially leading to deadlocks. */
1295 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1297 /* Only keep a weak reference to MUTEX so that it's not
1298 retained when not referenced elsewhere (bug #27450).
1299 The weak pair itself is eventually removed when MUTEX
1300 is unlocked. Note that `t->mutexes' lists mutexes
1301 currently held by T, so it should be small. */
1302 t
->mutexes
= scm_cons (scm_make_weak_vector (SCM_INUM1
, mutex
),
1305 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1310 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1312 m
->owner
= new_owner
;
1313 err
= scm_cons (scm_abandoned_mutex_error_key
,
1314 scm_from_locale_string ("lock obtained on abandoned "
1319 else if (scm_is_eq (m
->owner
, new_owner
))
1328 err
= scm_cons (scm_misc_error_key
,
1329 scm_from_locale_string ("mutex already locked "
1337 if (timeout
!= NULL
)
1339 gettimeofday (¤t_time
, NULL
);
1340 if (current_time
.tv_sec
> timeout
->tv_sec
||
1341 (current_time
.tv_sec
== timeout
->tv_sec
&&
1342 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1348 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1349 scm_i_pthread_mutex_unlock (&m
->lock
);
1351 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1354 scm_i_pthread_mutex_unlock (&m
->lock
);
1358 SCM
scm_lock_mutex (SCM mx
)
1360 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1363 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1364 (SCM m
, SCM timeout
, SCM owner
),
1365 "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
1366 "thread blocks until the mutex becomes available. The function\n"
1367 "returns when the calling thread owns the lock on @var{m}.\n"
1368 "Locking a mutex that a thread already owns will succeed right\n"
1369 "away and will not block the thread. That is, Guile's mutexes\n"
1370 "are @emph{recursive}.")
1371 #define FUNC_NAME s_scm_lock_mutex_timed
1375 scm_t_timespec cwaittime
, *waittime
= NULL
;
1377 SCM_VALIDATE_MUTEX (1, m
);
1379 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1381 to_timespec (timeout
, &cwaittime
);
1382 waittime
= &cwaittime
;
1385 if (!SCM_UNBNDP (owner
) && !scm_is_false (owner
))
1386 SCM_VALIDATE_THREAD (3, owner
);
1388 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1389 if (!scm_is_false (exception
))
1390 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1391 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1396 lock_mutex_return_void (SCM mx
)
1398 (void) scm_lock_mutex (mx
);
1402 unlock_mutex_return_void (SCM mx
)
1404 (void) scm_unlock_mutex (mx
);
1408 scm_dynwind_lock_mutex (SCM mutex
)
1410 scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void
, mutex
,
1411 SCM_F_WIND_EXPLICITLY
);
1412 scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void
, mutex
,
1413 SCM_F_WIND_EXPLICITLY
);
1416 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1418 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1419 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1420 #define FUNC_NAME s_scm_try_mutex
1424 scm_t_timespec cwaittime
, *waittime
= NULL
;
1426 SCM_VALIDATE_MUTEX (1, mutex
);
1428 to_timespec (scm_from_int(0), &cwaittime
);
1429 waittime
= &cwaittime
;
1431 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1432 if (!scm_is_false (exception
))
1433 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1434 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1438 /*** Fat condition variables */
1441 scm_i_pthread_mutex_t lock
;
1442 SCM waiting
; /* the threads waiting for this condition. */
1445 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1446 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1449 remove_mutex_from_thread (SCM mutex
, scm_i_thread
*t
)
1453 for (prev
= SCM_BOOL_F
, walk
= t
->mutexes
; scm_is_pair (walk
);
1454 walk
= SCM_CDR (walk
))
1456 if (scm_is_eq (mutex
, scm_c_weak_vector_ref (SCM_CAR (walk
), 0)))
1458 if (scm_is_pair (prev
))
1459 SCM_SETCDR (prev
, SCM_CDR (walk
));
1461 t
->mutexes
= SCM_CDR (walk
);
1468 fat_mutex_unlock (SCM mutex
, SCM cond
,
1469 const scm_t_timespec
*waittime
, int relock
)
1472 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1474 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1475 int err
= 0, ret
= 0;
1477 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1481 if (!scm_is_eq (owner
, t
->handle
))
1485 if (!m
->unchecked_unlock
)
1487 scm_i_pthread_mutex_unlock (&m
->lock
);
1488 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1492 else if (!m
->allow_external_unlock
)
1494 scm_i_pthread_mutex_unlock (&m
->lock
);
1495 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
1499 if (! (SCM_UNBNDP (cond
)))
1501 c
= SCM_CONDVAR_DATA (cond
);
1510 /* Change the owner of MUTEX. */
1511 remove_mutex_from_thread (mutex
, t
);
1512 m
->owner
= unblock_from_queue (m
->waiting
);
1517 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1518 scm_i_pthread_mutex_unlock (&m
->lock
);
1525 else if (err
== ETIMEDOUT
)
1530 else if (err
!= EINTR
)
1533 scm_syserror (NULL
);
1539 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1547 scm_remember_upto_here_2 (cond
, mutex
);
1549 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1558 /* Change the owner of MUTEX. */
1559 remove_mutex_from_thread (mutex
, t
);
1560 m
->owner
= unblock_from_queue (m
->waiting
);
1563 scm_i_pthread_mutex_unlock (&m
->lock
);
1570 SCM
scm_unlock_mutex (SCM mx
)
1572 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1575 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1576 (SCM mx
, SCM cond
, SCM timeout
),
1577 "Unlocks @var{mutex} if the calling thread owns the lock on "
1578 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1579 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1580 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1581 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1582 "with a call to @code{unlock-mutex}. Only the last call to "
1583 "@code{unlock-mutex} will actually unlock the mutex. ")
1584 #define FUNC_NAME s_scm_unlock_mutex_timed
1586 scm_t_timespec cwaittime
, *waittime
= NULL
;
1588 SCM_VALIDATE_MUTEX (1, mx
);
1589 if (! (SCM_UNBNDP (cond
)))
1591 SCM_VALIDATE_CONDVAR (2, cond
);
1593 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1595 to_timespec (timeout
, &cwaittime
);
1596 waittime
= &cwaittime
;
1600 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1604 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1606 "Return @code{#t} if @var{obj} is a mutex.")
1607 #define FUNC_NAME s_scm_mutex_p
1609 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1613 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1615 "Return the thread owning @var{mx}, or @code{#f}.")
1616 #define FUNC_NAME s_scm_mutex_owner
1619 fat_mutex
*m
= NULL
;
1621 SCM_VALIDATE_MUTEX (1, mx
);
1622 m
= SCM_MUTEX_DATA (mx
);
1623 scm_i_pthread_mutex_lock (&m
->lock
);
1625 scm_i_pthread_mutex_unlock (&m
->lock
);
1631 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1633 "Return the lock level of mutex @var{mx}.")
1634 #define FUNC_NAME s_scm_mutex_level
1636 SCM_VALIDATE_MUTEX (1, mx
);
1637 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1641 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1643 "Returns @code{#t} if the mutex @var{mx} is locked.")
1644 #define FUNC_NAME s_scm_mutex_locked_p
1646 SCM_VALIDATE_MUTEX (1, mx
);
1647 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1652 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1654 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1655 scm_puts_unlocked ("#<condition-variable ", port
);
1656 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1657 scm_puts_unlocked (">", port
);
1661 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1663 "Make a new condition variable.")
1664 #define FUNC_NAME s_scm_make_condition_variable
1669 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1670 c
->waiting
= SCM_EOL
;
1671 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1672 c
->waiting
= make_queue ();
1677 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1678 (SCM cv
, SCM mx
, SCM t
),
1679 "Wait until condition variable @var{cv} has been signalled. While waiting, "
1680 "mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
1681 "is locked again when this function returns. When @var{t} is given, "
1682 "it specifies a point in time where the waiting should be aborted. It "
1683 "can be either a integer as returned by @code{current-time} or a pair "
1684 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1685 "mutex is locked and @code{#f} is returned. When the condition "
1686 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1688 #define FUNC_NAME s_scm_timed_wait_condition_variable
1690 scm_t_timespec waittime
, *waitptr
= NULL
;
1692 SCM_VALIDATE_CONDVAR (1, cv
);
1693 SCM_VALIDATE_MUTEX (2, mx
);
1695 if (!SCM_UNBNDP (t
))
1697 to_timespec (t
, &waittime
);
1698 waitptr
= &waittime
;
1701 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1706 fat_cond_signal (fat_cond
*c
)
1708 unblock_from_queue (c
->waiting
);
1711 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1713 "Wake up one thread that is waiting for @var{cv}")
1714 #define FUNC_NAME s_scm_signal_condition_variable
1716 SCM_VALIDATE_CONDVAR (1, cv
);
1717 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1723 fat_cond_broadcast (fat_cond
*c
)
1725 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1729 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1731 "Wake up all threads that are waiting for @var{cv}. ")
1732 #define FUNC_NAME s_scm_broadcast_condition_variable
1734 SCM_VALIDATE_CONDVAR (1, cv
);
1735 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1740 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1742 "Return @code{#t} if @var{obj} is a condition variable.")
1743 #define FUNC_NAME s_scm_condition_variable_p
1745 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
/* Arguments for, and results of, the `select' call performed outside
   guile mode by do_std_select.  */
struct select_args
{
  int             nfds;
  fd_set         *read_fds;
  fd_set         *write_fds;
  fd_set         *except_fds;
  struct timeval *timeout;

  int             result;      /* return value of select(2) */
  int             errno_value; /* errno captured right after the call */
};
1766 do_std_select (void *args
)
1768 struct select_args
*select_args
;
1770 select_args
= (struct select_args
*) args
;
1772 select_args
->result
=
1773 select (select_args
->nfds
,
1774 select_args
->read_fds
, select_args
->write_fds
,
1775 select_args
->except_fds
, select_args
->timeout
);
1776 select_args
->errno_value
= errno
;
1781 #if !SCM_HAVE_SYS_SELECT_H
1782 static int scm_std_select (int nfds
,
1786 struct timeval
*timeout
);
1790 scm_std_select (int nfds
,
1794 struct timeval
*timeout
)
1797 int res
, eno
, wakeup_fd
;
1798 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1799 struct select_args args
;
1801 if (readfds
== NULL
)
1803 FD_ZERO (&my_readfds
);
1804 readfds
= &my_readfds
;
1807 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1810 wakeup_fd
= t
->sleep_pipe
[0];
1811 FD_SET (wakeup_fd
, readfds
);
1812 if (wakeup_fd
>= nfds
)
1816 args
.read_fds
= readfds
;
1817 args
.write_fds
= writefds
;
1818 args
.except_fds
= exceptfds
;
1819 args
.timeout
= timeout
;
1821 /* Explicitly cooperate with the GC. */
1822 scm_without_guile (do_std_select
, &args
);
1825 eno
= args
.errno_value
;
1828 scm_i_reset_sleep (t
);
1830 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1833 full_read (wakeup_fd
, &dummy
, 1);
1835 FD_CLR (wakeup_fd
, readfds
);
1847 /* Convenience API for blocking while in guile mode. */
1849 #if SCM_USE_PTHREAD_THREADS
1851 /* It seems reasonable to not run procedures related to mutex and condition
1852 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1853 without it, and (ii) the only potential gain would be GC latency. See
1854 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1855 for a discussion of the pros and cons. */
1858 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1860 int res
= scm_i_pthread_mutex_lock (mutex
);
1865 do_unlock (void *data
)
1867 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1871 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1873 scm_i_scm_pthread_mutex_lock (mutex
);
1874 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1878 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1881 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1883 t
->held_mutex
= mutex
;
1884 res
= scm_i_pthread_cond_wait (cond
, mutex
);
1885 t
->held_mutex
= NULL
;
1891 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1892 scm_i_pthread_mutex_t
*mutex
,
1893 const scm_t_timespec
*wt
)
1896 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1898 t
->held_mutex
= mutex
;
1899 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1900 t
->held_mutex
= NULL
;
1908 do_unlock_with_asyncs (void *data
)
1910 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1911 SCM_I_CURRENT_THREAD
->block_asyncs
--;
1915 scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t
*mutex
)
1917 SCM_I_CURRENT_THREAD
->block_asyncs
++;
1918 scm_i_scm_pthread_mutex_lock (mutex
);
1919 scm_dynwind_unwind_handler (do_unlock_with_asyncs
, mutex
,
1920 SCM_F_WIND_EXPLICITLY
);
1924 scm_std_usleep (unsigned long usecs
)
1927 tv
.tv_usec
= usecs
% 1000000;
1928 tv
.tv_sec
= usecs
/ 1000000;
1929 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1930 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
1934 scm_std_sleep (unsigned int secs
)
1939 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1945 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1947 "Return the thread that called this function.")
1948 #define FUNC_NAME s_scm_current_thread
1950 return SCM_I_CURRENT_THREAD
->handle
;
1955 scm_c_make_list (size_t n
, SCM fill
)
1959 res
= scm_cons (fill
, res
);
1963 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1965 "Return a list of all threads.")
1966 #define FUNC_NAME s_scm_all_threads
1968 /* We can not allocate while holding the thread_admin_mutex because
1969 of the way GC is done.
1971 int n
= thread_count
;
1973 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1975 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1977 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
1979 if (t
!= scm_i_signal_delivery_thread
)
1981 SCM_SETCAR (*l
, t
->handle
);
1982 l
= SCM_CDRLOC (*l
);
1987 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1992 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1994 "Return @code{#t} iff @var{thread} has exited.\n")
1995 #define FUNC_NAME s_scm_thread_exited_p
1997 return scm_from_bool (scm_c_thread_exited_p (thread
));
2002 scm_c_thread_exited_p (SCM thread
)
2003 #define FUNC_NAME s_scm_thread_exited_p
2006 SCM_VALIDATE_THREAD (1, thread
);
2007 t
= SCM_I_THREAD_DATA (thread
);
2012 SCM_DEFINE (scm_total_processor_count
, "total-processor-count", 0, 0, 0,
2014 "Return the total number of processors of the machine, which\n"
2015 "is guaranteed to be at least 1. A ``processor'' here is a\n"
2016 "thread execution unit, which can be either:\n\n"
2018 "@item an execution core in a (possibly multi-core) chip, in a\n"
2019 " (possibly multi- chip) module, in a single computer, or\n"
2020 "@item a thread execution unit inside a core in the case of\n"
2021 " @dfn{hyper-threaded} CPUs.\n"
2023 "Which of the two definitions is used, is unspecified.\n")
2024 #define FUNC_NAME s_scm_total_processor_count
2026 return scm_from_ulong (num_processors (NPROC_ALL
));
2030 SCM_DEFINE (scm_current_processor_count
, "current-processor-count", 0, 0, 0,
2032 "Like @code{total-processor-count}, but return the number of\n"
2033 "processors available to the current process. See\n"
2034 "@code{setaffinity} and @code{getaffinity} for more\n"
2036 #define FUNC_NAME s_scm_current_processor_count
2038 return scm_from_ulong (num_processors (NPROC_CURRENT
));
2045 static scm_i_pthread_cond_t wake_up_cond
;
2046 static int threads_initialized_p
= 0;
2049 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
2051 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
2053 static SCM dynwind_critical_section_mutex
;
2056 scm_dynwind_critical_section (SCM mutex
)
2058 if (scm_is_false (mutex
))
2059 mutex
= dynwind_critical_section_mutex
;
2060 scm_dynwind_lock_mutex (mutex
);
2061 scm_dynwind_block_asyncs ();
2064 /*** Initialization */
2066 scm_i_pthread_mutex_t scm_i_misc_mutex
;
2068 #if SCM_USE_PTHREAD_THREADS
2069 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
2073 scm_threads_prehistory (void *base
)
2075 #if SCM_USE_PTHREAD_THREADS
2076 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
2077 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
2078 PTHREAD_MUTEX_RECURSIVE
);
2081 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
2082 scm_i_pthread_mutexattr_recursive
);
2083 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
2084 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
2087 GC_new_kind (GC_new_free_list (),
2088 GC_MAKE_PROC (GC_new_proc (thread_mark
), 0),
2091 guilify_self_1 ((struct GC_stack_base
*) base
);
2094 scm_t_bits scm_tc16_thread
;
2095 scm_t_bits scm_tc16_mutex
;
2096 scm_t_bits scm_tc16_condvar
;
2101 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
2102 scm_set_smob_print (scm_tc16_thread
, thread_print
);
2104 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
2105 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
2107 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2109 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
2111 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2112 guilify_self_2 (SCM_BOOL_F
);
2113 threads_initialized_p
= 1;
2115 dynwind_critical_section_mutex
= scm_make_recursive_mutex ();
2119 scm_init_threads_default_dynamic_state ()
2121 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2122 scm_i_default_dynamic_state
= state
;
2126 scm_init_thread_procs ()
2128 #include "libguile/threads.x"
/* IA64-specific things.  */

/* On IA64 the register backing store is a second, upward-growing stack
   that the GC and continuation code must scan; these helpers locate it
   per platform.  NOTE(review): reconstructed from a garbled extraction
   of this file; verify against upstream before committing.  */
#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
# ifdef __FreeBSD__
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
		  + ctx->uc_mcontext.mc_special.ndirty);
}
# endif /* __FreeBSD__ */
#endif /* __ia64__ */