1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
34 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
43 #include "libguile/validate.h"
44 #include "libguile/root.h"
45 #include "libguile/eval.h"
46 #include "libguile/async.h"
47 #include "libguile/ports.h"
48 #include "libguile/threads.h"
49 #include "libguile/dynwind.h"
50 #include "libguile/iselect.h"
51 #include "libguile/fluids.h"
52 #include "libguile/continuations.h"
53 #include "libguile/gc.h"
54 #include "libguile/init.h"
55 #include "libguile/scmsigs.h"
56 #include "libguile/strings.h"
57 #include "libguile/weaks.h"
61 # define ETIMEDOUT WSAETIMEDOUT
65 # define pipe(fd) _pipe (fd, 256, O_BINARY)
66 #endif /* __MINGW32__ */
68 #include <full-read.h>
73 /* First some libgc shims. */
/* Make sure GC_fn_type is defined; it is missing from the public
   headers of GC 7.1 and earlier.  */
#ifndef HAVE_GC_FN_TYPE
typedef void * (* GC_fn_type) (void *);
#endif

/* Now define with_gc_active and with_gc_inactive.  */

#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))

/* We have a sufficiently new libgc (7.2 or newer).  */

/* Run FUNC on DATA with this thread marked as blocked, so the
   collector will not scan its stack while FUNC runs.  */
static void *
with_gc_inactive (GC_fn_type func, void *data)
{
  return GC_do_blocking (func, data);
}

/* Run FUNC on DATA with this thread marked active again, so the
   collector resumes scanning its stack.  */
static void *
with_gc_active (GC_fn_type func, void *data)
{
  return GC_call_with_gc_active (func, data);
}

#else

/* libgc not new enough, so never actually deactivate GC.

   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
   GC_call_with_gc_active.  */

static void *
with_gc_inactive (GC_fn_type func, void *data)
{
  return func (data);
}

static void *
with_gc_active (GC_fn_type func, void *data)
{
  return func (data);
}

#endif /* HAVE_GC_DO_BLOCKING */
124 to_timespec (SCM t
, scm_t_timespec
*waittime
)
128 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
129 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
133 double time
= scm_to_double (t
);
134 double sec
= scm_c_truncate (time
);
136 waittime
->tv_sec
= (long) sec
;
137 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
144 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
145 the risk of false references leading to unbounded retained space as
146 described in "Bounding Space Usage of Conservative Garbage Collectors",
149 /* Make an empty queue data structure.
154 return scm_cons (SCM_EOL
, SCM_EOL
);
157 /* Put T at the back of Q and return a handle that can be used with
158 remqueue to remove T from Q again.
161 enqueue (SCM q
, SCM t
)
163 SCM c
= scm_cons (t
, SCM_EOL
);
164 SCM_CRITICAL_SECTION_START
;
165 if (scm_is_null (SCM_CDR (q
)))
168 SCM_SETCDR (SCM_CAR (q
), c
);
170 SCM_CRITICAL_SECTION_END
;
174 /* Remove the element that the handle C refers to from the queue Q. C
175 must have been returned from a call to enqueue. The return value
176 is zero when the element referred to by C has already been removed.
177 Otherwise, 1 is returned.
180 remqueue (SCM q
, SCM c
)
183 SCM_CRITICAL_SECTION_START
;
184 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
186 if (scm_is_eq (p
, c
))
188 if (scm_is_eq (c
, SCM_CAR (q
)))
189 SCM_SETCAR (q
, SCM_CDR (c
));
190 SCM_SETCDR (prev
, SCM_CDR (c
));
193 SCM_SETCDR (c
, SCM_EOL
);
195 SCM_CRITICAL_SECTION_END
;
200 SCM_CRITICAL_SECTION_END
;
204 /* Remove the front-most element from the queue Q and return it.
205 Return SCM_BOOL_F when Q is empty.
211 SCM_CRITICAL_SECTION_START
;
215 SCM_CRITICAL_SECTION_END
;
220 SCM_SETCDR (q
, SCM_CDR (c
));
221 if (scm_is_null (SCM_CDR (q
)))
222 SCM_SETCAR (q
, SCM_EOL
);
223 SCM_CRITICAL_SECTION_END
;
226 SCM_SETCDR (c
, SCM_EOL
);
232 /*** Thread smob routines */
236 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
238 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
239 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
240 the struct case, hence we go via a union, and extract according to the
241 size of pthread_t. */
249 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
250 scm_i_pthread_t p
= t
->pthread
;
253 if (sizeof (p
) == sizeof (unsigned short))
255 else if (sizeof (p
) == sizeof (unsigned int))
257 else if (sizeof (p
) == sizeof (unsigned long))
262 scm_puts ("#<thread ", port
);
263 scm_uintprint (id
, 10, port
);
264 scm_puts (" (", port
);
265 scm_uintprint ((scm_t_bits
)t
, 16, port
);
266 scm_puts (")>", port
);
271 /*** Blocking on queues. */
273 /* See also scm_i_queue_async_cell for how such a block is
277 /* Put the current thread on QUEUE and go to sleep, waiting for it to
278 be woken up by a call to 'unblock_from_queue', or to be
279 interrupted. Upon return of this function, the current thread is
280 no longer on QUEUE, even when the sleep has been interrupted.
282 The caller of block_self must hold MUTEX. It will be atomically
283 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
285 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
288 When WAITTIME is not NULL, the sleep will be aborted at that time.
290 The return value of block_self is an errno value. It will be zero
291 when the sleep has been successfully completed by a call to
292 unblock_from_queue, EINTR when it has been interrupted by the
293 delivery of a system async, and ETIMEDOUT when the timeout has
296 The system asyncs themselves are not executed by block_self.
299 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
300 const scm_t_timespec
*waittime
)
302 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
306 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
311 q_handle
= enqueue (queue
, t
->handle
);
312 if (waittime
== NULL
)
313 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
315 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
317 /* When we are still on QUEUE, we have been interrupted. We
318 report this only when no other error (such as a timeout) has
321 if (remqueue (queue
, q_handle
) && err
== 0)
324 scm_i_reset_sleep (t
);
330 /* Wake up the first thread on QUEUE, if any. The awoken thread is
331 returned, or #f if the queue was empty.
334 unblock_from_queue (SCM queue
)
336 SCM thread
= dequeue (queue
);
337 if (scm_is_true (thread
))
338 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
343 /* Getting into and out of guile mode.
346 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
348 /* When thread-local storage (TLS) is available, a pointer to the
349 current-thread object is kept in TLS. Note that storing the thread-object
350 itself in TLS (rather than a pointer to some malloc'd memory) is not
351 possible since thread objects may live longer than the actual thread they
353 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
355 # define SET_CURRENT_THREAD(_t) scm_i_current_thread = (_t)
357 #else /* !SCM_HAVE_THREAD_STORAGE_CLASS */
359 /* Key used to retrieve the current thread with `pthread_getspecific ()'. */
360 scm_i_pthread_key_t scm_i_thread_key
;
362 # define SET_CURRENT_THREAD(_t) \
363 scm_i_pthread_setspecific (scm_i_thread_key, (_t))
365 #endif /* !SCM_HAVE_THREAD_STORAGE_CLASS */
368 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
369 static scm_i_thread
*all_threads
= NULL
;
370 static int thread_count
;
372 static SCM scm_i_default_dynamic_state
;
374 /* Perform first stage of thread initialisation, in non-guile mode.
377 guilify_self_1 (SCM_STACKITEM
*base
)
379 scm_i_thread
*t
= scm_gc_malloc (sizeof (scm_i_thread
), "thread");
381 t
->pthread
= scm_i_pthread_self ();
382 t
->handle
= SCM_BOOL_F
;
383 t
->result
= SCM_BOOL_F
;
384 t
->cleanup_handler
= SCM_BOOL_F
;
385 t
->mutexes
= SCM_EOL
;
386 t
->held_mutex
= NULL
;
387 t
->join_queue
= SCM_EOL
;
388 t
->dynamic_state
= SCM_BOOL_F
;
389 t
->dynwinds
= SCM_EOL
;
390 t
->active_asyncs
= SCM_EOL
;
392 t
->pending_asyncs
= 1;
393 t
->critical_section_level
= 0;
396 /* Calculate and store off the base of this thread's register
397 backing store (RBS). Unfortunately our implementation(s) of
398 scm_ia64_register_backing_store_base are only reliable for the
399 main thread. For other threads, therefore, find out the current
400 top of the RBS, and use that as a maximum. */
401 t
->register_backing_store_base
= scm_ia64_register_backing_store_base ();
406 bsp
= scm_ia64_ar_bsp (&ctx
);
407 if (t
->register_backing_store_base
> bsp
)
408 t
->register_backing_store_base
= bsp
;
411 t
->continuation_root
= SCM_EOL
;
412 t
->continuation_base
= base
;
413 scm_i_pthread_cond_init (&t
->sleep_cond
, NULL
);
414 t
->sleep_mutex
= NULL
;
415 t
->sleep_object
= SCM_BOOL_F
;
418 if (pipe (t
->sleep_pipe
) != 0)
419 /* FIXME: Error conditions during the initialization phase are handled
420 gracelessly since public functions such as `scm_init_guile ()'
421 currently have type `void'. */
424 scm_i_pthread_mutex_init (&t
->admin_mutex
, NULL
);
425 t
->current_mark_stack_ptr
= NULL
;
426 t
->current_mark_stack_limit
= NULL
;
431 SET_CURRENT_THREAD (t
);
433 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
434 t
->next_thread
= all_threads
;
437 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
440 /* Perform second stage of thread initialisation, in guile mode.
443 guilify_self_2 (SCM parent
)
445 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
449 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
451 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
452 t
->continuation_base
= t
->base
;
455 if (scm_is_true (parent
))
456 t
->dynamic_state
= scm_make_dynamic_state (parent
);
458 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
460 t
->join_queue
= make_queue ();
467 /* We implement our own mutex type since we want them to be 'fair', we
468 want to do fancy things while waiting for them (like running
469 asyncs) and we might want to add things that are nice for
474 scm_i_pthread_mutex_t lock
;
476 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
478 int recursive
; /* allow recursive locking? */
479 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
480 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
481 owned by the current thread? */
483 SCM waiting
; /* the threads waiting for this mutex. */
486 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
487 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
489 /* Perform thread tear-down, in guile mode.
492 do_thread_exit (void *v
)
494 scm_i_thread
*t
= (scm_i_thread
*) v
;
496 if (!scm_is_false (t
->cleanup_handler
))
498 SCM ptr
= t
->cleanup_handler
;
500 t
->cleanup_handler
= SCM_BOOL_F
;
501 t
->result
= scm_internal_catch (SCM_BOOL_T
,
502 (scm_t_catch_body
) scm_call_0
, ptr
,
503 scm_handle_by_message_noexit
, NULL
);
506 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
509 close (t
->sleep_pipe
[0]);
510 close (t
->sleep_pipe
[1]);
511 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
514 while (!scm_is_null (t
->mutexes
))
516 SCM mutex
= SCM_WEAK_PAIR_CAR (t
->mutexes
);
518 if (!SCM_UNBNDP (mutex
))
520 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
522 scm_i_pthread_mutex_lock (&m
->lock
);
524 /* Since MUTEX is in `t->mutexes', T must be its owner. */
525 assert (scm_is_eq (m
->owner
, t
->handle
));
527 unblock_from_queue (m
->waiting
);
529 scm_i_pthread_mutex_unlock (&m
->lock
);
532 t
->mutexes
= SCM_WEAK_PAIR_CDR (t
->mutexes
);
535 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
541 on_thread_exit (void *v
)
543 /* This handler is executed in non-guile mode. */
544 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
546 /* If this thread was cancelled while doing a cond wait, it will
547 still have a mutex locked, so we unlock it here. */
550 scm_i_pthread_mutex_unlock (t
->held_mutex
);
551 t
->held_mutex
= NULL
;
554 SET_CURRENT_THREAD (v
);
556 /* Ensure the signal handling thread has been launched, because we might be
558 scm_i_ensure_signal_delivery_thread ();
560 /* Unblocking the joining threads needs to happen in guile mode
561 since the queue is a SCM data structure. */
563 /* Note: Since `do_thread_exit ()' uses allocates memory via `libgc', we
564 assume the GC is usable at this point, and notably that thread-local
565 storage (TLS) hasn't been deallocated yet. */
568 /* Removing ourself from the list of all threads needs to happen in
569 non-guile mode since all SCM values on our stack become
570 unprotected once we are no longer in the list. */
571 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
572 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
575 *tp
= t
->next_thread
;
578 t
->next_thread
= NULL
;
584 /* If there's only one other thread, it could be the signal delivery
585 thread, so we need to notify it to shut down by closing its read pipe.
586 If it's not the signal delivery thread, then closing the read pipe isn't
588 if (thread_count
<= 1)
589 scm_i_close_signal_pipe ();
591 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
593 SET_CURRENT_THREAD (NULL
);
596 #ifndef SCM_HAVE_THREAD_STORAGE_CLASS
598 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
601 init_thread_key (void)
603 scm_i_pthread_key_create (&scm_i_thread_key
, NULL
);
608 /* Perform any initializations necessary to make the current thread
609 known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
612 BASE is the stack base to use with GC.
614 PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
615 which case the default dynamic state is used.
617 Returns zero when the thread was known to guile already; otherwise
620 Note that it could be the case that the thread was known
621 to Guile, but not in guile mode (because we are within a
622 scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
623 be sure. New threads are put into guile mode implicitly. */
626 scm_i_init_thread_for_guile (SCM_STACKITEM
*base
, SCM parent
)
628 #ifndef SCM_HAVE_THREAD_STORAGE_CLASS
629 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
632 if (SCM_I_CURRENT_THREAD
)
634 /* Thread is already known to Guile.
640 /* This thread has not been guilified yet.
643 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
644 if (scm_initialized_p
== 0)
646 /* First thread ever to enter Guile. Run the full
649 scm_i_init_guile (base
);
650 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
654 /* Guile is already initialized, but this thread enters it for
655 the first time. Only initialize this thread.
657 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
658 guilify_self_1 (base
);
659 guilify_self_2 (parent
);
665 #if SCM_USE_PTHREAD_THREADS
667 #if defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP
668 /* This method for GNU/Linux and perhaps some other systems.
669 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
670 available on them. */
671 #define HAVE_GET_THREAD_STACK_BASE
673 static SCM_STACKITEM
*
674 get_thread_stack_base ()
680 pthread_getattr_np (pthread_self (), &attr
);
681 pthread_attr_getstack (&attr
, &start
, &size
);
682 end
= (char *)start
+ size
;
684 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
685 for the main thread, but we can use scm_get_stack_base in that
689 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
690 if ((void *)&attr
< start
|| (void *)&attr
>= end
)
691 return (SCM_STACKITEM
*) GC_stackbottom
;
695 #if SCM_STACK_GROWS_UP
703 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP
704 /* This method for MacOS X.
705 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
706 but as of 2006 there's nothing obvious at apple.com. */
707 #define HAVE_GET_THREAD_STACK_BASE
708 static SCM_STACKITEM
*
709 get_thread_stack_base ()
711 return pthread_get_stackaddr_np (pthread_self ());
714 #elif defined (__MINGW32__)
715 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
716 in any thread. We don't like hard-coding the name of a system, but there
717 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
719 #define HAVE_GET_THREAD_STACK_BASE
720 static SCM_STACKITEM
*
721 get_thread_stack_base ()
723 return (SCM_STACKITEM
*) GC_stackbottom
;
726 #endif /* pthread methods of get_thread_stack_base */
728 #else /* !SCM_USE_PTHREAD_THREADS */
730 #define HAVE_GET_THREAD_STACK_BASE
732 static SCM_STACKITEM
*
733 get_thread_stack_base ()
735 return (SCM_STACKITEM
*) GC_stackbottom
;
738 #endif /* !SCM_USE_PTHREAD_THREADS */
740 #ifdef HAVE_GET_THREAD_STACK_BASE
745 scm_i_init_thread_for_guile (get_thread_stack_base (),
746 scm_i_default_dynamic_state
);
752 scm_with_guile (void *(*func
)(void *), void *data
)
754 return scm_i_with_guile_and_parent (func
, data
,
755 scm_i_default_dynamic_state
);
758 SCM_UNUSED
static void
759 scm_leave_guile_cleanup (void *x
)
761 on_thread_exit (SCM_I_CURRENT_THREAD
);
764 struct with_guile_trampoline_args
771 with_guile_trampoline (void *data
)
773 struct with_guile_trampoline_args
*args
= data
;
775 return scm_c_with_continuation_barrier (args
->func
, args
->data
);
779 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
784 SCM_STACKITEM base_item
;
786 new_thread
= scm_i_init_thread_for_guile (&base_item
, parent
);
787 t
= SCM_I_CURRENT_THREAD
;
790 /* We are in Guile mode. */
791 assert (t
->guile_mode
);
793 scm_i_pthread_cleanup_push (scm_leave_guile_cleanup
, NULL
);
794 res
= scm_c_with_continuation_barrier (func
, data
);
795 scm_i_pthread_cleanup_pop (0);
797 /* Leave Guile mode. */
800 else if (t
->guile_mode
)
802 /* Already in Guile mode. */
803 res
= scm_c_with_continuation_barrier (func
, data
);
807 struct with_guile_trampoline_args args
;
811 /* We are not in Guile mode, either because we are not within a
812 scm_with_guile, or because we are within a scm_without_guile.
814 This call to scm_with_guile() could happen from anywhere on the
815 stack, and in particular lower on the stack than when it was
816 when this thread was first guilified. Thus, `base' must be
818 #if SCM_STACK_GROWS_UP
819 if (SCM_STACK_PTR (&base_item
) < t
->base
)
820 t
->base
= SCM_STACK_PTR (&base_item
);
822 if (SCM_STACK_PTR (&base_item
) > t
->base
)
823 t
->base
= SCM_STACK_PTR (&base_item
);
827 res
= with_gc_active (with_guile_trampoline
, &args
);
834 scm_without_guile (void *(*func
)(void *), void *data
)
837 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
841 SCM_I_CURRENT_THREAD
->guile_mode
= 0;
842 result
= with_gc_inactive (func
, data
);
843 SCM_I_CURRENT_THREAD
->guile_mode
= 1;
846 /* Otherwise we're not in guile mode, so nothing to do. */
847 result
= func (data
);
853 /*** Thread creation */
860 scm_i_pthread_mutex_t mutex
;
861 scm_i_pthread_cond_t cond
;
865 really_launch (void *d
)
867 launch_data
*data
= (launch_data
*)d
;
868 SCM thunk
= data
->thunk
, handler
= data
->handler
;
871 t
= SCM_I_CURRENT_THREAD
;
873 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
874 data
->thread
= scm_current_thread ();
875 scm_i_pthread_cond_signal (&data
->cond
);
876 scm_i_pthread_mutex_unlock (&data
->mutex
);
878 if (SCM_UNBNDP (handler
))
879 t
->result
= scm_call_0 (thunk
);
881 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
883 /* Trigger a call to `on_thread_exit ()'. */
890 launch_thread (void *d
)
892 launch_data
*data
= (launch_data
*)d
;
893 scm_i_pthread_detach (scm_i_pthread_self ());
894 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
898 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
899 (SCM thunk
, SCM handler
),
900 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
901 "returning a new thread object representing the thread. The procedure\n"
902 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
904 "When @var{handler} is specified, then @var{thunk} is called from\n"
905 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
906 "handler. This catch is established inside the continuation barrier.\n"
908 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
909 "the @emph{exit value} of the thread and the thread is terminated.")
910 #define FUNC_NAME s_scm_call_with_new_thread
916 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
917 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
918 handler
, SCM_ARG2
, FUNC_NAME
);
920 data
.parent
= scm_current_dynamic_state ();
922 data
.handler
= handler
;
923 data
.thread
= SCM_BOOL_F
;
924 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
925 scm_i_pthread_cond_init (&data
.cond
, NULL
);
927 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
928 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
931 scm_i_pthread_mutex_unlock (&data
.mutex
);
935 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
936 scm_i_pthread_mutex_unlock (&data
.mutex
);
944 scm_t_catch_body body
;
946 scm_t_catch_handler handler
;
949 scm_i_pthread_mutex_t mutex
;
950 scm_i_pthread_cond_t cond
;
954 really_spawn (void *d
)
956 spawn_data
*data
= (spawn_data
*)d
;
957 scm_t_catch_body body
= data
->body
;
958 void *body_data
= data
->body_data
;
959 scm_t_catch_handler handler
= data
->handler
;
960 void *handler_data
= data
->handler_data
;
961 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
963 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
964 data
->thread
= scm_current_thread ();
965 scm_i_pthread_cond_signal (&data
->cond
);
966 scm_i_pthread_mutex_unlock (&data
->mutex
);
969 t
->result
= body (body_data
);
971 t
->result
= scm_internal_catch (SCM_BOOL_T
,
973 handler
, handler_data
);
979 spawn_thread (void *d
)
981 spawn_data
*data
= (spawn_data
*)d
;
982 scm_i_pthread_detach (scm_i_pthread_self ());
983 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
988 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
989 scm_t_catch_handler handler
, void *handler_data
)
995 data
.parent
= scm_current_dynamic_state ();
997 data
.body_data
= body_data
;
998 data
.handler
= handler
;
999 data
.handler_data
= handler_data
;
1000 data
.thread
= SCM_BOOL_F
;
1001 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
1002 scm_i_pthread_cond_init (&data
.cond
, NULL
);
1004 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
1005 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
1008 scm_i_pthread_mutex_unlock (&data
.mutex
);
1010 scm_syserror (NULL
);
1012 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
1013 scm_i_pthread_mutex_unlock (&data
.mutex
);
1018 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
1020 "Move the calling thread to the end of the scheduling queue.")
1021 #define FUNC_NAME s_scm_yield
1023 return scm_from_bool (scm_i_sched_yield ());
1027 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
1029 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
1030 "cannot be the current thread, and if @var{thread} has already terminated or "
1031 "been signaled to terminate, this function is a no-op.")
1032 #define FUNC_NAME s_scm_cancel_thread
1034 scm_i_thread
*t
= NULL
;
1036 SCM_VALIDATE_THREAD (1, thread
);
1037 t
= SCM_I_THREAD_DATA (thread
);
1038 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1042 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1043 scm_i_pthread_cancel (t
->pthread
);
1046 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1048 return SCM_UNSPECIFIED
;
1052 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1053 (SCM thread
, SCM proc
),
1054 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1055 "This handler will be called when the thread exits.")
1056 #define FUNC_NAME s_scm_set_thread_cleanup_x
1060 SCM_VALIDATE_THREAD (1, thread
);
1061 if (!scm_is_false (proc
))
1062 SCM_VALIDATE_THUNK (2, proc
);
1064 t
= SCM_I_THREAD_DATA (thread
);
1065 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1067 if (!(t
->exited
|| t
->canceled
))
1068 t
->cleanup_handler
= proc
;
1070 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1072 return SCM_UNSPECIFIED
;
1076 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1078 "Return the cleanup handler installed for the thread @var{thread}.")
1079 #define FUNC_NAME s_scm_thread_cleanup
1084 SCM_VALIDATE_THREAD (1, thread
);
1086 t
= SCM_I_THREAD_DATA (thread
);
1087 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1088 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1089 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1095 SCM
scm_join_thread (SCM thread
)
1097 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1100 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1101 (SCM thread
, SCM timeout
, SCM timeoutval
),
1102 "Suspend execution of the calling thread until the target @var{thread} "
1103 "terminates, unless the target @var{thread} has already terminated. ")
1104 #define FUNC_NAME s_scm_join_thread_timed
1107 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1108 SCM res
= SCM_BOOL_F
;
1110 if (! (SCM_UNBNDP (timeoutval
)))
1113 SCM_VALIDATE_THREAD (1, thread
);
1114 if (scm_is_eq (scm_current_thread (), thread
))
1115 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1117 t
= SCM_I_THREAD_DATA (thread
);
1118 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1120 if (! SCM_UNBNDP (timeout
))
1122 to_timespec (timeout
, &ctimeout
);
1123 timeout_ptr
= &ctimeout
;
1132 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1142 else if (err
== ETIMEDOUT
)
1145 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1147 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1149 /* Check for exit again, since we just released and
1150 reacquired the admin mutex, before the next block_self
1151 call (which would block forever if t has already
1161 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1167 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1169 "Return @code{#t} if @var{obj} is a thread.")
1170 #define FUNC_NAME s_scm_thread_p
1172 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1178 fat_mutex_free (SCM mx
)
1180 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1181 scm_i_pthread_mutex_destroy (&m
->lock
);
1186 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1188 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1189 scm_puts ("#<mutex ", port
);
1190 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1191 scm_puts (">", port
);
1196 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1201 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1202 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
1203 m
->owner
= SCM_BOOL_F
;
1206 m
->recursive
= recursive
;
1207 m
->unchecked_unlock
= unchecked_unlock
;
1208 m
->allow_external_unlock
= external_unlock
;
1210 m
->waiting
= SCM_EOL
;
1211 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1212 m
->waiting
= make_queue ();
1216 SCM
scm_make_mutex (void)
1218 return scm_make_mutex_with_flags (SCM_EOL
);
1221 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1222 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1223 SCM_SYMBOL (recursive_sym
, "recursive");
1225 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1227 "Create a new mutex. ")
1228 #define FUNC_NAME s_scm_make_mutex_with_flags
1230 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1233 while (! scm_is_null (ptr
))
1235 SCM flag
= SCM_CAR (ptr
);
1236 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1237 unchecked_unlock
= 1;
1238 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1239 external_unlock
= 1;
1240 else if (scm_is_eq (flag
, recursive_sym
))
1243 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1244 ptr
= SCM_CDR (ptr
);
1246 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1250 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1252 "Create a new recursive mutex. ")
1253 #define FUNC_NAME s_scm_make_recursive_mutex
1255 return make_fat_mutex (1, 0, 0);
1259 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1262 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1264 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1266 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1267 SCM err
= SCM_BOOL_F
;
1269 struct timeval current_time
;
1271 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1277 m
->owner
= new_owner
;
1280 if (SCM_I_IS_THREAD (new_owner
))
1282 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1283 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1285 /* Only keep a weak reference to MUTEX so that it's not
1286 retained when not referenced elsewhere (bug #27450).
1287 The weak pair itself is eventually removed when MUTEX
1288 is unlocked. Note that `t->mutexes' lists mutexes
1289 currently held by T, so it should be small. */
1290 t
->mutexes
= scm_weak_car_pair (mutex
, t
->mutexes
);
1292 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1297 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1299 m
->owner
= new_owner
;
1300 err
= scm_cons (scm_abandoned_mutex_error_key
,
1301 scm_from_locale_string ("lock obtained on abandoned "
1306 else if (scm_is_eq (m
->owner
, new_owner
))
1315 err
= scm_cons (scm_misc_error_key
,
1316 scm_from_locale_string ("mutex already locked "
1324 if (timeout
!= NULL
)
1326 gettimeofday (¤t_time
, NULL
);
1327 if (current_time
.tv_sec
> timeout
->tv_sec
||
1328 (current_time
.tv_sec
== timeout
->tv_sec
&&
1329 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1335 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1336 scm_i_pthread_mutex_unlock (&m
->lock
);
1338 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1341 scm_i_pthread_mutex_unlock (&m
->lock
);
1345 SCM
scm_lock_mutex (SCM mx
)
1347 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1350 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1351 (SCM m
, SCM timeout
, SCM owner
),
1352 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1353 "blocks until the mutex becomes available. The function returns when "
1354 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1355 "a thread already owns will succeed right away and will not block the "
1356 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1357 #define FUNC_NAME s_scm_lock_mutex_timed
1361 scm_t_timespec cwaittime
, *waittime
= NULL
;
1363 SCM_VALIDATE_MUTEX (1, m
);
1365 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1367 to_timespec (timeout
, &cwaittime
);
1368 waittime
= &cwaittime
;
1371 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1372 if (!scm_is_false (exception
))
1373 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1374 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1379 scm_dynwind_lock_mutex (SCM mutex
)
1381 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM
))scm_unlock_mutex
, mutex
,
1382 SCM_F_WIND_EXPLICITLY
);
1383 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM
))scm_lock_mutex
, mutex
,
1384 SCM_F_WIND_EXPLICITLY
);
1387 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1389 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1390 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1391 #define FUNC_NAME s_scm_try_mutex
1395 scm_t_timespec cwaittime
, *waittime
= NULL
;
1397 SCM_VALIDATE_MUTEX (1, mutex
);
1399 to_timespec (scm_from_int(0), &cwaittime
);
1400 waittime
= &cwaittime
;
1402 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1403 if (!scm_is_false (exception
))
1404 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1405 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1409 /*** Fat condition variables */
1412 scm_i_pthread_mutex_t lock
;
1413 SCM waiting
; /* the threads waiting for this condition. */
1416 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1417 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
1420 fat_mutex_unlock (SCM mutex
, SCM cond
,
1421 const scm_t_timespec
*waittime
, int relock
)
1424 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1426 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1427 int err
= 0, ret
= 0;
1429 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1433 if (!scm_is_eq (owner
, t
->handle
))
1437 if (!m
->unchecked_unlock
)
1439 scm_i_pthread_mutex_unlock (&m
->lock
);
1440 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1444 else if (!m
->allow_external_unlock
)
1446 scm_i_pthread_mutex_unlock (&m
->lock
);
1447 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
1451 if (! (SCM_UNBNDP (cond
)))
1453 c
= SCM_CONDVAR_DATA (cond
);
1462 /* Change the owner of MUTEX. */
1463 t
->mutexes
= scm_delq_x (mutex
, t
->mutexes
);
1464 m
->owner
= unblock_from_queue (m
->waiting
);
1469 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1470 scm_i_pthread_mutex_unlock (&m
->lock
);
1477 else if (err
== ETIMEDOUT
)
1482 else if (err
!= EINTR
)
1485 scm_syserror (NULL
);
1491 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
1499 scm_remember_upto_here_2 (cond
, mutex
);
1501 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1510 /* Change the owner of MUTEX. */
1511 t
->mutexes
= scm_delq_x (mutex
, t
->mutexes
);
1512 m
->owner
= unblock_from_queue (m
->waiting
);
1515 scm_i_pthread_mutex_unlock (&m
->lock
);
1522 SCM
scm_unlock_mutex (SCM mx
)
1524 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1527 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1528 (SCM mx
, SCM cond
, SCM timeout
),
1529 "Unlocks @var{mutex} if the calling thread owns the lock on "
1530 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1531 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1532 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1533 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1534 "with a call to @code{unlock-mutex}. Only the last call to "
1535 "@code{unlock-mutex} will actually unlock the mutex. ")
1536 #define FUNC_NAME s_scm_unlock_mutex_timed
1538 scm_t_timespec cwaittime
, *waittime
= NULL
;
1540 SCM_VALIDATE_MUTEX (1, mx
);
1541 if (! (SCM_UNBNDP (cond
)))
1543 SCM_VALIDATE_CONDVAR (2, cond
);
1545 if (! (SCM_UNBNDP (timeout
)))
1547 to_timespec (timeout
, &cwaittime
);
1548 waittime
= &cwaittime
;
1552 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1556 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1558 "Return @code{#t} if @var{obj} is a mutex.")
1559 #define FUNC_NAME s_scm_mutex_p
1561 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1565 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1567 "Return the thread owning @var{mx}, or @code{#f}.")
1568 #define FUNC_NAME s_scm_mutex_owner
1571 fat_mutex
*m
= NULL
;
1573 SCM_VALIDATE_MUTEX (1, mx
);
1574 m
= SCM_MUTEX_DATA (mx
);
1575 scm_i_pthread_mutex_lock (&m
->lock
);
1577 scm_i_pthread_mutex_unlock (&m
->lock
);
1583 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1585 "Return the lock level of mutex @var{mx}.")
1586 #define FUNC_NAME s_scm_mutex_level
1588 SCM_VALIDATE_MUTEX (1, mx
);
1589 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1593 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1595 "Returns @code{#t} if the mutex @var{mx} is locked.")
1596 #define FUNC_NAME s_scm_mutex_locked_p
1598 SCM_VALIDATE_MUTEX (1, mx
);
1599 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1604 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1606 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1607 scm_puts ("#<condition-variable ", port
);
1608 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1609 scm_puts (">", port
);
1613 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1615 "Make a new condition variable.")
1616 #define FUNC_NAME s_scm_make_condition_variable
1621 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1622 c
->waiting
= SCM_EOL
;
1623 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1624 c
->waiting
= make_queue ();
1629 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1630 (SCM cv
, SCM mx
, SCM t
),
1631 "Wait until @var{cond-var} has been signalled. While waiting, "
1632 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1633 "is locked again when this function returns. When @var{time} is given, "
1634 "it specifies a point in time where the waiting should be aborted. It "
1635 "can be either a integer as returned by @code{current-time} or a pair "
1636 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1637 "mutex is locked and @code{#f} is returned. When the condition "
1638 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1640 #define FUNC_NAME s_scm_timed_wait_condition_variable
1642 scm_t_timespec waittime
, *waitptr
= NULL
;
1644 SCM_VALIDATE_CONDVAR (1, cv
);
1645 SCM_VALIDATE_MUTEX (2, mx
);
1647 if (!SCM_UNBNDP (t
))
1649 to_timespec (t
, &waittime
);
1650 waitptr
= &waittime
;
1653 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1658 fat_cond_signal (fat_cond
*c
)
1660 unblock_from_queue (c
->waiting
);
1663 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1665 "Wake up one thread that is waiting for @var{cv}")
1666 #define FUNC_NAME s_scm_signal_condition_variable
1668 SCM_VALIDATE_CONDVAR (1, cv
);
1669 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1675 fat_cond_broadcast (fat_cond
*c
)
1677 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1681 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1683 "Wake up all threads that are waiting for @var{cv}. ")
1684 #define FUNC_NAME s_scm_broadcast_condition_variable
1686 SCM_VALIDATE_CONDVAR (1, cv
);
1687 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1692 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1694 "Return @code{#t} if @var{obj} is a condition variable.")
1695 #define FUNC_NAME s_scm_condition_variable_p
1697 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1708 SELECT_TYPE
*read_fds
;
1709 SELECT_TYPE
*write_fds
;
1710 SELECT_TYPE
*except_fds
;
1711 struct timeval
*timeout
;
1718 do_std_select (void *args
)
1720 struct select_args
*select_args
;
1722 select_args
= (struct select_args
*) args
;
1724 select_args
->result
=
1725 select (select_args
->nfds
,
1726 select_args
->read_fds
, select_args
->write_fds
,
1727 select_args
->except_fds
, select_args
->timeout
);
1728 select_args
->errno_value
= errno
;
1734 scm_std_select (int nfds
,
1735 SELECT_TYPE
*readfds
,
1736 SELECT_TYPE
*writefds
,
1737 SELECT_TYPE
*exceptfds
,
1738 struct timeval
*timeout
)
1741 int res
, eno
, wakeup_fd
;
1742 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1743 struct select_args args
;
1745 if (readfds
== NULL
)
1747 FD_ZERO (&my_readfds
);
1748 readfds
= &my_readfds
;
1751 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
1754 wakeup_fd
= t
->sleep_pipe
[0];
1755 FD_SET (wakeup_fd
, readfds
);
1756 if (wakeup_fd
>= nfds
)
1760 args
.read_fds
= readfds
;
1761 args
.write_fds
= writefds
;
1762 args
.except_fds
= exceptfds
;
1763 args
.timeout
= timeout
;
1765 /* Explicitly cooperate with the GC. */
1766 scm_without_guile (do_std_select
, &args
);
1769 eno
= args
.errno_value
;
1772 scm_i_reset_sleep (t
);
1774 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1777 full_read (wakeup_fd
, &dummy
, 1);
1779 FD_CLR (wakeup_fd
, readfds
);
1791 /* Convenience API for blocking while in guile mode. */
1793 #if SCM_USE_PTHREAD_THREADS
1795 /* It seems reasonable to not run procedures related to mutex and condition
1796 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1797 without it, and (ii) the only potential gain would be GC latency. See
1798 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1799 for a discussion of the pros and cons. */
1802 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1804 int res
= scm_i_pthread_mutex_lock (mutex
);
1809 do_unlock (void *data
)
1811 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1815 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1817 scm_i_scm_pthread_mutex_lock (mutex
);
1818 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1822 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1825 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1827 t
->held_mutex
= mutex
;
1828 res
= scm_i_pthread_cond_wait (cond
, mutex
);
1829 t
->held_mutex
= NULL
;
1835 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1836 scm_i_pthread_mutex_t
*mutex
,
1837 const scm_t_timespec
*wt
)
1840 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1842 t
->held_mutex
= mutex
;
1843 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1844 t
->held_mutex
= NULL
;
1852 scm_std_usleep (unsigned long usecs
)
1855 tv
.tv_usec
= usecs
% 1000000;
1856 tv
.tv_sec
= usecs
/ 1000000;
1857 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1858 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
1862 scm_std_sleep (unsigned int secs
)
1867 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1873 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1875 "Return the thread that called this function.")
1876 #define FUNC_NAME s_scm_current_thread
1878 return SCM_I_CURRENT_THREAD
->handle
;
1883 scm_c_make_list (size_t n
, SCM fill
)
1887 res
= scm_cons (fill
, res
);
1891 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1893 "Return a list of all threads.")
1894 #define FUNC_NAME s_scm_all_threads
1896 /* We can not allocate while holding the thread_admin_mutex because
1897 of the way GC is done.
1899 int n
= thread_count
;
1901 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1903 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1905 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
1907 if (t
!= scm_i_signal_delivery_thread
)
1909 SCM_SETCAR (*l
, t
->handle
);
1910 l
= SCM_CDRLOC (*l
);
1915 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1920 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1922 "Return @code{#t} iff @var{thread} has exited.\n")
1923 #define FUNC_NAME s_scm_thread_exited_p
1925 return scm_from_bool (scm_c_thread_exited_p (thread
));
1930 scm_c_thread_exited_p (SCM thread
)
1931 #define FUNC_NAME s_scm_thread_exited_p
1934 SCM_VALIDATE_THREAD (1, thread
);
1935 t
= SCM_I_THREAD_DATA (thread
);
1940 static scm_i_pthread_cond_t wake_up_cond
;
1941 static int threads_initialized_p
= 0;
1944 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
1946 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
1948 static SCM dynwind_critical_section_mutex
;
1951 scm_dynwind_critical_section (SCM mutex
)
1953 if (scm_is_false (mutex
))
1954 mutex
= dynwind_critical_section_mutex
;
1955 scm_dynwind_lock_mutex (mutex
);
1956 scm_dynwind_block_asyncs ();
1959 /*** Initialization */
1961 scm_i_pthread_mutex_t scm_i_misc_mutex
;
1963 #if SCM_USE_PTHREAD_THREADS
1964 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
1968 scm_threads_prehistory (SCM_STACKITEM
*base
)
1970 #if SCM_USE_PTHREAD_THREADS
1971 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
1972 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
1973 PTHREAD_MUTEX_RECURSIVE
);
1976 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
1977 scm_i_pthread_mutexattr_recursive
);
1978 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
1979 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
1981 guilify_self_1 (base
);
1984 scm_t_bits scm_tc16_thread
;
1985 scm_t_bits scm_tc16_mutex
;
1986 scm_t_bits scm_tc16_condvar
;
1991 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
1992 scm_set_smob_print (scm_tc16_thread
, thread_print
);
1994 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
1995 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
1996 scm_set_smob_free (scm_tc16_mutex
, fat_mutex_free
);
1998 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
2000 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
2002 scm_i_default_dynamic_state
= SCM_BOOL_F
;
2003 guilify_self_2 (SCM_BOOL_F
);
2004 threads_initialized_p
= 1;
2006 dynwind_critical_section_mutex
= scm_make_recursive_mutex ();
2010 scm_init_threads_default_dynamic_state ()
2012 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
2013 scm_i_default_dynamic_state
= state
;
2017 scm_init_thread_procs ()
2019 #include "libguile/threads.x"
2023 /* IA64-specific things. */
2027 # include <sys/param.h>
2028 # include <sys/pstat.h>
2030 scm_ia64_register_backing_store_base (void)
2032 struct pst_vm_status vm_status
;
2034 while (pstat_getprocvm (&vm_status
, sizeof (vm_status
), 0, i
++) == 1)
2035 if (vm_status
.pst_type
== PS_RSESTACK
)
2036 return (void *) vm_status
.pst_vaddr
;
2040 scm_ia64_ar_bsp (const void *ctx
)
2043 __uc_get_ar_bsp (ctx
, &bsp
);
2044 return (void *) bsp
;
2048 # include <ucontext.h>
2050 scm_ia64_register_backing_store_base (void)
2052 extern void *__libc_ia64_register_backing_store_base
;
2053 return __libc_ia64_register_backing_store_base
;
2056 scm_ia64_ar_bsp (const void *opaque
)
2058 const ucontext_t
*ctx
= opaque
;
2059 return (void *) ctx
->uc_mcontext
.sc_ar_bsp
;
2062 #endif /* __ia64__ */