1 /* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public License
5 * as published by the Free Software Foundation; either version 3 of
6 * the License, or (at your option) any later version.
8 * This library is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
25 #include "libguile/bdw-gc.h"
26 #include "libguile/_scm.h"
35 #include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
42 #include "libguile/validate.h"
43 #include "libguile/root.h"
44 #include "libguile/eval.h"
45 #include "libguile/async.h"
46 #include "libguile/ports.h"
47 #include "libguile/threads.h"
48 #include "libguile/dynwind.h"
49 #include "libguile/iselect.h"
50 #include "libguile/fluids.h"
51 #include "libguile/continuations.h"
52 #include "libguile/gc.h"
53 #include "libguile/init.h"
54 #include "libguile/scmsigs.h"
55 #include "libguile/strings.h"
56 #include "libguile/weaks.h"
60 # define ETIMEDOUT WSAETIMEDOUT
64 # define pipe(fd) _pipe (fd, 256, O_BINARY)
65 #endif /* __MINGW32__ */
67 #include <full-read.h>
71 to_timespec (SCM t
, scm_t_timespec
*waittime
)
75 waittime
->tv_sec
= scm_to_ulong (SCM_CAR (t
));
76 waittime
->tv_nsec
= scm_to_ulong (SCM_CDR (t
)) * 1000;
80 double time
= scm_to_double (t
);
81 double sec
= scm_c_truncate (time
);
83 waittime
->tv_sec
= (long) sec
;
84 waittime
->tv_nsec
= (long) ((time
- sec
) * 1000000000);
91 /* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
92 the risk of false references leading to unbounded retained space as
93 described in "Bounding Space Usage of Conservative Garbage Collectors",
96 /* Make an empty queue data structure.
101 return scm_cons (SCM_EOL
, SCM_EOL
);
104 /* Put T at the back of Q and return a handle that can be used with
105 remqueue to remove T from Q again.
108 enqueue (SCM q
, SCM t
)
110 SCM c
= scm_cons (t
, SCM_EOL
);
111 SCM_CRITICAL_SECTION_START
;
112 if (scm_is_null (SCM_CDR (q
)))
115 SCM_SETCDR (SCM_CAR (q
), c
);
117 SCM_CRITICAL_SECTION_END
;
121 /* Remove the element that the handle C refers to from the queue Q. C
122 must have been returned from a call to enqueue. The return value
123 is zero when the element referred to by C has already been removed.
124 Otherwise, 1 is returned.
127 remqueue (SCM q
, SCM c
)
130 SCM_CRITICAL_SECTION_START
;
131 for (p
= SCM_CDR (q
); !scm_is_null (p
); p
= SCM_CDR (p
))
133 if (scm_is_eq (p
, c
))
135 if (scm_is_eq (c
, SCM_CAR (q
)))
136 SCM_SETCAR (q
, SCM_CDR (c
));
137 SCM_SETCDR (prev
, SCM_CDR (c
));
140 SCM_SETCDR (c
, SCM_EOL
);
142 SCM_CRITICAL_SECTION_END
;
147 SCM_CRITICAL_SECTION_END
;
151 /* Remove the front-most element from the queue Q and return it.
152 Return SCM_BOOL_F when Q is empty.
158 SCM_CRITICAL_SECTION_START
;
162 SCM_CRITICAL_SECTION_END
;
167 SCM_SETCDR (q
, SCM_CDR (c
));
168 if (scm_is_null (SCM_CDR (q
)))
169 SCM_SETCAR (q
, SCM_EOL
);
170 SCM_CRITICAL_SECTION_END
;
173 SCM_SETCDR (c
, SCM_EOL
);
179 /*** Thread smob routines */
183 thread_print (SCM exp
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
185 /* On a Gnu system pthread_t is an unsigned long, but on mingw it's a
186 struct. A cast like "(unsigned long) t->pthread" is a syntax error in
187 the struct case, hence we go via a union, and extract according to the
188 size of pthread_t. */
196 scm_i_thread
*t
= SCM_I_THREAD_DATA (exp
);
197 scm_i_pthread_t p
= t
->pthread
;
200 if (sizeof (p
) == sizeof (unsigned short))
202 else if (sizeof (p
) == sizeof (unsigned int))
204 else if (sizeof (p
) == sizeof (unsigned long))
209 scm_puts ("#<thread ", port
);
210 scm_uintprint (id
, 10, port
);
211 scm_puts (" (", port
);
212 scm_uintprint ((scm_t_bits
)t
, 16, port
);
213 scm_puts (")>", port
);
218 /*** Blocking on queues. */
220 /* See also scm_i_queue_async_cell for how such a block is
224 /* Put the current thread on QUEUE and go to sleep, waiting for it to
225 be woken up by a call to 'unblock_from_queue', or to be
226 interrupted. Upon return of this function, the current thread is
227 no longer on QUEUE, even when the sleep has been interrupted.
229 The caller of block_self must hold MUTEX. It will be atomically
230 unlocked while sleeping, just as with scm_i_pthread_cond_wait.
232 SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
235 When WAITTIME is not NULL, the sleep will be aborted at that time.
237 The return value of block_self is an errno value. It will be zero
238 when the sleep has been successfully completed by a call to
239 unblock_from_queue, EINTR when it has been interrupted by the
240 delivery of a system async, and ETIMEDOUT when the timeout has
243 The system asyncs themselves are not executed by block_self.
246 block_self (SCM queue
, SCM sleep_object
, scm_i_pthread_mutex_t
*mutex
,
247 const scm_t_timespec
*waittime
)
249 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
253 if (scm_i_setup_sleep (t
, sleep_object
, mutex
, -1))
258 q_handle
= enqueue (queue
, t
->handle
);
259 if (waittime
== NULL
)
260 err
= scm_i_scm_pthread_cond_wait (&t
->sleep_cond
, mutex
);
262 err
= scm_i_scm_pthread_cond_timedwait (&t
->sleep_cond
, mutex
, waittime
);
264 /* When we are still on QUEUE, we have been interrupted. We
265 report this only when no other error (such as a timeout) has
268 if (remqueue (queue
, q_handle
) && err
== 0)
271 scm_i_reset_sleep (t
);
277 /* Wake up the first thread on QUEUE, if any. The awoken thread is
278 returned, or #f if the queue was empty.
281 unblock_from_queue (SCM queue
)
283 SCM thread
= dequeue (queue
);
284 if (scm_is_true (thread
))
285 scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread
)->sleep_cond
);
290 /* Getting into and out of guile mode.
293 #ifdef SCM_HAVE_THREAD_STORAGE_CLASS
295 /* When thread-local storage (TLS) is available, a pointer to the
296 current-thread object is kept in TLS. Note that storing the thread-object
297 itself in TLS (rather than a pointer to some malloc'd memory) is not
298 possible since thread objects may live longer than the actual thread they
300 SCM_THREAD_LOCAL scm_i_thread
*scm_i_current_thread
= NULL
;
302 # define SET_CURRENT_THREAD(_t) scm_i_current_thread = (_t)
304 #else /* !SCM_HAVE_THREAD_STORAGE_CLASS */
306 /* Key used to retrieve the current thread with `pthread_getspecific ()'. */
307 scm_i_pthread_key_t scm_i_thread_key
;
309 # define SET_CURRENT_THREAD(_t) \
310 scm_i_pthread_setspecific (scm_i_thread_key, (_t))
312 #endif /* !SCM_HAVE_THREAD_STORAGE_CLASS */
315 static scm_i_pthread_mutex_t thread_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
316 static scm_i_thread
*all_threads
= NULL
;
317 static int thread_count
;
319 static SCM scm_i_default_dynamic_state
;
321 /* Perform first stage of thread initialisation, in non-guile mode.
324 guilify_self_1 (SCM_STACKITEM
*base
)
326 scm_i_thread
*t
= scm_gc_malloc (sizeof (scm_i_thread
), "thread");
328 t
->pthread
= scm_i_pthread_self ();
329 t
->handle
= SCM_BOOL_F
;
330 t
->result
= SCM_BOOL_F
;
331 t
->cleanup_handler
= SCM_BOOL_F
;
332 t
->mutexes
= SCM_EOL
;
333 t
->held_mutex
= NULL
;
334 t
->join_queue
= SCM_EOL
;
335 t
->dynamic_state
= SCM_BOOL_F
;
336 t
->dynwinds
= SCM_EOL
;
337 t
->active_asyncs
= SCM_EOL
;
339 t
->pending_asyncs
= 1;
340 t
->critical_section_level
= 0;
341 t
->last_debug_frame
= NULL
;
344 /* Calculate and store off the base of this thread's register
345 backing store (RBS). Unfortunately our implementation(s) of
346 scm_ia64_register_backing_store_base are only reliable for the
347 main thread. For other threads, therefore, find out the current
348 top of the RBS, and use that as a maximum. */
349 t
->register_backing_store_base
= scm_ia64_register_backing_store_base ();
354 bsp
= scm_ia64_ar_bsp (&ctx
);
355 if (t
->register_backing_store_base
> bsp
)
356 t
->register_backing_store_base
= bsp
;
359 t
->continuation_root
= SCM_EOL
;
360 t
->continuation_base
= base
;
361 scm_i_pthread_cond_init (&t
->sleep_cond
, NULL
);
362 t
->sleep_mutex
= NULL
;
363 t
->sleep_object
= SCM_BOOL_F
;
366 if (pipe (t
->sleep_pipe
) != 0)
367 /* FIXME: Error conditions during the initialization phase are handled
368 gracelessly since public functions such as `scm_init_guile ()'
369 currently have type `void'. */
372 scm_i_pthread_mutex_init (&t
->admin_mutex
, NULL
);
373 t
->current_mark_stack_ptr
= NULL
;
374 t
->current_mark_stack_limit
= NULL
;
379 SET_CURRENT_THREAD (t
);
381 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
382 t
->next_thread
= all_threads
;
385 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
388 /* Perform second stage of thread initialisation, in guile mode.
391 guilify_self_2 (SCM parent
)
393 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
397 SCM_NEWSMOB (t
->handle
, scm_tc16_thread
, t
);
399 t
->continuation_root
= scm_cons (t
->handle
, SCM_EOL
);
400 t
->continuation_base
= t
->base
;
403 if (scm_is_true (parent
))
404 t
->dynamic_state
= scm_make_dynamic_state (parent
);
406 t
->dynamic_state
= scm_i_make_initial_dynamic_state ();
408 t
->join_queue
= make_queue ();
415 /* We implement our own mutex type since we want them to be 'fair', we
416 want to do fancy things while waiting for them (like running
417 asyncs) and we might want to add things that are nice for
422 scm_i_pthread_mutex_t lock
;
424 int level
; /* how much the owner owns us. <= 1 for non-recursive mutexes */
426 int recursive
; /* allow recursive locking? */
427 int unchecked_unlock
; /* is it an error to unlock an unlocked mutex? */
428 int allow_external_unlock
; /* is it an error to unlock a mutex that is not
429 owned by the current thread? */
431 SCM waiting
; /* the threads waiting for this mutex. */
434 #define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
435 #define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
437 /* Perform thread tear-down, in guile mode.
440 do_thread_exit (void *v
)
442 scm_i_thread
*t
= (scm_i_thread
*) v
;
444 if (!scm_is_false (t
->cleanup_handler
))
446 SCM ptr
= t
->cleanup_handler
;
448 t
->cleanup_handler
= SCM_BOOL_F
;
449 t
->result
= scm_internal_catch (SCM_BOOL_T
,
450 (scm_t_catch_body
) scm_call_0
, ptr
,
451 scm_handle_by_message_noexit
, NULL
);
454 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
457 close (t
->sleep_pipe
[0]);
458 close (t
->sleep_pipe
[1]);
459 while (scm_is_true (unblock_from_queue (t
->join_queue
)))
462 while (!scm_is_null (t
->mutexes
))
464 SCM mutex
= SCM_WEAK_PAIR_CAR (t
->mutexes
);
466 if (!SCM_UNBNDP (mutex
))
468 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
470 scm_i_pthread_mutex_lock (&m
->lock
);
471 unblock_from_queue (m
->waiting
);
472 scm_i_pthread_mutex_unlock (&m
->lock
);
475 t
->mutexes
= SCM_WEAK_PAIR_CDR (t
->mutexes
);
478 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
484 on_thread_exit (void *v
)
486 /* This handler is executed in non-guile mode. */
487 scm_i_thread
*t
= (scm_i_thread
*) v
, **tp
;
489 /* If this thread was cancelled while doing a cond wait, it will
490 still have a mutex locked, so we unlock it here. */
493 scm_i_pthread_mutex_unlock (t
->held_mutex
);
494 t
->held_mutex
= NULL
;
497 SET_CURRENT_THREAD (v
);
499 /* Ensure the signal handling thread has been launched, because we might be
501 scm_i_ensure_signal_delivery_thread ();
503 /* Unblocking the joining threads needs to happen in guile mode
504 since the queue is a SCM data structure. */
506 /* Note: Since `do_thread_exit ()' uses allocates memory via `libgc', we
507 assume the GC is usable at this point, and notably that thread-local
508 storage (TLS) hasn't been deallocated yet. */
511 /* Removing ourself from the list of all threads needs to happen in
512 non-guile mode since all SCM values on our stack become
513 unprotected once we are no longer in the list. */
514 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
515 for (tp
= &all_threads
; *tp
; tp
= &(*tp
)->next_thread
)
518 *tp
= t
->next_thread
;
521 t
->next_thread
= NULL
;
527 /* If there's only one other thread, it could be the signal delivery
528 thread, so we need to notify it to shut down by closing its read pipe.
529 If it's not the signal delivery thread, then closing the read pipe isn't
531 if (thread_count
<= 1)
532 scm_i_close_signal_pipe ();
534 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
536 SET_CURRENT_THREAD (NULL
);
539 #ifndef SCM_HAVE_THREAD_STORAGE_CLASS
541 static scm_i_pthread_once_t init_thread_key_once
= SCM_I_PTHREAD_ONCE_INIT
;
544 init_thread_key (void)
546 scm_i_pthread_key_create (&scm_i_thread_key
, NULL
);
551 /* Perform any initializations necessary to bring the current thread
552 into guile mode, initializing Guile itself, if necessary.
554 BASE is the stack base to use with GC.
556 PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
557 which case the default dynamic state is used.
559 Return zero when the thread was in guile mode already; otherwise
564 scm_i_init_thread_for_guile (SCM_STACKITEM
*base
, SCM parent
)
568 #ifndef SCM_HAVE_THREAD_STORAGE_CLASS
569 scm_i_pthread_once (&init_thread_key_once
, init_thread_key
);
572 t
= SCM_I_CURRENT_THREAD
;
575 /* This thread has not been guilified yet.
578 scm_i_pthread_mutex_lock (&scm_i_init_mutex
);
579 if (scm_initialized_p
== 0)
581 /* First thread ever to enter Guile. Run the full
584 scm_i_init_guile (base
);
585 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
589 /* Guile is already initialized, but this thread enters it for
590 the first time. Only initialize this thread.
592 scm_i_pthread_mutex_unlock (&scm_i_init_mutex
);
593 guilify_self_1 (base
);
594 guilify_self_2 (parent
);
600 /* This thread is already guilified but not in guile mode, just
603 A user call to scm_with_guile() will lead us to here. This could
604 happen from anywhere on the stack, and in particular lower on the
605 stack than when it was when this thread was first guilified. Thus,
606 `base' must be updated. */
607 #if SCM_STACK_GROWS_UP
620 /* Thread is already in guile mode. Nothing to do.
626 #if SCM_USE_PTHREAD_THREADS
628 #if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
629 /* This method for GNU/Linux and perhaps some other systems.
630 It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
631 available on them. */
632 #define HAVE_GET_THREAD_STACK_BASE
634 static SCM_STACKITEM
*
635 get_thread_stack_base ()
641 pthread_getattr_np (pthread_self (), &attr
);
642 pthread_attr_getstack (&attr
, &start
, &size
);
643 end
= (char *)start
+ size
;
645 /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
646 for the main thread, but we can use scm_get_stack_base in that
650 #ifndef PTHREAD_ATTR_GETSTACK_WORKS
651 if ((void *)&attr
< start
|| (void *)&attr
>= end
)
652 return (SCM_STACKITEM
*) GC_stackbottom
;
656 #if SCM_STACK_GROWS_UP
664 #elif HAVE_PTHREAD_GET_STACKADDR_NP
665 /* This method for MacOS X.
666 It'd be nice if there was some documentation on pthread_get_stackaddr_np,
667 but as of 2006 there's nothing obvious at apple.com. */
668 #define HAVE_GET_THREAD_STACK_BASE
669 static SCM_STACKITEM
*
670 get_thread_stack_base ()
672 return pthread_get_stackaddr_np (pthread_self ());
675 #elif defined (__MINGW32__)
676 /* This method for mingw. In mingw the basic scm_get_stack_base can be used
677 in any thread. We don't like hard-coding the name of a system, but there
678 doesn't seem to be a cleaner way of knowing scm_get_stack_base can
680 #define HAVE_GET_THREAD_STACK_BASE
681 static SCM_STACKITEM
*
682 get_thread_stack_base ()
684 return (SCM_STACKITEM
*) GC_stackbottom
;
687 #endif /* pthread methods of get_thread_stack_base */
689 #else /* !SCM_USE_PTHREAD_THREADS */
691 #define HAVE_GET_THREAD_STACK_BASE
693 static SCM_STACKITEM
*
694 get_thread_stack_base ()
696 return (SCM_STACKITEM
*) GC_stackbottom
;
699 #endif /* !SCM_USE_PTHREAD_THREADS */
#ifdef HAVE_GET_THREAD_STACK_BASE

/* Bring the calling thread (and Guile itself, if need be) into guile
   mode, using the platform-detected stack base and the default
   dynamic state.  */
void
scm_init_guile ()
{
  scm_i_init_thread_for_guile (get_thread_stack_base (),
                               scm_i_default_dynamic_state);
}

#endif
713 scm_with_guile (void *(*func
)(void *), void *data
)
715 return scm_i_with_guile_and_parent (func
, data
,
716 scm_i_default_dynamic_state
);
719 SCM_UNUSED
static void
720 scm_leave_guile_cleanup (void *x
)
722 on_thread_exit (SCM_I_CURRENT_THREAD
);
726 scm_i_with_guile_and_parent (void *(*func
)(void *), void *data
, SCM parent
)
730 SCM_STACKITEM base_item
;
732 really_entered
= scm_i_init_thread_for_guile (&base_item
, parent
);
735 scm_i_pthread_cleanup_push (scm_leave_guile_cleanup
, NULL
);
736 res
= scm_c_with_continuation_barrier (func
, data
);
737 scm_i_pthread_cleanup_pop (0);
740 res
= scm_c_with_continuation_barrier (func
, data
);
746 /*** Non-guile mode. */
748 #if (defined HAVE_GC_DO_BLOCKING) && (!defined HAVE_DECL_GC_DO_BLOCKING)
750 /* This declaration is missing from the public headers of GC 7.1. */
751 extern void GC_do_blocking (void (*) (void *), void *);
#ifdef HAVE_GC_DO_BLOCKING
/* Closure record threaded through `GC_do_blocking ()'.  */
struct without_guile_arg
{
  void * (*function) (void *);  /* function to run outside guile mode */
  void *data;                   /* argument passed to FUNCTION */
  void *result;                 /* FUNCTION's return value */
};

/* Trampoline run by `GC_do_blocking ()': flips the current thread out
   of guile mode around the user function call.  */
static void
without_guile_trampoline (void *closure)
{
  struct without_guile_arg *arg;

  SCM_I_CURRENT_THREAD->guile_mode = 0;

  arg = (struct without_guile_arg *) closure;
  arg->result = arg->function (arg->data);

  SCM_I_CURRENT_THREAD->guile_mode = 1;
}
#endif
/* Call FUNC with DATA outside guile mode: when the GC supports
   `GC_do_blocking' and we are in guile mode, run FUNC via the
   trampoline so the collector knows this thread is blocked;
   otherwise just call FUNC directly.  Returns FUNC's result.  */
void *
scm_without_guile (void *(*func)(void *), void *data)
{
  void *result;

#ifdef HAVE_GC_DO_BLOCKING
  if (SCM_I_CURRENT_THREAD->guile_mode)
    {
      struct without_guile_arg arg;

      arg.function = func;
      arg.data = data;
      GC_do_blocking (without_guile_trampoline, &arg);
      result = arg.result;
    }
  else
#endif
    result = func (data);

  return result;
}
800 /*** Thread creation */
807 scm_i_pthread_mutex_t mutex
;
808 scm_i_pthread_cond_t cond
;
812 really_launch (void *d
)
814 launch_data
*data
= (launch_data
*)d
;
815 SCM thunk
= data
->thunk
, handler
= data
->handler
;
818 t
= SCM_I_CURRENT_THREAD
;
820 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
821 data
->thread
= scm_current_thread ();
822 scm_i_pthread_cond_signal (&data
->cond
);
823 scm_i_pthread_mutex_unlock (&data
->mutex
);
825 if (SCM_UNBNDP (handler
))
826 t
->result
= scm_call_0 (thunk
);
828 t
->result
= scm_catch (SCM_BOOL_T
, thunk
, handler
);
830 /* Trigger a call to `on_thread_exit ()'. */
837 launch_thread (void *d
)
839 launch_data
*data
= (launch_data
*)d
;
840 scm_i_pthread_detach (scm_i_pthread_self ());
841 scm_i_with_guile_and_parent (really_launch
, d
, data
->parent
);
845 SCM_DEFINE (scm_call_with_new_thread
, "call-with-new-thread", 1, 1, 0,
846 (SCM thunk
, SCM handler
),
847 "Call @code{thunk} in a new thread and with a new dynamic state,\n"
848 "returning a new thread object representing the thread. The procedure\n"
849 "@var{thunk} is called via @code{with-continuation-barrier}.\n"
851 "When @var{handler} is specified, then @var{thunk} is called from\n"
852 "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
853 "handler. This catch is established inside the continuation barrier.\n"
855 "Once @var{thunk} or @var{handler} returns, the return value is made\n"
856 "the @emph{exit value} of the thread and the thread is terminated.")
857 #define FUNC_NAME s_scm_call_with_new_thread
863 SCM_ASSERT (scm_is_true (scm_thunk_p (thunk
)), thunk
, SCM_ARG1
, FUNC_NAME
);
864 SCM_ASSERT (SCM_UNBNDP (handler
) || scm_is_true (scm_procedure_p (handler
)),
865 handler
, SCM_ARG2
, FUNC_NAME
);
867 data
.parent
= scm_current_dynamic_state ();
869 data
.handler
= handler
;
870 data
.thread
= SCM_BOOL_F
;
871 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
872 scm_i_pthread_cond_init (&data
.cond
, NULL
);
874 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
875 err
= scm_i_pthread_create (&id
, NULL
, launch_thread
, &data
);
878 scm_i_pthread_mutex_unlock (&data
.mutex
);
882 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
883 scm_i_pthread_mutex_unlock (&data
.mutex
);
891 scm_t_catch_body body
;
893 scm_t_catch_handler handler
;
896 scm_i_pthread_mutex_t mutex
;
897 scm_i_pthread_cond_t cond
;
901 really_spawn (void *d
)
903 spawn_data
*data
= (spawn_data
*)d
;
904 scm_t_catch_body body
= data
->body
;
905 void *body_data
= data
->body_data
;
906 scm_t_catch_handler handler
= data
->handler
;
907 void *handler_data
= data
->handler_data
;
908 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
910 scm_i_scm_pthread_mutex_lock (&data
->mutex
);
911 data
->thread
= scm_current_thread ();
912 scm_i_pthread_cond_signal (&data
->cond
);
913 scm_i_pthread_mutex_unlock (&data
->mutex
);
916 t
->result
= body (body_data
);
918 t
->result
= scm_internal_catch (SCM_BOOL_T
,
920 handler
, handler_data
);
926 spawn_thread (void *d
)
928 spawn_data
*data
= (spawn_data
*)d
;
929 scm_i_pthread_detach (scm_i_pthread_self ());
930 scm_i_with_guile_and_parent (really_spawn
, d
, data
->parent
);
935 scm_spawn_thread (scm_t_catch_body body
, void *body_data
,
936 scm_t_catch_handler handler
, void *handler_data
)
942 data
.parent
= scm_current_dynamic_state ();
944 data
.body_data
= body_data
;
945 data
.handler
= handler
;
946 data
.handler_data
= handler_data
;
947 data
.thread
= SCM_BOOL_F
;
948 scm_i_pthread_mutex_init (&data
.mutex
, NULL
);
949 scm_i_pthread_cond_init (&data
.cond
, NULL
);
951 scm_i_scm_pthread_mutex_lock (&data
.mutex
);
952 err
= scm_i_pthread_create (&id
, NULL
, spawn_thread
, &data
);
955 scm_i_pthread_mutex_unlock (&data
.mutex
);
959 scm_i_scm_pthread_cond_wait (&data
.cond
, &data
.mutex
);
960 scm_i_pthread_mutex_unlock (&data
.mutex
);
965 SCM_DEFINE (scm_yield
, "yield", 0, 0, 0,
967 "Move the calling thread to the end of the scheduling queue.")
968 #define FUNC_NAME s_scm_yield
970 return scm_from_bool (scm_i_sched_yield ());
974 SCM_DEFINE (scm_cancel_thread
, "cancel-thread", 1, 0, 0,
976 "Asynchronously force the target @var{thread} to terminate. @var{thread} "
977 "cannot be the current thread, and if @var{thread} has already terminated or "
978 "been signaled to terminate, this function is a no-op.")
979 #define FUNC_NAME s_scm_cancel_thread
981 scm_i_thread
*t
= NULL
;
983 SCM_VALIDATE_THREAD (1, thread
);
984 t
= SCM_I_THREAD_DATA (thread
);
985 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
989 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
990 scm_i_pthread_cancel (t
->pthread
);
993 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
995 return SCM_UNSPECIFIED
;
999 SCM_DEFINE (scm_set_thread_cleanup_x
, "set-thread-cleanup!", 2, 0, 0,
1000 (SCM thread
, SCM proc
),
1001 "Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
1002 "This handler will be called when the thread exits.")
1003 #define FUNC_NAME s_scm_set_thread_cleanup_x
1007 SCM_VALIDATE_THREAD (1, thread
);
1008 if (!scm_is_false (proc
))
1009 SCM_VALIDATE_THUNK (2, proc
);
1011 t
= SCM_I_THREAD_DATA (thread
);
1012 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1014 if (!(t
->exited
|| t
->canceled
))
1015 t
->cleanup_handler
= proc
;
1017 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1019 return SCM_UNSPECIFIED
;
1023 SCM_DEFINE (scm_thread_cleanup
, "thread-cleanup", 1, 0, 0,
1025 "Return the cleanup handler installed for the thread @var{thread}.")
1026 #define FUNC_NAME s_scm_thread_cleanup
1031 SCM_VALIDATE_THREAD (1, thread
);
1033 t
= SCM_I_THREAD_DATA (thread
);
1034 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1035 ret
= (t
->exited
|| t
->canceled
) ? SCM_BOOL_F
: t
->cleanup_handler
;
1036 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1042 SCM
scm_join_thread (SCM thread
)
1044 return scm_join_thread_timed (thread
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1047 SCM_DEFINE (scm_join_thread_timed
, "join-thread", 1, 2, 0,
1048 (SCM thread
, SCM timeout
, SCM timeoutval
),
1049 "Suspend execution of the calling thread until the target @var{thread} "
1050 "terminates, unless the target @var{thread} has already terminated. ")
1051 #define FUNC_NAME s_scm_join_thread_timed
1054 scm_t_timespec ctimeout
, *timeout_ptr
= NULL
;
1055 SCM res
= SCM_BOOL_F
;
1057 if (! (SCM_UNBNDP (timeoutval
)))
1060 SCM_VALIDATE_THREAD (1, thread
);
1061 if (scm_is_eq (scm_current_thread (), thread
))
1062 SCM_MISC_ERROR ("cannot join the current thread", SCM_EOL
);
1064 t
= SCM_I_THREAD_DATA (thread
);
1065 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1067 if (! SCM_UNBNDP (timeout
))
1069 to_timespec (timeout
, &ctimeout
);
1070 timeout_ptr
= &ctimeout
;
1079 int err
= block_self (t
->join_queue
, thread
, &t
->admin_mutex
,
1089 else if (err
== ETIMEDOUT
)
1092 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1094 scm_i_scm_pthread_mutex_lock (&t
->admin_mutex
);
1096 /* Check for exit again, since we just released and
1097 reacquired the admin mutex, before the next block_self
1098 call (which would block forever if t has already
1108 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1114 SCM_DEFINE (scm_thread_p
, "thread?", 1, 0, 0,
1116 "Return @code{#t} if @var{obj} is a thread.")
1117 #define FUNC_NAME s_scm_thread_p
1119 return SCM_I_IS_THREAD(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1125 fat_mutex_free (SCM mx
)
1127 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1128 scm_i_pthread_mutex_destroy (&m
->lock
);
1133 fat_mutex_print (SCM mx
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1135 fat_mutex
*m
= SCM_MUTEX_DATA (mx
);
1136 scm_puts ("#<mutex ", port
);
1137 scm_uintprint ((scm_t_bits
)m
, 16, port
);
1138 scm_puts (">", port
);
1143 make_fat_mutex (int recursive
, int unchecked_unlock
, int external_unlock
)
1148 m
= scm_gc_malloc (sizeof (fat_mutex
), "mutex");
1149 scm_i_pthread_mutex_init (&m
->lock
, NULL
);
1150 m
->owner
= SCM_BOOL_F
;
1153 m
->recursive
= recursive
;
1154 m
->unchecked_unlock
= unchecked_unlock
;
1155 m
->allow_external_unlock
= external_unlock
;
1157 m
->waiting
= SCM_EOL
;
1158 SCM_NEWSMOB (mx
, scm_tc16_mutex
, (scm_t_bits
) m
);
1159 m
->waiting
= make_queue ();
1163 SCM
scm_make_mutex (void)
1165 return scm_make_mutex_with_flags (SCM_EOL
);
1168 SCM_SYMBOL (unchecked_unlock_sym
, "unchecked-unlock");
1169 SCM_SYMBOL (allow_external_unlock_sym
, "allow-external-unlock");
1170 SCM_SYMBOL (recursive_sym
, "recursive");
1172 SCM_DEFINE (scm_make_mutex_with_flags
, "make-mutex", 0, 0, 1,
1174 "Create a new mutex. ")
1175 #define FUNC_NAME s_scm_make_mutex_with_flags
1177 int unchecked_unlock
= 0, external_unlock
= 0, recursive
= 0;
1180 while (! scm_is_null (ptr
))
1182 SCM flag
= SCM_CAR (ptr
);
1183 if (scm_is_eq (flag
, unchecked_unlock_sym
))
1184 unchecked_unlock
= 1;
1185 else if (scm_is_eq (flag
, allow_external_unlock_sym
))
1186 external_unlock
= 1;
1187 else if (scm_is_eq (flag
, recursive_sym
))
1190 SCM_MISC_ERROR ("unsupported mutex option: ~a", scm_list_1 (flag
));
1191 ptr
= SCM_CDR (ptr
);
1193 return make_fat_mutex (recursive
, unchecked_unlock
, external_unlock
);
1197 SCM_DEFINE (scm_make_recursive_mutex
, "make-recursive-mutex", 0, 0, 0,
1199 "Create a new recursive mutex. ")
1200 #define FUNC_NAME s_scm_make_recursive_mutex
1202 return make_fat_mutex (1, 0, 0);
1206 SCM_SYMBOL (scm_abandoned_mutex_error_key
, "abandoned-mutex-error");
1209 fat_mutex_lock (SCM mutex
, scm_t_timespec
*timeout
, SCM owner
, int *ret
)
1211 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1213 SCM new_owner
= SCM_UNBNDP (owner
) ? scm_current_thread() : owner
;
1214 SCM err
= SCM_BOOL_F
;
1216 struct timeval current_time
;
1218 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1224 m
->owner
= new_owner
;
1227 if (SCM_I_IS_THREAD (new_owner
))
1229 scm_i_thread
*t
= SCM_I_THREAD_DATA (new_owner
);
1230 scm_i_pthread_mutex_lock (&t
->admin_mutex
);
1232 /* Only keep a weak reference to MUTEX so that it's not
1233 retained when not referenced elsewhere (bug #27450). Note
1234 that the weak pair itself it still retained, but it's better
1235 than retaining MUTEX and the threads referred to by its
1236 associated queue. */
1237 t
->mutexes
= scm_weak_car_pair (mutex
, t
->mutexes
);
1239 scm_i_pthread_mutex_unlock (&t
->admin_mutex
);
1244 else if (SCM_I_IS_THREAD (m
->owner
) && scm_c_thread_exited_p (m
->owner
))
1246 m
->owner
= new_owner
;
1247 err
= scm_cons (scm_abandoned_mutex_error_key
,
1248 scm_from_locale_string ("lock obtained on abandoned "
1253 else if (scm_is_eq (m
->owner
, new_owner
))
1262 err
= scm_cons (scm_misc_error_key
,
1263 scm_from_locale_string ("mutex already locked "
1271 if (timeout
!= NULL
)
1273 gettimeofday (¤t_time
, NULL
);
1274 if (current_time
.tv_sec
> timeout
->tv_sec
||
1275 (current_time
.tv_sec
== timeout
->tv_sec
&&
1276 current_time
.tv_usec
* 1000 > timeout
->tv_nsec
))
1282 block_self (m
->waiting
, mutex
, &m
->lock
, timeout
);
1283 scm_i_pthread_mutex_unlock (&m
->lock
);
1285 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1288 scm_i_pthread_mutex_unlock (&m
->lock
);
1292 SCM
scm_lock_mutex (SCM mx
)
1294 return scm_lock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1297 SCM_DEFINE (scm_lock_mutex_timed
, "lock-mutex", 1, 2, 0,
1298 (SCM m
, SCM timeout
, SCM owner
),
1299 "Lock @var{mutex}. If the mutex is already locked, the calling thread "
1300 "blocks until the mutex becomes available. The function returns when "
1301 "the calling thread owns the lock on @var{mutex}. Locking a mutex that "
1302 "a thread already owns will succeed right away and will not block the "
1303 "thread. That is, Guile's mutexes are @emph{recursive}. ")
1304 #define FUNC_NAME s_scm_lock_mutex_timed
1308 scm_t_timespec cwaittime
, *waittime
= NULL
;
1310 SCM_VALIDATE_MUTEX (1, m
);
1312 if (! SCM_UNBNDP (timeout
) && ! scm_is_false (timeout
))
1314 to_timespec (timeout
, &cwaittime
);
1315 waittime
= &cwaittime
;
1318 exception
= fat_mutex_lock (m
, waittime
, owner
, &ret
);
1319 if (!scm_is_false (exception
))
1320 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1321 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1326 scm_dynwind_lock_mutex (SCM mutex
)
1328 scm_dynwind_unwind_handler_with_scm ((void(*)(SCM
))scm_unlock_mutex
, mutex
,
1329 SCM_F_WIND_EXPLICITLY
);
1330 scm_dynwind_rewind_handler_with_scm ((void(*)(SCM
))scm_lock_mutex
, mutex
,
1331 SCM_F_WIND_EXPLICITLY
);
1334 SCM_DEFINE (scm_try_mutex
, "try-mutex", 1, 0, 0,
1336 "Try to lock @var{mutex}. If the mutex is already locked by someone "
1337 "else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
1338 #define FUNC_NAME s_scm_try_mutex
1342 scm_t_timespec cwaittime
, *waittime
= NULL
;
1344 SCM_VALIDATE_MUTEX (1, mutex
);
1346 to_timespec (scm_from_int(0), &cwaittime
);
1347 waittime
= &cwaittime
;
1349 exception
= fat_mutex_lock (mutex
, waittime
, SCM_UNDEFINED
, &ret
);
1350 if (!scm_is_false (exception
))
1351 scm_ithrow (SCM_CAR (exception
), scm_list_1 (SCM_CDR (exception
)), 1);
1352 return ret
? SCM_BOOL_T
: SCM_BOOL_F
;
1356 /*** Fat condition variables */
1359 scm_i_pthread_mutex_t lock
;
1360 SCM waiting
; /* the threads waiting for this condition. */
1363 #define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
1364 #define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
/* fat_mutex_unlock: release MUTEX; when COND is bound, atomically wait
   on COND (optionally bounded by WAITTIME) and, when RELOCK is
   non-zero, re-acquire MUTEX before returning.
   NOTE(review): this region was mangled by extraction and several
   original source lines (braces, loop structure, return) are missing;
   the comments below describe only what the surviving fragments show.  */
1367 fat_mutex_unlock (SCM mutex
, SCM cond
,
1368 const scm_t_timespec
*waittime
, int relock
)
/* Unpack the fat_mutex payload and grab the current thread.  */
1370 fat_mutex
*m
= SCM_MUTEX_DATA (mutex
);
1372 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1373 int err
= 0, ret
= 0;
/* Serialize all access to the mutex state under its internal lock.  */
1375 scm_i_scm_pthread_mutex_lock (&m
->lock
);
1377 SCM owner
= m
->owner
;
/* Error checking: unlocking a mutex we do not own is only tolerated
   when the mutex was created with the corresponding flags.  */
1379 if (!scm_is_eq (owner
, scm_current_thread ()))
1383 if (!m
->unchecked_unlock
)
/* Drop the internal lock before throwing, or we would deadlock.  */
1385 scm_i_pthread_mutex_unlock (&m
->lock
);
1386 scm_misc_error (NULL
, "mutex not locked", SCM_EOL
);
1388 owner
= scm_current_thread ();
1390 else if (!m
->allow_external_unlock
)
1392 scm_i_pthread_mutex_unlock (&m
->lock
);
1393 scm_misc_error (NULL
, "mutex not locked by current thread", SCM_EOL
);
/* Condition-variable path: wait on COND while the mutex is released.  */
1397 if (! (SCM_UNBNDP (cond
)))
1399 c
= SCM_CONDVAR_DATA (cond
);
/* Hand the mutex to the next waiter before blocking.  */
1407 m
->owner
= unblock_from_queue (m
->waiting
);
/* Block on the condvar's queue; m->lock is released while asleep.  */
1411 err
= block_self (c
->waiting
, cond
, &m
->lock
, waittime
);
1412 scm_i_pthread_mutex_unlock (&m
->lock
);
/* err == ETIMEDOUT: the wait timed out; other non-EINTR errors are
   fatal (scm_syserror).  EINTR presumably retries the wait — the loop
   structure is among the missing lines; TODO confirm.  */
1419 else if (err
== ETIMEDOUT
)
1424 else if (err
!= EINTR
)
1427 scm_syserror (NULL
);
/* Re-acquire the mutex on behalf of OWNER after the wait.  */
1433 scm_lock_mutex_timed (mutex
, SCM_UNDEFINED
, owner
);
/* Keep cond/mutex alive across the blocking region for the GC.  */
1441 scm_remember_upto_here_2 (cond
, mutex
);
1443 scm_i_scm_pthread_mutex_lock (&m
->lock
);
/* Plain-unlock path: wake the next thread queued on the mutex.  */
1451 m
->owner
= unblock_from_queue (m
->waiting
);
1453 scm_i_pthread_mutex_unlock (&m
->lock
);
1460 SCM
scm_unlock_mutex (SCM mx
)
1462 return scm_unlock_mutex_timed (mx
, SCM_UNDEFINED
, SCM_UNDEFINED
);
1465 SCM_DEFINE (scm_unlock_mutex_timed
, "unlock-mutex", 1, 2, 0,
1466 (SCM mx
, SCM cond
, SCM timeout
),
1467 "Unlocks @var{mutex} if the calling thread owns the lock on "
1468 "@var{mutex}. Calling unlock-mutex on a mutex not owned by the current "
1469 "thread results in undefined behaviour. Once a mutex has been unlocked, "
1470 "one thread blocked on @var{mutex} is awakened and grabs the mutex "
1471 "lock. Every call to @code{lock-mutex} by this thread must be matched "
1472 "with a call to @code{unlock-mutex}. Only the last call to "
1473 "@code{unlock-mutex} will actually unlock the mutex. ")
1474 #define FUNC_NAME s_scm_unlock_mutex_timed
1476 scm_t_timespec cwaittime
, *waittime
= NULL
;
1478 SCM_VALIDATE_MUTEX (1, mx
);
1479 if (! (SCM_UNBNDP (cond
)))
1481 SCM_VALIDATE_CONDVAR (2, cond
);
1483 if (! (SCM_UNBNDP (timeout
)))
1485 to_timespec (timeout
, &cwaittime
);
1486 waittime
= &cwaittime
;
1490 return fat_mutex_unlock (mx
, cond
, waittime
, 0) ? SCM_BOOL_T
: SCM_BOOL_F
;
1494 SCM_DEFINE (scm_mutex_p
, "mutex?", 1, 0, 0,
1496 "Return @code{#t} if @var{obj} is a mutex.")
1497 #define FUNC_NAME s_scm_mutex_p
1499 return SCM_MUTEXP (obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1503 SCM_DEFINE (scm_mutex_owner
, "mutex-owner", 1, 0, 0,
1505 "Return the thread owning @var{mx}, or @code{#f}.")
1506 #define FUNC_NAME s_scm_mutex_owner
1509 fat_mutex
*m
= NULL
;
1511 SCM_VALIDATE_MUTEX (1, mx
);
1512 m
= SCM_MUTEX_DATA (mx
);
1513 scm_i_pthread_mutex_lock (&m
->lock
);
1515 scm_i_pthread_mutex_unlock (&m
->lock
);
1521 SCM_DEFINE (scm_mutex_level
, "mutex-level", 1, 0, 0,
1523 "Return the lock level of mutex @var{mx}.")
1524 #define FUNC_NAME s_scm_mutex_level
1526 SCM_VALIDATE_MUTEX (1, mx
);
1527 return scm_from_int (SCM_MUTEX_DATA(mx
)->level
);
1531 SCM_DEFINE (scm_mutex_locked_p
, "mutex-locked?", 1, 0, 0,
1533 "Returns @code{#t} if the mutex @var{mx} is locked.")
1534 #define FUNC_NAME s_scm_mutex_locked_p
1536 SCM_VALIDATE_MUTEX (1, mx
);
1537 return SCM_MUTEX_DATA (mx
)->level
> 0 ? SCM_BOOL_T
: SCM_BOOL_F
;
1542 fat_cond_print (SCM cv
, SCM port
, scm_print_state
*pstate SCM_UNUSED
)
1544 fat_cond
*c
= SCM_CONDVAR_DATA (cv
);
1545 scm_puts ("#<condition-variable ", port
);
1546 scm_uintprint ((scm_t_bits
)c
, 16, port
);
1547 scm_puts (">", port
);
1551 SCM_DEFINE (scm_make_condition_variable
, "make-condition-variable", 0, 0, 0,
1553 "Make a new condition variable.")
1554 #define FUNC_NAME s_scm_make_condition_variable
1559 c
= scm_gc_malloc (sizeof (fat_cond
), "condition variable");
1560 c
->waiting
= SCM_EOL
;
1561 SCM_NEWSMOB (cv
, scm_tc16_condvar
, (scm_t_bits
) c
);
1562 c
->waiting
= make_queue ();
1567 SCM_DEFINE (scm_timed_wait_condition_variable
, "wait-condition-variable", 2, 1, 0,
1568 (SCM cv
, SCM mx
, SCM t
),
1569 "Wait until @var{cond-var} has been signalled. While waiting, "
1570 "@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
1571 "is locked again when this function returns. When @var{time} is given, "
1572 "it specifies a point in time where the waiting should be aborted. It "
1573 "can be either a integer as returned by @code{current-time} or a pair "
1574 "as returned by @code{gettimeofday}. When the waiting is aborted the "
1575 "mutex is locked and @code{#f} is returned. When the condition "
1576 "variable is in fact signalled, the mutex is also locked and @code{#t} "
1578 #define FUNC_NAME s_scm_timed_wait_condition_variable
1580 scm_t_timespec waittime
, *waitptr
= NULL
;
1582 SCM_VALIDATE_CONDVAR (1, cv
);
1583 SCM_VALIDATE_MUTEX (2, mx
);
1585 if (!SCM_UNBNDP (t
))
1587 to_timespec (t
, &waittime
);
1588 waitptr
= &waittime
;
1591 return fat_mutex_unlock (mx
, cv
, waitptr
, 1) ? SCM_BOOL_T
: SCM_BOOL_F
;
1596 fat_cond_signal (fat_cond
*c
)
1598 unblock_from_queue (c
->waiting
);
1601 SCM_DEFINE (scm_signal_condition_variable
, "signal-condition-variable", 1, 0, 0,
1603 "Wake up one thread that is waiting for @var{cv}")
1604 #define FUNC_NAME s_scm_signal_condition_variable
1606 SCM_VALIDATE_CONDVAR (1, cv
);
1607 fat_cond_signal (SCM_CONDVAR_DATA (cv
));
1613 fat_cond_broadcast (fat_cond
*c
)
1615 while (scm_is_true (unblock_from_queue (c
->waiting
)))
1619 SCM_DEFINE (scm_broadcast_condition_variable
, "broadcast-condition-variable", 1, 0, 0,
1621 "Wake up all threads that are waiting for @var{cv}. ")
1622 #define FUNC_NAME s_scm_broadcast_condition_variable
1624 SCM_VALIDATE_CONDVAR (1, cv
);
1625 fat_cond_broadcast (SCM_CONDVAR_DATA (cv
));
1630 SCM_DEFINE (scm_condition_variable_p
, "condition-variable?", 1, 0, 0,
1632 "Return @code{#t} if @var{obj} is a condition variable.")
1633 #define FUNC_NAME s_scm_condition_variable_p
1635 return SCM_CONDVARP(obj
) ? SCM_BOOL_T
: SCM_BOOL_F
;
1646 SELECT_TYPE
*read_fds
;
1647 SELECT_TYPE
*write_fds
;
1648 SELECT_TYPE
*except_fds
;
1649 struct timeval
*timeout
;
1656 do_std_select (void *args
)
1658 struct select_args
*select_args
;
1660 select_args
= (struct select_args
*) args
;
1662 select_args
->result
=
1663 select (select_args
->nfds
,
1664 select_args
->read_fds
, select_args
->write_fds
,
1665 select_args
->except_fds
, select_args
->timeout
);
1666 select_args
->errno_value
= errno
;
/* scm_std_select: guile-mode wrapper around select(2).  It adds the
   current thread's sleep pipe to the read set so the thread can be
   woken by guile asyncs, runs the real select outside guile mode, and
   strips the wakeup fd from the result.
   NOTE(review): this region was mangled by extraction; missing lines
   include local declarations (my_readfds), args.nfds / res assignments
   and the final return — the comments describe only what is visible.  */
1672 scm_std_select (int nfds
,
1673 SELECT_TYPE
*readfds
,
1674 SELECT_TYPE
*writefds
,
1675 SELECT_TYPE
*exceptfds
,
1676 struct timeval
*timeout
)
1679 int res
, eno
, wakeup_fd
;
1680 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1681 struct select_args args
;
/* The caller may pass no read set; substitute a local empty one so the
   wakeup fd can always be added.  */
1683 if (readfds
== NULL
)
1685 FD_ZERO (&my_readfds
);
1686 readfds
= &my_readfds
;
/* Register this thread as sleeping on its pipe's write end; retried
   while scm_i_setup_sleep reports contention.  */
1689 while (scm_i_setup_sleep (t
, SCM_BOOL_F
, NULL
, t
->sleep_pipe
[1]))
/* Watch the read end of the sleep pipe alongside the caller's fds.  */
1692 wakeup_fd
= t
->sleep_pipe
[0];
1693 FD_SET (wakeup_fd
, readfds
);
1694 if (wakeup_fd
>= nfds
)
1698 args
.read_fds
= readfds
;
1699 args
.write_fds
= writefds
;
1700 args
.except_fds
= exceptfds
;
1701 args
.timeout
= timeout
;
1703 /* Explicitly cooperate with the GC. */
1704 scm_without_guile (do_std_select
, &args
);
1707 eno
= args
.errno_value
;
1710 scm_i_reset_sleep (t
);
/* If we were woken via the sleep pipe, consume the wakeup byte and
   remove the pipe fd from the user-visible result.  */
1712 if (res
> 0 && FD_ISSET (wakeup_fd
, readfds
))
1715 full_read (wakeup_fd
, &dummy
, 1);
1717 FD_CLR (wakeup_fd
, readfds
);
1729 /* Convenience API for blocking while in guile mode. */
1731 #if SCM_USE_PTHREAD_THREADS
1733 /* It seems reasonable to not run procedures related to mutex and condition
1734 variables within `GC_do_blocking ()' since, (i) the GC can operate even
1735 without it, and (ii) the only potential gain would be GC latency. See
1736 http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
1737 for a discussion of the pros and cons. */
1740 scm_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1742 int res
= scm_i_pthread_mutex_lock (mutex
);
1747 do_unlock (void *data
)
1749 scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t
*)data
);
1753 scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t
*mutex
)
1755 scm_i_scm_pthread_mutex_lock (mutex
);
1756 scm_dynwind_unwind_handler (do_unlock
, mutex
, SCM_F_WIND_EXPLICITLY
);
1760 scm_pthread_cond_wait (scm_i_pthread_cond_t
*cond
, scm_i_pthread_mutex_t
*mutex
)
1763 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1765 t
->held_mutex
= mutex
;
1766 res
= scm_i_pthread_cond_wait (cond
, mutex
);
1767 t
->held_mutex
= NULL
;
1773 scm_pthread_cond_timedwait (scm_i_pthread_cond_t
*cond
,
1774 scm_i_pthread_mutex_t
*mutex
,
1775 const scm_t_timespec
*wt
)
1778 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
1780 t
->held_mutex
= mutex
;
1781 res
= scm_i_pthread_cond_timedwait (cond
, mutex
, wt
);
1782 t
->held_mutex
= NULL
;
1790 scm_std_usleep (unsigned long usecs
)
1793 tv
.tv_usec
= usecs
% 1000000;
1794 tv
.tv_sec
= usecs
/ 1000000;
1795 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1796 return tv
.tv_sec
* 1000000 + tv
.tv_usec
;
1800 scm_std_sleep (unsigned int secs
)
1805 scm_std_select (0, NULL
, NULL
, NULL
, &tv
);
1811 SCM_DEFINE (scm_current_thread
, "current-thread", 0, 0, 0,
1813 "Return the thread that called this function.")
1814 #define FUNC_NAME s_scm_current_thread
1816 return SCM_I_CURRENT_THREAD
->handle
;
1821 scm_c_make_list (size_t n
, SCM fill
)
1825 res
= scm_cons (fill
, res
);
1829 SCM_DEFINE (scm_all_threads
, "all-threads", 0, 0, 0,
1831 "Return a list of all threads.")
1832 #define FUNC_NAME s_scm_all_threads
1834 /* We can not allocate while holding the thread_admin_mutex because
1835 of the way GC is done.
1837 int n
= thread_count
;
1839 SCM list
= scm_c_make_list (n
, SCM_UNSPECIFIED
), *l
;
1841 scm_i_pthread_mutex_lock (&thread_admin_mutex
);
1843 for (t
= all_threads
; t
&& n
> 0; t
= t
->next_thread
)
1845 if (t
!= scm_i_signal_delivery_thread
)
1847 SCM_SETCAR (*l
, t
->handle
);
1848 l
= SCM_CDRLOC (*l
);
1853 scm_i_pthread_mutex_unlock (&thread_admin_mutex
);
1858 SCM_DEFINE (scm_thread_exited_p
, "thread-exited?", 1, 0, 0,
1860 "Return @code{#t} iff @var{thread} has exited.\n")
1861 #define FUNC_NAME s_scm_thread_exited_p
1863 return scm_from_bool (scm_c_thread_exited_p (thread
));
1868 scm_c_thread_exited_p (SCM thread
)
1869 #define FUNC_NAME s_scm_thread_exited_p
1872 SCM_VALIDATE_THREAD (1, thread
);
1873 t
= SCM_I_THREAD_DATA (thread
);
1878 static scm_i_pthread_cond_t wake_up_cond
;
1879 static int threads_initialized_p
= 0;
1882 /* This mutex is used by SCM_CRITICAL_SECTION_START/END.
1884 scm_i_pthread_mutex_t scm_i_critical_section_mutex
;
1886 static SCM dynwind_critical_section_mutex
;
1889 scm_dynwind_critical_section (SCM mutex
)
1891 if (scm_is_false (mutex
))
1892 mutex
= dynwind_critical_section_mutex
;
1893 scm_dynwind_lock_mutex (mutex
);
1894 scm_dynwind_block_asyncs ();
1897 /*** Initialization */
1899 scm_i_pthread_mutex_t scm_i_misc_mutex
;
1901 #if SCM_USE_PTHREAD_THREADS
1902 pthread_mutexattr_t scm_i_pthread_mutexattr_recursive
[1];
1906 scm_threads_prehistory (SCM_STACKITEM
*base
)
1908 #if SCM_USE_PTHREAD_THREADS
1909 pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive
);
1910 pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive
,
1911 PTHREAD_MUTEX_RECURSIVE
);
1914 scm_i_pthread_mutex_init (&scm_i_critical_section_mutex
,
1915 scm_i_pthread_mutexattr_recursive
);
1916 scm_i_pthread_mutex_init (&scm_i_misc_mutex
, NULL
);
1917 scm_i_pthread_cond_init (&wake_up_cond
, NULL
);
1919 guilify_self_1 (base
);
1922 scm_t_bits scm_tc16_thread
;
1923 scm_t_bits scm_tc16_mutex
;
1924 scm_t_bits scm_tc16_condvar
;
1929 scm_tc16_thread
= scm_make_smob_type ("thread", sizeof (scm_i_thread
));
1930 scm_set_smob_print (scm_tc16_thread
, thread_print
);
1932 scm_tc16_mutex
= scm_make_smob_type ("mutex", sizeof (fat_mutex
));
1933 scm_set_smob_print (scm_tc16_mutex
, fat_mutex_print
);
1934 scm_set_smob_free (scm_tc16_mutex
, fat_mutex_free
);
1936 scm_tc16_condvar
= scm_make_smob_type ("condition-variable",
1938 scm_set_smob_print (scm_tc16_condvar
, fat_cond_print
);
1940 scm_i_default_dynamic_state
= SCM_BOOL_F
;
1941 guilify_self_2 (SCM_BOOL_F
);
1942 threads_initialized_p
= 1;
1944 dynwind_critical_section_mutex
=
1945 scm_permanent_object (scm_make_recursive_mutex ());
1949 scm_init_threads_default_dynamic_state ()
1951 SCM state
= scm_make_dynamic_state (scm_current_dynamic_state ());
1952 scm_i_default_dynamic_state
= scm_permanent_object (state
);
1956 scm_init_thread_procs ()
1958 #include "libguile/threads.x"
/* IA64-specific things.  */

/* On IA64 the register backing store is a second, separate stack that
   the GC and continuation code must scan; these helpers locate it.
   NOTE(review): the preprocessor skeleton (#ifdef __ia64__ / __hpux /
   linux) was partially lost in extraction and is reconstructed.  */
#ifdef __ia64__
# ifdef __hpux
#  include <sys/param.h>
#  include <sys/pstat.h>
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  /* Scan the process VM regions for the RSE stack.  */
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
# endif /* hpux */
# ifdef linux
#  include <ucontext.h>
void *
scm_ia64_register_backing_store_base (void)
{
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
#endif /* __ia64__ */