-/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
\f
+#define _GNU_SOURCE
+
+#include "libguile/boehm-gc.h"
#include "libguile/_scm.h"
#if HAVE_UNISTD_H
#endif
#include <stdio.h>
#include <assert.h>
+
+#ifdef HAVE_STRING_H
+#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
+#endif
+
#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
+#include "libguile/gc.h"
#include "libguile/init.h"
+#ifdef __MINGW32__
+#ifndef ETIMEDOUT
+# define ETIMEDOUT WSAETIMEDOUT
+#endif
+# include <fcntl.h>
+# include <process.h>
+# define pipe(fd) _pipe (fd, 256, O_BINARY)
+#endif /* __MINGW32__ */
+
/*** Queues */
+/* Make an empty queue data structure.
+ */
static SCM
make_queue ()
{
return scm_cons (SCM_EOL, SCM_EOL);
}
+/* Put T at the back of Q and return a handle that can be used with
+ remqueue to remove T from Q again.
+ */
static SCM
enqueue (SCM q, SCM t)
{
return c;
}
-static void
+/* Remove the element that the handle C refers to from the queue Q. C
+ must have been returned from a call to enqueue. The return value
+ is zero when the element referred to by C has already been removed.
+ Otherwise, 1 is returned.
+*/
+static int
remqueue (SCM q, SCM c)
{
SCM p, prev = q;
if (scm_is_eq (c, SCM_CAR (q)))
SCM_SETCAR (q, SCM_CDR (c));
SCM_SETCDR (prev, SCM_CDR (c));
- return;
+ return 1;
}
prev = p;
}
- abort ();
+ return 0;
}
+/* Remove the front-most element from the queue Q and return it.
+ Return SCM_BOOL_F when Q is empty.
+*/
static SCM
dequeue (SCM q)
{
}
}
-/*** Threads */
+/*** Thread smob routines */
-static SCM
-thread_mark (SCM obj)
-{
- scm_thread *t = SCM_THREAD_DATA (obj);
- scm_gc_mark (t->result);
- return t->root;
-}
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
{
- scm_thread *t = SCM_THREAD_DATA (exp);
+ scm_i_thread *t = SCM_I_THREAD_DATA (exp);
scm_puts ("#<thread ", port);
scm_uintprint ((size_t)t->pthread, 10, port);
scm_puts (" (", port);
static size_t
thread_free (SCM obj)
{
- scm_thread *t = SCM_THREAD_DATA (obj);
+ scm_i_thread *t = SCM_I_THREAD_DATA (obj);
assert (t->exited);
scm_gc_free (t, sizeof (*t), "thread");
return 0;
}
-/*** Scheduling */
-
-#define cur_thread (SCM_CURRENT_THREAD->handle)
-pthread_key_t scm_i_thread_key;
+/*** Blocking on queues. */
-static void
-resume (scm_thread *t)
-{
- t->top = NULL;
- if (t->clear_freelists_p)
- {
- *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
- *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
- t->clear_freelists_p = 0;
- }
-}
+/* See also scm_i_queue_async_cell for how such a block is
+ interrupted.
+*/
-static void
-scm_i_enter_guile (scm_thread *t)
-{
- pthread_mutex_lock (&t->heap_mutex);
- resume (t);
-}
+/* Put the current thread on QUEUE and go to sleep, waiting for it to
+ be woken up by a call to 'unblock_from_queue', or to be
+ interrupted. Upon return of this function, the current thread is
+ no longer on QUEUE, even when the sleep has been interrupted.
-static scm_thread *
-suspend ()
-{
- scm_thread *c = SCM_CURRENT_THREAD;
+ The QUEUE data structure is assumed to be protected by MUTEX and
+ the caller of block_self must hold MUTEX. It will be atomically
+ unlocked while sleeping, just as with scm_i_pthread_cond_wait.
- /* record top of stack for the GC */
- c->top = SCM_STACK_PTR (&c);
- /* save registers. */
- SCM_FLUSH_REGISTER_WINDOWS;
- setjmp (c->regs);
+ SLEEP_OBJECT is an arbitrary SCM value that is kept alive as long
+ as MUTEX is needed.
- return c;
-}
+ When WAITTIME is not NULL, the sleep will be aborted at that time.
-static scm_thread *
-scm_i_leave_guile ()
-{
- scm_thread *t = suspend ();
- pthread_mutex_unlock (&t->heap_mutex);
- return t;
-}
+ The return value of block_self is an errno value. It will be zero
+ when the sleep has been successfully completed by a call to
+ unblock_from_queue, EINTR when it has been interrupted by the
+ delivery of a system async, and ETIMEDOUT when the timeout has
+ expired.
-/* Put the current thread to sleep until it is explicitely unblocked.
- */
+ The system asyncs themselves are not executed by block_self.
+*/
static int
-block ()
+block_self (SCM queue, SCM sleep_object, scm_i_pthread_mutex_t *mutex,
+ const scm_t_timespec *waittime)
{
+ scm_i_thread *t = SCM_I_CURRENT_THREAD;
+ SCM q_handle;
int err;
- scm_thread *t = suspend ();
- err = pthread_cond_wait (&t->sleep_cond, &t->heap_mutex);
- resume (t);
- return err;
-}
-/* Put the current thread to sleep until it is explicitely unblocked
- or until a signal arrives or until time AT (absolute time) is
- reached. Return 0 when it has been unblocked; errno otherwise.
- */
-static int
-timed_block (const scm_t_timespec *at)
-{
- int err;
- scm_thread *t = suspend ();
- err = pthread_cond_timedwait (&t->sleep_cond, &t->heap_mutex, at);
- resume (t);
+ if (scm_i_setup_sleep (t, sleep_object, mutex, -1))
+ err = EINTR;
+ else
+ {
+ t->block_asyncs++;
+ q_handle = enqueue (queue, t->handle);
+ if (waittime == NULL)
+ err = scm_i_scm_pthread_cond_wait (&t->sleep_cond, mutex);
+ else
+ err = scm_i_scm_pthread_cond_timedwait (&t->sleep_cond, mutex, waittime);
+
+ /* When we are still on QUEUE, we have been interrupted. We
+ report this only when no other error (such as a timeout) has
+ happened above.
+ */
+ if (remqueue (queue, q_handle) && err == 0)
+ err = EINTR;
+ t->block_asyncs--;
+ scm_i_reset_sleep (t);
+ }
+
return err;
}
-/* Unblock a sleeping thread.
+/* Wake up the first thread on QUEUE, if any. The caller must hold
+ the mutex that protects QUEUE. The awoken thread is returned, or
+ #f when the queue was empty.
*/
-static void
-unblock (scm_thread *t)
+static SCM
+unblock_from_queue (SCM queue)
{
- pthread_cond_signal (&t->sleep_cond);
+ SCM thread = dequeue (queue);
+ if (scm_is_true (thread))
+ scm_i_pthread_cond_signal (&SCM_I_THREAD_DATA(thread)->sleep_cond);
+ return thread;
}
/* Getting into and out of guile mode.
*/
-static pthread_mutex_t thread_admin_mutex = PTHREAD_MUTEX_INITIALIZER;
-static scm_thread *all_threads = NULL;
-static int thread_count;
+/* Ken Raeburn observes that the implementation of suspend and resume
+ (and the things that build on top of them) are very likely not
+ correct (see below). We will need to fix this eventually, and that's
+ why scm_leave_guile/scm_enter_guile are not exported in the API.
+
+ Ken writes:
+
+ Consider this sequence:
+
+ Function foo, called in Guile mode, calls suspend (maybe indirectly
+ through scm_leave_guile), which does this:
+
+ // record top of stack for the GC
+ t->top = SCM_STACK_PTR (&t); // just takes address of automatic
+ var 't'
+ // save registers.
+ SCM_FLUSH_REGISTER_WINDOWS; // sparc only
+ setjmp (t->regs); // here's most of the magic
+
+ ... and returns.
+
+ Function foo has a SCM value X, a handle on a non-immediate object, in
+ a caller-saved register R, and it's the only reference to the object
+ currently.
+
+ The compiler wants to use R in suspend, so it pushes the current
+ value, X, into a stack slot which will be reloaded on exit from
+ suspend; then it loads stuff into R and goes about its business. The
+ setjmp call saves (some of) the current registers, including R, which
+ no longer contains X. (This isn't a problem for a normal
+ setjmp/longjmp situation, where longjmp would be called before
+ setjmp's caller returns; the old value for X would be loaded back from
+ the stack after the longjmp, before the function returned.)
+
+ So, suspend returns, loading X back into R (and invalidating the jump
+ buffer) in the process. The caller foo then goes off and calls a
+ bunch of other functions out of Guile mode, occasionally storing X on
+ the stack again, but, say, much deeper on the stack than suspend's
+ stack frame went, and the stack slot where suspend had written X has
+ long since been overwritten with other values.
+
+ Okay, nothing actively broken so far. Now, let garbage collection
+ run, triggered by another thread.
+
+ The thread calling foo is out of Guile mode at the time, so the
+ garbage collector just scans a range of stack addresses. Too bad that
+ X isn't stored there. So the pointed-to storage goes onto the free
+ list, and I think you can see where things go from there.
+
+ Is there anything I'm missing that'll prevent this scenario from
+ happening? I mean, aside from, "well, suspend and scm_leave_guile
+ don't have many local variables, so they probably won't need to save
+ any registers on most systems, so we hope everything will wind up in
+ the jump buffer and we'll just get away with it"?
+
+ (And, going the other direction, if scm_leave_guile and suspend push
+ the stack pointer over onto a new page, and foo doesn't make further
+ function calls and thus the stack pointer no longer includes that
+ page, are we guaranteed that the kernel cannot release the now-unused
+ stack page that contains the top-of-stack pointer we just saved? I
+ don't know if any OS actually does that. If it does, we could get
+ faults in garbage collection.)
+
+ I don't think scm_without_guile has to have this problem, as it gets
+ more control over the stack handling -- but it should call setjmp
+ itself. I'd probably try something like:
+
+ // record top of stack for the GC
+ t->top = SCM_STACK_PTR (&t);
+ // save registers.
+ SCM_FLUSH_REGISTER_WINDOWS;
+ setjmp (t->regs);
+ res = func(data);
+ scm_enter_guile (t);
+
+ ... though even that's making some assumptions about the stack
+ ordering of local variables versus caller-saved registers.
+
+ For something like scm_leave_guile to work, I don't think it can just
+ rely on invalidated jump buffers. A valid jump buffer, and a handle
+ on the stack state at the point when the jump buffer was initialized,
+ together, would work fine, but I think then we're talking about macros
+ invoking setjmp in the caller's stack frame, and requiring that the
+ caller of scm_leave_guile also call scm_enter_guile before returning,
+ kind of like pthread_cleanup_push/pop calls that have to be paired up
+ in a function. (In fact, the pthread ones have to be paired up
+ syntactically, as if they might expand to a compound statement
+ incorporating the user's code, and invoking a compiler's
+ exception-handling primitives. Which might be something to think
+ about for cases where Guile is used with C++ exceptions or
+ pthread_cancel.)
+*/
+
+scm_i_pthread_key_t scm_i_thread_key;
static void
-restart_stack (void *base)
+resume (scm_i_thread *t)
{
- scm_dynwinds = SCM_EOL;
- SCM_DYNENV (scm_rootcont) = SCM_EOL;
- SCM_THROW_VALUE (scm_rootcont) = SCM_EOL;
- SCM_DFRAME (scm_rootcont) = scm_last_debug_frame = 0;
- SCM_BASE (scm_rootcont) = base;
+ t->top = NULL;
+ if (t->clear_freelists_p)
+ {
+ *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
+ *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
+ t->clear_freelists_p = 0;
+ }
}
+typedef void* scm_t_guile_ticket;
+
static void
-start_stack (void *base)
+scm_enter_guile (scm_t_guile_ticket ticket)
+{
+ scm_i_thread *t = (scm_i_thread *)ticket;
+ if (t)
+ {
+ scm_i_pthread_mutex_lock (&t->heap_mutex);
+ resume (t);
+ }
+}
+
+static scm_i_thread *
+suspend (void)
{
- scm_stack_base = base;
- scm_root->fluids = scm_i_make_initial_fluids ();
+ scm_i_thread *t = SCM_I_CURRENT_THREAD;
- /* Create an object to hold the root continuation.
- */
- {
- scm_t_contregs *contregs = scm_gc_malloc (sizeof (scm_t_contregs),
- "continuation");
- contregs->num_stack_items = 0;
- contregs->seq = 0;
- SCM_NEWSMOB (scm_rootcont, scm_tc16_continuation, contregs);
- }
+ /* record top of stack for the GC */
+ t->top = SCM_STACK_PTR (&t);
+ /* save registers. */
+ SCM_FLUSH_REGISTER_WINDOWS;
+ setjmp (t->regs);
+ return t;
+}
- /* The remainder of stack initialization is factored out to another
- * function so that if this stack is ever exitted, it can be
- * re-entered using restart_stack. */
- restart_stack (base);
+static scm_t_guile_ticket
+scm_leave_guile ()
+{
+ scm_i_thread *t = suspend ();
+ scm_i_pthread_mutex_unlock (&t->heap_mutex);
+ return (scm_t_guile_ticket) t;
}
-static SCM scm_i_root_root;
+static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
+static scm_i_thread *all_threads = NULL;
+static int thread_count;
+
+static SCM scm_i_default_dynamic_state;
+/* Perform first stage of thread initialisation, in non-guile mode.
+ */
static void
guilify_self_1 (SCM_STACKITEM *base)
{
- scm_thread *t = malloc (sizeof (scm_thread));
+ scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");
- t->pthread = pthread_self ();
+ t->pthread = scm_i_pthread_self ();
t->handle = SCM_BOOL_F;
- t->root = SCM_BOOL_F;
t->result = SCM_BOOL_F;
+ t->join_queue = SCM_EOL;
+ t->dynamic_state = SCM_BOOL_F;
+ t->dynwinds = SCM_EOL;
+ t->active_asyncs = SCM_EOL;
+ t->block_asyncs = 1;
+ t->pending_asyncs = 1;
+ t->last_debug_frame = NULL;
t->base = base;
- pthread_cond_init (&t->sleep_cond, NULL);
- pthread_mutex_init (&t->heap_mutex, NULL);
+ t->continuation_root = SCM_EOL;
+ t->continuation_base = base;
+ scm_i_pthread_cond_init (&t->sleep_cond, NULL);
+ t->sleep_mutex = NULL;
+ t->sleep_object = SCM_BOOL_F;
+ t->sleep_fd = -1;
+ /* XXX - check for errors. */
+ pipe (t->sleep_pipe);
+ scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
t->clear_freelists_p = 0;
+ t->gc_running_p = 0;
+ t->current_mark_stack_ptr = NULL;
+ t->current_mark_stack_limit = NULL;
t->exited = 0;
t->freelist = SCM_EOL;
SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
- pthread_setspecific (scm_i_thread_key, t);
+ scm_i_pthread_setspecific (scm_i_thread_key, t);
- pthread_mutex_lock (&t->heap_mutex);
+ scm_i_pthread_mutex_lock (&t->heap_mutex);
- pthread_mutex_lock (&thread_admin_mutex);
+ scm_i_pthread_mutex_lock (&thread_admin_mutex);
t->next_thread = all_threads;
all_threads = t;
thread_count++;
- pthread_mutex_unlock (&thread_admin_mutex);
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
}
+/* Perform second stage of thread initialisation, in guile mode.
+ */
static void
guilify_self_2 (SCM parent)
{
- scm_thread *t = SCM_CURRENT_THREAD;
+ scm_i_thread *t = SCM_I_CURRENT_THREAD;
SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
- scm_gc_register_collectable_memory (t, sizeof (scm_thread), "thread");
- t->root = scm_make_root (SCM_BOOL_F);
- scm_set_root (SCM_ROOT_STATE (t->root));
- start_stack (t->base);
- if (SCM_ROOTP (parent))
- {
- scm_root_state *thread_root = SCM_ROOT_STATE (t->root);
- scm_root_state *parent_root = SCM_ROOT_STATE (parent);
-
- thread_root->cur_inp = parent_root->cur_inp;
- thread_root->cur_outp = parent_root->cur_outp;
- thread_root->cur_errp = parent_root->cur_errp;
- thread_root->fluids = parent_root->fluids;
- scm_i_copy_fluids (thread_root);
- }
+ t->continuation_root = scm_cons (t->handle, SCM_EOL);
+ t->continuation_base = t->base;
+
+ if (scm_is_true (parent))
+ t->dynamic_state = scm_make_dynamic_state (parent);
+ else
+ t->dynamic_state = scm_i_make_initial_dynamic_state ();
+
+ t->join_queue = make_queue ();
+ t->block_asyncs = 0;
+}
+
+/* Perform thread tear-down, in guile mode.
+ */
+static void *
+do_thread_exit (void *v)
+{
+ scm_i_thread *t = (scm_i_thread *)v;
+
+ scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
+
+ t->exited = 1;
+ close (t->sleep_pipe[0]);
+ close (t->sleep_pipe[1]);
+ while (scm_is_true (unblock_from_queue (t->join_queue)))
+ ;
+
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+ return NULL;
}
static void
on_thread_exit (void *v)
{
- scm_thread *t = (scm_thread *)v, **tp;
+ scm_i_thread *t = (scm_i_thread *)v, **tp;
- pthread_mutex_lock (&thread_admin_mutex);
- t->exited = 1;
+ scm_i_pthread_setspecific (scm_i_thread_key, v);
+
+ /* Unblocking the joining threads needs to happen in guile mode
+ since the queue is a SCM data structure.
+ */
+ scm_with_guile (do_thread_exit, v);
+
+ /* Removing ourself from the list of all threads needs to happen in
+ non-guile mode since all SCM values on our stack become
+ unprotected once we are no longer in the list.
+ */
+ scm_leave_guile ();
+ scm_i_pthread_mutex_lock (&thread_admin_mutex);
for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
if (*tp == t)
{
break;
}
thread_count--;
- pthread_mutex_unlock (&thread_admin_mutex);
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+
+ scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
+static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
+
static void
+init_thread_key (void)
+{
+ scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
+}
+
+/* Perform any initializations necessary to bring the current thread
+ into guile mode, initializing Guile itself, if necessary.
+
+ BASE is the stack base to use with GC.
+
+ PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
+ which case the default dynamic state is used.
+
+ Return zero when the thread was in guile mode already; otherwise
+ return 1.
+*/
+
+static int
scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
{
- scm_thread *t;
+ scm_i_thread *t;
+
+ scm_i_pthread_once (&init_thread_key_once, init_thread_key);
- pthread_mutex_lock (&scm_i_init_mutex);
- if (scm_initialized_p == 0)
+ if ((t = SCM_I_CURRENT_THREAD) == NULL)
{
- /* First thread ever to enter Guile. Run the full
- initialization.
- */
- scm_i_init_guile (base);
+ /* This thread has not been guilified yet.
+ */
+
+ scm_i_pthread_mutex_lock (&scm_i_init_mutex);
+ if (scm_initialized_p == 0)
+ {
+ /* First thread ever to enter Guile. Run the full
+ initialization.
+ */
+ scm_i_init_guile (base);
+ scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
+ }
+ else
+ {
+ /* Guile is already initialized, but this thread enters it for
+ the first time. Only initialize this thread.
+ */
+ scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
+ guilify_self_1 (base);
+ guilify_self_2 (parent);
+ }
+ return 1;
}
- else if ((t = SCM_CURRENT_THREAD) == NULL)
+ else if (t->top)
{
- /* Guile is already initialized, but this thread enters it for
- the first time. Only initialize this thread.
- */
- guilify_self_1 (base);
- guilify_self_2 (parent);
+ /* This thread is already guilified but not in guile mode, just
+ resume it.
+
+ XXX - base might be lower than when this thread was first
+ guilified.
+ */
+ scm_enter_guile ((scm_t_guile_ticket) t);
+ return 1;
}
else
{
- /* This thread is already guilified, just resume it.
- */
- scm_i_enter_guile (t);
+ /* Thread is already in guile mode. Nothing to do.
+ */
+ return 0;
}
- pthread_mutex_unlock (&scm_i_init_mutex);
}
-extern void *__libc_stack_end;
+#if SCM_USE_PTHREAD_THREADS
+/* pthread_getattr_np not available on MacOS X and Solaris 10. */
+#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
+
+#define HAVE_GET_THREAD_STACK_BASE
static SCM_STACKITEM *
get_thread_stack_base ()
pthread_attr_t attr;
void *start, *end;
size_t size;
- int res;
-
- /* XXX - pthread_getattr_np does not seem to work for the main
- thread, but we can use __libc_stack_end in that case.
- */
pthread_getattr_np (pthread_self (), &attr);
pthread_attr_getstack (&attr, &start, &size);
end = (char *)start + size;
+ /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
+ for the main thread, but we can use scm_get_stack_base in that
+ case.
+ */
+
+#ifndef PTHREAD_ATTR_GETSTACK_WORKS
if ((void *)&attr < start || (void *)&attr >= end)
- return __libc_stack_end;
+ return scm_get_stack_base ();
else
+#endif
{
#if SCM_STACK_GROWS_UP
return start;
}
}
-void
-scm_init_guile ()
-{
- scm_i_init_thread_for_guile (get_thread_stack_base (), scm_i_root_root);
-}
+#endif /* HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP */
-void
-scm_enter_guile ()
+#else /* !SCM_USE_PTHREAD_THREADS */
+
+#define HAVE_GET_THREAD_STACK_BASE
+
+static SCM_STACKITEM *
+get_thread_stack_base ()
{
- SCM_STACKITEM base_item;
- scm_i_init_thread_for_guile (&base_item, scm_i_root_root);
+ return scm_get_stack_base ();
}
+#endif /* !SCM_USE_PTHREAD_THREADS */
+
+#ifdef HAVE_GET_THREAD_STACK_BASE
+
void
-scm_leave_guile ()
+scm_init_guile ()
{
- scm_i_leave_guile ();
+ scm_i_init_thread_for_guile (get_thread_stack_base (),
+ scm_i_default_dynamic_state);
}
+#endif
+
void *
scm_with_guile (void *(*func)(void *), void *data)
{
- return scm_i_with_guile_and_parent (func, data, scm_i_root_root);
+ return scm_i_with_guile_and_parent (func, data,
+ scm_i_default_dynamic_state);
}
void *
SCM parent)
{
void *res;
+ int really_entered;
SCM_STACKITEM base_item;
- scm_i_init_thread_for_guile (&base_item, parent);
- res = func (data);
- scm_i_leave_guile ();
+ really_entered = scm_i_init_thread_for_guile (&base_item, parent);
+ res = scm_c_with_continuation_barrier (func, data);
+ if (really_entered)
+ scm_leave_guile ();
return res;
}
scm_without_guile (void *(*func)(void *), void *data)
{
void *res;
- scm_thread *t;
- t = scm_i_leave_guile ();
+ scm_t_guile_ticket t;
+ t = scm_leave_guile ();
res = func (data);
- scm_i_enter_guile (t);
+ scm_enter_guile (t);
return res;
}
SCM thunk;
SCM handler;
SCM thread;
- pthread_mutex_t mutex;
- pthread_cond_t cond;
+ scm_i_pthread_mutex_t mutex;
+ scm_i_pthread_cond_t cond;
} launch_data;
static void *
{
launch_data *data = (launch_data *)d;
SCM thunk = data->thunk, handler = data->handler;
- scm_thread *t;
+ scm_i_thread *t;
- t = SCM_CURRENT_THREAD;
+ t = SCM_I_CURRENT_THREAD;
- pthread_mutex_lock (&data->mutex);
+ scm_i_scm_pthread_mutex_lock (&data->mutex);
data->thread = scm_current_thread ();
- pthread_cond_signal (&data->cond);
- pthread_mutex_unlock (&data->mutex);
+ scm_i_pthread_cond_signal (&data->cond);
+ scm_i_pthread_mutex_unlock (&data->mutex);
- t->result = scm_catch (SCM_BOOL_T, thunk, handler);
-
- t->exited = 1;
- pthread_detach (t->pthread);
+ if (SCM_UNBNDP (handler))
+ t->result = scm_call_0 (thunk);
+ else
+ t->result = scm_catch (SCM_BOOL_T, thunk, handler);
return 0;
}
launch_thread (void *d)
{
launch_data *data = (launch_data *)d;
- return scm_i_with_guile_and_parent (really_launch, d, data->parent);
+ scm_i_pthread_detach (scm_i_pthread_self ());
+ scm_i_with_guile_and_parent (really_launch, d, data->parent);
+ return NULL;
}
-SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 2, 0, 0,
+SCM_DEFINE (scm_call_with_new_thread, "call-with-new-thread", 1, 1, 0,
(SCM thunk, SCM handler),
-"Evaluate @code{(@var{thunk})} in a new thread, and new dynamic context, "
-"returning a new thread object representing the thread. "
-"If an error occurs during evaluation, call error-thunk, passing it an "
-"error code describing the condition. "
-"If this happens, the error-thunk is called outside the scope of the new "
-"root -- it is called in the same dynamic context in which "
-"with-new-thread was evaluated, but not in the callers thread. "
-"All the evaluation rules for dynamic roots apply to threads.")
+ "Call @code{thunk} in a new thread and with a new dynamic state,\n"
+ "returning a new thread object representing the thread. The procedure\n"
+ "@var{thunk} is called via @code{with-continuation-barrier}.\n"
+ "\n"
+ "When @var{handler} is specified, then @var{thunk} is called from\n"
+ "within a @code{catch} with tag @code{#t} that has @var{handler} as its\n"
+ "handler. This catch is established inside the continuation barrier.\n"
+ "\n"
+ "Once @var{thunk} or @var{handler} returns, the return value is made\n"
+ "the @emph{exit value} of the thread and the thread is terminated.")
#define FUNC_NAME s_scm_call_with_new_thread
{
launch_data data;
- pthread_t id;
+ scm_i_pthread_t id;
+ int err;
SCM_ASSERT (scm_is_true (scm_thunk_p (thunk)), thunk, SCM_ARG1, FUNC_NAME);
- SCM_ASSERT (scm_is_true (scm_procedure_p (handler)), handler, SCM_ARG2,
- FUNC_NAME);
+ SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
+ handler, SCM_ARG2, FUNC_NAME);
- data.parent = scm_root->handle;
+ data.parent = scm_current_dynamic_state ();
data.thunk = thunk;
data.handler = handler;
data.thread = SCM_BOOL_F;
- pthread_mutex_init (&data.mutex, NULL);
- pthread_cond_init (&data.cond, NULL);
+ scm_i_pthread_mutex_init (&data.mutex, NULL);
+ scm_i_pthread_cond_init (&data.cond, NULL);
- pthread_mutex_lock (&data.mutex);
- if (pthread_create (&id, NULL, launch_thread, &data))
+ scm_i_scm_pthread_mutex_lock (&data.mutex);
+ err = scm_i_pthread_create (&id, NULL, launch_thread, &data);
+ if (err)
{
- pthread_mutex_unlock (&data.mutex);
- SCM_SYSERROR;
+ scm_i_pthread_mutex_unlock (&data.mutex);
+ errno = err;
+ scm_syserror (NULL);
}
- pthread_cond_wait (&data.cond, &data.mutex);
- pthread_mutex_unlock (&data.mutex);
+ scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
+ scm_i_pthread_mutex_unlock (&data.mutex);
return data.thread;
}
scm_t_catch_handler handler;
void *handler_data;
SCM thread;
- pthread_mutex_t mutex;
- pthread_cond_t cond;
+ scm_i_pthread_mutex_t mutex;
+ scm_i_pthread_cond_t cond;
} spawn_data;
static void *
void *body_data = data->body_data;
scm_t_catch_handler handler = data->handler;
void *handler_data = data->handler_data;
- scm_thread *t = SCM_CURRENT_THREAD;
+ scm_i_thread *t = SCM_I_CURRENT_THREAD;
- pthread_mutex_lock (&data->mutex);
+ scm_i_scm_pthread_mutex_lock (&data->mutex);
data->thread = scm_current_thread ();
- pthread_cond_signal (&data->cond);
- pthread_mutex_unlock (&data->mutex);
+ scm_i_pthread_cond_signal (&data->cond);
+ scm_i_pthread_mutex_unlock (&data->mutex);
- t->result = scm_internal_catch (SCM_BOOL_T,
- body, body_data,
- handler, handler_data);
-
- t->exited = 1;
- pthread_detach (t->pthread);
+ if (handler == NULL)
+ t->result = body (body_data);
+ else
+ t->result = scm_internal_catch (SCM_BOOL_T,
+ body, body_data,
+ handler, handler_data);
return 0;
}
spawn_thread (void *d)
{
spawn_data *data = (spawn_data *)d;
- return scm_i_with_guile_and_parent (really_spawn, d, data->parent);
+ scm_i_pthread_detach (scm_i_pthread_self ());
+ scm_i_with_guile_and_parent (really_spawn, d, data->parent);
+ return NULL;
}
SCM
scm_t_catch_handler handler, void *handler_data)
{
spawn_data data;
- pthread_t id;
+ scm_i_pthread_t id;
+ int err;
- data.parent = scm_root->handle;
+ data.parent = scm_current_dynamic_state ();
data.body = body;
data.body_data = body_data;
data.handler = handler;
data.handler_data = handler_data;
data.thread = SCM_BOOL_F;
- pthread_mutex_init (&data.mutex, NULL);
- pthread_cond_init (&data.cond, NULL);
+ scm_i_pthread_mutex_init (&data.mutex, NULL);
+ scm_i_pthread_cond_init (&data.cond, NULL);
- pthread_mutex_lock (&data.mutex);
- if (pthread_create (&id, NULL, spawn_thread, &data))
+ scm_i_scm_pthread_mutex_lock (&data.mutex);
+ err = scm_i_pthread_create (&id, NULL, spawn_thread, &data);
+ if (err)
{
- pthread_mutex_unlock (&data.mutex);
+ scm_i_pthread_mutex_unlock (&data.mutex);
+ errno = err;
scm_syserror (NULL);
}
- pthread_cond_wait (&data.cond, &data.mutex);
- pthread_mutex_unlock (&data.mutex);
+ scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
+ scm_i_pthread_mutex_unlock (&data.mutex);
return data.thread;
}
"Move the calling thread to the end of the scheduling queue.")
#define FUNC_NAME s_scm_yield
{
- return scm_from_bool (sched_yield ());
+ return scm_from_bool (scm_i_sched_yield ());
}
#undef FUNC_NAME
"terminates, unless the target @var{thread} has already terminated. ")
#define FUNC_NAME s_scm_join_thread
{
- scm_thread *t;
+ scm_i_thread *t;
SCM res;
SCM_VALIDATE_THREAD (1, thread);
- if (scm_is_eq (cur_thread, thread))
+ if (scm_is_eq (scm_current_thread (), thread))
SCM_MISC_ERROR ("can not join the current thread", SCM_EOL);
- t = SCM_THREAD_DATA (thread);
+ scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
+
+ t = SCM_I_THREAD_DATA (thread);
if (!t->exited)
{
- scm_thread *c;
- c = scm_i_leave_guile ();
- pthread_join (t->pthread, 0);
- scm_i_enter_guile (c);
+ while (1)
+ {
+ block_self (t->join_queue, thread, &thread_admin_mutex, NULL);
+ if (t->exited)
+ break;
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+ SCM_TICK;
+ scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
+ }
}
res = t->result;
- t->result = SCM_BOOL_F;
+
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
return res;
}
#undef FUNC_NAME
/* We implement our own mutex type since we want them to be 'fair', we
want to do fancy things while waiting for them (like running
- asyncs) and we want to support waiting on many things at once.
- Also, we might add things that are nice for debugging.
+ asyncs) and we might want to add things that are nice for
+ debugging.
*/
typedef struct {
- pthread_mutex_t lock;
+ scm_i_pthread_mutex_t lock;
SCM owner;
int level; /* how much the owner owns us.
< 0 for non-recursive mutexes */
SCM waiting; /* the threads waiting for this mutex. */
} fat_mutex;
-static SCM
-fat_mutex_mark (SCM mx)
-{
- fat_mutex *m = SCM_MUTEX_DATA (mx);
- scm_gc_mark (m->owner);
- return m->waiting;
-}
+#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
+#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
+
static size_t
fat_mutex_free (SCM mx)
{
fat_mutex *m = SCM_MUTEX_DATA (mx);
- pthread_mutex_destroy (&m->lock);
+ scm_i_pthread_mutex_destroy (&m->lock);
scm_gc_free (m, sizeof (fat_mutex), "mutex");
return 0;
}
scm_puts (">", port);
return 1;
}
-
+
static SCM
make_fat_mutex (int recursive)
{
SCM mx;
m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
- pthread_mutex_init (&m->lock, NULL);
+ scm_i_pthread_mutex_init (&m->lock, NULL);
m->owner = SCM_BOOL_F;
m->level = recursive? 0 : -1;
m->waiting = SCM_EOL;
}
#undef FUNC_NAME
-static void
-fat_mutex_lock (fat_mutex *m)
+static char *
+fat_mutex_lock (SCM mutex)
{
- pthread_mutex_lock (&m->lock);
-
+ fat_mutex *m = SCM_MUTEX_DATA (mutex);
+ SCM thread = scm_current_thread ();
+ char *msg = NULL;
+
+ scm_i_scm_pthread_mutex_lock (&m->lock);
if (scm_is_false (m->owner))
- m->owner = cur_thread;
- else if (scm_is_eq (m->owner, cur_thread))
+ m->owner = thread;
+ else if (scm_is_eq (m->owner, thread))
{
if (m->level >= 0)
m->level++;
else
- {
- pthread_mutex_unlock (&m->lock);
- scm_misc_error (NULL, "mutex already locked by current thread",
- SCM_EOL);
- }
+ msg = "mutex already locked by current thread";
}
else
{
while (1)
{
- SCM c = enqueue (m->waiting, cur_thread);
- int err;
- /* Note: It's important that m->lock is never locked for
- any longer amount of time since that could prevent GC */
- pthread_mutex_unlock (&m->lock);
- err = block ();
- if (scm_is_eq (m->owner, cur_thread))
- return;
- pthread_mutex_lock (&m->lock);
- remqueue (m->waiting, c);
- pthread_mutex_unlock (&m->lock);
- if (err)
- {
- errno = err;
- scm_syserror (NULL);
- }
- SCM_ASYNC_TICK;
- pthread_mutex_lock (&m->lock);
+ block_self (m->waiting, mutex, &m->lock, NULL);
+ if (scm_is_eq (m->owner, thread))
+ break;
+ scm_i_pthread_mutex_unlock (&m->lock);
+ SCM_TICK;
+ scm_i_scm_pthread_mutex_lock (&m->lock);
}
}
- pthread_mutex_unlock (&m->lock);
+ scm_i_pthread_mutex_unlock (&m->lock);
+ return msg;
}
SCM_DEFINE (scm_lock_mutex, "lock-mutex", 1, 0, 0,
"thread. That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex
{
+ char *msg;
+
SCM_VALIDATE_MUTEX (1, mx);
-
- fat_mutex_lock (SCM_MUTEX_DATA (mx));
+ msg = fat_mutex_lock (mx);
+ if (msg)
+ scm_misc_error (NULL, msg, SCM_EOL);
return SCM_BOOL_T;
}
#undef FUNC_NAME
-static int
-fat_mutex_trylock (fat_mutex *m)
+void
+scm_dynwind_lock_mutex (SCM mutex)
{
- pthread_mutex_lock (&m->lock);
+ scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
+ SCM_F_WIND_EXPLICITLY);
+ scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
+ SCM_F_WIND_EXPLICITLY);
+}
+
+static char *
+fat_mutex_trylock (fat_mutex *m, int *resp)
+{
+ char *msg = NULL;
+ SCM thread = scm_current_thread ();
+
+ *resp = 1;
+ scm_i_pthread_mutex_lock (&m->lock);
if (scm_is_false (m->owner))
- m->owner = cur_thread;
- else if (scm_is_eq (m->owner, cur_thread))
+ m->owner = thread;
+ else if (scm_is_eq (m->owner, thread))
{
if (m->level >= 0)
m->level++;
else
- {
- pthread_mutex_unlock (&m->lock);
- scm_misc_error (NULL, "mutex already locked by current thread",
- SCM_EOL);
- }
+ msg = "mutex already locked by current thread";
}
else
- {
- pthread_mutex_unlock (&m->lock);
- return 0;
- }
- pthread_mutex_unlock (&m->lock);
- return 1;
+ *resp = 0;
+ scm_i_pthread_mutex_unlock (&m->lock);
+ return msg;
}
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
- (SCM mx),
+ (SCM mutex),
"Try to lock @var{mutex}. If the mutex is already locked by someone "
"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
{
- SCM_VALIDATE_MUTEX (1, mx);
+ char *msg;
+ int res;
+
+ SCM_VALIDATE_MUTEX (1, mutex);
- return scm_from_bool (fat_mutex_trylock (SCM_MUTEX_DATA (mx)));
+ msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
+ if (msg)
+ scm_misc_error (NULL, msg, SCM_EOL);
+ return scm_from_bool (res);
}
#undef FUNC_NAME
-static void
+static char *
fat_mutex_unlock (fat_mutex *m)
{
- pthread_mutex_lock (&m->lock);
- if (!scm_is_eq (m->owner, cur_thread))
+ char *msg = NULL;
+
+ scm_i_scm_pthread_mutex_lock (&m->lock);
+ if (!scm_is_eq (m->owner, scm_current_thread ()))
{
- const char *msg;
if (scm_is_false (m->owner))
msg = "mutex not locked";
else
msg = "mutex not locked by current thread";
-
- pthread_mutex_unlock (&m->lock);
- scm_misc_error (NULL, msg, SCM_EOL);
}
else if (m->level > 0)
m->level--;
else
- {
- SCM next = dequeue (m->waiting);
- if (scm_is_true (next))
- {
- m->owner = next;
- unblock (SCM_THREAD_DATA (next));
- }
- else
- m->owner = SCM_BOOL_F;
- }
- pthread_mutex_unlock (&m->lock);
+ m->owner = unblock_from_queue (m->waiting);
+ scm_i_pthread_mutex_unlock (&m->lock);
+
+ return msg;
}
SCM_DEFINE (scm_unlock_mutex, "unlock-mutex", 1, 0, 0,
"@code{unlock-mutex} will actually unlock the mutex. ")
#define FUNC_NAME s_scm_unlock_mutex
{
+ char *msg;
SCM_VALIDATE_MUTEX (1, mx);
- fat_mutex_unlock (SCM_MUTEX_DATA (mx));
+ msg = fat_mutex_unlock (SCM_MUTEX_DATA (mx));
+ if (msg)
+ scm_misc_error (NULL, msg, SCM_EOL);
return SCM_BOOL_T;
}
#undef FUNC_NAME
-/*** Fat condition variables */
+#if 0
-/* Like mutexes, we implement our own condition variables using the
- primitives above.
-*/
+SCM_DEFINE (scm_mutex_owner, "mutex-owner", 1, 0, 0,
+ (SCM mx),
+ "Return the thread owning @var{mx}, or @code{#f}.")
+#define FUNC_NAME s_scm_mutex_owner
+{
+ SCM_VALIDATE_MUTEX (1, mx);
+ return (SCM_MUTEX_DATA(mx))->owner;
+}
+#undef FUNC_NAME
+
+SCM_DEFINE (scm_mutex_level, "mutex-level", 1, 0, 0,
+ (SCM mx),
+ "Return the lock level of a recursive mutex, or -1\n"
+ "for a standard mutex.")
+#define FUNC_NAME s_scm_mutex_level
+{
+ SCM_VALIDATE_MUTEX (1, mx);
+ return scm_from_int (SCM_MUTEX_DATA(mx)->level);
+}
+#undef FUNC_NAME
+
+#endif
+
+/*** Fat condition variables */
typedef struct {
- pthread_mutex_t lock;
+ scm_i_pthread_mutex_t lock;
SCM waiting; /* the threads waiting for this condition. */
} fat_cond;
-static SCM
-fat_cond_mark (SCM cv)
-{
- fat_cond *c = SCM_CONDVAR_DATA (cv);
- return c->waiting;
-}
+#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
+#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
static size_t
fat_cond_free (SCM mx)
{
fat_cond *c = SCM_CONDVAR_DATA (mx);
- pthread_mutex_destroy (&c->lock);
+ scm_i_pthread_mutex_destroy (&c->lock);
scm_gc_free (c, sizeof (fat_cond), "condition-variable");
return 0;
}
SCM cv;
c = scm_gc_malloc (sizeof (fat_cond), "condition variable");
- pthread_mutex_init (&c->lock, 0);
+ scm_i_pthread_mutex_init (&c->lock, 0);
c->waiting = SCM_EOL;
SCM_NEWSMOB (cv, scm_tc16_condvar, (scm_t_bits) c);
c->waiting = make_queue ();
}
#undef FUNC_NAME
-static void
-fat_cond_timedwait (fat_cond *c,
- fat_mutex *m,
+static int
+fat_cond_timedwait (SCM cond, SCM mutex,
const scm_t_timespec *waittime)
{
- int err;
- pthread_mutex_lock (&c->lock);
+ scm_i_thread *t = SCM_I_CURRENT_THREAD;
+ fat_cond *c = SCM_CONDVAR_DATA (cond);
+ fat_mutex *m = SCM_MUTEX_DATA (mutex);
+ const char *msg;
+ int err = 0;
while (1)
{
- enqueue (c->waiting, cur_thread);
- pthread_mutex_unlock (&c->lock);
- fat_mutex_unlock (m); /*fixme* - not thread safe */
- if (waittime == NULL)
- err = block ();
+ scm_i_scm_pthread_mutex_lock (&c->lock);
+ msg = fat_mutex_unlock (m);
+ t->block_asyncs++;
+ if (msg == NULL)
+ {
+ err = block_self (c->waiting, cond, &c->lock, waittime);
+ scm_i_pthread_mutex_unlock (&c->lock);
+ fat_mutex_lock (mutex);
+ }
else
- err = timed_block (waittime);
- fat_mutex_lock (m);
- if (err)
+ scm_i_pthread_mutex_unlock (&c->lock);
+ t->block_asyncs--;
+ scm_async_click ();
+
+ if (msg)
+ scm_misc_error (NULL, msg, SCM_EOL);
+
+ scm_remember_upto_here_2 (cond, mutex);
+
+ if (err == 0)
+ return 1;
+ if (err == ETIMEDOUT)
+ return 0;
+ if (err != EINTR)
{
errno = err;
scm_syserror (NULL);
}
- /* XXX - check whether we have been signalled. */
- break;
}
}
"is returned. ")
#define FUNC_NAME s_scm_timed_wait_condition_variable
{
- scm_t_timespec waittime;
+ scm_t_timespec waittime, *waitptr = NULL;
SCM_VALIDATE_CONDVAR (1, cv);
SCM_VALIDATE_MUTEX (2, mx);
waittime.tv_sec = scm_to_ulong (t);
waittime.tv_nsec = 0;
}
+ waitptr = &waittime;
}
- fat_cond_timedwait (SCM_CONDVAR_DATA (cv),
- SCM_MUTEX_DATA (mx),
- SCM_UNBNDP (t) ? NULL : &waittime);
- return SCM_BOOL_T;
+ return scm_from_bool (fat_cond_timedwait (cv, mx, waitptr));
}
#undef FUNC_NAME
-static int
+static void
fat_cond_signal (fat_cond *c)
{
- SCM th;
- pthread_mutex_lock (&c->lock);
- if (scm_is_true (th = dequeue (c->waiting)))
- unblock (SCM_THREAD_DATA (th));
- pthread_mutex_unlock (&c->lock);
- return 0;
+ scm_i_scm_pthread_mutex_lock (&c->lock);
+ unblock_from_queue (c->waiting);
+ scm_i_pthread_mutex_unlock (&c->lock);
}
SCM_DEFINE (scm_signal_condition_variable, "signal-condition-variable", 1, 0, 0,
}
#undef FUNC_NAME
-static int
+static void
fat_cond_broadcast (fat_cond *c)
{
- SCM th;
- pthread_mutex_lock (&c->lock);
- while (scm_is_true (th = dequeue (c->waiting)))
- unblock (SCM_THREAD_DATA (th));
- pthread_mutex_unlock (&c->lock);
- return 0;
+ scm_i_scm_pthread_mutex_lock (&c->lock);
+ while (scm_is_true (unblock_from_queue (c->waiting)))
+ ;
+ scm_i_pthread_mutex_unlock (&c->lock);
}
SCM_DEFINE (scm_broadcast_condition_variable, "broadcast-condition-variable", 1, 0, 0,
scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
/ sizeof (SCM_STACKITEM))); \
- bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
- top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
+ bot = (SCM_STACKITEM *) scm_ia64_register_backing_store_base (); \
+ top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
-void
-scm_threads_mark_stacks (void)
-{
- scm_thread *t;
- for (t = all_threads; t; t = t->next_thread)
- {
- /* Check that thread has indeed been suspended.
- */
- assert (t->top);
-
- scm_gc_mark (t->handle);
-#if SCM_STACK_GROWS_UP
- scm_mark_locations (t->base, t->top - t->base);
-#else
- scm_mark_locations (t->top, t->base - t->top);
-#endif
- scm_mark_locations ((SCM_STACKITEM *) t->regs,
- ((size_t) sizeof(t->regs)
- / sizeof (SCM_STACKITEM)));
- }
-
- SCM_MARK_BACKING_STORE ();
-}
/*** Select */
int
-scm_internal_select (int nfds,
- SELECT_TYPE *readfds,
- SELECT_TYPE *writefds,
- SELECT_TYPE *exceptfds,
- struct timeval *timeout)
-{
- int res, eno;
- scm_thread *c = scm_i_leave_guile ();
+scm_std_select (int nfds,
+ SELECT_TYPE *readfds,
+ SELECT_TYPE *writefds,
+ SELECT_TYPE *exceptfds,
+ struct timeval *timeout)
+{
+ fd_set my_readfds;
+ int res, eno, wakeup_fd;
+ scm_i_thread *t = SCM_I_CURRENT_THREAD;
+ scm_t_guile_ticket ticket;
+
+ if (readfds == NULL)
+ {
+ FD_ZERO (&my_readfds);
+ readfds = &my_readfds;
+ }
+
+ while (scm_i_setup_sleep (t, SCM_BOOL_F, NULL, t->sleep_pipe[1]))
+ SCM_TICK;
+
+ wakeup_fd = t->sleep_pipe[0];
+ ticket = scm_leave_guile ();
+ FD_SET (wakeup_fd, readfds);
+ if (wakeup_fd >= nfds)
+ nfds = wakeup_fd+1;
res = select (nfds, readfds, writefds, exceptfds, timeout);
+ t->sleep_fd = -1;
eno = errno;
- scm_i_enter_guile (c);
- SCM_ASYNC_TICK;
+ scm_enter_guile (ticket);
+
+ scm_i_reset_sleep (t);
+
+ if (res > 0 && FD_ISSET (wakeup_fd, readfds))
+ {
+ char dummy;
+ read (wakeup_fd, &dummy, 1);
+ FD_CLR (wakeup_fd, readfds);
+ res -= 1;
+ if (res == 0)
+ {
+ eno = EINTR;
+ res = -1;
+ }
+ }
errno = eno;
return res;
}
-/* Convenience API */
+/* Convenience API for blocking while in guile mode. */
+
+#if SCM_USE_PTHREAD_THREADS
int
-scm_pthread_mutex_lock (pthread_mutex_t *mutex)
+scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
- scm_thread *t = scm_i_leave_guile ();
- int res = pthread_mutex_lock (mutex);
- scm_i_enter_guile (t);
+ scm_t_guile_ticket t = scm_leave_guile ();
+ int res = scm_i_pthread_mutex_lock (mutex);
+ scm_enter_guile (t);
return res;
}
static void
-unlock (void *data)
+do_unlock (void *data)
{
- pthread_mutex_unlock ((pthread_mutex_t *)data);
+ scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
void
-scm_frame_pthread_mutex_lock (pthread_mutex_t *mutex)
+scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
- scm_pthread_mutex_lock (mutex);
- scm_frame_unwind_handler (unlock, mutex, SCM_F_WIND_EXPLICITLY);
+ scm_i_scm_pthread_mutex_lock (mutex);
+ scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
int
-scm_pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)
+scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
- scm_thread *t = scm_i_leave_guile ();
- int res = pthread_cond_wait (cond, mutex);
- scm_i_enter_guile (t);
+ scm_t_guile_ticket t = scm_leave_guile ();
+ int res = scm_i_pthread_cond_wait (cond, mutex);
+ scm_enter_guile (t);
return res;
}
int
-scm_pthread_cond_timedwait (pthread_cond_t *cond,
- pthread_mutex_t *mutex,
+scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
+ scm_i_pthread_mutex_t *mutex,
const scm_t_timespec *wt)
{
- scm_thread *t = scm_i_leave_guile ();
- int res = pthread_cond_timedwait (cond, mutex, wt);
- scm_i_enter_guile (t);
+ scm_t_guile_ticket t = scm_leave_guile ();
+ int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
+ scm_enter_guile (t);
return res;
}
+#endif
+
unsigned long
-scm_thread_usleep (unsigned long usecs)
+scm_std_usleep (unsigned long usecs)
{
struct timeval tv;
tv.tv_usec = usecs % 1000000;
tv.tv_sec = usecs / 1000000;
- scm_internal_select (0, NULL, NULL, NULL, &tv);
- return tv.tv_usec + tv.tv_sec*1000000;
+ scm_std_select (0, NULL, NULL, NULL, &tv);
+ return tv.tv_sec * 1000000 + tv.tv_usec;
}
-unsigned long
-scm_thread_sleep (unsigned long secs)
+unsigned int
+scm_std_sleep (unsigned int secs)
{
struct timeval tv;
tv.tv_usec = 0;
tv.tv_sec = secs;
- scm_internal_select (0, NULL, NULL, NULL, &tv);
+ scm_std_select (0, NULL, NULL, NULL, &tv);
return tv.tv_sec;
}
"Return the thread that called this function.")
#define FUNC_NAME s_scm_current_thread
{
- return cur_thread;
+ return SCM_I_CURRENT_THREAD->handle;
}
#undef FUNC_NAME
of the way GC is done.
*/
int n = thread_count;
- scm_thread *t;
+ scm_i_thread *t;
SCM list = scm_c_make_list (n, SCM_UNSPECIFIED), *l;
- pthread_mutex_lock (&thread_admin_mutex);
+ scm_i_pthread_mutex_lock (&thread_admin_mutex);
l = &list;
for (t = all_threads; t && n > 0; t = t->next_thread)
{
n--;
}
*l = SCM_EOL;
- pthread_mutex_unlock (&thread_admin_mutex);
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
return list;
}
#undef FUNC_NAME
-scm_root_state *
-scm_i_thread_root (SCM thread)
-{
- return SCM_ROOT_STATE ((SCM_CURRENT_THREAD)->root);
-}
-
SCM_DEFINE (scm_thread_exited_p, "thread-exited?", 1, 0, 0,
(SCM thread),
"Return @code{#t} iff @var{thread} has exited.\n")
scm_c_thread_exited_p (SCM thread)
#define FUNC_NAME s_scm_thread_exited_p
{
- scm_thread *t;
+ scm_i_thread *t;
SCM_VALIDATE_THREAD (1, thread);
- t = SCM_THREAD_DATA (thread);
+ t = SCM_I_THREAD_DATA (thread);
return t->exited;
}
#undef FUNC_NAME
-static pthread_cond_t wake_up_cond;
+static scm_i_pthread_cond_t wake_up_cond;
int scm_i_thread_go_to_sleep;
static int threads_initialized_p = 0;
{
if (threads_initialized_p)
{
- scm_thread *t;
+ scm_i_thread *t;
- /* We leave Guile completely before locking the
- thread_admin_mutex. This ensures that other threads can put
- us to sleep while we block on that mutex.
- */
- scm_i_leave_guile ();
- pthread_mutex_lock (&thread_admin_mutex);
- /* Signal all threads to go to sleep */
+ scm_leave_guile ();
+ scm_i_pthread_mutex_lock (&thread_admin_mutex);
+
+ /* Signal all threads to go to sleep
+ */
scm_i_thread_go_to_sleep = 1;
for (t = all_threads; t; t = t->next_thread)
- pthread_mutex_lock (&t->heap_mutex);
+ scm_i_pthread_mutex_lock (&t->heap_mutex);
scm_i_thread_go_to_sleep = 0;
}
}
{
/* thread_admin_mutex is already locked. */
- scm_thread *t;
+ scm_i_thread *t;
for (t = all_threads; t; t = t->next_thread)
- if (t != SCM_CURRENT_THREAD)
+ if (t != SCM_I_CURRENT_THREAD)
t->clear_freelists_p = 1;
}
{
if (threads_initialized_p)
{
- scm_thread *t;
- pthread_cond_broadcast (&wake_up_cond);
+ scm_i_thread *t;
+
+ scm_i_pthread_cond_broadcast (&wake_up_cond);
for (t = all_threads; t; t = t->next_thread)
- pthread_mutex_unlock (&t->heap_mutex);
- pthread_mutex_unlock (&thread_admin_mutex);
- scm_i_enter_guile (SCM_CURRENT_THREAD);
+ scm_i_pthread_mutex_unlock (&t->heap_mutex);
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+ scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
}
}
void
scm_i_thread_sleep_for_gc ()
{
- scm_thread *t;
- t = suspend ();
- pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
+ scm_i_thread *t = suspend ();
+ scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex);
resume (t);
}
-pthread_mutex_t scm_i_critical_section_mutex = PTHREAD_MUTEX_INITIALIZER;
+/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
+ */
+scm_i_pthread_mutex_t scm_i_critical_section_mutex;
+int scm_i_critical_section_level = 0;
+
+static SCM dynwind_critical_section_mutex;
+
+void
+scm_dynwind_critical_section (SCM mutex)
+{
+ if (scm_is_false (mutex))
+ mutex = dynwind_critical_section_mutex;
+ scm_dynwind_lock_mutex (mutex);
+ scm_dynwind_block_asyncs ();
+}
/*** Initialization */
-pthread_key_t scm_i_freelist, scm_i_freelist2;
-pthread_mutex_t scm_i_misc_mutex;
+scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
+scm_i_pthread_mutex_t scm_i_misc_mutex;
+
+#if SCM_USE_PTHREAD_THREADS
+pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
+#endif
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
- pthread_mutex_init (&thread_admin_mutex, NULL);
- pthread_mutex_init (&scm_i_misc_mutex, NULL);
- pthread_cond_init (&wake_up_cond, NULL);
- pthread_mutex_init (&scm_i_critical_section_mutex, NULL);
- pthread_key_create (&scm_i_thread_key, on_thread_exit);
- pthread_key_create (&scm_i_root_key, NULL);
- pthread_key_create (&scm_i_freelist, NULL);
- pthread_key_create (&scm_i_freelist2, NULL);
+#if SCM_USE_PTHREAD_THREADS
+ pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
+ pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
+ PTHREAD_MUTEX_RECURSIVE);
+#endif
+
+ scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
+ scm_i_pthread_mutexattr_recursive);
+ scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
+ scm_i_pthread_cond_init (&wake_up_cond, NULL);
+ scm_i_pthread_key_create (&scm_i_freelist, NULL);
+ scm_i_pthread_key_create (&scm_i_freelist2, NULL);
guilify_self_1 (base);
}
void
scm_init_threads ()
{
- scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_thread));
- scm_set_smob_mark (scm_tc16_thread, thread_mark);
+ scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
scm_set_smob_print (scm_tc16_thread, thread_print);
- scm_set_smob_free (scm_tc16_thread, thread_free);
+ scm_set_smob_free (scm_tc16_thread, thread_free); /* XXX: Could be removed */
scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
- scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
scm_tc16_condvar = scm_make_smob_type ("condition-variable",
sizeof (fat_cond));
- scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
scm_set_smob_free (scm_tc16_condvar, fat_cond_free);
- scm_i_root_root = SCM_BOOL_F;
+ scm_i_default_dynamic_state = SCM_BOOL_F;
guilify_self_2 (SCM_BOOL_F);
threads_initialized_p = 1;
+
+ dynwind_critical_section_mutex =
+ scm_permanent_object (scm_make_recursive_mutex ());
}
void
-scm_init_threads_root_root ()
+scm_init_threads_default_dynamic_state ()
{
- scm_root_state *rr;
-
- scm_i_root_root = scm_permanent_object (scm_make_root (SCM_BOOL_F));
- rr = SCM_ROOT_STATE (scm_i_root_root);
- rr->cur_inp = scm_cur_inp;
- rr->cur_outp = scm_cur_outp;
- rr->cur_errp = scm_cur_errp;
- rr->fluids = scm_root->fluids;
- scm_i_copy_fluids (rr);
+ SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
+ scm_i_default_dynamic_state = scm_permanent_object (state);
}
void
#include "libguile/threads.x"
}
-/* XXX */
-
-void
-scm_init_iselect ()
-{
-}
-
/*
Local Variables:
c-file-style: "gnu"