-/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#define _GNU_SOURCE
+#include "libguile/boehm-gc.h"
#include "libguile/_scm.h"
#if HAVE_UNISTD_H
#endif
#include <stdio.h>
#include <assert.h>
+
+#ifdef HAVE_STRING_H
+#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
+#endif
+
#if HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include "libguile/iselect.h"
#include "libguile/fluids.h"
#include "libguile/continuations.h"
+#include "libguile/gc.h"
#include "libguile/init.h"
+#ifdef __MINGW32__
+#ifndef ETIMEDOUT
+# define ETIMEDOUT WSAETIMEDOUT
+#endif
+# include <fcntl.h>
+# include <process.h>
+# define pipe(fd) _pipe (fd, 256, O_BINARY)
+#endif /* __MINGW32__ */
+
/*** Queues */
/* Make an empty queue data structure.
/*** Thread smob routines */
-static SCM
-thread_mark (SCM obj)
-{
- scm_i_thread *t = SCM_I_THREAD_DATA (obj);
- scm_gc_mark (t->result);
- scm_gc_mark (t->join_queue);
- scm_gc_mark (t->dynwinds);
- scm_gc_mark (t->active_asyncs);
- scm_gc_mark (t->signal_asyncs);
- scm_gc_mark (t->continuation_root);
- return t->dynamic_state;
-}
static int
thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
/* Getting into and out of guile mode.
*/
+/* Ken Raeburn observes that the implementation of suspend and resume
+ (and the things that build on top of them) are very likely not
+ correct (see below). We will need to fix this eventually, and that's
+ why scm_leave_guile/scm_enter_guile are not exported in the API.
+
+ Ken writes:
+
+ Consider this sequence:
+
+ Function foo, called in Guile mode, calls suspend (maybe indirectly
+ through scm_leave_guile), which does this:
+
+ // record top of stack for the GC
+ t->top = SCM_STACK_PTR (&t); // just takes address of automatic
+ var 't'
+ // save registers.
+ SCM_FLUSH_REGISTER_WINDOWS; // sparc only
+ setjmp (t->regs); // here's most of the magic
+
+ ... and returns.
+
+ Function foo has a SCM value X, a handle on a non-immediate object, in
+ a caller-saved register R, and it's the only reference to the object
+ currently.
+
+ The compiler wants to use R in suspend, so it pushes the current
+ value, X, into a stack slot which will be reloaded on exit from
+ suspend; then it loads stuff into R and goes about its business. The
+ setjmp call saves (some of) the current registers, including R, which
+ no longer contains X. (This isn't a problem for a normal
+ setjmp/longjmp situation, where longjmp would be called before
+ setjmp's caller returns; the old value for X would be loaded back from
+ the stack after the longjmp, before the function returned.)
+
+ So, suspend returns, loading X back into R (and invalidating the jump
+ buffer) in the process. The caller foo then goes off and calls a
+ bunch of other functions out of Guile mode, occasionally storing X on
+ the stack again, but, say, much deeper on the stack than suspend's
+ stack frame went, and the stack slot where suspend had written X has
+ long since been overwritten with other values.
+
+ Okay, nothing actively broken so far. Now, let garbage collection
+ run, triggered by another thread.
+
+ The thread calling foo is out of Guile mode at the time, so the
+ garbage collector just scans a range of stack addresses. Too bad that
+ X isn't stored there. So the pointed-to storage goes onto the free
+ list, and I think you can see where things go from there.
+
+ Is there anything I'm missing that'll prevent this scenario from
+ happening? I mean, aside from, "well, suspend and scm_leave_guile
+ don't have many local variables, so they probably won't need to save
+ any registers on most systems, so we hope everything will wind up in
+ the jump buffer and we'll just get away with it"?
+
+ (And, going the other direction, if scm_leave_guile and suspend push
+ the stack pointer over onto a new page, and foo doesn't make further
+ function calls and thus the stack pointer no longer includes that
+ page, are we guaranteed that the kernel cannot release the now-unused
+ stack page that contains the top-of-stack pointer we just saved? I
+ don't know if any OS actually does that. If it does, we could get
+ faults in garbage collection.)
+
+ I don't think scm_without_guile has to have this problem, as it gets
+ more control over the stack handling -- but it should call setjmp
+ itself. I'd probably try something like:
+
+ // record top of stack for the GC
+ t->top = SCM_STACK_PTR (&t);
+ // save registers.
+ SCM_FLUSH_REGISTER_WINDOWS;
+ setjmp (t->regs);
+ res = func(data);
+ scm_enter_guile (t);
+
+ ... though even that's making some assumptions about the stack
+ ordering of local variables versus caller-saved registers.
+
+ For something like scm_leave_guile to work, I don't think it can just
+ rely on invalidated jump buffers. A valid jump buffer, and a handle
+ on the stack state at the point when the jump buffer was initialized,
+ together, would work fine, but I think then we're talking about macros
+ invoking setjmp in the caller's stack frame, and requiring that the
+ caller of scm_leave_guile also call scm_enter_guile before returning,
+ kind of like pthread_cleanup_push/pop calls that have to be paired up
+ in a function. (In fact, the pthread ones have to be paired up
+ syntactically, as if they might expand to a compound statement
+ incorporating the user's code, and invoking a compiler's
+ exception-handling primitives. Which might be something to think
+ about for cases where Guile is used with C++ exceptions or
+ pthread_cancel.)
+*/
+
scm_i_pthread_key_t scm_i_thread_key;
static void
}
}
-void
+typedef void* scm_t_guile_ticket;
+
+static void
scm_enter_guile (scm_t_guile_ticket ticket)
{
scm_i_thread *t = (scm_i_thread *)ticket;
return t;
}
-scm_t_guile_ticket
+static scm_t_guile_ticket
scm_leave_guile ()
{
scm_i_thread *t = suspend ();
static void
guilify_self_1 (SCM_STACKITEM *base)
{
- scm_i_thread *t = malloc (sizeof (scm_i_thread));
+ scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");
t->pthread = scm_i_pthread_self ();
t->handle = SCM_BOOL_F;
t->dynamic_state = SCM_BOOL_F;
t->dynwinds = SCM_EOL;
t->active_asyncs = SCM_EOL;
- t->signal_asyncs = SCM_EOL;
t->block_asyncs = 1;
t->pending_asyncs = 1;
t->last_debug_frame = NULL;
t->base = base;
+ t->continuation_root = SCM_EOL;
t->continuation_base = base;
scm_i_pthread_cond_init (&t->sleep_cond, NULL);
t->sleep_mutex = NULL;
t->sleep_object = SCM_BOOL_F;
t->sleep_fd = -1;
+ /* XXX - check for errors. */
pipe (t->sleep_pipe);
scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
t->clear_freelists_p = 0;
+ t->gc_running_p = 0;
+ t->current_mark_stack_ptr = NULL;
+ t->current_mark_stack_limit = NULL;
t->exited = 0;
t->freelist = SCM_EOL;
scm_i_thread *t = SCM_I_CURRENT_THREAD;
SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
- scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
+
t->continuation_root = scm_cons (t->handle, SCM_EOL);
t->continuation_base = t->base;
static void *
do_thread_exit (void *v)
{
- scm_i_thread *t = (scm_i_thread *)v, **tp;
+ scm_i_thread *t = (scm_i_thread *)v;
scm_i_scm_pthread_mutex_lock (&thread_admin_mutex);
t->exited = 1;
+ close (t->sleep_pipe[0]);
+ close (t->sleep_pipe[1]);
while (scm_is_true (unblock_from_queue (t->join_queue)))
;
-
- for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
- if (*tp == t)
- {
- *tp = t->next_thread;
- break;
- }
- thread_count--;
scm_i_pthread_mutex_unlock (&thread_admin_mutex);
return NULL;
static void
on_thread_exit (void *v)
{
+ scm_i_thread *t = (scm_i_thread *)v, **tp;
+
scm_i_pthread_setspecific (scm_i_thread_key, v);
+
+ /* Unblocking the joining threads needs to happen in guile mode
+ since the queue is a SCM data structure.
+ */
scm_with_guile (do_thread_exit, v);
+
+ /* Removing ourselves from the list of all threads needs to happen in
+ non-guile mode since all SCM values on our stack become
+ unprotected once we are no longer in the list.
+ */
+ scm_leave_guile ();
+ scm_i_pthread_mutex_lock (&thread_admin_mutex);
+ for (tp = &all_threads; *tp; tp = &(*tp)->next_thread)
+ if (*tp == t)
+ {
+ *tp = t->next_thread;
+ break;
+ }
+ thread_count--;
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+
scm_i_pthread_setspecific (scm_i_thread_key, NULL);
}
}
}
-#ifdef HAVE_LIBC_STACK_END
-
-extern void *__libc_stack_end;
-
#if SCM_USE_PTHREAD_THREADS
-#ifdef HAVE_PTHREAD_ATTR_GETSTACK
+/* pthread_getattr_np not available on MacOS X and Solaris 10. */
+#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
#define HAVE_GET_THREAD_STACK_BASE
void *start, *end;
size_t size;
- /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
- for the main thread, but we can use __libc_stack_end in that
- case.
- */
-
pthread_getattr_np (pthread_self (), &attr);
pthread_attr_getstack (&attr, &start, &size);
end = (char *)start + size;
+ /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
+ for the main thread, but we can use scm_get_stack_base in that
+ case.
+ */
+
+#ifndef PTHREAD_ATTR_GETSTACK_WORKS
if ((void *)&attr < start || (void *)&attr >= end)
- return __libc_stack_end;
+ return scm_get_stack_base ();
else
+#endif
{
#if SCM_STACK_GROWS_UP
return start;
}
}
-#endif /* HAVE_PTHREAD_ATTR_GETSTACK */
+#endif /* HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP */
#else /* !SCM_USE_PTHREAD_THREADS */
static SCM_STACKITEM *
get_thread_stack_base ()
{
- return __libc_stack_end;
+ return scm_get_stack_base ();
}
#endif /* !SCM_USE_PTHREAD_THREADS */
-#endif /* HAVE_LIBC_STACK_END */
#ifdef HAVE_GET_THREAD_STACK_BASE
#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
-static SCM
-fat_mutex_mark (SCM mx)
-{
- fat_mutex *m = SCM_MUTEX_DATA (mx);
- scm_gc_mark (m->owner);
- return m->waiting;
-}
static size_t
fat_mutex_free (SCM mx)
"thread. That is, Guile's mutexes are @emph{recursive}. ")
#define FUNC_NAME s_scm_lock_mutex
{
- SCM_VALIDATE_MUTEX (1, mx);
char *msg;
+ SCM_VALIDATE_MUTEX (1, mx);
msg = fat_mutex_lock (mx);
if (msg)
scm_misc_error (NULL, msg, SCM_EOL);
}
#undef FUNC_NAME
+void
+scm_dynwind_lock_mutex (SCM mutex)
+{
+ scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
+ SCM_F_WIND_EXPLICITLY);
+ scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
+ SCM_F_WIND_EXPLICITLY);
+}
+
static char *
fat_mutex_trylock (fat_mutex *m, int *resp)
{
}
SCM_DEFINE (scm_try_mutex, "try-mutex", 1, 0, 0,
- (SCM mx),
+ (SCM mutex),
"Try to lock @var{mutex}. If the mutex is already locked by someone "
"else, return @code{#f}. Else lock the mutex and return @code{#t}. ")
#define FUNC_NAME s_scm_try_mutex
char *msg;
int res;
- SCM_VALIDATE_MUTEX (1, mx);
+ SCM_VALIDATE_MUTEX (1, mutex);
- msg = fat_mutex_trylock (SCM_MUTEX_DATA (mx), &res);
+ msg = fat_mutex_trylock (SCM_MUTEX_DATA (mutex), &res);
if (msg)
scm_misc_error (NULL, msg, SCM_EOL);
return scm_from_bool (res);
#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
-static SCM
-fat_cond_mark (SCM cv)
-{
- fat_cond *c = SCM_CONDVAR_DATA (cv);
- return c->waiting;
-}
-
static size_t
fat_cond_free (SCM mx)
{
while (1)
{
- fprintf (stderr, "cond wait on %p\n", &c->lock);
-
scm_i_scm_pthread_mutex_lock (&c->lock);
msg = fat_mutex_unlock (m);
t->block_asyncs++;
{
err = block_self (c->waiting, cond, &c->lock, waittime);
scm_i_pthread_mutex_unlock (&c->lock);
- fprintf (stderr, "locking mutex\n");
fat_mutex_lock (mutex);
}
else
t->block_asyncs--;
scm_async_click ();
- fprintf (stderr, "back: %s, %d\n", msg, err);
-
if (msg)
scm_misc_error (NULL, msg, SCM_EOL);
static void
fat_cond_signal (fat_cond *c)
{
- fprintf (stderr, "cond signal on %p\n", &c->lock);
-
scm_i_scm_pthread_mutex_lock (&c->lock);
unblock_from_queue (c->waiting);
scm_i_pthread_mutex_unlock (&c->lock);
scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
/ sizeof (SCM_STACKITEM))); \
- bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
- top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
+ bot = (SCM_STACKITEM *) scm_ia64_register_backing_store_base (); \
+ top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif
-void
-scm_threads_mark_stacks (void)
-{
- scm_i_thread *t;
- for (t = all_threads; t; t = t->next_thread)
- {
- /* Check that thread has indeed been suspended.
- */
- assert (t->top);
-
- scm_gc_mark (t->handle);
-
-#if SCM_STACK_GROWS_UP
- scm_mark_locations (t->base, t->top - t->base);
-#else
- scm_mark_locations (t->top, t->base - t->top);
-#endif
- scm_mark_locations ((SCM_STACKITEM *) t->regs,
- ((size_t) sizeof(t->regs)
- / sizeof (SCM_STACKITEM)));
- }
- SCM_MARK_BACKING_STORE ();
-}
/*** Select */
}
static void
-unlock (void *data)
+do_unlock (void *data)
{
scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
}
void
-scm_frame_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
+scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
{
scm_i_scm_pthread_mutex_lock (mutex);
- scm_frame_unwind_handler (unlock, mutex, SCM_F_WIND_EXPLICITLY);
+ scm_dynwind_unwind_handler (do_unlock, mutex, SCM_F_WIND_EXPLICITLY);
}
int
static scm_i_pthread_cond_t wake_up_cond;
int scm_i_thread_go_to_sleep;
static int threads_initialized_p = 0;
-static int sleep_level = 0;
void
scm_i_thread_put_to_sleep ()
scm_leave_guile ();
scm_i_pthread_mutex_lock (&thread_admin_mutex);
- if (sleep_level == 0)
- {
- /* Signal all threads to go to sleep
- */
- scm_i_thread_go_to_sleep = 1;
- for (t = all_threads; t; t = t->next_thread)
- scm_i_pthread_mutex_lock (&t->heap_mutex);
- scm_i_thread_go_to_sleep = 0;
- }
- else
- {
- /* We are already single threaded. Suspend again to update
- the recorded stack information.
- */
- suspend ();
- }
- sleep_level += 1;
-
- scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+ /* Signal all threads to go to sleep
+ */
+ scm_i_thread_go_to_sleep = 1;
+ for (t = all_threads; t; t = t->next_thread)
+ scm_i_pthread_mutex_lock (&t->heap_mutex);
+ scm_i_thread_go_to_sleep = 0;
}
}
if (threads_initialized_p)
{
scm_i_thread *t;
- scm_i_pthread_mutex_lock (&thread_admin_mutex);
-
- sleep_level -= 1;
- if (sleep_level == 0)
- {
- scm_i_pthread_cond_broadcast (&wake_up_cond);
- for (t = all_threads; t; t = t->next_thread)
- scm_i_pthread_mutex_unlock (&t->heap_mutex);
- }
+ scm_i_pthread_cond_broadcast (&wake_up_cond);
+ for (t = all_threads; t; t = t->next_thread)
+ scm_i_pthread_mutex_unlock (&t->heap_mutex);
scm_i_pthread_mutex_unlock (&thread_admin_mutex);
scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD);
}
resume (t);
}
-static void
-put_to_sleep (void *unused)
-{
- scm_i_thread_put_to_sleep ();
-}
-
-static void
-wake_up (void *unused)
-{
- scm_i_thread_wake_up ();
-}
-
-void
-scm_i_frame_single_threaded ()
-{
- scm_frame_rewind_handler (put_to_sleep, NULL, SCM_F_WIND_EXPLICITLY);
- scm_frame_unwind_handler (wake_up, NULL, SCM_F_WIND_EXPLICITLY);
-}
+/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
+ */
+scm_i_pthread_mutex_t scm_i_critical_section_mutex;
+int scm_i_critical_section_level = 0;
-scm_i_pthread_mutex_t scm_i_critical_section_mutex =
- SCM_I_PTHREAD_MUTEX_INITIALIZER;
+static SCM dynwind_critical_section_mutex;
void
-scm_frame_critical_section ()
+scm_dynwind_critical_section (SCM mutex)
{
- scm_i_frame_pthread_mutex_lock (&scm_i_critical_section_mutex);
- scm_frame_block_asyncs ();
+ if (scm_is_false (mutex))
+ mutex = dynwind_critical_section_mutex;
+ scm_dynwind_lock_mutex (mutex);
+ scm_dynwind_block_asyncs ();
}
/*** Initialization */
scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2;
scm_i_pthread_mutex_t scm_i_misc_mutex;
+#if SCM_USE_PTHREAD_THREADS
+pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1];
+#endif
+
void
scm_threads_prehistory (SCM_STACKITEM *base)
{
- scm_i_pthread_mutex_init (&thread_admin_mutex, NULL);
+#if SCM_USE_PTHREAD_THREADS
+ pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
+ pthread_mutexattr_settype (scm_i_pthread_mutexattr_recursive,
+ PTHREAD_MUTEX_RECURSIVE);
+#endif
+
+ scm_i_pthread_mutex_init (&scm_i_critical_section_mutex,
+ scm_i_pthread_mutexattr_recursive);
scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
scm_i_pthread_cond_init (&wake_up_cond, NULL);
- scm_i_pthread_mutex_init (&scm_i_critical_section_mutex, NULL);
scm_i_pthread_key_create (&scm_i_freelist, NULL);
scm_i_pthread_key_create (&scm_i_freelist2, NULL);
scm_init_threads ()
{
scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
- scm_set_smob_mark (scm_tc16_thread, thread_mark);
scm_set_smob_print (scm_tc16_thread, thread_print);
- scm_set_smob_free (scm_tc16_thread, thread_free);
+ scm_set_smob_free (scm_tc16_thread, thread_free); /* XXX: Could be removed */
scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
- scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark);
scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
scm_tc16_condvar = scm_make_smob_type ("condition-variable",
sizeof (fat_cond));
- scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark);
scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
scm_set_smob_free (scm_tc16_condvar, fat_cond_free);
scm_i_default_dynamic_state = SCM_BOOL_F;
guilify_self_2 (SCM_BOOL_F);
threads_initialized_p = 1;
+
+ dynwind_critical_section_mutex =
+ scm_permanent_object (scm_make_recursive_mutex ());
}
void