-/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
+ * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+ * Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
#include "libguile/bdw-gc.h"
#include "libguile/_scm.h"
+#include <stdlib.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>
-#include <assert.h>
#ifdef HAVE_STRING_H
#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
#include <sys/time.h>
#endif
+#if HAVE_PTHREAD_NP_H
+# include <pthread_np.h>
+#endif
+
+#include <assert.h>
+#include <fcntl.h>
+#include <nproc.h>
+
#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/strings.h"
#include "libguile/weaks.h"
-#ifdef __MINGW32__
-#ifndef ETIMEDOUT
-# define ETIMEDOUT WSAETIMEDOUT
+#include <full-read.h>
+
+
+\f
+
+/* First some libgc shims. */
+
+/* Make sure GC_fn_type is defined; it is missing from the public
+ headers of GC 7.1 and earlier. */
+#ifndef HAVE_GC_FN_TYPE
+typedef void * (* GC_fn_type) (void *);
+#endif
+
+
+#ifndef GC_SUCCESS
+#define GC_SUCCESS 0
#endif
-# include <fcntl.h>
-# include <process.h>
-# define pipe(fd) _pipe (fd, 256, O_BINARY)
-#endif /* __MINGW32__ */
-#include <full-read.h>
+#ifndef GC_UNIMPLEMENTED
+#define GC_UNIMPLEMENTED 3
+#endif
+
+/* Likewise struct GC_stack_base is missing before 7.1. */
+#ifndef HAVE_GC_STACK_BASE
+struct GC_stack_base {
+ void * mem_base; /* Base of memory stack. */
+#ifdef __ia64__
+ void * reg_base; /* Base of separate register stack. */
+#endif
+};
+
+/* Shim for libgc < 7.1, which lacks per-thread registration: report
+   the operation as unimplemented and let callers fall back.  */
+static int
+GC_register_my_thread (struct GC_stack_base *stack_base)
+{
+  return GC_UNIMPLEMENTED;
+}
+
+/* Shim counterpart of GC_register_my_thread above: registration is a
+   no-op there, so there is nothing to undo here.  */
+static void
+GC_unregister_my_thread ()
+{
+}
+
+#if !SCM_USE_PTHREAD_THREADS
+/* No threads; we can just use GC_stackbottom. */
+/* Single-threaded build: the stack base is simply libgc's recorded
+   main-stack bottom.  */
+static void *
+get_thread_stack_base ()
+{
+  return GC_stackbottom;
+}
+
+#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
+ && defined PTHREAD_ATTR_GETSTACK_WORKS
+/* This method for GNU/Linux and perhaps some other systems.
+ It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
+ available on them. */
+/* Determine this thread's stack base (the end the stack grows away
+   from) by querying its extent from the pthread attributes.  */
+static void *
+get_thread_stack_base ()
+{
+  pthread_attr_t attr;
+  void *start, *end;
+  size_t size;
+
+  pthread_getattr_np (pthread_self (), &attr);
+  pthread_attr_getstack (&attr, &start, &size);
+  /* Release the resources pthread_getattr_np allocated in ATTR, as
+     the pthread_attr_get_np variant below already does; glibc
+     documents that the attr must be destroyed when no longer used.  */
+  pthread_attr_destroy (&attr);
+  end = (char *)start + size;
+
+#if SCM_STACK_GROWS_UP
+  return start;
+#else
+  return end;
+#endif
+}
+
+#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
+/* This method for MacOS X.
+ It'd be nice if there was some documentation on pthread_get_stackaddr_np,
+ but as of 2006 there's nothing obvious at apple.com. */
+static void *
+get_thread_stack_base ()
+{
+  /* Apple's pthread extension hands back the stack address directly,
+     with no attribute object to manage.  */
+  return pthread_get_stackaddr_np (pthread_self ());
+}
+
+#elif HAVE_PTHREAD_ATTR_GET_NP
+/* This one is for FreeBSD 9. */
+static void *
+get_thread_stack_base ()
+{
+  pthread_attr_t attr;
+  void *start, *end;
+  size_t size;
+
+  /* pthread_attr_get_np fills in a previously initialized attribute
+     object, which must be destroyed again afterwards.  */
+  pthread_attr_init (&attr);
+  pthread_attr_get_np (pthread_self (), &attr);
+  pthread_attr_getstack (&attr, &start, &size);
+  pthread_attr_destroy (&attr);
+
+  end = (char *)start + size;
+
+  /* Return whichever end of [start, end) the stack grows away from.  */
+#if SCM_STACK_GROWS_UP
+  return start;
+#else
+  return end;
+#endif
+}
+
+#else
+#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
+#endif
+
+/* Shim for libgc < 7.1: fill in *STACK_BASE for the calling thread
+   using the platform-specific get_thread_stack_base above.  Always
+   reports GC_SUCCESS.  */
+static int
+GC_get_stack_base (struct GC_stack_base *stack_base)
+{
+  stack_base->mem_base = get_thread_stack_base ();
+#ifdef __ia64__
+  /* Calculate and store off the base of this thread's register
+     backing store (RBS).  Unfortunately our implementation(s) of
+     scm_ia64_register_backing_store_base are only reliable for the
+     main thread.  For other threads, therefore, find out the current
+     top of the RBS, and use that as a maximum.  */
+  stack_base->reg_base = scm_ia64_register_backing_store_base ();
+  {
+    ucontext_t ctx;
+    void *bsp;
+    getcontext (&ctx);
+    bsp = scm_ia64_ar_bsp (&ctx);
+    if (stack_base->reg_base > bsp)
+      stack_base->reg_base = bsp;
+  }
+#endif
+  return GC_SUCCESS;
+}
+
+/* Shim for libgc < 7.1: invoke FN with an approximation of the
+   current stack base, namely the address of a local variable.  */
+static void *
+GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
+{
+  struct GC_stack_base stack_base;
+
+  stack_base.mem_base = (void*)&stack_base;
+#ifdef __ia64__
+  /* FIXME: Untested.  */
+  {
+    ucontext_t ctx;
+    getcontext (&ctx);
+    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
+  }
+#endif
+
+  return fn (&stack_base, arg);
+}
+#endif /* HAVE_GC_STACK_BASE */
+
+
+/* Now define with_gc_active and with_gc_inactive. */
+
+#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
+
+/* We have a sufficiently new libgc (7.2 or newer). */
+
+/* Run FUNC (DATA) with GC deactivated for this thread, via
+   GC_do_blocking (available in libgc 7.2 and newer).  */
+static void*
+with_gc_inactive (GC_fn_type func, void *data)
+{
+  return GC_do_blocking (func, data);
+}
+
+/* Run FUNC (DATA) with GC reactivated for this thread, via
+   GC_call_with_gc_active (available in libgc 7.2 and newer).  */
+static void*
+with_gc_active (GC_fn_type func, void *data)
+{
+  return GC_call_with_gc_active (func, data);
+}
+
+#else
+
+/* libgc not new enough, so never actually deactivate GC.
+
+ Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
+ GC_call_with_gc_active. */
+
+/* Fallback for older libgc: GC is never actually deactivated, so just
+   call FUNC directly.  */
+static void*
+with_gc_inactive (GC_fn_type func, void *data)
+{
+  return func (data);
+}
+
+/* Fallback for older libgc: GC was never deactivated, so just call
+   FUNC directly.  */
+static void*
+with_gc_active (GC_fn_type func, void *data)
+{
+  return func (data);
+}
+
+#endif /* HAVE_GC_DO_BLOCKING */
+
\f
static void
/* Getting into and out of guile mode.
*/
+/* Key used to attach a cleanup handler to a given thread. Also, if
+ thread-local storage is unavailable, this key is used to retrieve the
+ current thread with `pthread_getspecific ()'. */
+scm_i_pthread_key_t scm_i_thread_key;
+
+
#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
/* When thread-local storage (TLS) is available, a pointer to the
represent. */
SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
-# define SET_CURRENT_THREAD(_t) scm_i_current_thread = (_t)
-
-#else /* !SCM_HAVE_THREAD_STORAGE_CLASS */
-
-/* Key used to retrieve the current thread with `pthread_getspecific ()'. */
-scm_i_pthread_key_t scm_i_thread_key;
-
-# define SET_CURRENT_THREAD(_t) \
- scm_i_pthread_setspecific (scm_i_thread_key, (_t))
-
-#endif /* !SCM_HAVE_THREAD_STORAGE_CLASS */
+#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static SCM scm_i_default_dynamic_state;
+/* Run when a fluid is collected: clear slot N in every thread's
+   dynamic-state fluid vector so the value it held can be reclaimed.
+   Walks ALL_THREADS while holding THREAD_ADMIN_MUTEX.  */
+void
+scm_i_reset_fluid (size_t n)
+{
+  scm_i_thread *t;
+
+  scm_i_pthread_mutex_lock (&thread_admin_mutex);
+  for (t = all_threads; t; t = t->next_thread)
+    if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
+      {
+        SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
+
+        if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
+          SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
+      }
+  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+}
+
/* Perform first stage of thread initialisation, in non-guile mode.
*/
static void
-guilify_self_1 (SCM_STACKITEM *base)
-{
-  scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");
-
-  t->pthread = scm_i_pthread_self ();
-  t->handle = SCM_BOOL_F;
-  t->result = SCM_BOOL_F;
-  t->cleanup_handler = SCM_BOOL_F;
-  t->mutexes = SCM_EOL;
-  t->held_mutex = NULL;
-  t->join_queue = SCM_EOL;
-  t->dynamic_state = SCM_BOOL_F;
-  t->dynwinds = SCM_EOL;
-  t->active_asyncs = SCM_EOL;
-  t->block_asyncs = 1;
-  t->pending_asyncs = 1;
-  t->critical_section_level = 0;
-  t->last_debug_frame = NULL;
-  t->base = base;
+guilify_self_1 (struct GC_stack_base *base)
+{
+  scm_i_thread t;
+
+  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
+     before allocating anything in this thread, because allocation could
+     cause GC to run, and GC could cause finalizers, which could invoke
+     Scheme functions, which need the current thread to be set.  */
+
+  t.pthread = scm_i_pthread_self ();
+  t.handle = SCM_BOOL_F;
+  t.result = SCM_BOOL_F;
+  t.cleanup_handler = SCM_BOOL_F;
+  t.mutexes = SCM_EOL;
+  t.held_mutex = NULL;
+  t.join_queue = SCM_EOL;
+  t.dynamic_state = SCM_BOOL_F;
+  t.dynwinds = SCM_EOL;
+  t.active_asyncs = SCM_EOL;
+  t.block_asyncs = 1;
+  t.pending_asyncs = 1;
+  t.critical_section_level = 0;
+  t.base = base->mem_base;
#ifdef __ia64__
-  /* Calculate and store off the base of this thread's register
-     backing store (RBS).  Unfortunately our implementation(s) of
-     scm_ia64_register_backing_store_base are only reliable for the
-     main thread.  For other threads, therefore, find out the current
-     top of the RBS, and use that as a maximum.  */
-  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
-  {
-    ucontext_t ctx;
-    void *bsp;
-    getcontext (&ctx);
-    bsp = scm_ia64_ar_bsp (&ctx);
-    if (t->register_backing_store_base > bsp)
-      t->register_backing_store_base = bsp;
-  }
+  t.register_backing_store_base = base->reg_base;
#endif
-  t->continuation_root = SCM_EOL;
-  t->continuation_base = base;
-  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
-  t->sleep_mutex = NULL;
-  t->sleep_object = SCM_BOOL_F;
-  t->sleep_fd = -1;
-
-  if (pipe (t->sleep_pipe) != 0)
+  t.continuation_root = SCM_EOL;
+  t.continuation_base = t.base;
+  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
+  t.sleep_mutex = NULL;
+  t.sleep_object = SCM_BOOL_F;
+  t.sleep_fd = -1;
+
+  if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
    /* FIXME: Error conditions during the initialization phase are handled
       gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
    abort ();
-  scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
-  t->current_mark_stack_ptr = NULL;
-  t->current_mark_stack_limit = NULL;
-  t->canceled = 0;
-  t->exited = 0;
-  t->guile_mode = 0;
+  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
+  t.current_mark_stack_ptr = NULL;
+  t.current_mark_stack_limit = NULL;
+  t.canceled = 0;
+  t.exited = 0;
+  t.guile_mode = 0;
+
+  /* The switcheroo: T was built on the C stack; now copy it into
+     GC-managed memory and publish that copy as the current thread.
+     GC stays disabled until the new thread is linked onto
+     ALL_THREADS, so the collector cannot run while the thread list
+     is inconsistent.  */
+  {
+    scm_i_thread *t_ptr = &t;
+
+    GC_disable ();
+    t_ptr = GC_malloc (sizeof (scm_i_thread));
+    memcpy (t_ptr, &t, sizeof t);
-  SET_CURRENT_THREAD (t);
+    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
-  scm_i_pthread_mutex_lock (&thread_admin_mutex);
-  t->next_thread = all_threads;
-  all_threads = t;
-  thread_count++;
-  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
+    /* Cache the current thread in TLS for faster lookup.  */
+    scm_i_current_thread = t_ptr;
+#endif
+
+    scm_i_pthread_mutex_lock (&thread_admin_mutex);
+    t_ptr->next_thread = all_threads;
+    all_threads = t_ptr;
+    thread_count++;
+    scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+
+    GC_enable ();
+  }
}
/* Perform second stage of thread initialisation, in guile mode.
t->join_queue = make_queue ();
t->block_asyncs = 0;
+
+ /* See note in finalizers.c:queue_finalizer_async(). */
+ GC_invoke_finalizers ();
}
\f
#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
+/* Adapter with the `scm_t_catch_body' shape: call the Scheme thunk
+   pointed to by DATA with zero arguments.  Replaces the old practice
+   of casting `scm_call_0' to an incompatible function-pointer type.  */
+static SCM
+call_cleanup (void *data)
+{
+  SCM *proc_p = data;
+  return scm_call_0 (*proc_p);
+}
+
/* Perform thread tear-down, in guile mode.
*/
static void *
{
scm_i_thread *t = (scm_i_thread *) v;
+ /* Ensure the signal handling thread has been launched, because we might be
+ shutting it down. This needs to be done in Guile mode. */
+ scm_i_ensure_signal_delivery_thread ();
+
if (!scm_is_false (t->cleanup_handler))
{
SCM ptr = t->cleanup_handler;
t->cleanup_handler = SCM_BOOL_F;
t->result = scm_internal_catch (SCM_BOOL_T,
- (scm_t_catch_body) scm_call_0, ptr,
+ call_cleanup, &ptr,
scm_handle_by_message_noexit, NULL);
}
fat_mutex *m = SCM_MUTEX_DATA (mutex);
scm_i_pthread_mutex_lock (&m->lock);
+
+ /* Since MUTEX is in `t->mutexes', T must be its owner. */
+ assert (scm_is_eq (m->owner, t->handle));
+
unblock_from_queue (m->waiting);
+
scm_i_pthread_mutex_unlock (&m->lock);
}
return NULL;
}
+/* Run DO_THREAD_EXIT in Guile mode, first ensuring the calling thread
+   is registered with libgc using the freshly obtained stack base SB.  */
+static void *
+do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
+{
+  /* Won't hurt if we are already registered.  */
+#if SCM_USE_PTHREAD_THREADS
+  GC_register_my_thread (sb);
+#endif
+
+  return scm_with_guile (do_thread_exit, v);
+}
+
static void
on_thread_exit (void *v)
{
/* This handler is executed in non-guile mode. */
scm_i_thread *t = (scm_i_thread *) v, **tp;
+ /* If we were canceled, we were unable to clear `t->guile_mode', so do
+ it here. */
+ t->guile_mode = 0;
+
/* If this thread was cancelled while doing a cond wait, it will
still have a mutex locked, so we unlock it here. */
if (t->held_mutex)
t->held_mutex = NULL;
}
- SET_CURRENT_THREAD (v);
+ /* Reinstate the current thread for purposes of scm_with_guile
+ guile-mode cleanup handlers. Only really needed in the non-TLS
+ case but it doesn't hurt to be consistent. */
+ scm_i_pthread_setspecific (scm_i_thread_key, t);
- /* Ensure the signal handling thread has been launched, because we might be
- shutting it down. */
- scm_i_ensure_signal_delivery_thread ();
-
- /* Unblocking the joining threads needs to happen in guile mode
- since the queue is a SCM data structure. */
-
- /* Note: Since `do_thread_exit ()' uses allocates memory via `libgc', we
- assume the GC is usable at this point, and notably that thread-local
- storage (TLS) hasn't been deallocated yet. */
- do_thread_exit (v);
+ /* Scheme-level thread finalizers and other cleanup needs to happen in
+ guile mode. */
+ GC_call_with_stack_base (do_thread_exit_trampoline, t);
/* Removing ourself from the list of all threads needs to happen in
non-guile mode since all SCM values on our stack become
scm_i_pthread_mutex_unlock (&thread_admin_mutex);
- SET_CURRENT_THREAD (NULL);
-}
+ scm_i_pthread_setspecific (scm_i_thread_key, NULL);
-#ifndef SCM_HAVE_THREAD_STORAGE_CLASS
+#if SCM_USE_PTHREAD_THREADS
+ GC_unregister_my_thread ();
+#endif
+}
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
+/* Create SCM_I_THREAD_KEY; ON_THREAD_EXIT is registered as the key's
+   destructor, so per-thread teardown runs when a thread that set a
+   non-NULL value terminates.  */
static void
init_thread_key (void)
{
-  scm_i_pthread_key_create (&scm_i_thread_key, NULL);
+  scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
-#endif
-
-/* Perform any initializations necessary to bring the current thread
- into guile mode, initializing Guile itself, if necessary.
+/* Perform any initializations necessary to make the current thread
+ known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
+ if necessary.
BASE is the stack base to use with GC.
PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
which case the default dynamic state is used.
- Return zero when the thread was in guile mode already; otherwise
+ Returns zero when the thread was known to guile already; otherwise
return 1.
-*/
+
+ Note that it could be the case that the thread was known
+ to Guile, but not in guile mode (because we are within a
+ scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
+ be sure. New threads are put into guile mode implicitly. */
static int
-scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
+scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
- scm_i_thread *t;
-
-#ifndef SCM_HAVE_THREAD_STORAGE_CLASS
scm_i_pthread_once (&init_thread_key_once, init_thread_key);
-#endif
- t = SCM_I_CURRENT_THREAD;
- if (t == NULL)
+ if (SCM_I_CURRENT_THREAD)
+ {
+ /* Thread is already known to Guile.
+ */
+ return 0;
+ }
+ else
{
/* This thread has not been guilified yet.
*/
initialization.
*/
scm_i_init_guile (base);
+
+#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
+ /* Allow other threads to come in later. */
+ GC_allow_register_threads ();
+#endif
+
scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
}
else
the first time. Only initialize this thread.
*/
scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
+
+ /* Register this thread with libgc. */
+#if SCM_USE_PTHREAD_THREADS
+ GC_register_my_thread (base);
+#endif
+
guilify_self_1 (base);
guilify_self_2 (parent);
}
return 1;
}
- else if (t->top)
- {
- /* This thread is already guilified but not in guile mode, just
- resume it.
-
- A user call to scm_with_guile() will lead us to here. This could
- happen from anywhere on the stack, and in particular lower on the
- stack than when it was when this thread was first guilified. Thus,
- `base' must be updated. */
-#if SCM_STACK_GROWS_UP
- if (base < t->base)
- t->base = base;
-#else
- if (base > t->base)
- t->base = base;
-#endif
+}
- t->top = NULL;
- return 1;
- }
+/* Initialize Guile on the calling thread, determining its stack base
+   via the GC_get_stack_base shim; exits the process with failure if
+   the stack base cannot be determined.  */
+void
+scm_init_guile ()
+{
+  struct GC_stack_base stack_base;
+
+  if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
+    scm_i_init_thread_for_guile (&stack_base,
+                                 scm_i_default_dynamic_state);
  else
    {
-      /* Thread is already in guile mode.  Nothing to do.
-       */
-      return 0;
+      fprintf (stderr, "Failed to get stack base for current thread.\n");
+      exit (EXIT_FAILURE);
    }
}
-#if SCM_USE_PTHREAD_THREADS
+/* Arguments for with_guile_and_parent, passed through
+   GC_call_with_stack_base as a single pointer.  */
+struct with_guile_args
+{
+  GC_fn_type func;        /* User function to run in Guile mode.  */
+  void *data;             /* Opaque argument handed to FUNC.  */
+  SCM parent;             /* Parent dynamic state for a new thread.  */
+};
-#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
-/* This method for GNU/Linux and perhaps some other systems.
- It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
- available on them. */
-#define HAVE_GET_THREAD_STACK_BASE
+/* Helper passed to with_gc_active: unpack the argument struct and run
+   the user function under a continuation barrier.  */
+static void *
+with_guile_trampoline (void *data)
+{
+  struct with_guile_args *args = data;
-static SCM_STACKITEM *
-get_thread_stack_base ()
+  return scm_c_with_continuation_barrier (args->func, args->data);
+}
+
+/* Body run via GC_call_with_stack_base: make sure the calling thread
+   is in Guile mode (initializing it with BASE and the parent state in
+   DATA if needed), run the user function under a continuation
+   barrier, and restore the thread's previous mode.  */
+static void *
+with_guile_and_parent (struct GC_stack_base *base, void *data)
{
-  pthread_attr_t attr;
-  void *start, *end;
-  size_t size;
+  void *res;
+  int new_thread;
+  scm_i_thread *t;
+  struct with_guile_args *args = data;
-  pthread_getattr_np (pthread_self (), &attr);
-  pthread_attr_getstack (&attr, &start, &size);
-  end = (char *)start + size;
+  new_thread = scm_i_init_thread_for_guile (base, args->parent);
+  t = SCM_I_CURRENT_THREAD;
+  if (new_thread)
+    {
+      /* We are in Guile mode.  */
+      assert (t->guile_mode);
-  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
-     for the main thread, but we can use scm_get_stack_base in that
-     case.
-  */
+      res = scm_c_with_continuation_barrier (args->func, args->data);
-#ifndef PTHREAD_ATTR_GETSTACK_WORKS
-  if ((void *)&attr < start || (void *)&attr >= end)
-    return (SCM_STACKITEM *) GC_stackbottom;
+      /* Leave Guile mode.  */
+      t->guile_mode = 0;
+    }
+  else if (t->guile_mode)
+    {
+      /* Already in Guile mode.  */
+      res = scm_c_with_continuation_barrier (args->func, args->data);
+    }
  else
-#endif
    {
+      /* We are not in Guile mode, either because we are not within a
+         scm_with_guile, or because we are within a scm_without_guile.
+
+         This call to scm_with_guile() could happen from anywhere on the
+         stack, and in particular lower on the stack than when it was
+         when this thread was first guilified.  Thus, `base' must be
+         updated.  */
#if SCM_STACK_GROWS_UP
-      return start;
+      if (SCM_STACK_PTR (base->mem_base) < t->base)
+        t->base = SCM_STACK_PTR (base->mem_base);
#else
-      return end;
+      if (SCM_STACK_PTR (base->mem_base) > t->base)
+        t->base = SCM_STACK_PTR (base->mem_base);
#endif
-    }
-}
-#elif HAVE_PTHREAD_GET_STACKADDR_NP
-/* This method for MacOS X.
-   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
-   but as of 2006 there's nothing obvious at apple.com.  */
-#define HAVE_GET_THREAD_STACK_BASE
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
-  return pthread_get_stackaddr_np (pthread_self ());
-}
-
-#elif defined (__MINGW32__)
-/* This method for mingw.  In mingw the basic scm_get_stack_base can be used
-   in any thread.  We don't like hard-coding the name of a system, but there
-   doesn't seem to be a cleaner way of knowing scm_get_stack_base can
-   work.  */
-#define HAVE_GET_THREAD_STACK_BASE
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
-  return (SCM_STACKITEM *) GC_stackbottom;
+      /* Enter Guile mode for the duration of the call.  */
+      t->guile_mode = 1;
+      res = with_gc_active (with_guile_trampoline, args);
+      t->guile_mode = 0;
+    }
+  return res;
}
-#endif /* pthread methods of get_thread_stack_base */
-
-#else /* !SCM_USE_PTHREAD_THREADS */
-
-#define HAVE_GET_THREAD_STACK_BASE
-
-static SCM_STACKITEM *
-get_thread_stack_base ()
+/* Pack FUNC/DATA/PARENT into a with_guile_args struct and dispatch to
+   with_guile_and_parent via GC_call_with_stack_base, so the callee
+   receives an up-to-date stack base for this call site.  */
+static void *
+scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
-  return (SCM_STACKITEM *) GC_stackbottom;
-}
-
-#endif /* !SCM_USE_PTHREAD_THREADS */
+  struct with_guile_args args;
-#ifdef HAVE_GET_THREAD_STACK_BASE
-
-void
-scm_init_guile ()
-{
-  scm_i_init_thread_for_guile (get_thread_stack_base (),
-                               scm_i_default_dynamic_state);
+  args.func = func;
+  args.data = data;
+  args.parent = parent;
+
+  return GC_call_with_stack_base (with_guile_and_parent, &args);
}
-#endif
-
void *
scm_with_guile (void *(*func)(void *), void *data)
{
scm_i_default_dynamic_state);
}
-SCM_UNUSED static void
-scm_leave_guile_cleanup (void *x)
-{
- on_thread_exit (SCM_I_CURRENT_THREAD);
-}
-
-void *
-scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
-{
- void *res;
- int really_entered;
- SCM_STACKITEM base_item;
-
- really_entered = scm_i_init_thread_for_guile (&base_item, parent);
- if (really_entered)
- {
- scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
- res = scm_c_with_continuation_barrier (func, data);
- scm_i_pthread_cleanup_pop (0);
- }
- else
- res = scm_c_with_continuation_barrier (func, data);
-
- return res;
-}
-
-\f
-/*** Non-guile mode. */
-
-#ifdef HAVE_GC_DO_BLOCKING
-
-# ifndef HAVE_GC_FN_TYPE
-/* This typedef is missing from the public headers of GC 7.1 and earlier. */
-typedef void * (* GC_fn_type) (void *);
-# endif /* HAVE_GC_FN_TYPE */
-
-# ifndef HAVE_DECL_GC_DO_BLOCKING
-/* This declaration is missing from the public headers of GC 7.1. */
-extern void GC_do_blocking (GC_fn_type, void *);
-# endif /* HAVE_DECL_GC_DO_BLOCKING */
-
-struct without_guile_arg
-{
- void * (*function) (void *);
- void *data;
- void *result;
-};
-
-static void
-without_guile_trampoline (void *closure)
-{
- struct without_guile_arg *arg;
-
- SCM_I_CURRENT_THREAD->guile_mode = 0;
-
- arg = (struct without_guile_arg *) closure;
- arg->result = arg->function (arg->data);
-
- SCM_I_CURRENT_THREAD->guile_mode = 1;
-}
-
-#endif /* HAVE_GC_DO_BLOCKING */
-
-
void *
scm_without_guile (void *(*func)(void *), void *data)
{
void *result;
+ scm_i_thread *t = SCM_I_CURRENT_THREAD;
-#ifdef HAVE_GC_DO_BLOCKING
- if (SCM_I_CURRENT_THREAD->guile_mode)
+ if (t->guile_mode)
{
- struct without_guile_arg arg;
-
- arg.function = func;
- arg.data = data;
- GC_do_blocking ((GC_fn_type) without_guile_trampoline, &arg);
- result = arg.result;
+ SCM_I_CURRENT_THREAD->guile_mode = 0;
+ result = with_gc_inactive (func, data);
+ SCM_I_CURRENT_THREAD->guile_mode = 1;
}
else
-#endif
+ /* Otherwise we're not in guile mode, so nothing to do. */
result = func (data);
return result;
else
t->result = scm_catch (SCM_BOOL_T, thunk, handler);
- /* Trigger a call to `on_thread_exit ()'. */
- pthread_exit (NULL);
-
return 0;
}
SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
handler, SCM_ARG2, FUNC_NAME);
+ GC_collect_a_little ();
data.parent = scm_current_dynamic_state ();
data.thunk = thunk;
data.handler = handler;
scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
scm_i_pthread_mutex_unlock (&data.mutex);
+ assert (SCM_I_IS_THREAD (data.thread));
+
return data.thread;
}
if (SCM_I_IS_THREAD (new_owner))
{
scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
+
+ /* FIXME: The order in which `t->admin_mutex' and
+ `m->lock' are taken differs from that in
+ `on_thread_exit', potentially leading to deadlocks. */
scm_i_pthread_mutex_lock (&t->admin_mutex);
/* Only keep a weak reference to MUTEX so that it's not
- retained when not referenced elsewhere (bug #27450). Note
- that the weak pair itself it still retained, but it's better
- than retaining MUTEX and the threads referred to by its
- associated queue. */
+ retained when not referenced elsewhere (bug #27450).
+ The weak pair itself is eventually removed when MUTEX
+ is unlocked. Note that `t->mutexes' lists mutexes
+ currently held by T, so it should be small. */
t->mutexes = scm_weak_car_pair (mutex, t->mutexes);
scm_i_pthread_mutex_unlock (&t->admin_mutex);
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
(SCM m, SCM timeout, SCM owner),
-"Lock @var{mutex}. If the mutex is already locked, the calling thread "
-"blocks until the mutex becomes available. The function returns when "
-"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
-"a thread already owns will succeed right away and will not block the "
-"thread. That is, Guile's mutexes are @emph{recursive}. ")
+ "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
+ "thread blocks until the mutex becomes available. The function\n"
+ "returns when the calling thread owns the lock on @var{m}.\n"
+ "Locking a mutex that a thread already owns will succeed right\n"
+ "away and will not block the thread. That is, Guile's mutexes\n"
+ "are @emph{recursive}.")
#define FUNC_NAME s_scm_lock_mutex_timed
{
SCM exception;
waittime = &cwaittime;
}
+ if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
+ SCM_VALIDATE_THREAD (3, owner);
+
exception = fat_mutex_lock (m, waittime, owner, &ret);
if (!scm_is_false (exception))
scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
}
#undef FUNC_NAME
+/* Wrapper around scm_lock_mutex with a `void' return, matching the
+   handler type that scm_dynwind_rewind_handler_with_scm expects
+   without a function-pointer cast.  */
+static void
+lock_mutex_return_void (SCM mx)
+{
+  (void) scm_lock_mutex (mx);
+}
+
+/* Wrapper around scm_unlock_mutex with a `void' return, matching the
+   handler type that scm_dynwind_unwind_handler_with_scm expects
+   without a function-pointer cast.  */
+static void
+unlock_mutex_return_void (SCM mx)
+{
+  (void) scm_unlock_mutex (mx);
+}
+
+/* Hold MUTEX for the extent of the current dynwind context: it is
+   unlocked on unwind and re-locked on rewind, both explicitly.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
-  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
+  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
-  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
+  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
fat_mutex_unlock (SCM mutex, SCM cond,
const scm_t_timespec *waittime, int relock)
{
+ SCM owner;
fat_mutex *m = SCM_MUTEX_DATA (mutex);
fat_cond *c = NULL;
scm_i_thread *t = SCM_I_CURRENT_THREAD;
scm_i_scm_pthread_mutex_lock (&m->lock);
- SCM owner = m->owner;
+ owner = m->owner;
- if (!scm_is_eq (owner, scm_current_thread ()))
+ if (!scm_is_eq (owner, t->handle))
{
if (m->level == 0)
{
scm_i_pthread_mutex_unlock (&m->lock);
scm_misc_error (NULL, "mutex not locked", SCM_EOL);
}
- owner = scm_current_thread ();
+ owner = t->handle;
}
else if (!m->allow_external_unlock)
{
if (m->level > 0)
m->level--;
if (m->level == 0)
- m->owner = unblock_from_queue (m->waiting);
+ {
+ /* Change the owner of MUTEX. */
+ t->mutexes = scm_delq_x (mutex, t->mutexes);
+ m->owner = unblock_from_queue (m->waiting);
+ }
t->block_asyncs++;
if (m->level > 0)
m->level--;
if (m->level == 0)
- m->owner = unblock_from_queue (m->waiting);
+ {
+ /* Change the owner of MUTEX. */
+ t->mutexes = scm_delq_x (mutex, t->mutexes);
+ m->owner = unblock_from_queue (m->waiting);
+ }
scm_i_pthread_mutex_unlock (&m->lock);
ret = 1;
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
(SCM cv, SCM mx, SCM t),
-"Wait until @var{cond-var} has been signalled. While waiting, "
-"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
-"is locked again when this function returns. When @var{time} is given, "
+"Wait until condition variable @var{cv} has been signalled. While waiting, "
+"mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
+"is locked again when this function returns. When @var{t} is given, "
"it specifies a point in time where the waiting should be aborted. It "
"can be either a integer as returned by @code{current-time} or a pair "
"as returned by @code{gettimeofday}. When the waiting is aborted the "
}
#undef FUNC_NAME
+/* Scheme binding `total-processor-count', implemented with
+   num_processors from the included <nproc.h>.  */
+SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
+            (void),
+            "Return the total number of processors of the machine, which\n"
+            "is guaranteed to be at least 1. A ``processor'' here is a\n"
+            "thread execution unit, which can be either:\n\n"
+            "@itemize\n"
+            "@item an execution core in a (possibly multi-core) chip, in a\n"
+            "  (possibly multi- chip) module, in a single computer, or\n"
+            "@item a thread execution unit inside a core in the case of\n"
+            "  @dfn{hyper-threaded} CPUs.\n"
+            "@end itemize\n\n"
+            "Which of the two definitions is used, is unspecified.\n")
+#define FUNC_NAME s_scm_total_processor_count
+{
+  return scm_from_ulong (num_processors (NPROC_ALL));
+}
+#undef FUNC_NAME
+
+/* Scheme binding `current-processor-count', implemented with
+   num_processors from the included <nproc.h>.  */
+SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
+            (void),
+            "Like @code{total-processor-count}, but return the number of\n"
+            "processors available to the current process. See\n"
+            "@code{setaffinity} and @code{getaffinity} for more\n"
+            "information.\n")
+#define FUNC_NAME s_scm_current_processor_count
+{
+  return scm_from_ulong (num_processors (NPROC_CURRENT));
+}
+#undef FUNC_NAME
+
+
+\f
+
static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;
#endif
void
-scm_threads_prehistory (SCM_STACKITEM *base)
+scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
scm_i_pthread_cond_init (&wake_up_cond, NULL);
- guilify_self_1 (base);
+ guilify_self_1 ((struct GC_stack_base *) base);
}
scm_t_bits scm_tc16_thread;
guilify_self_2 (SCM_BOOL_F);
threads_initialized_p = 1;
- dynwind_critical_section_mutex =
- scm_permanent_object (scm_make_recursive_mutex ());
+ dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
void
scm_init_threads_default_dynamic_state ()
{
SCM state = scm_make_dynamic_state (scm_current_dynamic_state ());
- scm_i_default_dynamic_state = scm_permanent_object (state);
+ scm_i_default_dynamic_state = state;
}
void
return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
+# ifdef __FreeBSD__
+# include <ucontext.h>
+/* NOTE(review): hard-coded base address of the ia64 register backing
+   store on FreeBSD — confirm against the FreeBSD/ia64 ABI docs.  */
+void *
+scm_ia64_register_backing_store_base (void)
+{
+  return (void *)0x8000000000000000;
+}
+/* Compute the ar.bsp value for the context OPAQUE (a ucontext_t): the
+   saved bspstore advanced by the context's `ndirty' amount.  */
+void *
+scm_ia64_ar_bsp (const void *opaque)
+{
+  const ucontext_t *ctx = opaque;
+  return (void *)(ctx->uc_mcontext.mc_special.bspstore
+                  + ctx->uc_mcontext.mc_special.ndirty);
+}
+# endif /* __FreeBSD__ */
#endif /* __ia64__ */