-/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
+ * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013
+ * Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
#include "libguile/bdw-gc.h"
#include "libguile/_scm.h"
+#include <stdlib.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <sys/time.h>
#endif
+#if HAVE_PTHREAD_NP_H
+# include <pthread_np.h>
+#endif
+
+#include <sys/select.h>
+
#include <assert.h>
+#include <fcntl.h>
+#include <nproc.h>
#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/weaks.h"
-#ifdef __MINGW32__
-#ifndef ETIMEDOUT
-# define ETIMEDOUT WSAETIMEDOUT
-#endif
-# include <fcntl.h>
-# include <process.h>
-# define pipe(fd) _pipe (fd, 256, O_BINARY)
-#endif /* __MINGW32__ */
-
#include <full-read.h>
#endif
+#ifndef GC_SUCCESS
+#define GC_SUCCESS 0
+#endif
+
+#ifndef GC_UNIMPLEMENTED
+#define GC_UNIMPLEMENTED 3
+#endif
+
+/* Likewise struct GC_stack_base is missing before 7.1. */
+#ifndef HAVE_GC_STACK_BASE
+struct GC_stack_base {
+ void * mem_base; /* Base of memory stack. */
+#ifdef __ia64__
+ void * reg_base; /* Base of separate register stack. */
+#endif
+};
+
+/* Fallback for libgc < 7.1, which lacks GC_register_my_thread: report
+   that per-thread registration is unimplemented so callers can tell
+   nothing was registered.  */
+static int
+GC_register_my_thread (struct GC_stack_base *stack_base)
+{
+  return GC_UNIMPLEMENTED;
+}
+
+/* Matching fallback: registration never happened, so unregistration is
+   a no-op.  */
+static void
+GC_unregister_my_thread ()
+{
+}
+
+#if !SCM_USE_PTHREAD_THREADS
+/* No threads; we can just use GC_stackbottom. */
+static void *
+get_thread_stack_base ()
+{
+  /* Single-threaded build: the collector's recorded stack bottom is
+     the stack base of the one and only thread.  */
+  return GC_stackbottom;
+}
+
+#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
+ && defined PTHREAD_ATTR_GETSTACK_WORKS
+/* This method for GNU/Linux and perhaps some other systems.
+ It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
+ available on them. */
+static void *
+get_thread_stack_base ()
+{
+  pthread_attr_t attr;
+  void *start, *end;
+  size_t size;
+
+  /* Ask the pthread implementation (glibc/NPTL extension) for the
+     calling thread's stack extent.  */
+  pthread_getattr_np (pthread_self (), &attr);
+  pthread_attr_getstack (&attr, &start, &size);
+  end = (char *)start + size;
+
+  /* The "base" is the end the stack grows away from.  */
+#if SCM_STACK_GROWS_UP
+  return start;
+#else
+  return end;
+#endif
+}
+
+#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
+/* This method for MacOS X.
+ It'd be nice if there was some documentation on pthread_get_stackaddr_np,
+ but as of 2006 there's nothing obvious at apple.com. */
+static void *
+get_thread_stack_base ()
+{
+  /* Darwin reports the thread's stack address directly; on the usual
+     stack-grows-down targets that is the base we want.  */
+  return pthread_get_stackaddr_np (pthread_self ());
+}
+
+#elif HAVE_PTHREAD_ATTR_GET_NP
+/* This one is for FreeBSD 9. */
+static void *
+get_thread_stack_base ()
+{
+  pthread_attr_t attr;
+  void *start, *end;
+  size_t size;
+
+  /* Unlike pthread_getattr_np, FreeBSD's pthread_attr_get_np requires
+     an initialized attribute object, which we must also destroy.  */
+  pthread_attr_init (&attr);
+  pthread_attr_get_np (pthread_self (), &attr);
+  pthread_attr_getstack (&attr, &start, &size);
+  pthread_attr_destroy (&attr);
+
+  end = (char *)start + size;
+
+  /* Return the end the stack grows away from.  */
+#if SCM_STACK_GROWS_UP
+  return start;
+#else
+  return end;
+#endif
+}
+
+#else
+#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
+#endif
+
+/* Fallback for libgc < 7.1: fill *STACK_BASE using our own
+   platform-specific helper and report success.  */
+static int
+GC_get_stack_base (struct GC_stack_base *stack_base)
+{
+  stack_base->mem_base = get_thread_stack_base ();
+#ifdef __ia64__
+  /* Calculate and store off the base of this thread's register
+     backing store (RBS).  Unfortunately our implementation(s) of
+     scm_ia64_register_backing_store_base are only reliable for the
+     main thread.  For other threads, therefore, find out the current
+     top of the RBS, and use that as a maximum.  */
+  stack_base->reg_base = scm_ia64_register_backing_store_base ();
+  {
+    ucontext_t ctx;
+    void *bsp;
+    getcontext (&ctx);
+    bsp = scm_ia64_ar_bsp (&ctx);
+    if (stack_base->reg_base > bsp)
+      stack_base->reg_base = bsp;
+  }
+#endif
+  return GC_SUCCESS;
+}
+
+/* Fallback for libgc < 7.1: approximate the current stack position
+   with the address of a local variable and pass that, plus ARG, to
+   FN.  */
+static void *
+GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
+{
+  struct GC_stack_base stack_base;
+
+  stack_base.mem_base = (void*)&stack_base;
+#ifdef __ia64__
+  /* FIXME: Untested. */
+  {
+    ucontext_t ctx;
+    getcontext (&ctx);
+    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
+  }
+#endif
+
+  return fn (&stack_base, arg);
+}
+#endif /* HAVE_GC_STACK_BASE */
+
+
/* Now define with_gc_active and with_gc_inactive. */
#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
static SCM scm_i_default_dynamic_state;
+/* Run when a fluid is collected.  Walk every live thread and clear
+   slot N of its dynamic-state fluid vector, so the dead fluid's cell
+   can be reused without resurrecting a stale value.  Takes the thread
+   admin mutex because the all_threads list may change concurrently.  */
+void
+scm_i_reset_fluid (size_t n)
+{
+  scm_i_thread *t;
+
+  scm_i_pthread_mutex_lock (&thread_admin_mutex);
+  for (t = all_threads; t; t = t->next_thread)
+    if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
+      {
+        SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
+
+        /* A thread's vector may be shorter than the fluid table.  */
+        if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
+          SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
+      }
+  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+}
+
/* Perform first stage of thread initialisation, in non-guile mode.
*/
static void
-guilify_self_1 (SCM_STACKITEM *base)
-{
- scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");
-
- t->pthread = scm_i_pthread_self ();
- t->handle = SCM_BOOL_F;
- t->result = SCM_BOOL_F;
- t->cleanup_handler = SCM_BOOL_F;
- t->mutexes = SCM_EOL;
- t->held_mutex = NULL;
- t->join_queue = SCM_EOL;
- t->dynamic_state = SCM_BOOL_F;
- t->dynwinds = SCM_EOL;
- t->active_asyncs = SCM_EOL;
- t->block_asyncs = 1;
- t->pending_asyncs = 1;
- t->critical_section_level = 0;
- t->base = base;
+guilify_self_1 (struct GC_stack_base *base)
+{
+ scm_i_thread t;
+
+ /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
+ before allocating anything in this thread, because allocation could
+ cause GC to run, and GC could cause finalizers, which could invoke
+ Scheme functions, which need the current thread to be set. */
+
+ t.pthread = scm_i_pthread_self ();
+ t.handle = SCM_BOOL_F;
+ t.result = SCM_BOOL_F;
+ t.cleanup_handler = SCM_BOOL_F;
+ t.mutexes = SCM_EOL;
+ t.held_mutex = NULL;
+ t.join_queue = SCM_EOL;
+ t.dynamic_state = SCM_BOOL_F;
+ t.dynwinds = SCM_EOL;
+ t.active_asyncs = SCM_EOL;
+ t.block_asyncs = 1;
+ t.pending_asyncs = 1;
+ t.critical_section_level = 0;
+ t.base = base->mem_base;
#ifdef __ia64__
- /* Calculate and store off the base of this thread's register
- backing store (RBS). Unfortunately our implementation(s) of
- scm_ia64_register_backing_store_base are only reliable for the
- main thread. For other threads, therefore, find out the current
- top of the RBS, and use that as a maximum. */
- t->register_backing_store_base = scm_ia64_register_backing_store_base ();
- {
- ucontext_t ctx;
- void *bsp;
- getcontext (&ctx);
- bsp = scm_ia64_ar_bsp (&ctx);
- if (t->register_backing_store_base > bsp)
- t->register_backing_store_base = bsp;
- }
+ t.register_backing_store_base = base->reg_base;
#endif
- t->continuation_root = SCM_EOL;
- t->continuation_base = base;
- scm_i_pthread_cond_init (&t->sleep_cond, NULL);
- t->sleep_mutex = NULL;
- t->sleep_object = SCM_BOOL_F;
- t->sleep_fd = -1;
-
- if (pipe (t->sleep_pipe) != 0)
+ t.continuation_root = SCM_EOL;
+ t.continuation_base = t.base;
+ scm_i_pthread_cond_init (&t.sleep_cond, NULL);
+ t.sleep_mutex = NULL;
+ t.sleep_object = SCM_BOOL_F;
+ t.sleep_fd = -1;
+
+ if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
/* FIXME: Error conditions during the initialization phase are handled
gracelessly since public functions such as `scm_init_guile ()'
currently have type `void'. */
abort ();
- scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
- t->current_mark_stack_ptr = NULL;
- t->current_mark_stack_limit = NULL;
- t->canceled = 0;
- t->exited = 0;
- t->guile_mode = 0;
+ scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
+ t.current_mark_stack_ptr = NULL;
+ t.current_mark_stack_limit = NULL;
+ t.canceled = 0;
+ t.exited = 0;
+ t.guile_mode = 0;
- scm_i_pthread_setspecific (scm_i_thread_key, t);
+ /* The switcheroo. */
+ {
+ scm_i_thread *t_ptr = &t;
+
+ GC_disable ();
+ t_ptr = GC_malloc (sizeof (scm_i_thread));
+ memcpy (t_ptr, &t, sizeof t);
+
+ scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
- /* Cache the current thread in TLS for faster lookup. */
- scm_i_current_thread = t;
+ /* Cache the current thread in TLS for faster lookup. */
+ scm_i_current_thread = t_ptr;
#endif
- scm_i_pthread_mutex_lock (&thread_admin_mutex);
- t->next_thread = all_threads;
- all_threads = t;
- thread_count++;
- scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+ scm_i_pthread_mutex_lock (&thread_admin_mutex);
+ t_ptr->next_thread = all_threads;
+ all_threads = t_ptr;
+ thread_count++;
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+
+ GC_enable ();
+ }
}
/* Perform second stage of thread initialisation, in guile mode.
t->join_queue = make_queue ();
t->block_asyncs = 0;
+
+ /* See note in finalizers.c:queue_finalizer_async(). */
+ GC_invoke_finalizers ();
}
\f
#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
+/* Adapter with the scm_t_catch_body signature expected by
+   scm_internal_catch: DATA points to the cleanup thunk, which is
+   invoked with no arguments.  */
+static SCM
+call_cleanup (void *data)
+{
+  SCM *proc_p = data;
+  return scm_call_0 (*proc_p);
+}
+
/* Perform thread tear-down, in guile mode.
*/
static void *
{
scm_i_thread *t = (scm_i_thread *) v;
+ /* Ensure the signal handling thread has been launched, because we might be
+ shutting it down. This needs to be done in Guile mode. */
+ scm_i_ensure_signal_delivery_thread ();
+
if (!scm_is_false (t->cleanup_handler))
{
SCM ptr = t->cleanup_handler;
t->cleanup_handler = SCM_BOOL_F;
t->result = scm_internal_catch (SCM_BOOL_T,
- (scm_t_catch_body) scm_call_0, ptr,
+ call_cleanup, &ptr,
scm_handle_by_message_noexit, NULL);
}
scm_i_pthread_mutex_lock (&m->lock);
- /* Since MUTEX is in `t->mutexes', T must be its owner. */
- assert (scm_is_eq (m->owner, t->handle));
-
- unblock_from_queue (m->waiting);
+ /* Check whether T owns MUTEX. This is usually the case, unless
+ T abandoned MUTEX; in that case, T is no longer its owner (see
+ `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
+ if (scm_is_eq (m->owner, t->handle))
+ unblock_from_queue (m->waiting);
scm_i_pthread_mutex_unlock (&m->lock);
}
static void *
do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
{
- void *ret;
- int registered;
-
- registered = GC_register_my_thread (sb);
-
- ret = scm_with_guile (do_thread_exit, v);
-
- if (registered == GC_SUCCESS)
- GC_unregister_my_thread ();
+ /* Won't hurt if we are already registered. */
+#if SCM_USE_PTHREAD_THREADS
+ GC_register_my_thread (sb);
+#endif
- return ret;
+ return scm_with_guile (do_thread_exit, v);
}
static void
/* This handler is executed in non-guile mode. */
scm_i_thread *t = (scm_i_thread *) v, **tp;
+ /* If we were canceled, we were unable to clear `t->guile_mode', so do
+ it here. */
+ t->guile_mode = 0;
+
/* If this thread was cancelled while doing a cond wait, it will
still have a mutex locked, so we unlock it here. */
if (t->held_mutex)
case but it doesn't hurt to be consistent. */
scm_i_pthread_setspecific (scm_i_thread_key, t);
- /* Ensure the signal handling thread has been launched, because we might be
- shutting it down. */
- scm_i_ensure_signal_delivery_thread ();
-
- /* Unblocking the joining threads needs to happen in guile mode
- since the queue is a SCM data structure. Trampoline through
- GC_call_with_stack_base so that the GC works even if it already
- cleaned up for this thread. */
- GC_call_with_stack_base (do_thread_exit_trampoline, v);
+ /* Scheme-level thread finalizers and other cleanup needs to happen in
+ guile mode. */
+ GC_call_with_stack_base (do_thread_exit_trampoline, t);
/* Removing ourself from the list of all threads needs to happen in
non-guile mode since all SCM values on our stack become
scm_i_pthread_mutex_unlock (&thread_admin_mutex);
scm_i_pthread_setspecific (scm_i_thread_key, NULL);
+
+#if SCM_USE_PTHREAD_THREADS
+ GC_unregister_my_thread ();
+#endif
}
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
be sure. New threads are put into guile mode implicitly. */
static int
-scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
+scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
scm_i_pthread_once (&init_thread_key_once, init_thread_key);
initialization.
*/
scm_i_init_guile (base);
+
+#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
+ /* Allow other threads to come in later. */
+ GC_allow_register_threads ();
+#endif
+
scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
}
else
the first time. Only initialize this thread.
*/
scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
+
+ /* Register this thread with libgc. */
+#if SCM_USE_PTHREAD_THREADS
+ GC_register_my_thread (base);
+#endif
+
guilify_self_1 (base);
guilify_self_2 (parent);
}
}
}
-#if SCM_USE_PTHREAD_THREADS
-
-#if defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP
-/* This method for GNU/Linux and perhaps some other systems.
- It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
- available on them. */
-#define HAVE_GET_THREAD_STACK_BASE
-
-static SCM_STACKITEM *
-get_thread_stack_base ()
+void
+scm_init_guile ()
{
- pthread_attr_t attr;
- void *start, *end;
- size_t size;
-
- pthread_getattr_np (pthread_self (), &attr);
- pthread_attr_getstack (&attr, &start, &size);
- end = (char *)start + size;
-
- /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
- for the main thread, but we can use scm_get_stack_base in that
- case.
- */
-
-#ifndef PTHREAD_ATTR_GETSTACK_WORKS
- if ((void *)&attr < start || (void *)&attr >= end)
- return (SCM_STACKITEM *) GC_stackbottom;
+ struct GC_stack_base stack_base;
+
+ if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
+ scm_i_init_thread_for_guile (&stack_base,
+ scm_i_default_dynamic_state);
else
-#endif
{
-#if SCM_STACK_GROWS_UP
- return start;
-#else
- return end;
-#endif
+ fprintf (stderr, "Failed to get stack base for current thread.\n");
+ exit (EXIT_FAILURE);
}
}
-#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
-/* This method for MacOS X.
- It'd be nice if there was some documentation on pthread_get_stackaddr_np,
- but as of 2006 there's nothing obvious at apple.com. */
-#define HAVE_GET_THREAD_STACK_BASE
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
- return pthread_get_stackaddr_np (pthread_self ());
-}
-
-#elif defined (__MINGW32__)
-/* This method for mingw. In mingw the basic scm_get_stack_base can be used
- in any thread. We don't like hard-coding the name of a system, but there
- doesn't seem to be a cleaner way of knowing scm_get_stack_base can
- work. */
-#define HAVE_GET_THREAD_STACK_BASE
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
- return (SCM_STACKITEM *) GC_stackbottom;
-}
-
-#endif /* pthread methods of get_thread_stack_base */
-
-#else /* !SCM_USE_PTHREAD_THREADS */
-
-#define HAVE_GET_THREAD_STACK_BASE
-
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
- return (SCM_STACKITEM *) GC_stackbottom;
-}
-
-#endif /* !SCM_USE_PTHREAD_THREADS */
-
-#ifdef HAVE_GET_THREAD_STACK_BASE
-
-void
-scm_init_guile ()
-{
- scm_i_init_thread_for_guile (get_thread_stack_base (),
- scm_i_default_dynamic_state);
-}
-
-#endif
-
-void *
-scm_with_guile (void *(*func)(void *), void *data)
-{
- return scm_i_with_guile_and_parent (func, data,
- scm_i_default_dynamic_state);
-}
-
-SCM_UNUSED static void
-scm_leave_guile_cleanup (void *x)
-{
- on_thread_exit (SCM_I_CURRENT_THREAD);
-}
-
-struct with_guile_trampoline_args
+struct with_guile_args
{
GC_fn_type func;
void *data;
+ SCM parent;
};
static void *
with_guile_trampoline (void *data)
{
- struct with_guile_trampoline_args *args = data;
+ struct with_guile_args *args = data;
return scm_c_with_continuation_barrier (args->func, args->data);
}
-void *
-scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
+static void *
+with_guile_and_parent (struct GC_stack_base *base, void *data)
{
void *res;
int new_thread;
scm_i_thread *t;
- SCM_STACKITEM base_item;
+ struct with_guile_args *args = data;
- new_thread = scm_i_init_thread_for_guile (&base_item, parent);
+ new_thread = scm_i_init_thread_for_guile (base, args->parent);
t = SCM_I_CURRENT_THREAD;
if (new_thread)
{
/* We are in Guile mode. */
assert (t->guile_mode);
- res = scm_c_with_continuation_barrier (func, data);
+ res = scm_c_with_continuation_barrier (args->func, args->data);
/* Leave Guile mode. */
t->guile_mode = 0;
else if (t->guile_mode)
{
/* Already in Guile mode. */
- res = scm_c_with_continuation_barrier (func, data);
+ res = scm_c_with_continuation_barrier (args->func, args->data);
}
else
{
- struct with_guile_trampoline_args args;
- args.func = func;
- args.data = data;
-
/* We are not in Guile mode, either because we are not within a
scm_with_guile, or because we are within a scm_without_guile.
when this thread was first guilified. Thus, `base' must be
updated. */
#if SCM_STACK_GROWS_UP
- if (SCM_STACK_PTR (&base_item) < t->base)
- t->base = SCM_STACK_PTR (&base_item);
+ if (SCM_STACK_PTR (base->mem_base) < t->base)
+ t->base = SCM_STACK_PTR (base->mem_base);
#else
- if (SCM_STACK_PTR (&base_item) > t->base)
- t->base = SCM_STACK_PTR (&base_item);
+ if (SCM_STACK_PTR (base->mem_base) > t->base)
+ t->base = SCM_STACK_PTR (base->mem_base);
#endif
t->guile_mode = 1;
- res = with_gc_active (with_guile_trampoline, &args);
+ res = with_gc_active (with_guile_trampoline, args);
t->guile_mode = 0;
}
return res;
}
+/* Run FUNC on DATA in Guile mode, using PARENT as the dynamic state
+   for a thread not previously known to Guile.  The stack base is
+   obtained via GC_call_with_stack_base so that libgc and Guile agree
+   on this thread's stack extent.  */
+static void *
+scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
+{
+  struct with_guile_args args;
+
+  args.func = func;
+  args.data = data;
+  args.parent = parent;
+
+  return GC_call_with_stack_base (with_guile_and_parent, &args);
+}
+
+/* Public entry point: run FUNC on DATA in Guile mode, with the
+   default dynamic state as parent for newly guilified threads.  */
+void *
+scm_with_guile (void *(*func)(void *), void *data)
+{
+  return scm_i_with_guile_and_parent (func, data,
+                                      scm_i_default_dynamic_state);
+}
+
void *
scm_without_guile (void *(*func)(void *), void *data)
{
SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
handler, SCM_ARG2, FUNC_NAME);
+ GC_collect_a_little ();
data.parent = scm_current_dynamic_state ();
data.thunk = thunk;
data.handler = handler;
errno = err;
scm_syserror (NULL);
}
- scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
+
+ while (scm_is_false (data.thread))
+ scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
+
scm_i_pthread_mutex_unlock (&data.mutex);
return data.thread;
errno = err;
scm_syserror (NULL);
}
- scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
+
+ while (scm_is_false (data.thread))
+ scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
+
scm_i_pthread_mutex_unlock (&data.mutex);
+ assert (SCM_I_IS_THREAD (data.thread));
+
return data.thread;
}
if (SCM_I_IS_THREAD (new_owner))
{
scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
+
+ /* FIXME: The order in which `t->admin_mutex' and
+ `m->lock' are taken differs from that in
+ `on_thread_exit', potentially leading to deadlocks. */
scm_i_pthread_mutex_lock (&t->admin_mutex);
/* Only keep a weak reference to MUTEX so that it's not
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
(SCM m, SCM timeout, SCM owner),
-"Lock @var{mutex}. If the mutex is already locked, the calling thread "
-"blocks until the mutex becomes available. The function returns when "
-"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
-"a thread already owns will succeed right away and will not block the "
-"thread. That is, Guile's mutexes are @emph{recursive}. ")
+ "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
+ "thread blocks until the mutex becomes available. The function\n"
+ "returns when the calling thread owns the lock on @var{m}.\n"
+ "Locking a mutex that a thread already owns will succeed right\n"
+ "away and will not block the thread. That is, Guile's mutexes\n"
+ "are @emph{recursive}.")
#define FUNC_NAME s_scm_lock_mutex_timed
{
SCM exception;
waittime = &cwaittime;
}
+ if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
+ SCM_VALIDATE_THREAD (3, owner);
+
exception = fat_mutex_lock (m, waittime, owner, &ret);
if (!scm_is_false (exception))
scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
}
#undef FUNC_NAME
+/* Discard the SCM result of scm_lock_mutex, yielding the void(SCM)
+   signature required by the dynwind rewind handler.  */
+static void
+lock_mutex_return_void (SCM mx)
+{
+  (void) scm_lock_mutex (mx);
+}
+
+/* Discard the SCM result of scm_unlock_mutex, yielding the void(SCM)
+   signature required by the dynwind unwind handler.  */
+static void
+unlock_mutex_return_void (SCM mx)
+{
+  (void) scm_unlock_mutex (mx);
+}
+
void
scm_dynwind_lock_mutex (SCM mutex)
{
- scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
+ scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
SCM_F_WIND_EXPLICITLY);
- scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
+ scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
SCM_F_WIND_EXPLICITLY);
}
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
(SCM cv, SCM mx, SCM t),
-"Wait until @var{cond-var} has been signalled. While waiting, "
-"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
-"is locked again when this function returns. When @var{time} is given, "
+"Wait until condition variable @var{cv} has been signalled. While waiting, "
+"mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
+"is locked again when this function returns. When @var{t} is given, "
"it specifies a point in time where the waiting should be aborted. It "
"can be either a integer as returned by @code{current-time} or a pair "
"as returned by @code{gettimeofday}. When the waiting is aborted the "
struct select_args
{
int nfds;
- SELECT_TYPE *read_fds;
- SELECT_TYPE *write_fds;
- SELECT_TYPE *except_fds;
+ fd_set *read_fds;
+ fd_set *write_fds;
+ fd_set *except_fds;
struct timeval *timeout;
int result;
return NULL;
}
+#if !SCM_HAVE_SYS_SELECT_H
+static int scm_std_select (int nfds,
+ fd_set *readfds,
+ fd_set *writefds,
+ fd_set *exceptfds,
+ struct timeval *timeout);
+#endif
+
int
scm_std_select (int nfds,
- SELECT_TYPE *readfds,
- SELECT_TYPE *writefds,
- SELECT_TYPE *exceptfds,
+ fd_set *readfds,
+ fd_set *writefds,
+ fd_set *exceptfds,
struct timeval *timeout)
{
fd_set my_readfds;
}
#undef FUNC_NAME
+/* Machine-wide processor count, via gnulib's nproc module.  */
+SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
+            (void),
+            "Return the total number of processors of the machine, which\n"
+            "is guaranteed to be at least 1. A ``processor'' here is a\n"
+            "thread execution unit, which can be either:\n\n"
+            "@itemize\n"
+            "@item an execution core in a (possibly multi-core) chip, in a\n"
+            " (possibly multi- chip) module, in a single computer, or\n"
+            "@item a thread execution unit inside a core in the case of\n"
+            " @dfn{hyper-threaded} CPUs.\n"
+            "@end itemize\n\n"
+            "Which of the two definitions is used, is unspecified.\n")
+#define FUNC_NAME s_scm_total_processor_count
+{
+  return scm_from_ulong (num_processors (NPROC_ALL));
+}
+#undef FUNC_NAME
+
+/* Processors available to this process (e.g. after setaffinity), via
+   gnulib's nproc module.  */
+SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
+            (void),
+            "Like @code{total-processor-count}, but return the number of\n"
+            "processors available to the current process. See\n"
+            "@code{setaffinity} and @code{getaffinity} for more\n"
+            "information.\n")
+#define FUNC_NAME s_scm_current_processor_count
+{
+  return scm_from_ulong (num_processors (NPROC_CURRENT));
+}
+#undef FUNC_NAME
+
+
+\f
+
static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;
#endif
void
-scm_threads_prehistory (SCM_STACKITEM *base)
+scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
scm_i_pthread_cond_init (&wake_up_cond, NULL);
- guilify_self_1 (base);
+ guilify_self_1 ((struct GC_stack_base *) base);
}
scm_t_bits scm_tc16_thread;
return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
# endif /* linux */
+# ifdef __FreeBSD__
+# include <ucontext.h>
+/* On FreeBSD/ia64 the register backing store starts at a fixed
+   address — NOTE(review): presumably the kernel-defined RBS base;
+   confirm against FreeBSD ia64 VM layout.  */
+void *
+scm_ia64_register_backing_store_base (void)
+{
+  return (void *)0x8000000000000000;
+}
+/* Current top of the register backing store, recovered from a saved
+   ucontext: the saved bspstore plus the number of dirty bytes not yet
+   flushed to it.  */
+void *
+scm_ia64_ar_bsp (const void *opaque)
+{
+  const ucontext_t *ctx = opaque;
+  return (void *)(ctx->uc_mcontext.mc_special.bspstore
+                  + ctx->uc_mcontext.mc_special.ndirty);
+}
+# endif /* __FreeBSD__ */
#endif /* __ia64__ */