-/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
+ * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+ * Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 3 of
+ * the License, or (at your option) any later version.
*
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * This library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA
*/
# include <config.h>
#endif
-#include "libguile/boehm-gc.h"
+#include "libguile/bdw-gc.h"
#include "libguile/_scm.h"
+#include <stdlib.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>
-#include <assert.h>
#ifdef HAVE_STRING_H
#include <string.h> /* for memset used by FD_ZERO on Solaris 10 */
#include <sys/time.h>
#endif
+#if HAVE_PTHREAD_NP_H
+# include <pthread_np.h>
+#endif
+
+#include <assert.h>
+#include <fcntl.h>
+#include <nproc.h>
+
#include "libguile/validate.h"
#include "libguile/root.h"
#include "libguile/eval.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"
+#include "libguile/weaks.h"
+
+#include <full-read.h>
-#ifdef __MINGW32__
-#ifndef ETIMEDOUT
-# define ETIMEDOUT WSAETIMEDOUT
+
+\f
+
+/* First some libgc shims. */
+
+/* Make sure GC_fn_type is defined; it is missing from the public
+ headers of GC 7.1 and earlier. */
+#ifndef HAVE_GC_FN_TYPE
+typedef void * (* GC_fn_type) (void *);
#endif
-# include <fcntl.h>
-# include <process.h>
-# define pipe(fd) _pipe (fd, 256, O_BINARY)
-#endif /* __MINGW32__ */
-#include <full-read.h>
+
+#ifndef GC_SUCCESS
+#define GC_SUCCESS 0
+#endif
+
+#ifndef GC_UNIMPLEMENTED
+#define GC_UNIMPLEMENTED 3
+#endif
+
+/* Likewise struct GC_stack_base is missing before 7.1. */
+#ifndef HAVE_GC_STACK_BASE
+struct GC_stack_base {
+ void * mem_base; /* Base of memory stack. */
+#ifdef __ia64__
+ void * reg_base; /* Base of separate register stack. */
+#endif
+};
+
+static int
+GC_register_my_thread (struct GC_stack_base *stack_base)
+{
+ return GC_UNIMPLEMENTED;
+}
+
+static void
+GC_unregister_my_thread ()
+{
+}
+
+#if !SCM_USE_PTHREAD_THREADS
+/* No threads; we can just use GC_stackbottom. */
+static void *
+get_thread_stack_base ()
+{
+ return GC_stackbottom;
+}
+
+#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
+ && defined PTHREAD_ATTR_GETSTACK_WORKS
+/* This method is for GNU/Linux and perhaps some other systems.
+ It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
+ available on them. */
+static void *
+get_thread_stack_base ()
+{
+ pthread_attr_t attr;
+ void *start, *end;
+ size_t size;
+
+ pthread_getattr_np (pthread_self (), &attr);
+ pthread_attr_getstack (&attr, &start, &size);
+ end = (char *)start + size;
+
+#if SCM_STACK_GROWS_UP
+ return start;
+#else
+ return end;
+#endif
+}
+
+#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
+/* This method is for MacOS X.
+   It'd be nice if there were some documentation on pthread_get_stackaddr_np,
+   but as of 2006 there's nothing obvious at apple.com. */
+static void *
+get_thread_stack_base ()
+{
+ return pthread_get_stackaddr_np (pthread_self ());
+}
+
+#elif HAVE_PTHREAD_ATTR_GET_NP
+/* This one is for FreeBSD 9. */
+static void *
+get_thread_stack_base ()
+{
+ pthread_attr_t attr;
+ void *start, *end;
+ size_t size;
+
+ pthread_attr_init (&attr);
+ pthread_attr_get_np (pthread_self (), &attr);
+ pthread_attr_getstack (&attr, &start, &size);
+ pthread_attr_destroy (&attr);
+
+ end = (char *)start + size;
+
+#if SCM_STACK_GROWS_UP
+ return start;
+#else
+ return end;
+#endif
+}
+
+#else
+#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
+#endif
+
+static int
+GC_get_stack_base (struct GC_stack_base *stack_base)
+{
+ stack_base->mem_base = get_thread_stack_base ();
+#ifdef __ia64__
+ /* Calculate and store off the base of this thread's register
+ backing store (RBS). Unfortunately our implementation(s) of
+ scm_ia64_register_backing_store_base are only reliable for the
+ main thread. For other threads, therefore, find out the current
+ top of the RBS, and use that as a maximum. */
+ stack_base->reg_base = scm_ia64_register_backing_store_base ();
+ {
+ ucontext_t ctx;
+ void *bsp;
+ getcontext (&ctx);
+ bsp = scm_ia64_ar_bsp (&ctx);
+ if (stack_base->reg_base > bsp)
+ stack_base->reg_base = bsp;
+ }
+#endif
+ return GC_SUCCESS;
+}
+
+static void *
+GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
+{
+ struct GC_stack_base stack_base;
+
+ stack_base.mem_base = (void*)&stack_base;
+#ifdef __ia64__
+ /* FIXME: Untested. */
+ {
+ ucontext_t ctx;
+ getcontext (&ctx);
+ stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
+ }
+#endif
+
+ return fn (&stack_base, arg);
+}
+#endif /* HAVE_GC_STACK_BASE */
+
+
+/* Now define with_gc_active and with_gc_inactive. */
+
+#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
+
+/* We have a sufficiently new libgc (7.2 or newer). */
+
+static void*
+with_gc_inactive (GC_fn_type func, void *data)
+{
+ return GC_do_blocking (func, data);
+}
+
+static void*
+with_gc_active (GC_fn_type func, void *data)
+{
+ return GC_call_with_gc_active (func, data);
+}
+
+#else
+
+/* libgc not new enough, so never actually deactivate GC.
+
+ Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
+ GC_call_with_gc_active. */
+
+static void*
+with_gc_inactive (GC_fn_type func, void *data)
+{
+ return func (data);
+}
+
+static void*
+with_gc_active (GC_fn_type func, void *data)
+{
+ return func (data);
+}
+
+#endif /* HAVE_GC_DO_BLOCKING */
+
\f
static void
}
}
+\f
/*** Queues */
+/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
+ the risk of false references leading to unbounded retained space as
+ described in "Bounding Space Usage of Conservative Garbage Collectors",
+ H.J. Boehm, 2001. */
+
/* Make an empty queue data structure.
*/
static SCM
if (scm_is_eq (c, SCM_CAR (q)))
SCM_SETCAR (q, SCM_CDR (c));
SCM_SETCDR (prev, SCM_CDR (c));
+
+ /* GC-robust */
+ SCM_SETCDR (c, SCM_EOL);
+
SCM_CRITICAL_SECTION_END;
return 1;
}
if (scm_is_null (SCM_CDR (q)))
SCM_SETCAR (q, SCM_EOL);
SCM_CRITICAL_SECTION_END;
+
+ /* GC-robust */
+ SCM_SETCDR (c, SCM_EOL);
+
return SCM_CAR (c);
}
}
return 1;
}
-static size_t
-thread_free (SCM obj)
-{
- scm_i_thread *t = SCM_I_THREAD_DATA (obj);
- assert (t->exited);
- scm_gc_free (t, sizeof (*t), "thread");
- return 0;
-}
-
+\f
/*** Blocking on queues. */
/* See also scm_i_queue_async_cell for how such a block is
return thread;
}
+\f
/* Getting into and out of guile mode.
*/
-/* Ken Raeburn observes that the implementation of suspend and resume
- (and the things that build on top of them) are very likely not
- correct (see below). We will need fix this eventually, and that's
- why scm_leave_guile/scm_enter_guile are not exported in the API.
-
- Ken writes:
-
- Consider this sequence:
-
- Function foo, called in Guile mode, calls suspend (maybe indirectly
- through scm_leave_guile), which does this:
-
- // record top of stack for the GC
- t->top = SCM_STACK_PTR (&t); // just takes address of automatic
- var 't'
- // save registers.
- SCM_FLUSH_REGISTER_WINDOWS; // sparc only
- setjmp (t->regs); // here's most of the magic
-
- ... and returns.
-
- Function foo has a SCM value X, a handle on a non-immediate object, in
- a caller-saved register R, and it's the only reference to the object
- currently.
-
- The compiler wants to use R in suspend, so it pushes the current
- value, X, into a stack slot which will be reloaded on exit from
- suspend; then it loads stuff into R and goes about its business. The
- setjmp call saves (some of) the current registers, including R, which
- no longer contains X. (This isn't a problem for a normal
- setjmp/longjmp situation, where longjmp would be called before
- setjmp's caller returns; the old value for X would be loaded back from
- the stack after the longjmp, before the function returned.)
-
- So, suspend returns, loading X back into R (and invalidating the jump
- buffer) in the process. The caller foo then goes off and calls a
- bunch of other functions out of Guile mode, occasionally storing X on
- the stack again, but, say, much deeper on the stack than suspend's
- stack frame went, and the stack slot where suspend had written X has
- long since been overwritten with other values.
-
- Okay, nothing actively broken so far. Now, let garbage collection
- run, triggered by another thread.
-
- The thread calling foo is out of Guile mode at the time, so the
- garbage collector just scans a range of stack addresses. Too bad that
- X isn't stored there. So the pointed-to storage goes onto the free
- list, and I think you can see where things go from there.
-
- Is there anything I'm missing that'll prevent this scenario from
- happening? I mean, aside from, "well, suspend and scm_leave_guile
- don't have many local variables, so they probably won't need to save
- any registers on most systems, so we hope everything will wind up in
- the jump buffer and we'll just get away with it"?
-
- (And, going the other direction, if scm_leave_guile and suspend push
- the stack pointer over onto a new page, and foo doesn't make further
- function calls and thus the stack pointer no longer includes that
- page, are we guaranteed that the kernel cannot release the now-unused
- stack page that contains the top-of-stack pointer we just saved? I
- don't know if any OS actually does that. If it does, we could get
- faults in garbage collection.)
-
- I don't think scm_without_guile has to have this problem, as it gets
- more control over the stack handling -- but it should call setjmp
- itself. I'd probably try something like:
-
- // record top of stack for the GC
- t->top = SCM_STACK_PTR (&t);
- // save registers.
- SCM_FLUSH_REGISTER_WINDOWS;
- setjmp (t->regs);
- res = func(data);
- scm_enter_guile (t);
-
- ... though even that's making some assumptions about the stack
- ordering of local variables versus caller-saved registers.
-
- For something like scm_leave_guile to work, I don't think it can just
- rely on invalidated jump buffers. A valid jump buffer, and a handle
- on the stack state at the point when the jump buffer was initialized,
- together, would work fine, but I think then we're talking about macros
- invoking setjmp in the caller's stack frame, and requiring that the
- caller of scm_leave_guile also call scm_enter_guile before returning,
- kind of like pthread_cleanup_push/pop calls that have to be paired up
- in a function. (In fact, the pthread ones have to be paired up
- syntactically, as if they might expand to a compound statement
- incorporating the user's code, and invoking a compiler's
- exception-handling primitives. Which might be something to think
- about for cases where Guile is used with C++ exceptions or
- pthread_cancel.)
-*/
-
+/* Key used to attach a cleanup handler to a given thread. Also, if
+ thread-local storage is unavailable, this key is used to retrieve the
+ current thread with `pthread_getspecific ()'. */
scm_i_pthread_key_t scm_i_thread_key;
-static void
-resume (scm_i_thread *t)
-{
- t->top = NULL;
-}
-
-typedef void* scm_t_guile_ticket;
-static void
-scm_enter_guile (scm_t_guile_ticket ticket)
-{
- scm_i_thread *t = (scm_i_thread *)ticket;
- if (t)
- {
- resume (t);
- }
-}
+#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
-static scm_i_thread *
-suspend (void)
-{
- scm_i_thread *t = SCM_I_CURRENT_THREAD;
+/* When thread-local storage (TLS) is available, a pointer to the
+ current-thread object is kept in TLS. Note that storing the thread-object
+ itself in TLS (rather than a pointer to some malloc'd memory) is not
+ possible since thread objects may live longer than the actual thread they
+ represent. */
+SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
- /* record top of stack for the GC */
- t->top = SCM_STACK_PTR (&t);
- /* save registers. */
- SCM_FLUSH_REGISTER_WINDOWS;
- setjmp (t->regs);
- return t;
-}
+#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
-static scm_t_guile_ticket
-scm_leave_guile ()
-{
- scm_i_thread *t = suspend ();
- return (scm_t_guile_ticket) t;
-}
static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
static scm_i_thread *all_threads = NULL;
static SCM scm_i_default_dynamic_state;
+/* Run when a fluid is collected. */
+void
+scm_i_reset_fluid (size_t n)
+{
+ scm_i_thread *t;
+
+ scm_i_pthread_mutex_lock (&thread_admin_mutex);
+ for (t = all_threads; t; t = t->next_thread)
+ if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
+ {
+ SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
+
+ if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
+ SCM_SIMPLE_VECTOR_SET (v, n, SCM_UNDEFINED);
+ }
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+}
+
/* Perform first stage of thread initialisation, in non-guile mode.
*/
static void
-guilify_self_1 (SCM_STACKITEM *base)
-{
- scm_i_thread *t = scm_gc_malloc (sizeof (scm_i_thread), "thread");
-
- t->pthread = scm_i_pthread_self ();
- t->handle = SCM_BOOL_F;
- t->result = SCM_BOOL_F;
- t->cleanup_handler = SCM_BOOL_F;
- t->mutexes = SCM_EOL;
- t->held_mutex = NULL;
- t->join_queue = SCM_EOL;
- t->dynamic_state = SCM_BOOL_F;
- t->dynwinds = SCM_EOL;
- t->active_asyncs = SCM_EOL;
- t->block_asyncs = 1;
- t->pending_asyncs = 1;
- t->last_debug_frame = NULL;
- t->base = base;
+guilify_self_1 (struct GC_stack_base *base)
+{
+ scm_i_thread t;
+
+ /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
+ before allocating anything in this thread, because allocation could
+ cause GC to run, and GC could cause finalizers, which could invoke
+ Scheme functions, which need the current thread to be set. */
+
+ t.pthread = scm_i_pthread_self ();
+ t.handle = SCM_BOOL_F;
+ t.result = SCM_BOOL_F;
+ t.cleanup_handler = SCM_BOOL_F;
+ t.mutexes = SCM_EOL;
+ t.held_mutex = NULL;
+ t.join_queue = SCM_EOL;
+ t.dynamic_state = SCM_BOOL_F;
+ t.dynwinds = SCM_EOL;
+ t.active_asyncs = SCM_EOL;
+ t.block_asyncs = 1;
+ t.pending_asyncs = 1;
+ t.critical_section_level = 0;
+ t.base = base->mem_base;
#ifdef __ia64__
- /* Calculate and store off the base of this thread's register
- backing store (RBS). Unfortunately our implementation(s) of
- scm_ia64_register_backing_store_base are only reliable for the
- main thread. For other threads, therefore, find out the current
- top of the RBS, and use that as a maximum. */
- t->register_backing_store_base = scm_ia64_register_backing_store_base ();
- {
- ucontext_t ctx;
- void *bsp;
- getcontext (&ctx);
- bsp = scm_ia64_ar_bsp (&ctx);
- if (t->register_backing_store_base > bsp)
- t->register_backing_store_base = bsp;
- }
+ t.register_backing_store_base = base->reg_base;
#endif
- t->continuation_root = SCM_EOL;
- t->continuation_base = base;
- scm_i_pthread_cond_init (&t->sleep_cond, NULL);
- t->sleep_mutex = NULL;
- t->sleep_object = SCM_BOOL_F;
- t->sleep_fd = -1;
-
- if (pipe (t->sleep_pipe) != 0)
+ t.continuation_root = SCM_EOL;
+ t.continuation_base = t.base;
+ scm_i_pthread_cond_init (&t.sleep_cond, NULL);
+ t.sleep_mutex = NULL;
+ t.sleep_object = SCM_BOOL_F;
+ t.sleep_fd = -1;
+
+ if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
/* FIXME: Error conditions during the initialization phase are handled
gracelessly since public functions such as `scm_init_guile ()'
currently have type `void'. */
abort ();
- scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
- t->current_mark_stack_ptr = NULL;
- t->current_mark_stack_limit = NULL;
- t->canceled = 0;
- t->exited = 0;
- t->guile_mode = 0;
+ scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
+ t.current_mark_stack_ptr = NULL;
+ t.current_mark_stack_limit = NULL;
+ t.canceled = 0;
+ t.exited = 0;
+ t.guile_mode = 0;
- scm_i_pthread_setspecific (scm_i_thread_key, t);
+ /* The switcheroo. */
+ {
+ scm_i_thread *t_ptr = &t;
+
+ GC_disable ();
+ t_ptr = GC_malloc (sizeof (scm_i_thread));
+ memcpy (t_ptr, &t, sizeof t);
- scm_i_pthread_mutex_lock (&thread_admin_mutex);
- t->next_thread = all_threads;
- all_threads = t;
- thread_count++;
- scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+ scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
+
+#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
+ /* Cache the current thread in TLS for faster lookup. */
+ scm_i_current_thread = t_ptr;
+#endif
+
+ scm_i_pthread_mutex_lock (&thread_admin_mutex);
+ t_ptr->next_thread = all_threads;
+ all_threads = t_ptr;
+ thread_count++;
+ scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+
+ GC_enable ();
+ }
}
/* Perform second stage of thread initialisation, in guile mode.
t->continuation_root = scm_cons (t->handle, SCM_EOL);
t->continuation_base = t->base;
+ t->vm = SCM_BOOL_F;
if (scm_is_true (parent))
t->dynamic_state = scm_make_dynamic_state (parent);
t->join_queue = make_queue ();
t->block_asyncs = 0;
+
+ /* See note in finalizers.c:queue_finalizer_async(). */
+ GC_invoke_finalizers ();
}
\f
#define SCM_MUTEXP(x) SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
#define SCM_MUTEX_DATA(x) ((fat_mutex *) SCM_SMOB_DATA (x))
+static SCM
+call_cleanup (void *data)
+{
+ SCM *proc_p = data;
+ return scm_call_0 (*proc_p);
+}
+
/* Perform thread tear-down, in guile mode.
*/
static void *
{
scm_i_thread *t = (scm_i_thread *) v;
+ /* Ensure the signal handling thread has been launched, because we might be
+ shutting it down. This needs to be done in Guile mode. */
+ scm_i_ensure_signal_delivery_thread ();
+
if (!scm_is_false (t->cleanup_handler))
{
SCM ptr = t->cleanup_handler;
t->cleanup_handler = SCM_BOOL_F;
t->result = scm_internal_catch (SCM_BOOL_T,
- (scm_t_catch_body) scm_call_0, ptr,
+ call_cleanup, &ptr,
scm_handle_by_message_noexit, NULL);
}
while (!scm_is_null (t->mutexes))
{
- SCM mutex = SCM_CAR (t->mutexes);
- fat_mutex *m = SCM_MUTEX_DATA (mutex);
- scm_i_pthread_mutex_lock (&m->lock);
+ SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);
- unblock_from_queue (m->waiting);
+ if (!SCM_UNBNDP (mutex))
+ {
+ fat_mutex *m = SCM_MUTEX_DATA (mutex);
- scm_i_pthread_mutex_unlock (&m->lock);
- t->mutexes = SCM_CDR (t->mutexes);
+ scm_i_pthread_mutex_lock (&m->lock);
+
+ /* Check whether T owns MUTEX. This is usually the case, unless
+ T abandoned MUTEX; in that case, T is no longer its owner (see
+ `fat_mutex_lock') but MUTEX is still in `t->mutexes'. */
+ if (scm_is_eq (m->owner, t->handle))
+ unblock_from_queue (m->waiting);
+
+ scm_i_pthread_mutex_unlock (&m->lock);
+ }
+
+ t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
}
scm_i_pthread_mutex_unlock (&t->admin_mutex);
return NULL;
}
+static void *
+do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
+{
+ /* Won't hurt if we are already registered. */
+#if SCM_USE_PTHREAD_THREADS
+ GC_register_my_thread (sb);
+#endif
+
+ return scm_with_guile (do_thread_exit, v);
+}
+
static void
on_thread_exit (void *v)
{
/* This handler is executed in non-guile mode. */
scm_i_thread *t = (scm_i_thread *) v, **tp;
+ /* If we were canceled, we were unable to clear `t->guile_mode', so do
+ it here. */
+ t->guile_mode = 0;
+
/* If this thread was cancelled while doing a cond wait, it will
still have a mutex locked, so we unlock it here. */
if (t->held_mutex)
t->held_mutex = NULL;
}
- scm_i_pthread_setspecific (scm_i_thread_key, v);
-
- /* Ensure the signal handling thread has been launched, because we might be
- shutting it down. */
- scm_i_ensure_signal_delivery_thread ();
-
- /* Unblocking the joining threads needs to happen in guile mode
- since the queue is a SCM data structure. */
+ /* Reinstate the current thread for purposes of scm_with_guile
+ guile-mode cleanup handlers. Only really needed in the non-TLS
+ case but it doesn't hurt to be consistent. */
+ scm_i_pthread_setspecific (scm_i_thread_key, t);
- /* Note: Since `do_thread_exit ()' uses allocates memory via `libgc', we
- assume the GC is usable at this point, and notably that thread-local
- storage (TLS) hasn't been deallocated yet. */
- do_thread_exit (v);
+ /* Scheme-level thread finalizers and other cleanup needs to happen in
+ guile mode. */
+ GC_call_with_stack_base (do_thread_exit_trampoline, t);
/* Removing ourself from the list of all threads needs to happen in
non-guile mode since all SCM values on our stack become
if (*tp == t)
{
*tp = t->next_thread;
+
+ /* GC-robust */
+ t->next_thread = NULL;
+
break;
}
thread_count--;
scm_i_pthread_mutex_unlock (&thread_admin_mutex);
scm_i_pthread_setspecific (scm_i_thread_key, NULL);
+
+#if SCM_USE_PTHREAD_THREADS
+ GC_unregister_my_thread ();
+#endif
}
static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
static void
init_thread_key (void)
{
- scm_i_pthread_key_create (&scm_i_thread_key, NULL);
+ scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
}
-/* Perform any initializations necessary to bring the current thread
- into guile mode, initializing Guile itself, if necessary.
+/* Perform any initializations necessary to make the current thread
+ known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
+ if necessary.
BASE is the stack base to use with GC.
PARENT is the dynamic state to use as the parent, ot SCM_BOOL_F in
which case the default dynamic state is used.
- Return zero when the thread was in guile mode already; otherwise
+ Returns zero when the thread was known to guile already; otherwise
return 1.
-*/
+
+ Note that it could be the case that the thread was known
+ to Guile, but not in guile mode (because we are within a
+ scm_without_guile call). Check SCM_I_CURRENT_THREAD->guile_mode to
+ be sure. New threads are put into guile mode implicitly. */
static int
-scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
+scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
{
- scm_i_thread *t;
-
scm_i_pthread_once (&init_thread_key_once, init_thread_key);
- if ((t = SCM_I_CURRENT_THREAD) == NULL)
+ if (SCM_I_CURRENT_THREAD)
+ {
+ /* Thread is already known to Guile.
+ */
+ return 0;
+ }
+ else
{
/* This thread has not been guilified yet.
*/
initialization.
*/
scm_i_init_guile (base);
+
+#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
+ /* Allow other threads to come in later. */
+ GC_allow_register_threads ();
+#endif
+
scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
}
else
the first time. Only initialize this thread.
*/
scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
+
+ /* Register this thread with libgc. */
+#if SCM_USE_PTHREAD_THREADS
+ GC_register_my_thread (base);
+#endif
+
guilify_self_1 (base);
guilify_self_2 (parent);
}
return 1;
}
- else if (t->top)
- {
- /* This thread is already guilified but not in guile mode, just
- resume it.
-
- A user call to scm_with_guile() will lead us to here. This could
- happen from anywhere on the stack, and in particular lower on the
- stack than when it was when this thread was first guilified. Thus,
- `base' must be updated. */
-#if SCM_STACK_GROWS_UP
- if (base < t->base)
- t->base = base;
-#else
- if (base > t->base)
- t->base = base;
-#endif
+}
- scm_enter_guile ((scm_t_guile_ticket) t);
- return 1;
- }
+void
+scm_init_guile ()
+{
+ struct GC_stack_base stack_base;
+
+ if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
+ scm_i_init_thread_for_guile (&stack_base,
+ scm_i_default_dynamic_state);
else
{
- /* Thread is already in guile mode. Nothing to do.
- */
- return 0;
+ fprintf (stderr, "Failed to get stack base for current thread.\n");
+ exit (EXIT_FAILURE);
}
}
-#if SCM_USE_PTHREAD_THREADS
+struct with_guile_args
+{
+ GC_fn_type func;
+ void *data;
+ SCM parent;
+};
-#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
-/* This method for GNU/Linux and perhaps some other systems.
- It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
- available on them. */
-#define HAVE_GET_THREAD_STACK_BASE
+static void *
+with_guile_trampoline (void *data)
+{
+ struct with_guile_args *args = data;
-static SCM_STACKITEM *
-get_thread_stack_base ()
+ return scm_c_with_continuation_barrier (args->func, args->data);
+}
+
+static void *
+with_guile_and_parent (struct GC_stack_base *base, void *data)
{
- pthread_attr_t attr;
- void *start, *end;
- size_t size;
+ void *res;
+ int new_thread;
+ scm_i_thread *t;
+ struct with_guile_args *args = data;
- pthread_getattr_np (pthread_self (), &attr);
- pthread_attr_getstack (&attr, &start, &size);
- end = (char *)start + size;
+ new_thread = scm_i_init_thread_for_guile (base, args->parent);
+ t = SCM_I_CURRENT_THREAD;
+ if (new_thread)
+ {
+ /* We are in Guile mode. */
+ assert (t->guile_mode);
- /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
- for the main thread, but we can use scm_get_stack_base in that
- case.
- */
+ res = scm_c_with_continuation_barrier (args->func, args->data);
-#ifndef PTHREAD_ATTR_GETSTACK_WORKS
- if ((void *)&attr < start || (void *)&attr >= end)
- return (SCM_STACKITEM *) GC_stackbottom;
+ /* Leave Guile mode. */
+ t->guile_mode = 0;
+ }
+ else if (t->guile_mode)
+ {
+ /* Already in Guile mode. */
+ res = scm_c_with_continuation_barrier (args->func, args->data);
+ }
else
-#endif
{
+ /* We are not in Guile mode, either because we are not within a
+ scm_with_guile, or because we are within a scm_without_guile.
+
+ This call to scm_with_guile() could happen from anywhere on the
+ stack, and in particular lower on the stack than when it was
+ when this thread was first guilified. Thus, `base' must be
+ updated. */
#if SCM_STACK_GROWS_UP
- return start;
+ if (SCM_STACK_PTR (base->mem_base) < t->base)
+ t->base = SCM_STACK_PTR (base->mem_base);
#else
- return end;
+ if (SCM_STACK_PTR (base->mem_base) > t->base)
+ t->base = SCM_STACK_PTR (base->mem_base);
#endif
- }
-}
-#elif HAVE_PTHREAD_GET_STACKADDR_NP
-/* This method for MacOS X.
- It'd be nice if there was some documentation on pthread_get_stackaddr_np,
- but as of 2006 there's nothing obvious at apple.com. */
-#define HAVE_GET_THREAD_STACK_BASE
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
- return pthread_get_stackaddr_np (pthread_self ());
-}
-
-#elif defined (__MINGW32__)
-/* This method for mingw. In mingw the basic scm_get_stack_base can be used
- in any thread. We don't like hard-coding the name of a system, but there
- doesn't seem to be a cleaner way of knowing scm_get_stack_base can
- work. */
-#define HAVE_GET_THREAD_STACK_BASE
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
- return (SCM_STACKITEM *) GC_stackbottom;
+ t->guile_mode = 1;
+ res = with_gc_active (with_guile_trampoline, args);
+ t->guile_mode = 0;
+ }
+ return res;
}
-#endif /* pthread methods of get_thread_stack_base */
-
-#else /* !SCM_USE_PTHREAD_THREADS */
-
-#define HAVE_GET_THREAD_STACK_BASE
-
-static SCM_STACKITEM *
-get_thread_stack_base ()
+static void *
+scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
{
- return (SCM_STACKITEM *) GC_stackbottom;
-}
-
-#endif /* !SCM_USE_PTHREAD_THREADS */
+ struct with_guile_args args;
-#ifdef HAVE_GET_THREAD_STACK_BASE
-
-void
-scm_init_guile ()
-{
- scm_i_init_thread_for_guile (get_thread_stack_base (),
- scm_i_default_dynamic_state);
+ args.func = func;
+ args.data = data;
+ args.parent = parent;
+
+ return GC_call_with_stack_base (with_guile_and_parent, &args);
}
-#endif
-
void *
scm_with_guile (void *(*func)(void *), void *data)
{
scm_i_default_dynamic_state);
}
-SCM_UNUSED static void
-scm_leave_guile_cleanup (void *x)
-{
- scm_leave_guile ();
- on_thread_exit (SCM_I_CURRENT_THREAD);
-}
-
-void *
-scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
-{
- void *res;
- int really_entered;
- SCM_STACKITEM base_item;
-
- really_entered = scm_i_init_thread_for_guile (&base_item, parent);
- if (really_entered)
- {
- scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
- res = scm_c_with_continuation_barrier (func, data);
- scm_i_pthread_cleanup_pop (0);
- scm_leave_guile ();
- }
- else
- res = scm_c_with_continuation_barrier (func, data);
-
- return res;
-}
-
-\f
-/*** Non-guile mode. */
-
-#if (defined HAVE_GC_DO_BLOCKING) && (!defined HAVE_DECL_GC_DO_BLOCKING)
-
-/* This declaration is missing from the public headers of GC 7.1. */
-extern void GC_do_blocking (void (*) (void *), void *);
-
-#endif
-
-#ifdef HAVE_GC_DO_BLOCKING
-struct without_guile_arg
-{
- void * (*function) (void *);
- void *data;
- void *result;
-};
-
-static void
-without_guile_trampoline (void *closure)
-{
- struct without_guile_arg *arg;
-
- SCM_I_CURRENT_THREAD->guile_mode = 0;
-
- arg = (struct without_guile_arg *) closure;
- arg->result = arg->function (arg->data);
-
- SCM_I_CURRENT_THREAD->guile_mode = 1;
-}
-#endif
-
void *
scm_without_guile (void *(*func)(void *), void *data)
{
void *result;
+ scm_i_thread *t = SCM_I_CURRENT_THREAD;
-#ifdef HAVE_GC_DO_BLOCKING
- if (SCM_I_CURRENT_THREAD->guile_mode)
+ if (t->guile_mode)
{
- struct without_guile_arg arg;
-
- arg.function = func;
- arg.data = data;
- GC_do_blocking (without_guile_trampoline, &arg);
- result = arg.result;
+ SCM_I_CURRENT_THREAD->guile_mode = 0;
+ result = with_gc_inactive (func, data);
+ SCM_I_CURRENT_THREAD->guile_mode = 1;
}
else
-#endif
+ /* Otherwise we're not in guile mode, so nothing to do. */
result = func (data);
return result;
else
t->result = scm_catch (SCM_BOOL_T, thunk, handler);
- /* Trigger a call to `on_thread_exit ()'. */
- pthread_exit (NULL);
-
return 0;
}
SCM_ASSERT (SCM_UNBNDP (handler) || scm_is_true (scm_procedure_p (handler)),
handler, SCM_ARG2, FUNC_NAME);
+ GC_collect_a_little ();
data.parent = scm_current_dynamic_state ();
data.thunk = thunk;
data.handler = handler;
scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
scm_i_pthread_mutex_unlock (&data.mutex);
+ assert (SCM_I_IS_THREAD (data.thread));
+
return data.thread;
}
scm_i_pthread_mutex_unlock (&t->admin_mutex);
SCM_TICK;
scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
+
+ /* Check for exit again, since we just released and
+ reacquired the admin mutex, before the next block_self
+ call (which would block forever if t has already
+ exited). */
+ if (t->exited)
+ {
+ res = t->result;
+ break;
+ }
}
}
{
fat_mutex *m = SCM_MUTEX_DATA (mx);
scm_i_pthread_mutex_destroy (&m->lock);
- scm_gc_free (m, sizeof (fat_mutex), "mutex");
return 0;
}
if (SCM_I_IS_THREAD (new_owner))
{
scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
+
+ /* FIXME: The order in which `t->admin_mutex' and
+ `m->lock' are taken differs from that in
+ `on_thread_exit', potentially leading to deadlocks. */
scm_i_pthread_mutex_lock (&t->admin_mutex);
- t->mutexes = scm_cons (mutex, t->mutexes);
+
+ /* Only keep a weak reference to MUTEX so that it's not
+ retained when not referenced elsewhere (bug #27450).
+ The weak pair itself is eventually removed when MUTEX
+ is unlocked. Note that `t->mutexes' lists mutexes
+ currently held by T, so it should be small. */
+ t->mutexes = scm_weak_car_pair (mutex, t->mutexes);
+
scm_i_pthread_mutex_unlock (&t->admin_mutex);
}
*ret = 1;
SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
(SCM m, SCM timeout, SCM owner),
-"Lock @var{mutex}. If the mutex is already locked, the calling thread "
-"blocks until the mutex becomes available. The function returns when "
-"the calling thread owns the lock on @var{mutex}. Locking a mutex that "
-"a thread already owns will succeed right away and will not block the "
-"thread. That is, Guile's mutexes are @emph{recursive}. ")
+ "Lock mutex @var{m}. If the mutex is already locked, the calling\n"
+ "thread blocks until the mutex becomes available. The function\n"
+ "returns when the calling thread owns the lock on @var{m}.\n"
+ "Locking a mutex that a thread already owns will succeed right\n"
+ "away and will not block the thread. That is, Guile's mutexes\n"
+ "are @emph{recursive}.")
#define FUNC_NAME s_scm_lock_mutex_timed
{
SCM exception;
waittime = &cwaittime;
}
+ if (!SCM_UNBNDP (owner) && !scm_is_false (owner))
+ SCM_VALIDATE_THREAD (3, owner);
+
exception = fat_mutex_lock (m, waittime, owner, &ret);
if (!scm_is_false (exception))
scm_ithrow (SCM_CAR (exception), scm_list_1 (SCM_CDR (exception)), 1);
}
#undef FUNC_NAME
/* Adapter with the `void (*) (SCM)' signature required by the dynwind
   rewind handler registration below; discards the SCM result of
   `scm_lock_mutex'.  */
static void
lock_mutex_return_void (SCM mx)
{
  (void) scm_lock_mutex (mx);
}
+
/* Adapter with the `void (*) (SCM)' signature required by the dynwind
   unwind handler registration below; discards the SCM result of
   `scm_unlock_mutex'.  */
static void
unlock_mutex_return_void (SCM mx)
{
  (void) scm_unlock_mutex (mx);
}
+
/* Within the current dynwind context, keep MUTEX locked: it is locked
   on (re)entry and unlocked on exit, including non-local exits.
   NOTE(review): SCM_F_WIND_EXPLICITLY presumably also runs the rewind
   handler immediately, locking MUTEX right away -- confirm against the
   dynwind API documentation.  The unwind handler is registered before
   the rewind handler, matching the original ordering.  */
void
scm_dynwind_lock_mutex (SCM mutex)
{
  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
                                       SCM_F_WIND_EXPLICITLY);
}
fat_mutex_unlock (SCM mutex, SCM cond,
const scm_t_timespec *waittime, int relock)
{
+ SCM owner;
fat_mutex *m = SCM_MUTEX_DATA (mutex);
fat_cond *c = NULL;
scm_i_thread *t = SCM_I_CURRENT_THREAD;
scm_i_scm_pthread_mutex_lock (&m->lock);
- SCM owner = m->owner;
+ owner = m->owner;
- if (!scm_is_eq (owner, scm_current_thread ()))
+ if (!scm_is_eq (owner, t->handle))
{
if (m->level == 0)
{
scm_i_pthread_mutex_unlock (&m->lock);
scm_misc_error (NULL, "mutex not locked", SCM_EOL);
}
- owner = scm_current_thread ();
+ owner = t->handle;
}
else if (!m->allow_external_unlock)
{
if (m->level > 0)
m->level--;
if (m->level == 0)
- m->owner = unblock_from_queue (m->waiting);
+ {
+ /* Change the owner of MUTEX. */
+ t->mutexes = scm_delq_x (mutex, t->mutexes);
+ m->owner = unblock_from_queue (m->waiting);
+ }
t->block_asyncs++;
{
if (relock)
scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
+ t->block_asyncs--;
break;
}
if (m->level > 0)
m->level--;
if (m->level == 0)
- m->owner = unblock_from_queue (m->waiting);
+ {
+ /* Change the owner of MUTEX. */
+ t->mutexes = scm_delq_x (mutex, t->mutexes);
+ m->owner = unblock_from_queue (m->waiting);
+ }
scm_i_pthread_mutex_unlock (&m->lock);
ret = 1;
}
#undef FUNC_NAME
-static size_t
-fat_cond_free (SCM mx)
-{
- fat_cond *c = SCM_CONDVAR_DATA (mx);
- scm_gc_free (c, sizeof (fat_cond), "condition-variable");
- return 0;
-}
-
static int
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
SCM_DEFINE (scm_timed_wait_condition_variable, "wait-condition-variable", 2, 1, 0,
(SCM cv, SCM mx, SCM t),
-"Wait until @var{cond-var} has been signalled. While waiting, "
-"@var{mutex} is atomically unlocked (as with @code{unlock-mutex}) and "
-"is locked again when this function returns. When @var{time} is given, "
+"Wait until condition variable @var{cv} has been signalled. While waiting, "
+"mutex @var{mx} is atomically unlocked (as with @code{unlock-mutex}) and "
+"is locked again when this function returns. When @var{t} is given, "
"it specifies a point in time where the waiting should be aborted. It "
"can be either a integer as returned by @code{current-time} or a pair "
"as returned by @code{gettimeofday}. When the waiting is aborted the "
}
#undef FUNC_NAME
-/*** Marking stacks */
-
-/* XXX - what to do with this? Do we need to handle this for blocked
- threads as well?
-*/
-#ifdef __ia64__
-# define SCM_MARK_BACKING_STORE() do { \
- ucontext_t ctx; \
- SCM_STACKITEM * top, * bot; \
- getcontext (&ctx); \
- scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
- ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
- / sizeof (SCM_STACKITEM))); \
- bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base; \
- top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx); \
- scm_mark_locations (bot, top - bot); } while (0)
-#else
-# define SCM_MARK_BACKING_STORE()
-#endif
-
\f
/*** Select */
/* Wrapper around `scm_i_pthread_cond_wait' that records MUTEX in the
   current thread's `held_mutex' field for the duration of the wait.
   NOTE(review): `held_mutex' is presumably consulted by thread-exit /
   interruption code so a mutex held while blocked can be released --
   confirm against `on_thread_exit' elsewhere in this file.  Returns the
   result of the underlying pthread condition wait.  */
int
scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
{
  int res;
  scm_i_thread *t = SCM_I_CURRENT_THREAD;

  /* Record the mutex before blocking; clear it as soon as we wake.  */
  t->held_mutex = mutex;
  res = scm_i_pthread_cond_wait (cond, mutex);
  t->held_mutex = NULL;

  return res;
}
scm_i_pthread_mutex_t *mutex,
const scm_t_timespec *wt)
{
- scm_t_guile_ticket t = scm_leave_guile ();
- ((scm_i_thread *)t)->held_mutex = mutex;
- int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
- ((scm_i_thread *)t)->held_mutex = NULL;
- scm_enter_guile (t);
+ int res;
+ scm_i_thread *t = SCM_I_CURRENT_THREAD;
+
+ t->held_mutex = mutex;
+ res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
+ t->held_mutex = NULL;
+
return res;
}
}
#undef FUNC_NAME
+SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
+ (void),
+ "Return the total number of processors of the machine, which\n"
+ "is guaranteed to be at least 1. A ``processor'' here is a\n"
+ "thread execution unit, which can be either:\n\n"
+ "@itemize\n"
+ "@item an execution core in a (possibly multi-core) chip, in a\n"
+ " (possibly multi- chip) module, in a single computer, or\n"
+ "@item a thread execution unit inside a core in the case of\n"
+ " @dfn{hyper-threaded} CPUs.\n"
+ "@end itemize\n\n"
+ "Which of the two definitions is used, is unspecified.\n")
+#define FUNC_NAME s_scm_total_processor_count
+{
+ return scm_from_ulong (num_processors (NPROC_ALL));
+}
+#undef FUNC_NAME
+
/* Scheme binding returning the number of processors available to the
   current process (as opposed to the machine total above).  */
SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
	    (void),
	    "Like @code{total-processor-count}, but return the number of\n"
	    "processors available to the current process.  See\n"
	    "@code{setaffinity} and @code{getaffinity} for more\n"
	    "information.\n")
#define FUNC_NAME s_scm_current_processor_count
{
  /* NPROC_CURRENT restricts the count to processors usable by this
     process (e.g. honoring CPU affinity), per Gnulib's `nproc'.  */
  return scm_from_ulong (num_processors (NPROC_CURRENT));
}
#undef FUNC_NAME
+
+
+\f
+
static scm_i_pthread_cond_t wake_up_cond;
static int threads_initialized_p = 0;
/* This mutex is used by SCM_CRITICAL_SECTION_START/END.
*/
scm_i_pthread_mutex_t scm_i_critical_section_mutex;
-int scm_i_critical_section_level = 0;
static SCM dynwind_critical_section_mutex;
#endif
void
-scm_threads_prehistory (SCM_STACKITEM *base)
+scm_threads_prehistory (void *base)
{
#if SCM_USE_PTHREAD_THREADS
pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive);
scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
scm_i_pthread_cond_init (&wake_up_cond, NULL);
- guilify_self_1 (base);
+ guilify_self_1 ((struct GC_stack_base *) base);
}
scm_t_bits scm_tc16_thread;
{
scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread));
scm_set_smob_print (scm_tc16_thread, thread_print);
- scm_set_smob_free (scm_tc16_thread, thread_free); /* XXX: Could be removed */
scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
scm_tc16_condvar = scm_make_smob_type ("condition-variable",
sizeof (fat_cond));
scm_set_smob_print (scm_tc16_condvar, fat_cond_print);
- scm_set_smob_free (scm_tc16_condvar, fat_cond_free);
scm_i_default_dynamic_state = SCM_BOOL_F;
guilify_self_2 (SCM_BOOL_F);
threads_initialized_p = 1;
- dynwind_critical_section_mutex =
- scm_permanent_object (scm_make_recursive_mutex ());
+ dynwind_critical_section_mutex = scm_make_recursive_mutex ();
}
/* Initialize `scm_i_default_dynamic_state' from the current dynamic
   state.  */
void
scm_init_threads_default_dynamic_state ()
{
  scm_i_default_dynamic_state =
    scm_make_dynamic_state (scm_current_dynamic_state ());
}
void
#include "libguile/threads.x"
}
+\f
+/* IA64-specific things. */
+
+#ifdef __ia64__
+# ifdef __hpux
+# include <sys/param.h>
+# include <sys/pstat.h>
/* Return the base address of the IA64 register backing store (RSE
   stack) of the calling process, found by scanning its VM regions with
   `pstat_getprocvm'.  Aborts if no RSE stack region is reported.  */
void *
scm_ia64_register_backing_store_base (void)
{
  struct pst_vm_status vm_status;
  int i = 0;
  /* Iterate over the process's VM status entries, one per call, until
     the RSE stack region is found.  */
  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
    if (vm_status.pst_type == PS_RSESTACK)
      return (void *) vm_status.pst_vaddr;
  abort ();
}
/* Return the AR.BSP (backing store pointer) register value saved in
   the ucontext CTX, using HP-UX's `__uc_get_ar_bsp' accessor.  */
void *
scm_ia64_ar_bsp (const void *ctx)
{
  uint64_t bsp;
  __uc_get_ar_bsp (ctx, &bsp);
  return (void *) bsp;
}
+# endif /* hpux */
+# ifdef linux
+# include <ucontext.h>
/* Return the base address of the IA64 register backing store, as
   exported by glibc.  */
void *
scm_ia64_register_backing_store_base (void)
{
  /* glibc publishes the RSE backing store base in this variable;
     declare it locally rather than relying on a header.  */
  extern void *__libc_ia64_register_backing_store_base;
  return __libc_ia64_register_backing_store_base;
}
/* Return the AR.BSP (backing store pointer) value saved in the
   ucontext pointed to by OPAQUE (Linux sigcontext layout).  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *) ctx->uc_mcontext.sc_ar_bsp;
}
+# endif /* linux */
+# ifdef __FreeBSD__
+# include <ucontext.h>
/* Return the base address of the IA64 register backing store.
   NOTE(review): presumably 0x8000000000000000 is the fixed RSE base in
   FreeBSD/ia64's address-space layout -- confirm against the FreeBSD
   VM documentation.  */
void *
scm_ia64_register_backing_store_base (void)
{
  return (void *)0x8000000000000000;
}
/* Return the current backing store pointer from the ucontext pointed
   to by OPAQUE.  NOTE(review): `bspstore + ndirty' presumably accounts
   for dirty registers not yet flushed to the backing store -- confirm
   against FreeBSD's mcontext definition.  */
void *
scm_ia64_ar_bsp (const void *opaque)
{
  const ucontext_t *ctx = opaque;
  return (void *)(ctx->uc_mcontext.mc_special.bspstore
                  + ctx->uc_mcontext.mc_special.ndirty);
}
+# endif /* __FreeBSD__ */
+#endif /* __ia64__ */
+
+
/*
Local Variables:
c-file-style: "gnu"