X-Git-Url: http://git.hcoop.net/bpt/guile.git/blobdiff_plain/57692c07427cb2b3f193df2e998e30cf7616e567..99cc1092a89292c6e6db2db537988e5963b19868:/libguile/threads.c

diff --git a/libguile/threads.c b/libguile/threads.c
index 27aad3d62..9a9392057 100644
--- a/libguile/threads.c
+++ b/libguile/threads.c
@@ -1,18 +1,19 @@
-/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+/* Copyright (C) 1995,1996,1997,1998,2000,2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
  *
  * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either version 3 of
+ * the License, or (at your option) any later version.
  *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * This library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA
  */
 
 
@@ -21,13 +22,13 @@
 # include <config.h>
 #endif
 
+#include "libguile/bdw-gc.h"
 #include "libguile/_scm.h"
 
 #if HAVE_UNISTD_H
 #include <unistd.h>
 #endif
 #include <stdio.h>
-#include <assert.h>
 
 #ifdef HAVE_STRING_H
 #include <string.h>   /* for memset used by FD_ZERO on Solaris 10 */
@@ -37,6 +38,10 @@
 #include <sys/time.h>
 #endif
 
+#include <assert.h>
+#include <fcntl.h>
+#include <nproc.h>
+
 #include "libguile/validate.h"
 #include "libguile/root.h"
 #include "libguile/eval.h"
@@ -51,17 +56,177 @@
 #include "libguile/init.h"
 #include "libguile/scmsigs.h"
 #include "libguile/strings.h"
+#include "libguile/weaks.h"
+
+#include <full-read.h>
+
+
+
+
+/* First some libgc shims. */
 
-#ifdef __MINGW32__
-#ifndef ETIMEDOUT
-# define ETIMEDOUT WSAETIMEDOUT
+/* Make sure GC_fn_type is defined; it is missing from the public
+   headers of GC 7.1 and earlier.  */
+#ifndef HAVE_GC_FN_TYPE
+typedef void * (* GC_fn_type) (void *);
 #endif
-# include
-# include
-# define pipe(fd) _pipe (fd, 256, O_BINARY)
-#endif /* __MINGW32__ */
-#include
+
+#ifndef GC_SUCCESS
+#define GC_SUCCESS 0
+#endif
+
+#ifndef GC_UNIMPLEMENTED
+#define GC_UNIMPLEMENTED 3
+#endif
+
+/* Likewise struct GC_stack_base is missing before 7.1.  */
+#ifndef HAVE_GC_STACK_BASE
+struct GC_stack_base {
+  void * mem_base; /* Base of memory stack. */
+#ifdef __ia64__
+  void * reg_base; /* Base of separate register stack. */
+#endif
+};
+
+static int
+GC_register_my_thread (struct GC_stack_base *stack_base)
+{
+  return GC_UNIMPLEMENTED;
+}
+
+static void
+GC_unregister_my_thread ()
+{
+}
+
+#if !SCM_USE_PTHREAD_THREADS
+/* No threads; we can just use GC_stackbottom.  */
+static void *
+get_thread_stack_base ()
+{
+  return GC_stackbottom;
+}
+
+#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
+  && defined PTHREAD_ATTR_GETSTACK_WORKS
+/* This method for GNU/Linux and perhaps some other systems.
+   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
+   available on them.  */
+static void *
+get_thread_stack_base ()
+{
+  pthread_attr_t attr;
+  void *start, *end;
+  size_t size;
+
+  pthread_getattr_np (pthread_self (), &attr);
+  pthread_attr_getstack (&attr, &start, &size);
+  end = (char *)start + size;
+
+#if SCM_STACK_GROWS_UP
+  return start;
+#else
+  return end;
+#endif
+}
+
+#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
+/* This method for MacOS X.
+   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
+   but as of 2006 there's nothing obvious at apple.com.  */
+static void *
+get_thread_stack_base ()
+{
+  return pthread_get_stackaddr_np (pthread_self ());
+}
+
+#else
+#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl.  Please upgrade to libgc >= 7.1.
+#endif
+
+static int
+GC_get_stack_base (struct GC_stack_base *stack_base)
+{
+  stack_base->mem_base = get_thread_stack_base ();
+#ifdef __ia64__
+  /* Calculate and store off the base of this thread's register
+     backing store (RBS).  Unfortunately our implementation(s) of
+     scm_ia64_register_backing_store_base are only reliable for the
+     main thread.  For other threads, therefore, find out the current
+     top of the RBS, and use that as a maximum.  */
+  stack_base->reg_base = scm_ia64_register_backing_store_base ();
+  {
+    ucontext_t ctx;
+    void *bsp;
+    getcontext (&ctx);
+    bsp = scm_ia64_ar_bsp (&ctx);
+    if (stack_base->reg_base > bsp)
+      stack_base->reg_base = bsp;
+  }
+#endif
+  return GC_SUCCESS;
+}
+
+static void *
+GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
+{
+  struct GC_stack_base stack_base;
+
+  stack_base.mem_base = (void*)&stack_base;
+#ifdef __ia64__
+  /* FIXME: Untested.  */
+  {
+    ucontext_t ctx;
+    getcontext (&ctx);
+    stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
+  }
+#endif
+
+  return fn (&stack_base, arg);
+}
+#endif /* HAVE_GC_STACK_BASE */
+
+
+/* Now define with_gc_active and with_gc_inactive.  */
+
+#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
+
+/* We have a sufficiently new libgc (7.2 or newer).  */
+
+static void*
+with_gc_inactive (GC_fn_type func, void *data)
+{
+  return GC_do_blocking (func, data);
+}
+
+static void*
+with_gc_active (GC_fn_type func, void *data)
+{
+  return GC_call_with_gc_active (func, data);
+}
+
+#else
+
+/* libgc not new enough, so never actually deactivate GC.
+
+   Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
+   GC_call_with_gc_active.  */
+
+static void*
+with_gc_inactive (GC_fn_type func, void *data)
+{
+  return func (data);
+}
+
+static void*
+with_gc_active (GC_fn_type func, void *data)
+{
+  return func (data);
+}
+
+#endif /* HAVE_GC_DO_BLOCKING */
+
 
 
 static void
@@ -82,8 +247,14 @@ to_timespec (SCM t, scm_t_timespec *waittime)
     }
 }
 
+
 /*** Queues */
 
+/* Note: We annotate with "GC-robust" assignments whose purpose is to avoid
+   the risk of false references leading to unbounded retained space as
+   described in "Bounding Space Usage of Conservative Garbage Collectors",
+   H.J. Boehm, 2001.  */
+
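As a side note for readers unfamiliar with the cited paper, the idea behind these "GC-robust" assignments is easy to reproduce outside Guile. The sketch below is not part of this patch; it assumes only the BDW-GC library (the gc/gc.h header, GC_INIT and GC_MALLOC), and the cell type and pop function are invented for illustration.

    #include <gc/gc.h>

    struct cell { struct cell *next; void *payload; };

    /* Pop the head of a singly linked queue and sever its link field.
       Under a conservative collector, a stale or misidentified pointer to
       the popped cell could otherwise keep every later cell reachable;
       clearing NEXT bounds that retention, which is what the
       SCM_SETCDR (c, SCM_EOL) calls added to remqueue and dequeue do.  */
    static struct cell *
    pop (struct cell **head)
    {
      struct cell *c = *head;
      if (c != NULL)
        {
          *head = c->next;
          c->next = NULL;       /* the "GC-robust" assignment */
        }
      return c;
    }

    int
    main (void)
    {
      struct cell *head = NULL;
      int i;

      GC_INIT ();
      for (i = 0; i < 3; i++)
        {
          struct cell *c = GC_MALLOC (sizeof *c);
          c->next = head;
          c->payload = GC_MALLOC (128);
          head = c;
        }
      while (pop (&head) != NULL)
        ;
      return 0;
    }
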
 /* Make an empty queue data structure.
  */
 static SCM
@@ -126,6 +297,10 @@ remqueue (SCM q, SCM c)
 	  if (scm_is_eq (c, SCM_CAR (q)))
 	    SCM_SETCAR (q, SCM_CDR (c));
 	  SCM_SETCDR (prev, SCM_CDR (c));
+
+	  /* GC-robust */
+	  SCM_SETCDR (c, SCM_EOL);
+
 	  SCM_CRITICAL_SECTION_END;
 	  return 1;
 	}
@@ -155,25 +330,16 @@ dequeue (SCM q)
       if (scm_is_null (SCM_CDR (q)))
 	SCM_SETCAR (q, SCM_EOL);
       SCM_CRITICAL_SECTION_END;
+
+      /* GC-robust */
+      SCM_SETCDR (c, SCM_EOL);
+
       return SCM_CAR (c);
     }
 }
 
 /*** Thread smob routines */
 
-static SCM
-thread_mark (SCM obj)
-{
-  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
-  scm_gc_mark (t->result);
-  scm_gc_mark (t->cleanup_handler);
-  scm_gc_mark (t->join_queue);
-  scm_gc_mark (t->mutexes);
-  scm_gc_mark (t->dynwinds);
-  scm_gc_mark (t->active_asyncs);
-  scm_gc_mark (t->continuation_root);
-  return t->dynamic_state;
-}
 
 static int
 thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
@@ -210,15 +376,7 @@ thread_print (SCM exp, SCM port, scm_print_state *pstate SCM_UNUSED)
   return 1;
 }
 
-static size_t
-thread_free (SCM obj)
-{
-  scm_i_thread *t = SCM_I_THREAD_DATA (obj);
-  assert (t->exited);
-  scm_gc_free (t, sizeof (*t), "thread");
-  return 0;
-}
-
+
 /*** Blocking on queues. */
 
 /* See also scm_i_queue_async_cell for how such a block is
@@ -290,154 +448,27 @@ unblock_from_queue (SCM queue)
   return thread;
 }
 
+
 /* Getting into and out of guile mode.
  */
 
-/* Ken Raeburn observes that the implementation of suspend and resume
-   (and the things that build on top of them) are very likely not
-   correct (see below).  We will need fix this eventually, and that's
-   why scm_leave_guile/scm_enter_guile are not exported in the API.
-
-   Ken writes:
-
-   Consider this sequence:
-
-   Function foo, called in Guile mode, calls suspend (maybe indirectly
-   through scm_leave_guile), which does this:
-
-      // record top of stack for the GC
-      t->top = SCM_STACK_PTR (&t); // just takes address of automatic
      var 't'
-      // save registers.
-      SCM_FLUSH_REGISTER_WINDOWS; // sparc only
-      setjmp (t->regs); // here's most of the magic
-
-   ... and returns.
-
-   Function foo has a SCM value X, a handle on a non-immediate object, in
-   a caller-saved register R, and it's the only reference to the object
-   currently.
-
-   The compiler wants to use R in suspend, so it pushes the current
-   value, X, into a stack slot which will be reloaded on exit from
-   suspend; then it loads stuff into R and goes about its business.  The
-   setjmp call saves (some of) the current registers, including R, which
-   no longer contains X.  (This isn't a problem for a normal
-   setjmp/longjmp situation, where longjmp would be called before
-   setjmp's caller returns; the old value for X would be loaded back from
-   the stack after the longjmp, before the function returned.)
-
-   So, suspend returns, loading X back into R (and invalidating the jump
-   buffer) in the process.  The caller foo then goes off and calls a
-   bunch of other functions out of Guile mode, occasionally storing X on
-   the stack again, but, say, much deeper on the stack than suspend's
-   stack frame went, and the stack slot where suspend had written X has
-   long since been overwritten with other values.
-
-   Okay, nothing actively broken so far.  Now, let garbage collection
-   run, triggered by another thread.
-
-   The thread calling foo is out of Guile mode at the time, so the
-   garbage collector just scans a range of stack addresses.  Too bad that
-   X isn't stored there.  So the pointed-to storage goes onto the free
-   list, and I think you can see where things go from there.
-
-   Is there anything I'm missing that'll prevent this scenario from
-   happening?  I mean, aside from, "well, suspend and scm_leave_guile
-   don't have many local variables, so they probably won't need to save
-   any registers on most systems, so we hope everything will wind up in
-   the jump buffer and we'll just get away with it"?
-
-   (And, going the other direction, if scm_leave_guile and suspend push
-   the stack pointer over onto a new page, and foo doesn't make further
-   function calls and thus the stack pointer no longer includes that
-   page, are we guaranteed that the kernel cannot release the now-unused
-   stack page that contains the top-of-stack pointer we just saved?  I
-   don't know if any OS actually does that.  If it does, we could get
-   faults in garbage collection.)
-
-   I don't think scm_without_guile has to have this problem, as it gets
-   more control over the stack handling -- but it should call setjmp
-   itself.  I'd probably try something like:
-
-      // record top of stack for the GC
-      t->top = SCM_STACK_PTR (&t);
-      // save registers.
-      SCM_FLUSH_REGISTER_WINDOWS;
-      setjmp (t->regs);
-      res = func(data);
-      scm_enter_guile (t);
-
-   ... though even that's making some assumptions about the stack
-   ordering of local variables versus caller-saved registers.
-
-   For something like scm_leave_guile to work, I don't think it can just
-   rely on invalidated jump buffers.  A valid jump buffer, and a handle
-   on the stack state at the point when the jump buffer was initialized,
-   together, would work fine, but I think then we're talking about macros
-   invoking setjmp in the caller's stack frame, and requiring that the
-   caller of scm_leave_guile also call scm_enter_guile before returning,
-   kind of like pthread_cleanup_push/pop calls that have to be paired up
-   in a function.  (In fact, the pthread ones have to be paired up
-   syntactically, as if they might expand to a compound statement
-   incorporating the user's code, and invoking a compiler's
-   exception-handling primitives.  Which might be something to think
-   about for cases where Guile is used with C++ exceptions or
-   pthread_cancel.)
-*/
-
+/* Key used to attach a cleanup handler to a given thread.  Also, if
+   thread-local storage is unavailable, this key is used to retrieve the
+   current thread with `pthread_getspecific ()'.  */
 scm_i_pthread_key_t scm_i_thread_key;
 
-static void
-resume (scm_i_thread *t)
-{
-  t->top = NULL;
-  if (t->clear_freelists_p)
-    {
-      *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
-      *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
-      t->clear_freelists_p = 0;
-    }
-}
-
-typedef void* scm_t_guile_ticket;
+#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
 
-static void
-scm_enter_guile (scm_t_guile_ticket ticket)
-{
-  scm_i_thread *t = (scm_i_thread *)ticket;
-  if (t)
-    {
-      scm_i_pthread_mutex_lock (&t->heap_mutex);
-      t->heap_mutex_locked_by_self = 1;
-      resume (t);
-    }
-}
+/* When thread-local storage (TLS) is available, a pointer to the
+   current-thread object is kept in TLS.  Note that storing the thread-object
+   itself in TLS (rather than a pointer to some malloc'd memory) is not
+   possible since thread objects may live longer than the actual thread they
+   represent.  */
+SCM_THREAD_LOCAL scm_i_thread *scm_i_current_thread = NULL;
 
-static scm_i_thread *
-suspend (void)
-{
-  scm_i_thread *t = SCM_I_CURRENT_THREAD;
+#endif /* SCM_HAVE_THREAD_STORAGE_CLASS */
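The comment above describes a pattern that can be shown in isolation. The sketch below is not Guile code: HAVE_TLS, struct my_thread and the function names are invented for the example, and it only assumes POSIX threads plus a compiler offering __thread. It mirrors the way scm_i_current_thread caches what pthread_getspecific would otherwise have to look up.

    #include <pthread.h>
    #include <stddef.h>

    struct my_thread { int id; };

    static pthread_key_t current_thread_key;

    #ifdef HAVE_TLS                       /* hypothetical configure result */
    static __thread struct my_thread *current_thread;   /* fast TLS cache */
    #endif

    static struct my_thread *
    get_current_thread (void)
    {
    #ifdef HAVE_TLS
      return current_thread;                            /* one memory load */
    #else
      return pthread_getspecific (current_thread_key);  /* portable fallback */
    #endif
    }

    static void
    set_current_thread (struct my_thread *t)
    {
      /* The key is always set, so a destructor registered with
         pthread_key_create still runs when the thread exits.  */
      pthread_setspecific (current_thread_key, t);
    #ifdef HAVE_TLS
      current_thread = t;
    #endif
    }

    int
    main (void)
    {
      struct my_thread self = { 1 };
      pthread_key_create (&current_thread_key, NULL);
      set_current_thread (&self);
      return get_current_thread () == &self ? 0 : 1;
    }
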
-  /* record top of stack for the GC */
-  t->top = SCM_STACK_PTR (&t);
-  /* save registers. */
-  SCM_FLUSH_REGISTER_WINDOWS;
-  setjmp (t->regs);
-  return t;
-}
-
-static scm_t_guile_ticket
-scm_leave_guile ()
-{
-  scm_i_thread *t = suspend ();
-  if (t->heap_mutex_locked_by_self)
-    {
-      t->heap_mutex_locked_by_self = 0;
-      scm_i_pthread_mutex_unlock (&t->heap_mutex);
-    }
-  return (scm_t_guile_ticket) t;
-}
 
 static scm_i_pthread_mutex_t thread_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
 static scm_i_thread *all_threads = NULL;
@@ -445,79 +476,96 @@ static int thread_count;
 
 static SCM scm_i_default_dynamic_state;
 
+/* Run when a fluid is collected.  */
+void
+scm_i_reset_fluid (size_t n, SCM val)
+{
+  scm_i_thread *t;
+
+  scm_i_pthread_mutex_lock (&thread_admin_mutex);
+  for (t = all_threads; t; t = t->next_thread)
+    if (SCM_I_DYNAMIC_STATE_P (t->dynamic_state))
+      {
+        SCM v = SCM_I_DYNAMIC_STATE_FLUIDS (t->dynamic_state);
+
+        if (n < SCM_SIMPLE_VECTOR_LENGTH (v))
+          SCM_SIMPLE_VECTOR_SET (v, n, val);
+      }
+  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+}
+
 /* Perform first stage of thread initialisation, in non-guile mode.
  */
 static void
-guilify_self_1 (SCM_STACKITEM *base)
-{
-  scm_i_thread *t = malloc (sizeof (scm_i_thread));
-
-  t->pthread = scm_i_pthread_self ();
-  t->handle = SCM_BOOL_F;
-  t->result = SCM_BOOL_F;
-  t->cleanup_handler = SCM_BOOL_F;
-  t->mutexes = SCM_EOL;
-  t->held_mutex = NULL;
-  t->join_queue = SCM_EOL;
-  t->dynamic_state = SCM_BOOL_F;
-  t->dynwinds = SCM_EOL;
-  t->active_asyncs = SCM_EOL;
-  t->block_asyncs = 1;
-  t->pending_asyncs = 1;
-  t->last_debug_frame = NULL;
-  t->base = base;
+guilify_self_1 (struct GC_stack_base *base)
+{
+  scm_i_thread t;
+
+  /* We must arrange for SCM_I_CURRENT_THREAD to point to a valid value
+     before allocating anything in this thread, because allocation could
+     cause GC to run, and GC could cause finalizers, which could invoke
+     Scheme functions, which need the current thread to be set.  */
+
+  t.pthread = scm_i_pthread_self ();
+  t.handle = SCM_BOOL_F;
+  t.result = SCM_BOOL_F;
+  t.cleanup_handler = SCM_BOOL_F;
+  t.mutexes = SCM_EOL;
+  t.held_mutex = NULL;
+  t.join_queue = SCM_EOL;
+  t.dynamic_state = SCM_BOOL_F;
+  t.dynwinds = SCM_EOL;
+  t.active_asyncs = SCM_EOL;
+  t.block_asyncs = 1;
+  t.pending_asyncs = 1;
+  t.critical_section_level = 0;
+  t.base = base->mem_base;
 #ifdef __ia64__
-  /* Calculate and store off the base of this thread's register
-     backing store (RBS).  Unfortunately our implementation(s) of
-     scm_ia64_register_backing_store_base are only reliable for the
-     main thread.  For other threads, therefore, find out the current
-     top of the RBS, and use that as a maximum.  */
-  t->register_backing_store_base = scm_ia64_register_backing_store_base ();
-  {
-    ucontext_t ctx;
-    void *bsp;
-    getcontext (&ctx);
-    bsp = scm_ia64_ar_bsp (&ctx);
-    if (t->register_backing_store_base > bsp)
-      t->register_backing_store_base = bsp;
-  }
+  t.register_backing_store_base = base->reg_base;
 #endif
-  t->continuation_root = SCM_EOL;
-  t->continuation_base = base;
-  scm_i_pthread_cond_init (&t->sleep_cond, NULL);
-  t->sleep_mutex = NULL;
-  t->sleep_object = SCM_BOOL_F;
-  t->sleep_fd = -1;
-
-  if (pipe (t->sleep_pipe) != 0)
+  t.continuation_root = SCM_EOL;
+  t.continuation_base = t.base;
+  scm_i_pthread_cond_init (&t.sleep_cond, NULL);
+  t.sleep_mutex = NULL;
+  t.sleep_object = SCM_BOOL_F;
+  t.sleep_fd = -1;
+
+  if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
     /* FIXME: Error conditions during the initialization phase are handled
        gracelessly since public functions such as `scm_init_guile ()'
       currently have type `void'.  */
     abort ();
 
-  scm_i_pthread_mutex_init (&t->heap_mutex, NULL);
-  t->heap_mutex_locked_by_self = 0;
-  scm_i_pthread_mutex_init (&t->admin_mutex, NULL);
-  t->clear_freelists_p = 0;
-  t->gc_running_p = 0;
-  t->canceled = 0;
-  t->exited = 0;
+  scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
+  t.current_mark_stack_ptr = NULL;
+  t.current_mark_stack_limit = NULL;
+  t.canceled = 0;
+  t.exited = 0;
+  t.guile_mode = 0;
 
-  t->freelist = SCM_EOL;
-  t->freelist2 = SCM_EOL;
-  SCM_SET_FREELIST_LOC (scm_i_freelist, &t->freelist);
-  SCM_SET_FREELIST_LOC (scm_i_freelist2, &t->freelist2);
+  /* The switcheroo.  */
+  {
+    scm_i_thread *t_ptr = &t;
+
+    GC_disable ();
+    t_ptr = GC_malloc (sizeof (scm_i_thread));
+    memcpy (t_ptr, &t, sizeof t);
 
-  scm_i_pthread_setspecific (scm_i_thread_key, t);
+    scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
 
-  scm_i_pthread_mutex_lock (&t->heap_mutex);
-  t->heap_mutex_locked_by_self = 1;
+#ifdef SCM_HAVE_THREAD_STORAGE_CLASS
+    /* Cache the current thread in TLS for faster lookup.  */
+    scm_i_current_thread = t_ptr;
+#endif
 
-  scm_i_pthread_mutex_lock (&thread_admin_mutex);
-  t->next_thread = all_threads;
-  all_threads = t;
-  thread_count++;
-  scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+    scm_i_pthread_mutex_lock (&thread_admin_mutex);
+    t_ptr->next_thread = all_threads;
+    all_threads = t_ptr;
+    thread_count++;
+    scm_i_pthread_mutex_unlock (&thread_admin_mutex);
+
+    GC_enable ();
+  }
 }
 
 /* Perform second stage of thread initialisation, in guile mode.
@@ -527,10 +575,13 @@ guilify_self_2 (SCM parent)
 {
   scm_i_thread *t = SCM_I_CURRENT_THREAD;
 
+  t->guile_mode = 1;
+
   SCM_NEWSMOB (t->handle, scm_tc16_thread, t);
-  scm_gc_register_collectable_memory (t, sizeof (scm_i_thread), "thread");
+
   t->continuation_root = scm_cons (t->handle, SCM_EOL);
   t->continuation_base = t->base;
+  t->vm = SCM_BOOL_F;
 
   if (scm_is_true (parent))
     t->dynamic_state = scm_make_dynamic_state (parent);
@@ -566,6 +617,13 @@ typedef struct {
 #define SCM_MUTEXP(x)         SCM_SMOB_PREDICATE (scm_tc16_mutex, x)
 #define SCM_MUTEX_DATA(x)     ((fat_mutex *) SCM_SMOB_DATA (x))
 
+static SCM
+call_cleanup (void *data)
+{
+  SCM *proc_p = data;
+  return scm_call_0 (*proc_p);
+}
+
 /* Perform thread tear-down, in guile mode.
  */
 static void *
@@ -573,13 +631,17 @@ do_thread_exit (void *v)
 {
   scm_i_thread *t = (scm_i_thread *) v;
 
+  /* Ensure the signal handling thread has been launched, because we might be
+     shutting it down.  This needs to be done in Guile mode.  */
+  scm_i_ensure_signal_delivery_thread ();
+
   if (!scm_is_false (t->cleanup_handler))
     {
       SCM ptr = t->cleanup_handler;
 
       t->cleanup_handler = SCM_BOOL_F;
       t->result = scm_internal_catch (SCM_BOOL_T,
-				      (scm_t_catch_body) scm_call_0, ptr,
+				      call_cleanup, &ptr,
 				      scm_handle_by_message_noexit, NULL);
     }
 
@@ -593,14 +655,23 @@ do_thread_exit (void *v)
 
   while (!scm_is_null (t->mutexes))
     {
-      SCM mutex = SCM_CAR (t->mutexes);
-      fat_mutex *m  = SCM_MUTEX_DATA (mutex);
-      scm_i_pthread_mutex_lock (&m->lock);
+      SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);
 
-      unblock_from_queue (m->waiting);
+      if (!SCM_UNBNDP (mutex))
+	{
+	  fat_mutex *m  = SCM_MUTEX_DATA (mutex);
 
-      scm_i_pthread_mutex_unlock (&m->lock);
-      t->mutexes = SCM_CDR (t->mutexes);
+	  scm_i_pthread_mutex_lock (&m->lock);
+
+	  /* Since MUTEX is in `t->mutexes', T must be its owner.  */
+	  assert (scm_is_eq (m->owner, t->handle));
+
+	  unblock_from_queue (m->waiting);
+
+	  scm_i_pthread_mutex_unlock (&m->lock);
+	}
+
+      t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
     }
 
   scm_i_pthread_mutex_unlock (&t->admin_mutex);
@@ -608,6 +679,17 @@ do_thread_exit (void *v)
   return NULL;
 }
 
+static void *
+do_thread_exit_trampoline (struct GC_stack_base *sb, void *v)
+{
+  /* Won't hurt if we are already registered.  */
+#if SCM_USE_PTHREAD_THREADS
+  GC_register_my_thread (sb);
+#endif
+
+  return scm_with_guile (do_thread_exit, v);
+}
+
 static void
 on_thread_exit (void *v)
 {
@@ -622,15 +704,14 @@ on_thread_exit (void *v)
       t->held_mutex = NULL;
     }
 
-  scm_i_pthread_setspecific (scm_i_thread_key, v);
-
-  /* Ensure the signal handling thread has been launched, because we might be
-     shutting it down.  */
-  scm_i_ensure_signal_delivery_thread ();
+  /* Reinstate the current thread for purposes of scm_with_guile
+     guile-mode cleanup handlers.  Only really needed in the non-TLS
+     case but it doesn't hurt to be consistent.  */
+  scm_i_pthread_setspecific (scm_i_thread_key, t);
 
-  /* Unblocking the joining threads needs to happen in guile mode
-     since the queue is a SCM data structure.  */
-  scm_with_guile (do_thread_exit, v);
+  /* Scheme-level thread finalizers and other cleanup needs to happen in
+     guile mode.  */
+  GC_call_with_stack_base (do_thread_exit_trampoline, t);
 
   /* Removing ourself from the list of all threads needs to happen in
      non-guile mode since all SCM values on our stack become
@@ -640,6 +721,10 @@ on_thread_exit (void *v)
     if (*tp == t)
       {
 	*tp = t->next_thread;
+
+	/* GC-robust */
+	t->next_thread = NULL;
+
 	break;
       }
   thread_count--;
@@ -654,6 +739,10 @@ on_thread_exit (void *v)
   scm_i_pthread_mutex_unlock (&thread_admin_mutex);
 
   scm_i_pthread_setspecific (scm_i_thread_key, NULL);
+
+#if SCM_USE_PTHREAD_THREADS
+  GC_unregister_my_thread ();
+#endif
 }
 
 static scm_i_pthread_once_t init_thread_key_once = SCM_I_PTHREAD_ONCE_INIT;
@@ -664,26 +753,35 @@ init_thread_key (void)
 {
   scm_i_pthread_key_create (&scm_i_thread_key, on_thread_exit);
 }
 
-/* Perform any initializations necessary to bring the current thread
-   into guile mode, initializing Guile itself, if necessary.
+/* Perform any initializations necessary to make the current thread
+   known to Guile (via SCM_I_CURRENT_THREAD), initializing Guile itself,
+   if necessary.
 
    BASE is the stack base to use with GC.
 
   PARENT is the dynamic state to use as the parent, or SCM_BOOL_F in
   which case the default dynamic state is used.
 
-  Return zero when the thread was in guile mode already; otherwise
+  Returns zero when the thread was known to guile already; otherwise
   return 1.
-*/
+
+  Note that it could be the case that the thread was known
+  to Guile, but not in guile mode (because we are within a
+  scm_without_guile call).   Check SCM_I_CURRENT_THREAD->guile_mode to
+  be sure.  New threads are put into guile mode implicitly.  */
 
 static int
-scm_i_init_thread_for_guile (SCM_STACKITEM *base, SCM parent)
+scm_i_init_thread_for_guile (struct GC_stack_base *base, SCM parent)
 {
-  scm_i_thread *t;
-
   scm_i_pthread_once (&init_thread_key_once, init_thread_key);
 
-  if ((t = SCM_I_CURRENT_THREAD) == NULL)
+  if (SCM_I_CURRENT_THREAD)
+    {
+      /* Thread is already known to Guile.
+      */
+      return 0;
+    }
+  else
     {
       /* This thread has not been guilified yet.
        */
@@ -695,6 +793,12 @@
 	     initialization.
 	  */
 	  scm_i_init_guile (base);
+
+#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
+	  /* Allow other threads to come in later.  */
+	  GC_allow_register_threads ();
+#endif
+
 	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
 	}
       else
@@ -703,125 +807,116 @@
 	     the first time.  Only initialize this thread.
 	  */
 	  scm_i_pthread_mutex_unlock (&scm_i_init_mutex);
+
+	  /* Register this thread with libgc.  */
+#if SCM_USE_PTHREAD_THREADS
+	  GC_register_my_thread (base);
+#endif
+
 	  guilify_self_1 (base);
 	  guilify_self_2 (parent);
 	}
       return 1;
     }
-  else if (t->top)
-    {
-      /* This thread is already guilified but not in guile mode, just
-	 resume it.
-
-         A user call to scm_with_guile() will lead us to here.  This could
-         happen from anywhere on the stack, and in particular lower on the
-         stack than when it was when this thread was first guilified.  Thus,
-         `base' must be updated.  */
-#if SCM_STACK_GROWS_UP
-      if (base < t->base)
-         t->base = base;
-#else
-      if (base > t->base)
-         t->base = base;
-#endif
+}
 
-      scm_enter_guile ((scm_t_guile_ticket) t);
-      return 1;
-    }
+void
+scm_init_guile ()
+{
+  struct GC_stack_base stack_base;
+
+  if (GC_get_stack_base (&stack_base) == GC_SUCCESS)
+    scm_i_init_thread_for_guile (&stack_base,
+                                 scm_i_default_dynamic_state);
   else
     {
-      /* Thread is already in guile mode.  Nothing to do.
-      */
-      return 0;
+      fprintf (stderr, "Failed to get stack base for current thread.\n");
+      exit (EXIT_FAILURE);
     }
 }
 
-#if SCM_USE_PTHREAD_THREADS
+SCM_UNUSED static void
+scm_leave_guile_cleanup (void *x)
+{
+  on_thread_exit (SCM_I_CURRENT_THREAD);
+}
 
-#if HAVE_PTHREAD_ATTR_GETSTACK && HAVE_PTHREAD_GETATTR_NP
-/* This method for GNU/Linux and perhaps some other systems.
-   It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
-   available on them.  */
-#define HAVE_GET_THREAD_STACK_BASE
+struct with_guile_args
+{
+  GC_fn_type func;
+  void *data;
+  SCM parent;
+};
 
-static SCM_STACKITEM *
-get_thread_stack_base ()
+static void *
+with_guile_trampoline (void *data)
 {
-  pthread_attr_t attr;
-  void *start, *end;
-  size_t size;
+  struct with_guile_args *args = data;
 
-  pthread_getattr_np (pthread_self (), &attr);
-  pthread_attr_getstack (&attr, &start, &size);
-  end = (char *)start + size;
+  return scm_c_with_continuation_barrier (args->func, args->data);
+}
+
+static void *
+with_guile_and_parent (struct GC_stack_base *base, void *data)
+{
+  void *res;
+  int new_thread;
+  scm_i_thread *t;
+  struct with_guile_args *args = data;
 
-  /* XXX - pthread_getattr_np from LinuxThreads does not seem to work
-     for the main thread, but we can use scm_get_stack_base in that
-     case.
-  */
+  new_thread = scm_i_init_thread_for_guile (base, args->parent);
+  t = SCM_I_CURRENT_THREAD;
+  if (new_thread)
+    {
+      /* We are in Guile mode.  */
+      assert (t->guile_mode);
 
-#ifndef PTHREAD_ATTR_GETSTACK_WORKS
-  if ((void *)&attr < start || (void *)&attr >= end)
-    return scm_get_stack_base ();
+      res = scm_c_with_continuation_barrier (args->func, args->data);
+
+      /* Leave Guile mode.  */
+      t->guile_mode = 0;
+    }
+  else if (t->guile_mode)
+    {
+      /* Already in Guile mode.  */
+      res = scm_c_with_continuation_barrier (args->func, args->data);
+    }
   else
-#endif
     {
+      /* We are not in Guile mode, either because we are not within a
+	 scm_with_guile, or because we are within a scm_without_guile.
+
+	 This call to scm_with_guile() could happen from anywhere on the
+	 stack, and in particular lower on the stack than when it was
+	 when this thread was first guilified.  Thus, `base' must be
+	 updated.  */
 #if SCM_STACK_GROWS_UP
-      return start;
+      if (SCM_STACK_PTR (base->mem_base) < t->base)
+	t->base = SCM_STACK_PTR (base->mem_base);
 #else
-      return end;
+      if (SCM_STACK_PTR (base->mem_base) > t->base)
+	t->base = SCM_STACK_PTR (base->mem_base);
 #endif
-    }
-}
-
-#elif HAVE_PTHREAD_GET_STACKADDR_NP
-/* This method for MacOS X.
-   It'd be nice if there was some documentation on pthread_get_stackaddr_np,
-   but as of 2006 there's nothing obvious at apple.com.  */
-#define HAVE_GET_THREAD_STACK_BASE
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
-  return pthread_get_stackaddr_np (pthread_self ());
-}
 
-#elif defined (__MINGW32__)
-/* This method for mingw.  In mingw the basic scm_get_stack_base can be used
-   in any thread.  We don't like hard-coding the name of a system, but there
-   doesn't seem to be a cleaner way of knowing scm_get_stack_base can
-   work.  */
-#define HAVE_GET_THREAD_STACK_BASE
-static SCM_STACKITEM *
-get_thread_stack_base ()
-{
-  return scm_get_stack_base ();
+      t->guile_mode = 1;
+      res = with_gc_active (with_guile_trampoline, args);
+      t->guile_mode = 0;
+    }
+  return res;
 }
 
-#endif /* pthread methods of get_thread_stack_base */
-
-#else /* !SCM_USE_PTHREAD_THREADS */
-
-#define HAVE_GET_THREAD_STACK_BASE
-
-static SCM_STACKITEM *
-get_thread_stack_base ()
+static void *
+scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
 {
-  return scm_get_stack_base ();
-}
+  struct with_guile_args args;
 
-#endif /* !SCM_USE_PTHREAD_THREADS */
-
-#ifdef HAVE_GET_THREAD_STACK_BASE
-
-void
-scm_init_guile ()
-{
-  scm_i_init_thread_for_guile (get_thread_stack_base (),
-                               scm_i_default_dynamic_state);
+  args.func = func;
+  args.data = data;
+  args.parent = parent;
+
+  return GC_call_with_stack_base (with_guile_and_parent, &args);
 }
 
-#endif
-
 void *
 scm_with_guile (void *(*func)(void *), void *data)
 {
@@ -829,44 +924,26 @@ scm_with_guile (void *(*func)(void *), void *data)
 				     scm_i_default_dynamic_state);
 }
 
-SCM_UNUSED static void
-scm_leave_guile_cleanup (void *x)
-{
-  scm_leave_guile ();
-}
-
 void *
-scm_i_with_guile_and_parent (void *(*func)(void *), void *data, SCM parent)
+scm_without_guile (void *(*func)(void *), void *data)
 {
-  void *res;
-  int really_entered;
-  SCM_STACKITEM base_item;
+  void *result;
+  scm_i_thread *t = SCM_I_CURRENT_THREAD;
 
-  really_entered = scm_i_init_thread_for_guile (&base_item, parent);
-  if (really_entered)
+  if (t->guile_mode)
     {
-      scm_i_pthread_cleanup_push (scm_leave_guile_cleanup, NULL);
-      res = scm_c_with_continuation_barrier (func, data);
-      scm_i_pthread_cleanup_pop (0);
-      scm_leave_guile ();
+      SCM_I_CURRENT_THREAD->guile_mode = 0;
+      result = with_gc_inactive (func, data);
+      SCM_I_CURRENT_THREAD->guile_mode = 1;
     }
   else
-    res = scm_c_with_continuation_barrier (func, data);
+    /* Otherwise we're not in guile mode, so nothing to do.  */
+    result = func (data);
 
-  return res;
-}
-
-void *
-scm_without_guile (void *(*func)(void *), void *data)
-{
-  void *res;
-  scm_t_guile_ticket t;
-  t = scm_leave_guile ();
-  res = func (data);
-  scm_enter_guile (t);
-  return res;
+  return result;
 }
+
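For readers following the new control flow, here is how the pair above is typically used from an application. This is an illustrative sketch, not part of the patch; it assumes Guile's public C API (libguile.h, scm_with_guile, scm_without_guile, scm_c_eval_string) and a made-up slow_io job.

    #include <libguile.h>
    #include <unistd.h>

    /* Runs with the calling thread out of Guile mode, wrapped by
       with_gc_inactive above, so the collector need not stop or scan
       the thread while it blocks.  */
    static void *
    slow_io (void *data)
    {
      sleep (1);
      return NULL;
    }

    static void *
    in_guile (void *data)
    {
      scm_c_eval_string ("(display \"entering blocking section\\n\")");
      scm_without_guile (slow_io, NULL);
      scm_c_eval_string ("(display \"back in Guile mode\\n\")");
      return NULL;
    }

    int
    main (int argc, char **argv)
    {
      /* Registers the calling thread with Guile (and libgc) and runs
         the callback in Guile mode behind a continuation barrier.  */
      scm_with_guile (in_guile, NULL);
      return 0;
    }
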
 /*** Thread creation */
 
 typedef struct {
@@ -1159,6 +1236,16 @@ SCM_DEFINE (scm_join_thread_timed, "join-thread", 1, 2, 0,
 	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
 	      SCM_TICK;
 	      scm_i_scm_pthread_mutex_lock (&t->admin_mutex);
+
+	      /* Check for exit again, since we just released and
+		 reacquired the admin mutex, before the next block_self
+		 call (which would block forever if t has already
+		 exited).  */
+	      if (t->exited)
+		{
+		  res = t->result;
+		  break;
+		}
 	    }
 	}
 
@@ -1177,20 +1264,12 @@ SCM_DEFINE (scm_thread_p, "thread?", 1, 0, 0,
 }
 #undef FUNC_NAME
 
-static SCM
-fat_mutex_mark (SCM mx)
-{
-  fat_mutex *m = SCM_MUTEX_DATA (mx);
-  scm_gc_mark (m->owner);
-  return m->waiting;
-}
 
 static size_t
 fat_mutex_free (SCM mx)
 {
   fat_mutex *m = SCM_MUTEX_DATA (mx);
   scm_i_pthread_mutex_destroy (&m->lock);
-  scm_gc_free (m, sizeof (fat_mutex), "mutex");
   return 0;
 }
 
@@ -1293,7 +1372,14 @@ fat_mutex_lock (SCM mutex, scm_t_timespec *timeout, SCM owner, int *ret)
 	    {
 	      scm_i_thread *t = SCM_I_THREAD_DATA (new_owner);
 	      scm_i_pthread_mutex_lock (&t->admin_mutex);
-	      t->mutexes = scm_cons (mutex, t->mutexes);
+
+	      /* Only keep a weak reference to MUTEX so that it's not
+		 retained when not referenced elsewhere (bug #27450).
+		 The weak pair itself is eventually removed when MUTEX
+		 is unlocked.  Note that `t->mutexes' lists mutexes
+		 currently held by T, so it should be small.  */
+	      t->mutexes = scm_weak_car_pair (mutex, t->mutexes);
+
 	      scm_i_pthread_mutex_unlock (&t->admin_mutex);
 	    }
 	  *ret = 1;
@@ -1380,12 +1466,24 @@ SCM_DEFINE (scm_lock_mutex_timed, "lock-mutex", 1, 2, 0,
 }
 #undef FUNC_NAME
 
+static void
+lock_mutex_return_void (SCM mx)
+{
+  (void) scm_lock_mutex (mx);
+}
+
+static void
+unlock_mutex_return_void (SCM mx)
+{
+  (void) scm_unlock_mutex (mx);
+}
+
 void
 scm_dynwind_lock_mutex (SCM mutex)
 {
-  scm_dynwind_unwind_handler_with_scm ((void(*)(SCM))scm_unlock_mutex, mutex,
+  scm_dynwind_unwind_handler_with_scm (unlock_mutex_return_void, mutex,
 				       SCM_F_WIND_EXPLICITLY);
-  scm_dynwind_rewind_handler_with_scm ((void(*)(SCM))scm_lock_mutex, mutex,
+  scm_dynwind_rewind_handler_with_scm (lock_mutex_return_void, mutex,
 				       SCM_F_WIND_EXPLICITLY);
 }
 
@@ -1425,6 +1523,7 @@
 static int
 fat_mutex_unlock (SCM mutex, SCM cond,
 		  const scm_t_timespec *waittime, int relock)
 {
+  SCM owner;
   fat_mutex *m = SCM_MUTEX_DATA (mutex);
   fat_cond *c = NULL;
   scm_i_thread *t = SCM_I_CURRENT_THREAD;
@@ -1432,9 +1531,9 @@ fat_mutex_unlock (SCM mutex, SCM cond,
 
   scm_i_scm_pthread_mutex_lock (&m->lock);
 
-  SCM owner = m->owner;
+  owner = m->owner;
 
-  if (!scm_is_eq (owner, scm_current_thread ()))
+  if (!scm_is_eq (owner, t->handle))
     {
       if (m->level == 0)
 	{
@@ -1443,7 +1542,7 @@ fat_mutex_unlock (SCM mutex, SCM cond,
 	      scm_i_pthread_mutex_unlock (&m->lock);
 	      scm_misc_error (NULL, "mutex not locked", SCM_EOL);
 	    }
-	  owner = scm_current_thread ();
+	  owner = t->handle;
 	}
       else if (!m->allow_external_unlock)
 	{
@@ -1462,7 +1561,11 @@ fat_mutex_unlock (SCM mutex, SCM cond,
       if (m->level > 0)
 	m->level--;
       if (m->level == 0)
-	m->owner = unblock_from_queue (m->waiting);
+	{
+	  /* Change the owner of MUTEX.  */
+	  t->mutexes = scm_delq_x (mutex, t->mutexes);
+	  m->owner = unblock_from_queue (m->waiting);
+	}
 
       t->block_asyncs++;
 
@@ -1489,11 +1592,12 @@ fat_mutex_unlock (SCM mutex, SCM cond,
 	    {
 	      if (relock)
 		scm_lock_mutex_timed (mutex, SCM_UNDEFINED, owner);
+	      t->block_asyncs--;
 	      break;
 	    }
 
 	  t->block_asyncs--;
-	  scm_async_click ();
+	  scm_async_tick ();
 
 	  scm_remember_upto_here_2 (cond, mutex);
 
@@ -1505,7 +1609,11 @@ fat_mutex_unlock (SCM mutex, SCM cond,
       if (m->level > 0)
 	m->level--;
       if (m->level == 0)
-	m->owner = unblock_from_queue (m->waiting);
+	{
+	  /* Change the owner of MUTEX.  */
+	  t->mutexes = scm_delq_x (mutex, t->mutexes);
+	  m->owner = unblock_from_queue (m->waiting);
+	}
 
       scm_i_pthread_mutex_unlock (&m->lock);
       ret = 1;
@@ -1595,21 +1703,6 @@ SCM_DEFINE (scm_mutex_locked_p, "mutex-locked?", 1, 0, 0,
 }
 #undef FUNC_NAME
 
-static SCM
-fat_cond_mark (SCM cv)
-{
-  fat_cond *c = SCM_CONDVAR_DATA (cv);
-  return c->waiting;
-}
-
-static size_t
-fat_cond_free (SCM mx)
-{
-  fat_cond *c = SCM_CONDVAR_DATA (mx);
-  scm_gc_free (c, sizeof (fat_cond), "condition-variable");
-  return 0;
-}
-
 static int
 fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
 {
@@ -1708,52 +1801,37 @@ SCM_DEFINE (scm_condition_variable_p, "condition-variable?", 1, 0, 0,
 }
 #undef FUNC_NAME
 
-/*** Marking stacks */
 
-/* XXX - what to do with this?  Do we need to handle this for blocked
-   threads as well?
-*/
-#ifdef __ia64__
-# define SCM_MARK_BACKING_STORE() do {                                \
-    ucontext_t ctx;                                                   \
-    SCM_STACKITEM * top, * bot;                                       \
-    getcontext (&ctx);                                                \
-    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,           \
-      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
-       / sizeof (SCM_STACKITEM)));                                    \
-    bot = (SCM_STACKITEM *) SCM_I_CURRENT_THREAD->register_backing_store_base;  \
-    top = (SCM_STACKITEM *) scm_ia64_ar_bsp (&ctx);                   \
-    scm_mark_locations (bot, top - bot); } while (0)
-#else
-# define SCM_MARK_BACKING_STORE()
-#endif
+
+/*** Select */
 
-void
-scm_threads_mark_stacks (void)
+struct select_args
 {
-  scm_i_thread *t;
-  for (t = all_threads; t; t = t->next_thread)
-    {
-      /* Check that thread has indeed been suspended.
-       */
-      assert (t->top);
+  int             nfds;
+  SELECT_TYPE    *read_fds;
+  SELECT_TYPE    *write_fds;
+  SELECT_TYPE    *except_fds;
+  struct timeval *timeout;
 
-      scm_gc_mark (t->handle);
+  int             result;
+  int             errno_value;
+};
 
-#if SCM_STACK_GROWS_UP
-      scm_mark_locations (t->base, t->top - t->base);
-#else
-      scm_mark_locations (t->top, t->base - t->top);
-#endif
-      scm_mark_locations ((void *) &t->regs,
-			  ((size_t) sizeof(t->regs)
-			   / sizeof (SCM_STACKITEM)));
-    }
+static void *
+do_std_select (void *args)
+{
+  struct select_args *select_args;
 
-  SCM_MARK_BACKING_STORE ();
-}
+  select_args = (struct select_args *) args;
 
-/*** Select */
+  select_args->result =
+    select (select_args->nfds,
+	    select_args->read_fds, select_args->write_fds,
+	    select_args->except_fds, select_args->timeout);
+  select_args->errno_value = errno;
+
+  return NULL;
+}
 
 int
 scm_std_select (int nfds,
@@ -1765,7 +1843,7 @@ scm_std_select (int nfds,
   fd_set my_readfds;
   int res, eno, wakeup_fd;
   scm_i_thread *t = SCM_I_CURRENT_THREAD;
-  scm_t_guile_ticket ticket;
+  struct select_args args;
 
   if (readfds == NULL)
     {
@@ -1777,15 +1855,23 @@ scm_std_select (int nfds,
   SCM_TICK;
 
   wakeup_fd = t->sleep_pipe[0];
-  ticket = scm_leave_guile ();
   FD_SET (wakeup_fd, readfds);
   if (wakeup_fd >= nfds)
     nfds = wakeup_fd+1;
-  res = select (nfds, readfds, writefds, exceptfds, timeout);
-  t->sleep_fd = -1;
-  eno = errno;
-  scm_enter_guile (ticket);
+
+  args.nfds = nfds;
+  args.read_fds = readfds;
+  args.write_fds = writefds;
+  args.except_fds = exceptfds;
+  args.timeout = timeout;
+
+  /* Explicitly cooperate with the GC.  */
+  scm_without_guile (do_std_select, &args);
+
+  res = args.result;
+  eno = args.errno_value;
+
+  t->sleep_fd = -1;
   scm_i_reset_sleep (t);
 
   if (res > 0 && FD_ISSET (wakeup_fd, readfds))
@@ -1809,12 +1895,16 @@
 
 #if SCM_USE_PTHREAD_THREADS
 
+/* It seems reasonable to not run procedures related to mutex and condition
+   variables within `GC_do_blocking ()' since, (i) the GC can operate even
+   without it, and (ii) the only potential gain would be GC latency.  See
+   http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/2245/focus=2251
+   for a discussion of the pros and cons.  */
+
 int
 scm_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
 {
-  scm_t_guile_ticket t = scm_leave_guile ();
   int res = scm_i_pthread_mutex_lock (mutex);
-  scm_enter_guile (t);
   return res;
 }
 
@@ -1834,11 +1924,13 @@ scm_dynwind_pthread_mutex_lock (scm_i_pthread_mutex_t *mutex)
 int
 scm_pthread_cond_wait (scm_i_pthread_cond_t *cond, scm_i_pthread_mutex_t *mutex)
 {
-  scm_t_guile_ticket t = scm_leave_guile ();
-  ((scm_i_thread *)t)->held_mutex = mutex;
-  int res = scm_i_pthread_cond_wait (cond, mutex);
-  ((scm_i_thread *)t)->held_mutex = NULL;
-  scm_enter_guile (t);
+  int res;
+  scm_i_thread *t = SCM_I_CURRENT_THREAD;
+
+  t->held_mutex = mutex;
+  res = scm_i_pthread_cond_wait (cond, mutex);
+  t->held_mutex = NULL;
+
   return res;
 }
 
@@ -1847,11 +1939,13 @@ scm_pthread_cond_timedwait (scm_i_pthread_cond_t *cond,
 			    scm_i_pthread_mutex_t *mutex,
 			    const scm_t_timespec *wt)
 {
-  scm_t_guile_ticket t = scm_leave_guile ();
-  ((scm_i_thread *)t)->held_mutex = mutex;
-  int res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
-  ((scm_i_thread *)t)->held_mutex = NULL;
-  scm_enter_guile (t);
+  int res;
+  scm_i_thread *t = SCM_I_CURRENT_THREAD;
+
+  t->held_mutex = mutex;
+  res = scm_i_pthread_cond_timedwait (cond, mutex, wt);
+  t->held_mutex = NULL;
+
   return res;
 }
 
@@ -1946,74 +2040,46 @@ scm_c_thread_exited_p (SCM thread)
 }
 #undef FUNC_NAME
 
-static scm_i_pthread_cond_t wake_up_cond;
-int scm_i_thread_go_to_sleep;
-static int threads_initialized_p = 0;
-
-void
-scm_i_thread_put_to_sleep ()
-{
-  if (threads_initialized_p)
-    {
-      scm_i_thread *t;
-
-      scm_leave_guile ();
-      scm_i_pthread_mutex_lock (&thread_admin_mutex);
-
-      /* Signal all threads to go to sleep
-       */
-      scm_i_thread_go_to_sleep = 1;
-      for (t = all_threads; t; t = t->next_thread)
-	scm_i_pthread_mutex_lock (&t->heap_mutex);
-      scm_i_thread_go_to_sleep = 0;
-    }
+SCM_DEFINE (scm_total_processor_count, "total-processor-count", 0, 0, 0,
+	    (void),
+	    "Return the total number of processors of the machine, which\n"
+	    "is guaranteed to be at least 1.  A ``processor'' here is a\n"
+	    "thread execution unit, which can be either:\n\n"
+	    "@itemize\n"
+	    "@item an execution core in a (possibly multi-core) chip, in a\n"
+	    "  (possibly multi-chip) module, in a single computer, or\n"
+	    "@item a thread execution unit inside a core in the case of\n"
+	    "  @dfn{hyper-threaded} CPUs.\n"
+	    "@end itemize\n\n"
+	    "Which of the two definitions is used, is unspecified.\n")
+#define FUNC_NAME s_scm_total_processor_count
+{
+  return scm_from_ulong (num_processors (NPROC_ALL));
 }
+#undef FUNC_NAME
 
-void
-scm_i_thread_invalidate_freelists ()
+SCM_DEFINE (scm_current_processor_count, "current-processor-count", 0, 0, 0,
+	    (void),
+	    "Like @code{total-processor-count}, but return the number of\n"
+	    "processors available to the current process.  See\n"
See\n" + "@code{setaffinity} and @code{getaffinity} for more\n" + "information.\n") +#define FUNC_NAME s_scm_current_processor_count { - /* thread_admin_mutex is already locked. */ - - scm_i_thread *t; - for (t = all_threads; t; t = t->next_thread) - if (t != SCM_I_CURRENT_THREAD) - t->clear_freelists_p = 1; + return scm_from_ulong (num_processors (NPROC_CURRENT)); } +#undef FUNC_NAME -void -scm_i_thread_wake_up () -{ - if (threads_initialized_p) - { - scm_i_thread *t; - - scm_i_pthread_cond_broadcast (&wake_up_cond); - for (t = all_threads; t; t = t->next_thread) - scm_i_pthread_mutex_unlock (&t->heap_mutex); - scm_i_pthread_mutex_unlock (&thread_admin_mutex); - scm_enter_guile ((scm_t_guile_ticket) SCM_I_CURRENT_THREAD); - } -} -void -scm_i_thread_sleep_for_gc () -{ - scm_i_thread *t = suspend (); + - /* Don't put t->heap_mutex in t->held_mutex here, because if the - thread is cancelled during the cond wait, the thread's cleanup - function (scm_leave_guile_cleanup) will handle unlocking the - heap_mutex, so we don't need to do that again in on_thread_exit. - */ - scm_i_pthread_cond_wait (&wake_up_cond, &t->heap_mutex); +static scm_i_pthread_cond_t wake_up_cond; +static int threads_initialized_p = 0; - resume (t); -} /* This mutex is used by SCM_CRITICAL_SECTION_START/END. */ scm_i_pthread_mutex_t scm_i_critical_section_mutex; -int scm_i_critical_section_level = 0; static SCM dynwind_critical_section_mutex; @@ -2028,7 +2094,6 @@ scm_dynwind_critical_section (SCM mutex) /*** Initialization */ -scm_i_pthread_key_t scm_i_freelist, scm_i_freelist2; scm_i_pthread_mutex_t scm_i_misc_mutex; #if SCM_USE_PTHREAD_THREADS @@ -2036,7 +2101,7 @@ pthread_mutexattr_t scm_i_pthread_mutexattr_recursive[1]; #endif void -scm_threads_prehistory (SCM_STACKITEM *base) +scm_threads_prehistory (void *base) { #if SCM_USE_PTHREAD_THREADS pthread_mutexattr_init (scm_i_pthread_mutexattr_recursive); @@ -2048,10 +2113,8 @@ scm_threads_prehistory (SCM_STACKITEM *base) scm_i_pthread_mutexattr_recursive); scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL); scm_i_pthread_cond_init (&wake_up_cond, NULL); - scm_i_pthread_key_create (&scm_i_freelist, NULL); - scm_i_pthread_key_create (&scm_i_freelist2, NULL); - guilify_self_1 (base); + guilify_self_1 ((struct GC_stack_base *) base); } scm_t_bits scm_tc16_thread; @@ -2062,34 +2125,28 @@ void scm_init_threads () { scm_tc16_thread = scm_make_smob_type ("thread", sizeof (scm_i_thread)); - scm_set_smob_mark (scm_tc16_thread, thread_mark); scm_set_smob_print (scm_tc16_thread, thread_print); - scm_set_smob_free (scm_tc16_thread, thread_free); scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex)); - scm_set_smob_mark (scm_tc16_mutex, fat_mutex_mark); scm_set_smob_print (scm_tc16_mutex, fat_mutex_print); scm_set_smob_free (scm_tc16_mutex, fat_mutex_free); scm_tc16_condvar = scm_make_smob_type ("condition-variable", sizeof (fat_cond)); - scm_set_smob_mark (scm_tc16_condvar, fat_cond_mark); scm_set_smob_print (scm_tc16_condvar, fat_cond_print); - scm_set_smob_free (scm_tc16_condvar, fat_cond_free); scm_i_default_dynamic_state = SCM_BOOL_F; guilify_self_2 (SCM_BOOL_F); threads_initialized_p = 1; - dynwind_critical_section_mutex = - scm_permanent_object (scm_make_recursive_mutex ()); + dynwind_critical_section_mutex = scm_make_recursive_mutex (); } void scm_init_threads_default_dynamic_state () { SCM state = scm_make_dynamic_state (scm_current_dynamic_state ()); - scm_i_default_dynamic_state = scm_permanent_object (state); + scm_i_default_dynamic_state = state; } void @@ 
@@ -2098,6 +2155,49 @@ scm_init_thread_procs ()
 {
 #include "libguile/threads.x"
 }
 
+
+/* IA64-specific things.  */
+
+#ifdef __ia64__
+# ifdef __hpux
+#  include <sys/param.h>
+#  include <sys/pstat.h>
+void *
+scm_ia64_register_backing_store_base (void)
+{
+  struct pst_vm_status vm_status;
+  int i = 0;
+  while (pstat_getprocvm (&vm_status, sizeof (vm_status), 0, i++) == 1)
+    if (vm_status.pst_type == PS_RSESTACK)
+      return (void *) vm_status.pst_vaddr;
+  abort ();
+}
+void *
+scm_ia64_ar_bsp (const void *ctx)
+{
+  uint64_t bsp;
+  __uc_get_ar_bsp (ctx, &bsp);
+  return (void *) bsp;
+}
+# endif /* hpux */
+# ifdef linux
+#  include <ucontext.h>
+void *
+scm_ia64_register_backing_store_base (void)
+{
+  extern void *__libc_ia64_register_backing_store_base;
+  return __libc_ia64_register_backing_store_base;
+}
+void *
+scm_ia64_ar_bsp (const void *opaque)
+{
+  const ucontext_t *ctx = opaque;
+  return (void *) ctx->uc_mcontext.sc_ar_bsp;
+}
+# endif /* linux */
+#endif /* __ia64__ */
+
+
 /*
   Local Variables:
  c-file-style: "gnu"