/* Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001, 2002, 2003, 2004,
- * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
- * Free Software Foundation, Inc.
+ * 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013,
+ * 2014 Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
#endif
#include "libguile/bdw-gc.h"
+#include <gc/gc_mark.h>
#include "libguile/_scm.h"
#include <stdlib.h>
-#if HAVE_UNISTD_H
#include <unistd.h>
-#endif
#include <stdio.h>
#ifdef HAVE_STRING_H
#include "libguile/fluids.h"
#include "libguile/continuations.h"
#include "libguile/gc.h"
+#include "libguile/gc-inline.h"
#include "libguile/init.h"
#include "libguile/scmsigs.h"
#include "libguile/strings.h"
-#include "libguile/weaks.h"
+#include "libguile/vm.h"
#include <full-read.h>
\f
-/* First some libgc shims. */
-
-/* Make sure GC_fn_type is defined; it is missing from the public
- headers of GC 7.1 and earlier. */
-#ifndef HAVE_GC_FN_TYPE
-typedef void * (* GC_fn_type) (void *);
-#endif
-
-
-#ifndef GC_SUCCESS
-#define GC_SUCCESS 0
-#endif
-
-#ifndef GC_UNIMPLEMENTED
-#define GC_UNIMPLEMENTED 3
-#endif
-
-/* Likewise struct GC_stack_base is missing before 7.1. */
-#ifndef HAVE_GC_STACK_BASE
-struct GC_stack_base {
- void * mem_base; /* Base of memory stack. */
-#ifdef __ia64__
- void * reg_base; /* Base of separate register stack. */
-#endif
-};
-
-static int
-GC_register_my_thread (struct GC_stack_base *stack_base)
-{
- return GC_UNIMPLEMENTED;
-}
-
-static void
-GC_unregister_my_thread ()
-{
-}
-
-#if !SCM_USE_PTHREAD_THREADS
-/* No threads; we can just use GC_stackbottom. */
-static void *
-get_thread_stack_base ()
-{
- return GC_stackbottom;
-}
-
-#elif defined HAVE_PTHREAD_ATTR_GETSTACK && defined HAVE_PTHREAD_GETATTR_NP \
- && defined PTHREAD_ATTR_GETSTACK_WORKS
-/* This method for GNU/Linux and perhaps some other systems.
- It's not for MacOS X or Solaris 10, since pthread_getattr_np is not
- available on them. */
-static void *
-get_thread_stack_base ()
-{
- pthread_attr_t attr;
- void *start, *end;
- size_t size;
-
- pthread_getattr_np (pthread_self (), &attr);
- pthread_attr_getstack (&attr, &start, &size);
- end = (char *)start + size;
-
-#if SCM_STACK_GROWS_UP
- return start;
-#else
- return end;
-#endif
-}
-
-#elif defined HAVE_PTHREAD_GET_STACKADDR_NP
-/* This method for MacOS X.
- It'd be nice if there was some documentation on pthread_get_stackaddr_np,
- but as of 2006 there's nothing obvious at apple.com. */
-static void *
-get_thread_stack_base ()
-{
- return pthread_get_stackaddr_np (pthread_self ());
-}
-
-#elif HAVE_PTHREAD_ATTR_GET_NP
-/* This one is for FreeBSD 9. */
-static void *
-get_thread_stack_base ()
-{
- pthread_attr_t attr;
- void *start, *end;
- size_t size;
-
- pthread_attr_init (&attr);
- pthread_attr_get_np (pthread_self (), &attr);
- pthread_attr_getstack (&attr, &start, &size);
- pthread_attr_destroy (&attr);
-
- end = (char *)start + size;
-
-#if SCM_STACK_GROWS_UP
- return start;
-#else
- return end;
-#endif
-}
-
-#else
-#error Threads enabled with old BDW-GC, but missing get_thread_stack_base impl. Please upgrade to libgc >= 7.1.
-#endif
-
-static int
-GC_get_stack_base (struct GC_stack_base *stack_base)
-{
- stack_base->mem_base = get_thread_stack_base ();
-#ifdef __ia64__
- /* Calculate and store off the base of this thread's register
- backing store (RBS). Unfortunately our implementation(s) of
- scm_ia64_register_backing_store_base are only reliable for the
- main thread. For other threads, therefore, find out the current
- top of the RBS, and use that as a maximum. */
- stack_base->reg_base = scm_ia64_register_backing_store_base ();
- {
- ucontext_t ctx;
- void *bsp;
- getcontext (&ctx);
- bsp = scm_ia64_ar_bsp (&ctx);
- if (stack_base->reg_base > bsp)
- stack_base->reg_base = bsp;
- }
-#endif
- return GC_SUCCESS;
-}
-
-static void *
-GC_call_with_stack_base(void * (*fn) (struct GC_stack_base*, void*), void *arg)
-{
- struct GC_stack_base stack_base;
-
- stack_base.mem_base = (void*)&stack_base;
-#ifdef __ia64__
- /* FIXME: Untested. */
- {
- ucontext_t ctx;
- getcontext (&ctx);
- stack_base.reg_base = scm_ia64_ar_bsp (&ctx);
- }
-#endif
-
- return fn (&stack_base, arg);
-}
-#endif /* HAVE_GC_STACK_BASE */
-
-
-/* Now define with_gc_active and with_gc_inactive. */
-
-#if (defined(HAVE_GC_DO_BLOCKING) && defined (HAVE_DECL_GC_DO_BLOCKING) && defined (HAVE_GC_CALL_WITH_GC_ACTIVE))
-
-/* We have a sufficiently new libgc (7.2 or newer). */
-
-static void*
-with_gc_inactive (GC_fn_type func, void *data)
-{
- return GC_do_blocking (func, data);
-}
-
-static void*
-with_gc_active (GC_fn_type func, void *data)
-{
- return GC_call_with_gc_active (func, data);
-}
-
-#else
-
-/* libgc not new enough, so never actually deactivate GC.
-
- Note that though GC 7.1 does have a GC_do_blocking, it doesn't have
- GC_call_with_gc_active. */
+/* The GC "kind" for threads that allow them to mark their VM
+ stacks. */
+static int thread_gc_kind;
+
+/* Custom GC mark procedure for heap-allocated 'scm_i_thread' objects
+   (registered as 'thread_gc_kind').  Conservatively marks the thread
+   struct itself, then traces what libgc cannot see on its own: the
+   tails of the pointerless freelists and the VM stack.  Called by the
+   collector during marking; ENV is unused.  */
+static struct GC_ms_entry *
+thread_mark (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
+             struct GC_ms_entry *mark_stack_limit, GC_word env)
+{
+  int word;
+  const struct scm_i_thread *t = (struct scm_i_thread *) addr;
+
+  if (SCM_UNPACK (t->handle) == 0)
+    /* T must be on the free-list; ignore. (See warning in
+       gc_mark.h.) */
+    return mark_stack_ptr;
+
+  /* Mark T. We could be more precise, but it doesn't matter. */
+  for (word = 0; word * sizeof (*addr) < sizeof (*t); word++)
+    mark_stack_ptr = GC_MARK_AND_PUSH ((void *) addr[word],
+                                       mark_stack_ptr, mark_stack_limit,
+                                       NULL);
+
+  /* The pointerless freelists are threaded through their first word,
+     but GC doesn't know to trace them (as they are pointerless), so we
+     need to do that here. See the comments at the top of libgc's
+     gc_inline.h. */
+  if (t->pointerless_freelists)
+    {
+      size_t n;
+      for (n = 0; n < SCM_INLINE_GC_FREELIST_COUNT; n++)
+        {
+          void *chain = t->pointerless_freelists[n];
+          if (chain)
+            {
+              /* The first link is already marked by the freelist vector,
+                 so we just have to mark the tail. */
+              while ((chain = *(void **)chain))
+                mark_stack_ptr = GC_mark_and_push (chain, mark_stack_ptr,
+                                                   mark_stack_limit, NULL);
+            }
+        }
+    }
-static void*
-with_gc_inactive (GC_fn_type func, void *data)
-{
-  return func (data);
-}
+  if (t->vp)
+    mark_stack_ptr = scm_i_vm_mark_stack (t->vp, mark_stack_ptr,
+                                          mark_stack_limit);
-static void*
-with_gc_active (GC_fn_type func, void *data)
-{
-  return func (data);
+  return mark_stack_ptr;
}
-#endif /* HAVE_GC_DO_BLOCKING */
-
\f
static void
}
}
+
\f
/*** Queues */
if (scm_is_eq (p, c))
{
if (scm_is_eq (c, SCM_CAR (q)))
- SCM_SETCAR (q, SCM_CDR (c));
+ SCM_SETCAR (q, scm_is_eq (prev, q) ? SCM_EOL : prev);
SCM_SETCDR (prev, SCM_CDR (c));
/* GC-robust */
else
id = u.um;
- scm_puts ("#<thread ", port);
+ scm_puts_unlocked ("#<thread ", port);
scm_uintprint (id, 10, port);
- scm_puts (" (", port);
+ scm_puts_unlocked (" (", port);
scm_uintprint ((scm_t_bits)t, 16, port);
- scm_puts (")>", port);
+ scm_puts_unlocked (")>", port);
return 1;
}
t.mutexes = SCM_EOL;
t.held_mutex = NULL;
t.join_queue = SCM_EOL;
+ t.freelists = NULL;
+ t.pointerless_freelists = NULL;
t.dynamic_state = SCM_BOOL_F;
- t.dynwinds = SCM_EOL;
+ t.dynstack.base = NULL;
+ t.dynstack.top = NULL;
+ t.dynstack.limit = NULL;
t.active_asyncs = SCM_EOL;
t.block_asyncs = 1;
t.pending_asyncs = 1;
t.sleep_mutex = NULL;
t.sleep_object = SCM_BOOL_F;
t.sleep_fd = -1;
+ t.vp = NULL;
if (pipe2 (t.sleep_pipe, O_CLOEXEC) != 0)
/* FIXME: Error conditions during the initialization phase are handled
abort ();
scm_i_pthread_mutex_init (&t.admin_mutex, NULL);
- t.current_mark_stack_ptr = NULL;
- t.current_mark_stack_limit = NULL;
t.canceled = 0;
t.exited = 0;
t.guile_mode = 0;
scm_i_thread *t_ptr = &t;
GC_disable ();
- t_ptr = GC_malloc (sizeof (scm_i_thread));
+ t_ptr = GC_generic_malloc (sizeof (*t_ptr), thread_gc_kind);
memcpy (t_ptr, &t, sizeof t);
scm_i_pthread_setspecific (scm_i_thread_key, t_ptr);
t->continuation_root = scm_cons (t->handle, SCM_EOL);
t->continuation_base = t->base;
- t->vm = SCM_BOOL_F;
+
+ {
+ size_t size = SCM_INLINE_GC_FREELIST_COUNT * sizeof (void *);
+ t->freelists = scm_gc_malloc (size, "freelists");
+ t->pointerless_freelists = scm_gc_malloc (size, "atomic freelists");
+ }
if (scm_is_true (parent))
t->dynamic_state = scm_make_dynamic_state (parent);
else
t->dynamic_state = scm_i_make_initial_dynamic_state ();
+ t->dynstack.base = scm_gc_malloc (16 * sizeof (scm_t_bits), "dynstack");
+ t->dynstack.limit = t->dynstack.base + 16;
+ t->dynstack.top = t->dynstack.base + SCM_DYNSTACK_HEADER_LEN;
+
t->join_queue = make_queue ();
t->block_asyncs = 0;
{
scm_i_thread *t = (scm_i_thread *) v;
- /* Ensure the signal handling thread has been launched, because we might be
- shutting it down. This needs to be done in Guile mode. */
- scm_i_ensure_signal_delivery_thread ();
-
if (!scm_is_false (t->cleanup_handler))
{
SCM ptr = t->cleanup_handler;
while (!scm_is_null (t->mutexes))
{
- SCM mutex = SCM_WEAK_PAIR_CAR (t->mutexes);
+ SCM mutex = scm_c_weak_vector_ref (scm_car (t->mutexes), 0);
- if (!SCM_UNBNDP (mutex))
+ if (scm_is_true (mutex))
{
fat_mutex *m = SCM_MUTEX_DATA (mutex);
scm_i_pthread_mutex_unlock (&m->lock);
}
- t->mutexes = SCM_WEAK_PAIR_CDR (t->mutexes);
+ t->mutexes = scm_cdr (t->mutexes);
}
scm_i_pthread_mutex_unlock (&t->admin_mutex);
scm_i_pthread_setspecific (scm_i_thread_key, NULL);
+ if (t->vp)
+ {
+ scm_i_vm_free_stack (t->vp);
+ t->vp = NULL;
+ }
+
#if SCM_USE_PTHREAD_THREADS
GC_unregister_my_thread ();
#endif
*/
scm_i_init_guile (base);
-#if defined (HAVE_GC_ALLOW_REGISTER_THREADS) && SCM_USE_PTHREAD_THREADS
+#if SCM_USE_PTHREAD_THREADS
/* Allow other threads to come in later. */
GC_allow_register_threads ();
#endif
#endif
t->guile_mode = 1;
- res = with_gc_active (with_guile_trampoline, args);
+ res = GC_call_with_gc_active (with_guile_trampoline, args);
t->guile_mode = 0;
}
return res;
if (t->guile_mode)
{
SCM_I_CURRENT_THREAD->guile_mode = 0;
- result = with_gc_inactive (func, data);
+ result = GC_do_blocking (func, data);
SCM_I_CURRENT_THREAD->guile_mode = 1;
}
else
errno = err;
scm_syserror (NULL);
}
- scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
+
+ while (scm_is_false (data.thread))
+ scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
+
scm_i_pthread_mutex_unlock (&data.mutex);
return data.thread;
errno = err;
scm_syserror (NULL);
}
- scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
+
+ while (scm_is_false (data.thread))
+ scm_i_scm_pthread_cond_wait (&data.cond, &data.mutex);
+
scm_i_pthread_mutex_unlock (&data.mutex);
assert (SCM_I_IS_THREAD (data.thread));
}
#undef FUNC_NAME
+/* Some systems, notably Android, lack 'pthread_cancel'. Don't provide
+ 'cancel-thread' on these systems. */
+
+#if !SCM_USE_PTHREAD_THREADS || defined HAVE_PTHREAD_CANCEL
+
SCM_DEFINE (scm_cancel_thread, "cancel-thread", 1, 0, 0,
(SCM thread),
"Asynchronously force the target @var{thread} to terminate. @var{thread} "
}
#undef FUNC_NAME
+#endif
+
SCM_DEFINE (scm_set_thread_cleanup_x, "set-thread-cleanup!", 2, 0, 0,
(SCM thread, SCM proc),
"Set the thunk @var{proc} as the cleanup handler for the thread @var{thread}. "
#undef FUNC_NAME
-static size_t
-fat_mutex_free (SCM mx)
-{
- fat_mutex *m = SCM_MUTEX_DATA (mx);
- scm_i_pthread_mutex_destroy (&m->lock);
- return 0;
-}
-
+/* Smob print procedure for fat mutexes: writes "#<mutex ADDR>" to
+   PORT, where ADDR is the mutex struct's address in hexadecimal.
+   Returns 1 to tell the printer the object was handled.  Uses the
+   _unlocked port primitives since the printer already holds the port
+   lock.  */
static int
fat_mutex_print (SCM mx, SCM port, scm_print_state *pstate SCM_UNUSED)
{
fat_mutex *m = SCM_MUTEX_DATA (mx);
-  scm_puts ("#<mutex ", port);
+  scm_puts_unlocked ("#<mutex ", port);
scm_uintprint ((scm_t_bits)m, 16, port);
-  scm_puts (">", port);
+  scm_puts_unlocked (">", port);
return 1;
}
{
fat_mutex *m;
SCM mx;
+ scm_i_pthread_mutex_t lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
m = scm_gc_malloc (sizeof (fat_mutex), "mutex");
- scm_i_pthread_mutex_init (&m->lock, NULL);
+ /* Because PTHREAD_MUTEX_INITIALIZER is static, it's plain old data,
+ and so we can just copy it. */
+ memcpy (&m->lock, &lock, sizeof (m->lock));
m->owner = SCM_BOOL_F;
m->level = 0;
The weak pair itself is eventually removed when MUTEX
is unlocked. Note that `t->mutexes' lists mutexes
currently held by T, so it should be small. */
- t->mutexes = scm_weak_car_pair (mutex, t->mutexes);
+ t->mutexes = scm_cons (scm_make_weak_vector (SCM_INUM1, mutex),
+ t->mutexes);
scm_i_pthread_mutex_unlock (&t->admin_mutex);
}
#define SCM_CONDVARP(x) SCM_SMOB_PREDICATE (scm_tc16_condvar, x)
#define SCM_CONDVAR_DATA(x) ((fat_cond *) SCM_SMOB_DATA (x))
+/* Remove MUTEX from the list of mutexes held by thread T.  Each entry
+   of T->mutexes is a one-element weak vector whose slot 0 holds a
+   mutex (or #f once collected), so compare against slot 0.  Removes at
+   most one occurrence; does nothing if MUTEX is not found.  */
+static void
+remove_mutex_from_thread (SCM mutex, scm_i_thread *t)
+{
+  SCM walk, prev;
+
+  /* PREV must advance in lock-step with WALK; if it stayed at its
+     initial #f, finding MUTEX anywhere but the head would take the
+     't->mutexes = SCM_CDR (walk)' branch and silently drop every
+     entry before the match.  */
+  for (prev = SCM_BOOL_F, walk = t->mutexes; scm_is_pair (walk);
+       prev = walk, walk = SCM_CDR (walk))
+    {
+      if (scm_is_eq (mutex, scm_c_weak_vector_ref (SCM_CAR (walk), 0)))
+        {
+          if (scm_is_pair (prev))
+            SCM_SETCDR (prev, SCM_CDR (walk));
+          else
+            t->mutexes = SCM_CDR (walk);
+          break;
+        }
+    }
+}
+
static int
fat_mutex_unlock (SCM mutex, SCM cond,
const scm_t_timespec *waittime, int relock)
if (m->level == 0)
{
/* Change the owner of MUTEX. */
- t->mutexes = scm_delq_x (mutex, t->mutexes);
+ remove_mutex_from_thread (mutex, t);
m->owner = unblock_from_queue (m->waiting);
}
}
t->block_asyncs--;
- scm_async_click ();
+ scm_async_tick ();
scm_remember_upto_here_2 (cond, mutex);
if (m->level == 0)
{
/* Change the owner of MUTEX. */
- t->mutexes = scm_delq_x (mutex, t->mutexes);
+ remove_mutex_from_thread (mutex, t);
m->owner = unblock_from_queue (m->waiting);
}
{
SCM_VALIDATE_CONDVAR (2, cond);
- if (! (SCM_UNBNDP (timeout)))
+ if (! SCM_UNBNDP (timeout) && ! scm_is_false (timeout))
{
to_timespec (timeout, &cwaittime);
waittime = &cwaittime;
fat_cond_print (SCM cv, SCM port, scm_print_state *pstate SCM_UNUSED)
{
fat_cond *c = SCM_CONDVAR_DATA (cv);
- scm_puts ("#<condition-variable ", port);
+ scm_puts_unlocked ("#<condition-variable ", port);
scm_uintprint ((scm_t_bits)c, 16, port);
- scm_puts (">", port);
+ scm_puts_unlocked (">", port);
return 1;
}
return NULL;
}
-#if !SCM_HAVE_SYS_SELECT_H
-static int scm_std_select (int nfds,
- fd_set *readfds,
- fd_set *writefds,
- fd_set *exceptfds,
- struct timeval *timeout);
-#endif
-
int
scm_std_select (int nfds,
fd_set *readfds,
#endif
+/* Unwind handler for scm_i_dynwind_pthread_mutex_lock_block_asyncs:
+   unlocks the pthread mutex passed as DATA, then decrements the
+   current thread's async-block count, undoing both effects of the
+   dynwind entry in reverse order.  */
+static void
+do_unlock_with_asyncs (void *data)
+{
+  scm_i_pthread_mutex_unlock ((scm_i_pthread_mutex_t *)data);
+  SCM_I_CURRENT_THREAD->block_asyncs--;
+}
+
+/* Within the current dynwind context, lock MUTEX with asyncs blocked.
+   Asyncs are blocked before taking the lock so no async handler can
+   run (and possibly re-enter this code) while MUTEX is held.  The
+   mutex is unlocked and asyncs re-enabled when the dynwind is exited,
+   normally or non-locally (SCM_F_WIND_EXPLICITLY).  */
+void
+scm_i_dynwind_pthread_mutex_lock_block_asyncs (scm_i_pthread_mutex_t *mutex)
+{
+  SCM_I_CURRENT_THREAD->block_asyncs++;
+  scm_i_scm_pthread_mutex_lock (mutex);
+  scm_dynwind_unwind_handler (do_unlock_with_asyncs, mutex,
+                              SCM_F_WIND_EXPLICITLY);
+}
+
unsigned long
scm_std_usleep (unsigned long usecs)
{
scm_i_pthread_mutex_init (&scm_i_misc_mutex, NULL);
scm_i_pthread_cond_init (&wake_up_cond, NULL);
+ thread_gc_kind =
+ GC_new_kind (GC_new_free_list (),
+ GC_MAKE_PROC (GC_new_proc (thread_mark), 0),
+ 0, 1);
+
guilify_self_1 ((struct GC_stack_base *) base);
}
scm_tc16_mutex = scm_make_smob_type ("mutex", sizeof (fat_mutex));
scm_set_smob_print (scm_tc16_mutex, fat_mutex_print);
- scm_set_smob_free (scm_tc16_mutex, fat_mutex_free);
scm_tc16_condvar = scm_make_smob_type ("condition-variable",
sizeof (fat_cond));