-/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
+/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013, 2014 Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
#include <alignof.h>
#include <string.h>
#include <stdint.h>
+#include <unistd.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#include "_scm.h"
#include "control.h"
#include "frames.h"
+#include "gc-inline.h"
#include "instructions.h"
#include "loader.h"
#include "programs.h"
/* #define VM_ENABLE_PARANOID_ASSERTIONS */
+static void vm_expand_stack (struct scm_vm *vp) SCM_NOINLINE;
+
+/* RESTORE is for the case where we know we have done a PUSH of equal or
+ greater stack size in the past. Otherwise PUSH is the thing, which
+ may expand the stack. */
+enum vm_increase_sp_kind { VM_SP_PUSH, VM_SP_RESTORE };
+
+static inline void
+vm_increase_sp (struct scm_vm *vp, SCM *new_sp, enum vm_increase_sp_kind kind)
+{
+ vp->sp = new_sp;
+ if (new_sp > vp->sp_max_since_gc)
+ {
+ vp->sp_max_since_gc = new_sp;
+ if (kind == VM_SP_PUSH && new_sp >= vp->stack_limit)
+ vm_expand_stack (vp);
+ }
+}
+
+static inline void
+vm_push_sp (struct scm_vm *vp, SCM *new_sp)
+{
+ vm_increase_sp (vp, new_sp, VM_SP_PUSH);
+}
+
+static inline void
+vm_restore_sp (struct scm_vm *vp, SCM *new_sp)
+{
+ vm_increase_sp (vp, new_sp, VM_SP_RESTORE);
+}
\f
/*
{
struct scm_vm_cont *cp;
SCM *argv_copy;
+ scm_t_ptrdiff reloc;
argv_copy = alloca (n * sizeof(SCM));
memcpy (argv_copy, argv, n * sizeof(SCM));
cp = SCM_VM_CONT_DATA (cont);
- if (vp->stack_size < cp->stack_size + n + 3)
- scm_misc_error ("vm-engine", "not enough space to reinstate continuation",
- scm_list_1 (cont));
+ /* FIXME: Need to prevent GC while futzing with the stack; otherwise,
+ another thread causing GC may initiate a mark of a stack in an
+ inconsistent state. */
- vp->sp = cp->sp;
- vp->fp = cp->fp;
+ /* We know that there is enough space for the continuation, because we
+ captured it in the past. However there may have been an expansion
+ since the capture, so we may have to re-link the frame
+ pointers. */
+ reloc = (vp->stack_base - (cp->stack_base - cp->reloc));
+ vp->fp = cp->fp + reloc;
memcpy (vp->stack_base, cp->stack_base, cp->stack_size * sizeof (SCM));
+ vm_restore_sp (vp, cp->sp + reloc);
+ if (reloc)
+ {
+ SCM *fp = vp->fp;
+ while (fp)
+ {
+ SCM *next_fp = SCM_FRAME_DYNAMIC_LINK (fp);
+ if (next_fp)
+ {
+ next_fp += reloc;
+ SCM_FRAME_SET_DYNAMIC_LINK (fp, next_fp);
+ }
+ fp = next_fp;
+ }
+ }
+
+ /* Now we have the continuation properly copied over. We just need to
+ copy the arguments. It is not guaranteed that there is actually
+ space for the arguments, though, so we have to bump the SP first. */
+ vm_push_sp (vp, vp->sp + 3 + n);
+
+ /* Now copy on an empty frame and the return values, as the
+ continuation expects. */
{
+ SCM *base = vp->sp + 1 - 3 - n;
size_t i;
- /* Push on an empty frame, as the continuation expects. */
for (i = 0; i < 3; i++)
- {
- vp->sp++;
- *vp->sp = SCM_BOOL_F;
- }
+ base[i] = SCM_BOOL_F;
- /* Push the return values. */
for (i = 0; i < n; i++)
- {
- vp->sp++;
- *vp->sp = argv_copy[i];
- }
- vp->ip = cp->ra;
+ base[i + 3] = argv_copy[i];
}
+
+ vp->ip = cp->ra;
}
static struct scm_vm * thread_vm (scm_i_thread *t);
memcpy (argv_copy, argv, n * sizeof(SCM));
cp = SCM_VM_CONT_DATA (cont);
- base = SCM_FRAME_LOCALS_ADDRESS (vp->fp);
- reloc = cp->reloc + (base - cp->stack_base);
-#define RELOC(scm_p) \
- (((SCM *) (scm_p)) + reloc)
+ vm_push_sp (vp, SCM_FRAME_LOCALS_ADDRESS (vp->fp) + cp->stack_size + n - 1);
- if ((base - vp->stack_base) + cp->stack_size + n + 1 > vp->stack_size)
- scm_misc_error ("vm-engine",
- "not enough space to instate partial continuation",
- scm_list_1 (cont));
+ base = SCM_FRAME_LOCALS_ADDRESS (vp->fp);
+ reloc = cp->reloc + (base - cp->stack_base);
memcpy (base, cp->stack_base, cp->stack_size * sizeof (SCM));
+ vp->fp = cp->fp + reloc;
+ vp->ip = cp->ra;
+
/* now relocate frame pointers */
{
SCM *fp;
- for (fp = RELOC (cp->fp);
+ for (fp = vp->fp;
SCM_FRAME_LOWER_ADDRESS (fp) > base;
fp = SCM_FRAME_DYNAMIC_LINK (fp))
- SCM_FRAME_SET_DYNAMIC_LINK (fp, RELOC (SCM_FRAME_DYNAMIC_LINK (fp)));
+ SCM_FRAME_SET_DYNAMIC_LINK (fp, SCM_FRAME_DYNAMIC_LINK (fp) + reloc);
}
- vp->sp = base - 1 + cp->stack_size;
- vp->fp = RELOC (cp->fp);
- vp->ip = cp->ra;
-
/* Push the arguments. */
for (i = 0; i < n; i++)
- {
- vp->sp++;
- *vp->sp = argv_copy[i];
- }
+ vp->sp[i + 1 - n] = argv_copy[i];
/* The prompt captured a slice of the dynamic stack. Here we wind
those entries onto the current thread's stack. We also have to
scm_dynstack_wind_1 (dynstack, walk);
}
}
-#undef RELOC
}
\f
static void vm_error_not_a_pair (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_bytevector (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_struct (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
+static void vm_error_not_a_vector (const char *subr, SCM v) SCM_NORETURN SCM_NOINLINE;
+static void vm_error_out_of_range (const char *subr, SCM k) SCM_NORETURN SCM_NOINLINE;
static void vm_error_no_values (void) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_enough_values (void) SCM_NORETURN SCM_NOINLINE;
static void vm_error_wrong_number_of_values (scm_t_uint32 expected) SCM_NORETURN SCM_NOINLINE;
scm_wrong_type_arg_msg (subr, 1, x, "struct");
}
+static void
+vm_error_not_a_vector (const char *subr, SCM x)
+{
+ scm_wrong_type_arg_msg (subr, 1, x, "vector");
+}
+
/* Raise an out-of-range error for index K in procedure SUBR.  */
static void
vm_error_out_of_range (const char *subr, SCM k)
{
  /* If K is not convertible to a size_t at all, this call throws a
     more precise wrong-type error instead; its result is deliberately
     discarded.  Only when K is a valid size_t do we fall through to
     the range error below.  */
  scm_to_size_t (k);
  scm_out_of_range (subr, k);
}
+
static void
vm_error_no_values (void)
{
* VM
*/
+/* The page size. */
+static size_t page_size;
+
/* Hard stack limit is 512M words: 2 gigabytes on 32-bit machines, 4 on
64-bit machines. */
static const size_t hard_max_stack_size = 512 * 1024 * 1024;
-/* Initial stack size: 4 or 8 kB. */
-static const size_t initial_stack_size = 1024;
+/* Initial stack size. Defaults to one page. */
+static size_t initial_stack_size;
/* Default soft stack limit is 1M words (4 or 8 megabytes). */
static size_t default_max_stack_size = 1024 * 1024;
static void
initialize_default_stack_size (void)
{
- int size = scm_getenv_int ("GUILE_STACK_SIZE", (int) default_max_stack_size);
- if (size >= initial_stack_size && (size_t) size < ((size_t) -1) / sizeof(SCM))
- default_max_stack_size = size;
+ initial_stack_size = page_size / sizeof (SCM);
+
+ {
+ int size;
+ size = scm_getenv_int ("GUILE_STACK_SIZE", (int) default_max_stack_size);
+ if (size >= initial_stack_size
+ && (size_t) size < ((size_t) -1) / sizeof(SCM))
+ default_max_stack_size = size;
+ }
}
-static void vm_expand_stack (struct scm_vm *vp) SCM_NOINLINE;
#define VM_NAME vm_regular_engine
#define VM_USE_HOOKS 0
#define FUNC_NAME "vm-regular-engine"
ret = mmap (NULL, size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (ret == MAP_FAILED)
- SCM_SYSERROR;
+ ret = NULL;
#else
ret = malloc (size);
- if (!ret)
- SCM_SYSERROR;
#endif
+ if (!ret)
+ {
+ perror ("allocate_stack failed");
+ return NULL;
+ }
+
return (SCM *) ret;
}
#undef FUNC_NAME
new_stack = mremap (old_stack, old_size, new_size, MREMAP_MAYMOVE);
if (new_stack == MAP_FAILED)
- SCM_SYSERROR;
+ return NULL;
return (SCM *) new_stack;
#else
SCM *new_stack;
new_stack = allocate_stack (new_size);
+ if (!new_stack)
+ return NULL;
+
memcpy (new_stack, old_stack, old_size * sizeof (SCM));
free_stack (old_stack, old_size);
vp->stack_size = initial_stack_size;
vp->stack_base = allocate_stack (vp->stack_size);
+ if (!vp->stack_base)
+ /* As in expand_stack, we don't have any way to throw an exception
+ if we can't allocate one measely page -- there's no stack to
+ handle it. For now, abort. */
+ abort ();
vp->stack_limit = vp->stack_base + vp->stack_size;
vp->max_stack_size = default_max_stack_size;
vp->ip = NULL;
}
#undef FUNC_NAME
/* Release the pages of VP's stack between the current top of stack and
   the high-water mark back to the operating system, then reset the
   high-water mark.  The pages will read back as zeros if touched
   again.  No-op on systems without <sys/mman.h>.  */
static void
return_unused_stack_to_os (struct scm_vm *vp)
{
#if HAVE_SYS_MMAN_H
  scm_t_uintptr start = (scm_t_uintptr) (vp->sp + 1);
  scm_t_uintptr end = (scm_t_uintptr) vp->stack_limit;
  /* The second condition is needed to protect against wrap-around. */
  if (vp->sp_max_since_gc < vp->stack_limit && vp->sp < vp->sp_max_since_gc)
    end = (scm_t_uintptr) (vp->sp_max_since_gc + 1);

  /* Round both bounds up to a page boundary; page_size is a power of
     two, checked at init time.  */
  start = ((start - 1U) | (page_size - 1U)) + 1U; /* round up */
  end = ((end - 1U) | (page_size - 1U)) + 1U; /* round up */

  /* Return these pages to the OS.  The next time they are paged in,
     they will be zeroed.  */
  if (start < end)
    {
      int ret = 0;

      do
        ret = madvise ((void *) start, end - start, MADV_DONTNEED);
      /* errno values are positive: the previous test against -EAGAIN
         could never match, so EAGAIN was never actually retried.  */
      while (ret && errno == EAGAIN);

      if (ret)
        perror ("madvise failed");
    }

  vp->sp_max_since_gc = vp->sp;
#endif
}
+
/* Number of slots in the direct-mapped dead-slot-map cache; used as
   the modulus when hashing return addresses.  */
#define DEAD_SLOT_MAP_CACHE_SIZE 32U
/* One cache entry: a return address IP and the dead-slot bitmap that
   was found for it (which may be NULL).  */
struct dead_slot_map_cache_entry
{
  scm_t_uint32 *ip;
  const scm_t_uint8 *map;
};

/* Direct-mapped cache memoizing dead-slot-map lookups while marking a
   single VM stack, so repeated frames with the same return address
   avoid repeated searches.  */
struct dead_slot_map_cache
{
  struct dead_slot_map_cache_entry entries[DEAD_SLOT_MAP_CACHE_SIZE];
};
+
+static const scm_t_uint8 *
+find_dead_slot_map (scm_t_uint32 *ip, struct dead_slot_map_cache *cache)
+{
+ /* The lower two bits should be zero. FIXME: Use a better hash
+ function; we don't expose scm_raw_hashq currently. */
+ size_t slot = (((scm_t_uintptr) ip) >> 2) % DEAD_SLOT_MAP_CACHE_SIZE;
+ const scm_t_uint8 *map;
+
+ if (cache->entries[slot].ip == ip)
+ map = cache->entries[slot].map;
+ else
+ {
+ map = scm_find_dead_slot_map_unlocked (ip);
+ cache->entries[slot].ip = ip;
+ cache->entries[slot].map = map;
+ }
+
+ return map;
+}
+
/* Mark the VM stack region between its base and its current top.
   Called by the collector with the mark-stack window [mark_stack_ptr,
   mark_stack_limit); returns the updated mark stack pointer.  As a
   side effect, dead slots are overwritten with SCM_UNBOUND and unused
   stack pages are returned to the OS.  */
struct GC_ms_entry *
scm_i_vm_mark_stack (struct scm_vm *vp, struct GC_ms_entry *mark_stack_ptr,
                     struct GC_ms_entry *mark_stack_limit)
{
  SCM *sp, *fp;
  /* The first frame will be marked conservatively (without a dead
     slot map).  This is because GC can happen at any point within the
     hottest activation, due to multiple threads or per-instruction
     hooks, and providing dead slot maps for all points in a program
     would take a prohibitive amount of space.  */
  const scm_t_uint8 *dead_slots = NULL;
  /* Plausible heap bounds, used to cheaply reject values that cannot
     possibly be heap pointers before pushing them on the mark stack.  */
  scm_t_uintptr upper = (scm_t_uintptr) GC_greatest_plausible_heap_addr;
  scm_t_uintptr lower = (scm_t_uintptr) GC_least_plausible_heap_addr;
  struct dead_slot_map_cache cache;

  /* Zeroed cache: a NULL ip in an entry can never match a real return
     address, so all slots start out as misses.  */
  memset (&cache, 0, sizeof (cache));

  /* Walk frames from the innermost outward via the dynamic links,
     scanning each frame's locals from the stack top down.  */
  for (fp = vp->fp, sp = vp->sp; fp; fp = SCM_FRAME_DYNAMIC_LINK (fp))
    {
      for (; sp >= &SCM_FRAME_LOCAL (fp, 0); sp--)
        {
          SCM elt = *sp;
          if (SCM_NIMP (elt)
              && SCM_UNPACK (elt) >= lower && SCM_UNPACK (elt) <= upper)
            {
              if (dead_slots)
                {
                  /* One bit per slot, least-significant bit first.  */
                  size_t slot = sp - &SCM_FRAME_LOCAL (fp, 0);
                  if (dead_slots[slot / 8U] & (1U << (slot % 8U)))
                    {
                      /* This value may become dead as a result of GC,
                         so we can't just leave it on the stack.  */
                      *sp = SCM_UNBOUND;
                      continue;
                    }
                }

              mark_stack_ptr = GC_mark_and_push ((void *) elt,
                                                 mark_stack_ptr,
                                                 mark_stack_limit,
                                                 NULL);
            }
        }
      sp = SCM_FRAME_PREVIOUS_SP (fp);
      /* Inner frames may have a dead slots map for precise marking.
         Note that there may be other reasons to not have a dead slots
         map, e.g. if all of the frame's slots below the callee frame
         are live.  */
      dead_slots = find_dead_slot_map (SCM_FRAME_RETURN_ADDRESS (fp), &cache);
    }

  return_unused_stack_to_os (vp);

  return mark_stack_ptr;
}
abort ();
}
+ /* FIXME: Prevent GC while we expand the stack, to ensure that a
+ stack marker can trace the stack. */
if (stack_size > vp->stack_size)
{
- SCM *old_stack;
+ SCM *old_stack, *new_stack;
size_t new_size;
scm_t_ptrdiff reloc;
while (new_size < stack_size)
new_size *= 2;
old_stack = vp->stack_base;
- vp->stack_base = expand_stack (old_stack, vp->stack_size, new_size);
+ new_stack = expand_stack (vp->stack_base, vp->stack_size, new_size);
+ if (!new_stack)
+ /* It would be nice to throw an exception here, but that is
+ extraordinarily hard. Exceptionally hard, you might say!
+ "throw" is implemented in Scheme, and there may be arbitrary
+ pre-unwind handlers that push on more frames. We will
+ endeavor to do so in the future, but for now we just
+ abort. */
+ abort ();
+
+ vp->stack_base = new_stack;
vp->stack_size = new_size;
vp->stack_limit = vp->stack_base + new_size;
reloc = vp->stack_base - old_stack;
if (reloc)
{
SCM *fp;
- vp->fp += reloc;
+ if (vp->fp)
+ vp->fp += reloc;
vp->sp += reloc;
+ vp->sp_max_since_gc += reloc;
fp = vp->fp;
while (fp)
{
SCM_CHECK_STACK;
- /* Check that we have enough space: 3 words for the boot
- continuation, 3 + nargs for the procedure application, and 3 for
- setting up a new frame. */
- base_frame_size = 3 + 3 + nargs + 3;
- vp->sp += base_frame_size;
- if (vp->sp >= vp->stack_limit)
- vm_expand_stack (vp);
+ /* Check that we have enough space: 3 words for the boot continuation,
+ and 3 + nargs for the procedure application. */
+ base_frame_size = 3 + 3 + nargs;
+ vm_push_sp (vp, vp->sp + base_frame_size);
base = vp->sp + 1 - base_frame_size;
/* Since it's possible to receive the arguments on the stack itself,
base[4] = SCM_PACK (vp->ip); /* ra */
base[5] = proc;
vp->fp = &base[5];
- vp->sp = &SCM_FRAME_LOCAL (vp->fp, nargs);
{
int resume = SCM_I_SETJMP (registers);
(scm_t_extension_init_func)scm_init_vm_builtins,
NULL);
+ page_size = getpagesize ();
+ /* page_size should be a power of two. */
+ if (page_size & (page_size - 1))
+ abort ();
+
initialize_default_stack_size ();
sym_vm_run = scm_from_latin1_symbol ("vm-run");