-/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
+/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013, 2014 Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* 02110-1301 USA
*/
+/* For mremap(2) on GNU/Linux systems. */
+#define _GNU_SOURCE
+
#if HAVE_CONFIG_H
# include <config.h>
#endif
#include <alignof.h>
#include <string.h>
#include <stdint.h>
+#include <unistd.h>
+
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
#include "libguile/bdw-gc.h"
#include <gc/gc_mark.h>
#include "_scm.h"
#include "control.h"
#include "frames.h"
+#include "gc-inline.h"
#include "instructions.h"
#include "loader.h"
#include "programs.h"
+#include "simpos.h"
#include "vm.h"
#include "vm-builtins.h"
-#include "private-gc.h" /* scm_getenv_int */
-
static int vm_default_engine = SCM_VM_REGULAR_ENGINE;
/* Unfortunately we can't snarf these: snarfed things are only loaded up from
/* #define VM_ENABLE_PARANOID_ASSERTIONS */
-/* When defined, arrange so that the GC doesn't scan the VM stack beyond its
- current SP. This should help avoid excess data retention. See
- http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/3001
- for a discussion. */
-#define VM_ENABLE_PRECISE_STACK_GC_SCAN
+static void vm_expand_stack (struct scm_vm *vp) SCM_NOINLINE;
+
+/* RESTORE is for the case where we know we have done a PUSH of equal or
+ greater stack size in the past. Otherwise PUSH is the thing, which
+ may expand the stack. */
+enum vm_increase_sp_kind { VM_SP_PUSH, VM_SP_RESTORE };
-/* Size in SCM objects of the stack reserve. The reserve is used to run
- exception handling code in case of a VM stack overflow. */
-#define VM_STACK_RESERVE_SIZE 512
+static inline void
+vm_increase_sp (struct scm_vm *vp, SCM *new_sp, enum vm_increase_sp_kind kind)
+{
+ vp->sp = new_sp;
+ if (new_sp > vp->sp_max_since_gc)
+ {
+ vp->sp_max_since_gc = new_sp;
+ if (kind == VM_SP_PUSH && new_sp >= vp->stack_limit)
+ vm_expand_stack (vp);
+ }
+}
+static inline void
+vm_push_sp (struct scm_vm *vp, SCM *new_sp)
+{
+ vm_increase_sp (vp, new_sp, VM_SP_PUSH);
+}
+
+static inline void
+vm_restore_sp (struct scm_vm *vp, SCM *new_sp)
+{
+ vm_increase_sp (vp, new_sp, VM_SP_RESTORE);
+}
\f
/*
{
struct scm_vm_cont *cp;
SCM *argv_copy;
+ scm_t_ptrdiff reloc;
argv_copy = alloca (n * sizeof(SCM));
memcpy (argv_copy, argv, n * sizeof(SCM));
cp = SCM_VM_CONT_DATA (cont);
- if (vp->stack_size < cp->stack_size + n + 3)
- scm_misc_error ("vm-engine", "not enough space to reinstate continuation",
- scm_list_1 (cont));
+ /* FIXME: Need to prevent GC while futzing with the stack; otherwise,
+ another thread causing GC may initiate a mark of a stack in an
+ inconsistent state. */
- vp->sp = cp->sp;
- vp->fp = cp->fp;
+ /* We know that there is enough space for the continuation, because we
+ captured it in the past. However there may have been an expansion
+ since the capture, so we may have to re-link the frame
+ pointers. */
+ reloc = (vp->stack_base - (cp->stack_base - cp->reloc));
+ vp->fp = cp->fp + reloc;
memcpy (vp->stack_base, cp->stack_base, cp->stack_size * sizeof (SCM));
+ vm_restore_sp (vp, cp->sp + reloc);
+
+ if (reloc)
+ {
+ SCM *fp = vp->fp;
+ while (fp)
+ {
+ SCM *next_fp = SCM_FRAME_DYNAMIC_LINK (fp);
+ if (next_fp)
+ {
+ next_fp += reloc;
+ SCM_FRAME_SET_DYNAMIC_LINK (fp, next_fp);
+ }
+ fp = next_fp;
+ }
+ }
+ /* Now we have the continuation properly copied over. We just need to
+ copy the arguments. It is not guaranteed that there is actually
+ space for the arguments, though, so we have to bump the SP first. */
+ vm_push_sp (vp, vp->sp + 3 + n);
+
+ /* Now copy on an empty frame and the return values, as the
+ continuation expects. */
{
+ SCM *base = vp->sp + 1 - 3 - n;
size_t i;
- /* Push on an empty frame, as the continuation expects. */
for (i = 0; i < 3; i++)
- {
- vp->sp++;
- *vp->sp = SCM_BOOL_F;
- }
+ base[i] = SCM_BOOL_F;
- /* Push the return values. */
for (i = 0; i < n; i++)
- {
- vp->sp++;
- *vp->sp = argv_copy[i];
- }
- vp->ip = cp->ra;
+ base[i + 3] = argv_copy[i];
}
+
+ vp->ip = cp->ra;
}
+static struct scm_vm * thread_vm (scm_i_thread *t);
SCM
scm_i_capture_current_stack (void)
{
struct scm_vm *vp;
thread = SCM_I_CURRENT_THREAD;
- vp = scm_the_vm ();
+ vp = thread_vm (thread);
return scm_i_vm_capture_stack (vp->stack_base, vp->fp, vp->sp, vp->ip,
scm_dynstack_capture_all (&thread->dynstack),
static void vm_dispatch_pop_continuation_hook (struct scm_vm *vp, SCM *old_fp) SCM_NOINLINE;
static void vm_dispatch_next_hook (struct scm_vm *vp) SCM_NOINLINE;
static void vm_dispatch_abort_hook (struct scm_vm *vp) SCM_NOINLINE;
-static void vm_dispatch_restore_continuation_hook (struct scm_vm *vp) SCM_NOINLINE;
static void
vm_dispatch_hook (struct scm_vm *vp, int hook_num, SCM *argv, int n)
&SCM_FRAME_LOCAL (vp->fp, 1),
SCM_FRAME_NUM_LOCALS (vp->fp, vp->sp) - 1);
}
-static void vm_dispatch_restore_continuation_hook (struct scm_vm *vp)
-{
- return vm_dispatch_hook (vp, SCM_VM_RESTORE_CONTINUATION_HOOK, NULL, 0);
-}
static void
vm_abort (struct scm_vm *vp, SCM tag,
memcpy (argv_copy, argv, n * sizeof(SCM));
cp = SCM_VM_CONT_DATA (cont);
- base = SCM_FRAME_LOCALS_ADDRESS (vp->fp);
- reloc = cp->reloc + (base - cp->stack_base);
-#define RELOC(scm_p) \
- (((SCM *) (scm_p)) + reloc)
+ vm_push_sp (vp, SCM_FRAME_LOCALS_ADDRESS (vp->fp) + cp->stack_size + n - 1);
- if ((base - vp->stack_base) + cp->stack_size + n + 1 > vp->stack_size)
- scm_misc_error ("vm-engine",
- "not enough space to instate partial continuation",
- scm_list_1 (cont));
+ base = SCM_FRAME_LOCALS_ADDRESS (vp->fp);
+ reloc = cp->reloc + (base - cp->stack_base);
memcpy (base, cp->stack_base, cp->stack_size * sizeof (SCM));
+ vp->fp = cp->fp + reloc;
+ vp->ip = cp->ra;
+
/* now relocate frame pointers */
{
SCM *fp;
- for (fp = RELOC (cp->fp);
+ for (fp = vp->fp;
SCM_FRAME_LOWER_ADDRESS (fp) > base;
fp = SCM_FRAME_DYNAMIC_LINK (fp))
- SCM_FRAME_SET_DYNAMIC_LINK (fp, RELOC (SCM_FRAME_DYNAMIC_LINK (fp)));
+ SCM_FRAME_SET_DYNAMIC_LINK (fp, SCM_FRAME_DYNAMIC_LINK (fp) + reloc);
}
- vp->sp = base - 1 + cp->stack_size;
- vp->fp = RELOC (cp->fp);
- vp->ip = cp->ra;
-
/* Push the arguments. */
for (i = 0; i < n; i++)
- {
- vp->sp++;
- *vp->sp = argv_copy[i];
- }
+ vp->sp[i + 1 - n] = argv_copy[i];
/* The prompt captured a slice of the dynamic stack. Here we wind
those entries onto the current thread's stack. We also have to
scm_dynstack_wind_1 (dynstack, walk);
}
}
-#undef RELOC
}
\f
static void vm_error_too_many_args (int nargs) SCM_NORETURN SCM_NOINLINE;
static void vm_error_wrong_num_args (SCM proc) SCM_NORETURN SCM_NOINLINE;
static void vm_error_wrong_type_apply (SCM proc) SCM_NORETURN SCM_NOINLINE;
-static void vm_error_stack_overflow (struct scm_vm *vp) SCM_NORETURN SCM_NOINLINE;
static void vm_error_stack_underflow (void) SCM_NORETURN SCM_NOINLINE;
static void vm_error_improper_list (SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_pair (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_bytevector (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_struct (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
+static void vm_error_not_a_vector (const char *subr, SCM v) SCM_NORETURN SCM_NOINLINE;
+static void vm_error_out_of_range (const char *subr, SCM k) SCM_NORETURN SCM_NOINLINE;
static void vm_error_no_values (void) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_enough_values (void) SCM_NORETURN SCM_NOINLINE;
static void vm_error_wrong_number_of_values (scm_t_uint32 expected) SCM_NORETURN SCM_NOINLINE;
scm_list_1 (proc), scm_list_1 (proc));
}
-static void
-vm_error_stack_overflow (struct scm_vm *vp)
-{
- if (vp->stack_limit < vp->stack_base + vp->stack_size)
- /* There are VM_STACK_RESERVE_SIZE bytes left. Make them available so
- that `throw' below can run on this VM. */
- vp->stack_limit = vp->stack_base + vp->stack_size;
- else
- /* There is no space left on the stack. FIXME: Do something more
- sensible here! */
- abort ();
- vm_error ("VM: Stack overflow", SCM_UNDEFINED);
-}
-
static void
vm_error_stack_underflow (void)
{
scm_wrong_type_arg_msg (subr, 1, x, "struct");
}
+static void
+vm_error_not_a_vector (const char *subr, SCM x)
+{
+ scm_wrong_type_arg_msg (subr, 1, x, "vector");
+}
+
+static void
+vm_error_out_of_range (const char *subr, SCM k)
+{
+ scm_to_size_t (k);
+ scm_out_of_range (subr, k);
+}
+
static void
vm_error_no_values (void)
{
* VM
*/
-#define VM_MIN_STACK_SIZE (1024)
-#define VM_DEFAULT_STACK_SIZE (256 * 1024)
-static size_t vm_stack_size = VM_DEFAULT_STACK_SIZE;
+/* The page size. */
+static size_t page_size;
+
+/* Hard stack limit is 512M words: 2 gigabytes on 32-bit machines, 4 on
+ 64-bit machines. */
+static const size_t hard_max_stack_size = 512 * 1024 * 1024;
+
+/* Initial stack size. Defaults to one page. */
+static size_t initial_stack_size;
+
+/* Default soft stack limit is 1M words (4 or 8 megabytes). */
+static size_t default_max_stack_size = 1024 * 1024;
static void
initialize_default_stack_size (void)
{
- int size = scm_getenv_int ("GUILE_STACK_SIZE", vm_stack_size);
- if (size >= VM_MIN_STACK_SIZE)
- vm_stack_size = size;
+ initial_stack_size = page_size / sizeof (SCM);
+
+ {
+ int size;
+ size = scm_getenv_int ("GUILE_STACK_SIZE", (int) default_max_stack_size);
+ if (size >= initial_stack_size
+ && (size_t) size < ((size_t) -1) / sizeof(SCM))
+ default_max_stack_size = size;
+ }
}
#define VM_NAME vm_regular_engine
#undef VM_USE_HOOKS
#undef VM_NAME
-typedef SCM (*scm_t_vm_engine) (struct scm_vm *vp,
- SCM program, SCM *argv, size_t nargs);
+typedef SCM (*scm_t_vm_engine) (scm_i_thread *current_thread, struct scm_vm *vp,
+ scm_i_jmp_buf *registers, int resume);
static const scm_t_vm_engine vm_engines[SCM_VM_NUM_ENGINES] =
{ vm_regular_engine, vm_debug_engine };
-#ifdef VM_ENABLE_PRECISE_STACK_GC_SCAN
+static SCM*
+allocate_stack (size_t size)
+#define FUNC_NAME "make_vm"
+{
+ void *ret;
+
+ if (size >= ((size_t) -1) / sizeof (SCM))
+ abort ();
+
+ size *= sizeof (SCM);
+
+#if HAVE_SYS_MMAN_H
+ ret = mmap (NULL, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (ret == MAP_FAILED)
+ ret = NULL;
+#else
+ ret = malloc (size);
+#endif
+
+ if (!ret)
+ {
+ perror ("allocate_stack failed");
+ return NULL;
+ }
+
+ return (SCM *) ret;
+}
+#undef FUNC_NAME
+
+static void
+free_stack (SCM *stack, size_t size)
+{
+ size *= sizeof (SCM);
+
+#if HAVE_SYS_MMAN_H
+ munmap (stack, size);
+#else
+ free (stack);
+#endif
+}
+
+static SCM*
+expand_stack (SCM *old_stack, size_t old_size, size_t new_size)
+#define FUNC_NAME "expand_stack"
+{
+#if defined MREMAP_MAYMOVE
+ void *new_stack;
+
+ if (new_size >= ((size_t) -1) / sizeof (SCM))
+ abort ();
+
+ old_size *= sizeof (SCM);
+ new_size *= sizeof (SCM);
-/* The GC "kind" for the VM stack. */
-static int vm_stack_gc_kind;
+ new_stack = mremap (old_stack, old_size, new_size, MREMAP_MAYMOVE);
+ if (new_stack == MAP_FAILED)
+ return NULL;
+ return (SCM *) new_stack;
+#else
+ SCM *new_stack;
+
+ new_stack = allocate_stack (new_size);
+ if (!new_stack)
+ return NULL;
+
+ memcpy (new_stack, old_stack, old_size * sizeof (SCM));
+ free_stack (old_stack, old_size);
+
+ return new_stack;
#endif
+}
+#undef FUNC_NAME
static struct scm_vm *
make_vm (void)
vp = scm_gc_malloc (sizeof (struct scm_vm), "vm");
- vp->stack_size= vm_stack_size;
-
-#ifdef VM_ENABLE_PRECISE_STACK_GC_SCAN
- vp->stack_base = (SCM *)
- GC_generic_malloc (vp->stack_size * sizeof (SCM), vm_stack_gc_kind);
-
- /* Keep a pointer to VP so that `vm_stack_mark ()' can know what the stack
- top is. */
- *vp->stack_base = SCM_PACK_POINTER (vp);
- vp->stack_base++;
- vp->stack_size--;
-#else
- vp->stack_base = scm_gc_malloc (vp->stack_size * sizeof (SCM),
- "stack-base");
-#endif
-
- vp->stack_limit = vp->stack_base + vp->stack_size - VM_STACK_RESERVE_SIZE;
+ vp->stack_size = initial_stack_size;
+ vp->stack_base = allocate_stack (vp->stack_size);
+ if (!vp->stack_base)
+ /* As in expand_stack, we don't have any way to throw an exception
+ if we can't allocate one measly page -- there's no stack to
+ handle it. For now, abort. */
+ abort ();
+ vp->stack_limit = vp->stack_base + vp->stack_size;
+ vp->max_stack_size = default_max_stack_size;
vp->ip = NULL;
vp->sp = vp->stack_base - 1;
vp->fp = NULL;
}
#undef FUNC_NAME
-#ifdef VM_ENABLE_PRECISE_STACK_GC_SCAN
+static void
+return_unused_stack_to_os (struct scm_vm *vp)
+{
+#if HAVE_SYS_MMAN_H
+  scm_t_uintptr start = (scm_t_uintptr) (vp->sp + 1);
+  scm_t_uintptr end = (scm_t_uintptr) vp->stack_limit;
+  /* The second condition is needed to protect against wrap-around. */
+  if (vp->sp_max_since_gc < vp->stack_limit && vp->sp < vp->sp_max_since_gc)
+    end = (scm_t_uintptr) (vp->sp_max_since_gc + 1);
-/* Mark the VM stack region between its base and its current top. */
-static struct GC_ms_entry *
-vm_stack_mark (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
- struct GC_ms_entry *mark_stack_limit, GC_word env)
+  start = ((start - 1U) | (page_size - 1U)) + 1U; /* round up */
+  end = ((end - 1U) | (page_size - 1U)) + 1U; /* round up */
+
+  /* Return these pages to the OS. The next time they are paged in,
+     they will be zeroed. */
+  if (start < end)
+    {
+      int ret = 0;
+
+      do
+        ret = madvise ((void *) start, end - start, MADV_DONTNEED);
+      while (ret && errno == EAGAIN);  /* errno values are positive;
+                                          -EAGAIN would never match and
+                                          the retry loop would be dead. */
+
+      if (ret)
+        perror ("madvise failed");
+    }
+
+  vp->sp_max_since_gc = vp->sp;
+#endif
+}
+
+#define DEAD_SLOT_MAP_CACHE_SIZE 32U
+struct dead_slot_map_cache_entry
{
- GC_word *word;
- const struct scm_vm *vm;
+ scm_t_uint32 *ip;
+ const scm_t_uint8 *map;
+};
- /* The first word of the VM stack should contain a pointer to the
- corresponding VM. */
- vm = * ((struct scm_vm **) addr);
+struct dead_slot_map_cache
+{
+ struct dead_slot_map_cache_entry entries[DEAD_SLOT_MAP_CACHE_SIZE];
+};
- if (vm == NULL
- || (SCM *) addr != vm->stack_base - 1)
- /* ADDR must be a pointer to a free-list element, which we must ignore
- (see warning in <gc/gc_mark.h>). */
- return mark_stack_ptr;
+static const scm_t_uint8 *
+find_dead_slot_map (scm_t_uint32 *ip, struct dead_slot_map_cache *cache)
+{
+ /* The lower two bits should be zero. FIXME: Use a better hash
+ function; we don't expose scm_raw_hashq currently. */
+ size_t slot = (((scm_t_uintptr) ip) >> 2) % DEAD_SLOT_MAP_CACHE_SIZE;
+ const scm_t_uint8 *map;
- for (word = (GC_word *) vm->stack_base; word <= (GC_word *) vm->sp; word++)
- mark_stack_ptr = GC_MARK_AND_PUSH ((* (GC_word **) word),
- mark_stack_ptr, mark_stack_limit,
- NULL);
+ if (cache->entries[slot].ip == ip)
+ map = cache->entries[slot].map;
+ else
+ {
+ map = scm_find_dead_slot_map_unlocked (ip);
+ cache->entries[slot].ip = ip;
+ cache->entries[slot].map = map;
+ }
- return mark_stack_ptr;
+ return map;
}
-#endif /* VM_ENABLE_PRECISE_STACK_GC_SCAN */
+/* Mark the VM stack region between its base and its current top. */
+struct GC_ms_entry *
+scm_i_vm_mark_stack (struct scm_vm *vp, struct GC_ms_entry *mark_stack_ptr,
+ struct GC_ms_entry *mark_stack_limit)
+{
+ SCM *sp, *fp;
+ /* The first frame will be marked conservatively (without a dead
+ slot map). This is because GC can happen at any point within the
+ hottest activation, due to multiple threads or per-instruction
+ hooks, and providing dead slot maps for all points in a program
+ would take a prohibitive amount of space. */
+ const scm_t_uint8 *dead_slots = NULL;
+ scm_t_uintptr upper = (scm_t_uintptr) GC_greatest_plausible_heap_addr;
+ scm_t_uintptr lower = (scm_t_uintptr) GC_least_plausible_heap_addr;
+ struct dead_slot_map_cache cache;
+
+ memset (&cache, 0, sizeof (cache));
+
+ for (fp = vp->fp, sp = vp->sp; fp; fp = SCM_FRAME_DYNAMIC_LINK (fp))
+ {
+ for (; sp >= &SCM_FRAME_LOCAL (fp, 0); sp--)
+ {
+ SCM elt = *sp;
+ if (SCM_NIMP (elt)
+ && SCM_UNPACK (elt) >= lower && SCM_UNPACK (elt) <= upper)
+ {
+ if (dead_slots)
+ {
+ size_t slot = sp - &SCM_FRAME_LOCAL (fp, 0);
+ if (dead_slots[slot / 8U] & (1U << (slot % 8U)))
+ {
+ /* This value may become dead as a result of GC,
+ so we can't just leave it on the stack. */
+ *sp = SCM_UNBOUND;
+ continue;
+ }
+ }
+
+ mark_stack_ptr = GC_mark_and_push ((void *) elt,
+ mark_stack_ptr,
+ mark_stack_limit,
+ NULL);
+ }
+ }
+ sp = SCM_FRAME_PREVIOUS_SP (fp);
+ /* Inner frames may have a dead slots map for precise marking.
+ Note that there may be other reasons to not have a dead slots
+ map, e.g. if all of the frame's slots below the callee frame
+ are live. */
+ dead_slots = find_dead_slot_map (SCM_FRAME_RETURN_ADDRESS (fp), &cache);
+ }
+ return_unused_stack_to_os (vp);
-SCM
-scm_call_n (SCM proc, SCM *argv, size_t nargs)
+ return mark_stack_ptr;
+}
+
+/* Free the VM stack, as this thread is exiting. */
+void
+scm_i_vm_free_stack (struct scm_vm *vp)
{
- struct scm_vm *vp = scm_the_vm ();
- SCM_CHECK_STACK;
- return vm_engines[vp->engine](vp, proc, argv, nargs);
+ free_stack (vp->stack_base, vp->stack_size);
+ vp->stack_base = vp->stack_limit = NULL;
+ vp->stack_size = 0;
}
-struct scm_vm *
-scm_the_vm (void)
+static void
+vm_expand_stack (struct scm_vm *vp)
{
- scm_i_thread *t = SCM_I_CURRENT_THREAD;
+ scm_t_ptrdiff stack_size = vp->sp + 1 - vp->stack_base;
+
+ if (stack_size > hard_max_stack_size)
+ {
+ /* We have expanded the soft limit to the point that we reached a
+ hard limit. There is nothing sensible to do. */
+ fprintf (stderr, "Hard stack size limit (%zu words) reached; aborting.\n",
+ hard_max_stack_size);
+ abort ();
+ }
+
+ /* FIXME: Prevent GC while we expand the stack, to ensure that a
+ stack marker can trace the stack. */
+ if (stack_size > vp->stack_size)
+ {
+ SCM *old_stack, *new_stack;
+ size_t new_size;
+ scm_t_ptrdiff reloc;
+
+ new_size = vp->stack_size;
+ while (new_size < stack_size)
+ new_size *= 2;
+ old_stack = vp->stack_base;
+ new_stack = expand_stack (vp->stack_base, vp->stack_size, new_size);
+ if (!new_stack)
+ /* It would be nice to throw an exception here, but that is
+ extraordinarily hard. Exceptionally hard, you might say!
+ "throw" is implemented in Scheme, and there may be arbitrary
+ pre-unwind handlers that push on more frames. We will
+ endeavor to do so in the future, but for now we just
+ abort. */
+ abort ();
+
+ vp->stack_base = new_stack;
+ vp->stack_size = new_size;
+ vp->stack_limit = vp->stack_base + new_size;
+ reloc = vp->stack_base - old_stack;
+
+ if (reloc)
+ {
+ SCM *fp;
+ if (vp->fp)
+ vp->fp += reloc;
+ vp->sp += reloc;
+ vp->sp_max_since_gc += reloc;
+ fp = vp->fp;
+ while (fp)
+ {
+ SCM *next_fp = SCM_FRAME_DYNAMIC_LINK (fp);
+ if (next_fp)
+ {
+ next_fp += reloc;
+ SCM_FRAME_SET_DYNAMIC_LINK (fp, next_fp);
+ }
+ fp = next_fp;
+ }
+ }
+ }
+ if (stack_size >= vp->max_stack_size)
+ {
+ /* Expand the soft limit by 256K entries to give us space to
+ handle the error. */
+ vp->max_stack_size += 256 * 1024;
+
+ /* If it's still not big enough... it's quite improbable, but go
+ ahead and set to the full available stack size. */
+ if (vp->max_stack_size < stack_size)
+ vp->max_stack_size = vp->stack_size;
+
+ /* But don't exceed the hard maximum. */
+ if (vp->max_stack_size > hard_max_stack_size)
+ vp->max_stack_size = hard_max_stack_size;
+
+ /* Finally, reset the limit, to catch further overflows. */
+ vp->stack_limit = vp->stack_base + vp->max_stack_size;
+
+ vm_error ("VM: Stack overflow", SCM_UNDEFINED);
+ }
+
+ /* Otherwise continue, with the new enlarged stack. */
+}
+
+static struct scm_vm *
+thread_vm (scm_i_thread *t)
+{
if (SCM_UNLIKELY (!t->vp))
t->vp = make_vm ();
return t->vp;
}
+struct scm_vm *
+scm_the_vm (void)
+{
+ return thread_vm (SCM_I_CURRENT_THREAD);
+}
+
+SCM
+scm_call_n (SCM proc, SCM *argv, size_t nargs)
+{
+ scm_i_thread *thread;
+ struct scm_vm *vp;
+ SCM *base;
+ ptrdiff_t base_frame_size;
+ /* Cached variables. */
+ scm_i_jmp_buf registers; /* used for prompts */
+ size_t i;
+
+ thread = SCM_I_CURRENT_THREAD;
+ vp = thread_vm (thread);
+
+ SCM_CHECK_STACK;
+
+ /* Check that we have enough space: 3 words for the boot continuation,
+ and 3 + nargs for the procedure application. */
+ base_frame_size = 3 + 3 + nargs;
+ vm_push_sp (vp, vp->sp + base_frame_size);
+ base = vp->sp + 1 - base_frame_size;
+
+ /* Since it's possible to receive the arguments on the stack itself,
+ shuffle up the arguments first. */
+ for (i = nargs; i > 0; i--)
+ base[6 + i - 1] = argv[i - 1];
+
+ /* Push the boot continuation, which calls PROC and returns its
+ result(s). */
+ base[0] = SCM_PACK (vp->fp); /* dynamic link */
+ base[1] = SCM_PACK (vp->ip); /* ra */
+ base[2] = vm_boot_continuation;
+ vp->fp = &base[2];
+ vp->ip = (scm_t_uint32 *) vm_boot_continuation_code;
+
+ /* The pending call to PROC. */
+ base[3] = SCM_PACK (vp->fp); /* dynamic link */
+ base[4] = SCM_PACK (vp->ip); /* ra */
+ base[5] = proc;
+ vp->fp = &base[5];
+
+ {
+ int resume = SCM_I_SETJMP (registers);
+
+ if (SCM_UNLIKELY (resume))
+ /* Non-local return. */
+ vm_dispatch_abort_hook (vp);
+
+ return vm_engines[vp->engine](thread, vp, ®isters, resume);
+ }
+}
+
/* Scheme interface */
#define VM_DEFINE_HOOK(n) \
}
#undef FUNC_NAME
-SCM_DEFINE (scm_vm_restore_continuation_hook, "vm-restore-continuation-hook", 0, 0, 0,
- (void),
- "")
-#define FUNC_NAME s_scm_vm_restore_continuation_hook
-{
- VM_DEFINE_HOOK (SCM_VM_RESTORE_CONTINUATION_HOOK);
-}
-#undef FUNC_NAME
-
SCM_DEFINE (scm_vm_trace_level, "vm-trace-level", 0, 0, 0,
(void),
"")
(scm_t_extension_init_func)scm_init_vm_builtins,
NULL);
+ page_size = getpagesize ();
+ /* page_size should be a power of two. */
+ if (page_size & (page_size - 1))
+ abort ();
+
initialize_default_stack_size ();
sym_vm_run = scm_from_latin1_symbol ("vm-run");
vm_builtin_##builtin = scm_i_make_program (vm_builtin_##builtin##_code);
FOR_EACH_VM_BUILTIN (DEFINE_BUILTIN);
#undef DEFINE_BUILTIN
-
-#ifdef VM_ENABLE_PRECISE_STACK_GC_SCAN
- vm_stack_gc_kind =
- GC_new_kind (GC_new_free_list (),
- GC_MAKE_PROC (GC_new_proc (vm_stack_mark), 0),
- 0, 1);
-
-#endif
}
void