X-Git-Url: https://git.hcoop.net/bpt/guile.git/blobdiff_plain/62e16606950f2e9aae6410e90ab3bfff882a3767..eb3d623da57e6d31a58d95f932345fb761f9b701:/libguile/vm.c diff --git a/libguile/vm.c b/libguile/vm.c index b1b594133..0e5983575 100644 --- a/libguile/vm.c +++ b/libguile/vm.c @@ -1,4 +1,4 @@ -/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc. +/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Free Software Foundation, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License @@ -16,6 +16,9 @@ * 02110-1301 USA */ +/* For mremap(2) on GNU/Linux systems. */ +#define _GNU_SOURCE + #if HAVE_CONFIG_H # include #endif @@ -25,6 +28,11 @@ #include #include #include +#include + +#ifdef HAVE_SYS_MMAN_H +#include +#endif #include "libguile/bdw-gc.h" #include @@ -32,14 +40,14 @@ #include "_scm.h" #include "control.h" #include "frames.h" +#include "gc-inline.h" #include "instructions.h" #include "loader.h" #include "programs.h" +#include "simpos.h" #include "vm.h" #include "vm-builtins.h" -#include "private-gc.h" /* scm_getenv_int */ - static int vm_default_engine = SCM_VM_REGULAR_ENGINE; /* Unfortunately we can't snarf these: snarfed things are only loaded up from @@ -50,22 +58,46 @@ static SCM sym_keyword_argument_error; static SCM sym_regular; static SCM sym_debug; +/* The page size. */ +static size_t page_size; + /* The VM has a number of internal assertions that shouldn't normally be necessary, but might be if you think you found a bug in the VM. */ -#define VM_ENABLE_ASSERTIONS +/* #define VM_ENABLE_ASSERTIONS */ -/* #define VM_ENABLE_PARANOID_ASSERTIONS */ +static void vm_expand_stack (struct scm_vm *vp, SCM *new_sp) SCM_NOINLINE; + +/* RESTORE is for the case where we know we have done a PUSH of equal or + greater stack size in the past. Otherwise PUSH is the thing, which + may expand the stack. */ +enum vm_increase_sp_kind { VM_SP_PUSH, VM_SP_RESTORE }; + +static inline void +vm_increase_sp (struct scm_vm *vp, SCM *new_sp, enum vm_increase_sp_kind kind) +{ + if (new_sp <= vp->sp_max_since_gc) + { + vp->sp = new_sp; + return; + } -/* When defined, arrange so that the GC doesn't scan the VM stack beyond its - current SP. This should help avoid excess data retention. See - http://thread.gmane.org/gmane.comp.programming.garbage-collection.boehmgc/3001 - for a discussion. */ -#define VM_ENABLE_PRECISE_STACK_GC_SCAN + if (kind == VM_SP_PUSH && new_sp >= vp->stack_limit) + vm_expand_stack (vp, new_sp); + else + vp->sp_max_since_gc = vp->sp = new_sp; +} -/* Size in SCM objects of the stack reserve. The reserve is used to run - exception handling code in case of a VM stack overflow. */ -#define VM_STACK_RESERVE_SIZE 512 +static inline void +vm_push_sp (struct scm_vm *vp, SCM *new_sp) +{ + vm_increase_sp (vp, new_sp, VM_SP_PUSH); +} +static inline void +vm_restore_sp (struct scm_vm *vp, SCM *new_sp) +{ + vm_increase_sp (vp, new_sp, VM_SP_RESTORE); +} /* @@ -80,17 +112,22 @@ scm_i_vm_cont_print (SCM x, SCM port, scm_print_state *pstate) scm_puts_unlocked (">", port); } -/* In theory, a number of vm instances can be active in the call trace, and we - only want to reify the continuations of those in the current continuation - root. I don't see a nice way to do this -- ideally it would involve dynwinds, - and previous values of the *the-vm* fluid within the current continuation - root. But we don't have access to continuation roots in the dynwind stack. 
- So, just punt for now, we just capture the continuation for the current VM. +int +scm_i_vm_cont_to_frame (SCM cont, struct scm_frame *frame) +{ + struct scm_vm_cont *data = SCM_VM_CONT_DATA (cont); - While I'm on the topic, ideally we could avoid copying the C stack if the - continuation root is inside VM code, and call/cc was invoked within that same - call to vm_run; but that's currently not implemented. - */ + frame->stack_holder = data; + frame->fp_offset = (data->fp + data->reloc) - data->stack_base; + frame->sp_offset = (data->sp + data->reloc) - data->stack_base; + frame->ip = data->ra; + + return 1; +} + +/* Ideally we could avoid copying the C stack if the continuation root + is inside VM code, and call/cc was invoked within that same call to + vm_run. That's currently not implemented. */ SCM scm_i_vm_capture_stack (SCM *stack_base, SCM *fp, SCM *sp, scm_t_uint32 *ra, scm_t_dynstack *dynstack, scm_t_uint32 flags) @@ -111,80 +148,115 @@ scm_i_vm_capture_stack (SCM *stack_base, SCM *fp, SCM *sp, scm_t_uint32 *ra, return scm_cell (scm_tc7_vm_cont, (scm_t_bits)p); } -static void -vm_return_to_continuation (SCM vm, SCM cont, size_t n, SCM *argv) +struct return_to_continuation_data { + struct scm_vm_cont *cp; struct scm_vm *vp; +}; + +/* Called with the GC lock to prevent the stack marker from traversing a + stack in an inconsistent state. */ +static void * +vm_return_to_continuation_inner (void *data_ptr) +{ + struct return_to_continuation_data *data = data_ptr; + struct scm_vm *vp = data->vp; + struct scm_vm_cont *cp = data->cp; + scm_t_ptrdiff reloc; + + /* We know that there is enough space for the continuation, because we + captured it in the past. However there may have been an expansion + since the capture, so we may have to re-link the frame + pointers. */ + reloc = (vp->stack_base - (cp->stack_base - cp->reloc)); + vp->fp = cp->fp + reloc; + memcpy (vp->stack_base, cp->stack_base, cp->stack_size * sizeof (SCM)); + vm_restore_sp (vp, cp->sp + reloc); + + if (reloc) + { + SCM *fp = vp->fp; + while (fp) + { + SCM *next_fp = SCM_FRAME_DYNAMIC_LINK (fp); + if (next_fp) + { + next_fp += reloc; + SCM_FRAME_SET_DYNAMIC_LINK (fp, next_fp); + } + fp = next_fp; + } + } + + return NULL; +} + +static void +vm_return_to_continuation (struct scm_vm *vp, SCM cont, size_t n, SCM *argv) +{ struct scm_vm_cont *cp; SCM *argv_copy; + struct return_to_continuation_data data; argv_copy = alloca (n * sizeof(SCM)); memcpy (argv_copy, argv, n * sizeof(SCM)); - vp = SCM_VM_DATA (vm); cp = SCM_VM_CONT_DATA (cont); - if (vp->stack_size < cp->stack_size + n + 3) - scm_misc_error ("vm-engine", "not enough space to reinstate continuation", - scm_list_2 (vm, cont)); + data.cp = cp; + data.vp = vp; + GC_call_with_alloc_lock (vm_return_to_continuation_inner, &data); - vp->sp = cp->sp; - vp->fp = cp->fp; - memcpy (vp->stack_base, cp->stack_base, cp->stack_size * sizeof (SCM)); + /* Now we have the continuation properly copied over. We just need to + copy the arguments. It is not guaranteed that there is actually + space for the arguments, though, so we have to bump the SP first. */ + vm_push_sp (vp, vp->sp + 3 + n); + /* Now copy on an empty frame and the return values, as the + continuation expects. */ { + SCM *base = vp->sp + 1 - 3 - n; size_t i; - /* Push on an empty frame, as the continuation expects. */ for (i = 0; i < 3; i++) - { - vp->sp++; - *vp->sp = SCM_BOOL_F; - } + base[i] = SCM_BOOL_F; - /* Push the return values. 
*/ for (i = 0; i < n; i++) - { - vp->sp++; - *vp->sp = argv_copy[i]; - } - vp->ip = cp->ra; + base[i + 3] = argv_copy[i]; } + + vp->ip = cp->ra; } +static struct scm_vm * thread_vm (scm_i_thread *t); SCM scm_i_capture_current_stack (void) { scm_i_thread *thread; - SCM vm; struct scm_vm *vp; thread = SCM_I_CURRENT_THREAD; - vm = scm_the_vm (); - vp = SCM_VM_DATA (vm); + vp = thread_vm (thread); return scm_i_vm_capture_stack (vp->stack_base, vp->fp, vp->sp, vp->ip, scm_dynstack_capture_all (&thread->dynstack), 0); } -static void vm_dispatch_apply_hook (SCM vm) SCM_NOINLINE; -static void vm_dispatch_push_continuation_hook (SCM vm) SCM_NOINLINE; -static void vm_dispatch_pop_continuation_hook (SCM vm, SCM *old_fp) SCM_NOINLINE; -static void vm_dispatch_next_hook (SCM vm) SCM_NOINLINE; -static void vm_dispatch_abort_hook (SCM vm) SCM_NOINLINE; -static void vm_dispatch_restore_continuation_hook (SCM vm) SCM_NOINLINE; +static void vm_dispatch_apply_hook (struct scm_vm *vp) SCM_NOINLINE; +static void vm_dispatch_push_continuation_hook (struct scm_vm *vp) SCM_NOINLINE; +static void vm_dispatch_pop_continuation_hook (struct scm_vm *vp, SCM *old_fp) SCM_NOINLINE; +static void vm_dispatch_next_hook (struct scm_vm *vp) SCM_NOINLINE; +static void vm_dispatch_abort_hook (struct scm_vm *vp) SCM_NOINLINE; static void -vm_dispatch_hook (SCM vm, int hook_num, SCM *argv, int n) +vm_dispatch_hook (struct scm_vm *vp, int hook_num, SCM *argv, int n) { - struct scm_vm *vp; SCM hook; struct scm_frame c_frame; scm_t_cell *frame; int saved_trace_level; - vp = SCM_VM_DATA (vm); hook = vp->hooks[hook_num]; if (SCM_LIKELY (scm_is_false (hook)) @@ -202,17 +274,16 @@ vm_dispatch_hook (SCM vm, int hook_num, SCM *argv, int n) while the stack frame represented by the frame object is visible, so it seems reasonable to limit the lifetime of frame objects. */ - c_frame.stack_holder = vm; - c_frame.fp = vp->fp; - c_frame.sp = vp->sp; + c_frame.stack_holder = vp; + c_frame.fp_offset = vp->fp - vp->stack_base; + c_frame.sp_offset = vp->sp - vp->stack_base; c_frame.ip = vp->ip; - c_frame.offset = 0; /* Arrange for FRAME to be 8-byte aligned, like any other cell. 
*/ frame = alloca (sizeof (*frame) + 8); frame = (scm_t_cell *) ROUND_UP ((scm_t_uintptr) frame, 8UL); - frame->word_0 = SCM_PACK (scm_tc7_frame); + frame->word_0 = SCM_PACK (scm_tc7_frame | (SCM_VM_FRAME_KIND_VM << 8)); frame->word_1 = SCM_PACK_POINTER (&c_frame); if (n == 0) @@ -243,43 +314,39 @@ vm_dispatch_hook (SCM vm, int hook_num, SCM *argv, int n) } static void -vm_dispatch_apply_hook (SCM vm) +vm_dispatch_apply_hook (struct scm_vm *vp) { - return vm_dispatch_hook (vm, SCM_VM_APPLY_HOOK, NULL, 0); + return vm_dispatch_hook (vp, SCM_VM_APPLY_HOOK, NULL, 0); } -static void vm_dispatch_push_continuation_hook (SCM vm) +static void vm_dispatch_push_continuation_hook (struct scm_vm *vp) { - return vm_dispatch_hook (vm, SCM_VM_PUSH_CONTINUATION_HOOK, NULL, 0); + return vm_dispatch_hook (vp, SCM_VM_PUSH_CONTINUATION_HOOK, NULL, 0); } -static void vm_dispatch_pop_continuation_hook (SCM vm, SCM *old_fp) +static void vm_dispatch_pop_continuation_hook (struct scm_vm *vp, SCM *old_fp) { - struct scm_vm *vp = SCM_VM_DATA (vm); - return vm_dispatch_hook (vm, SCM_VM_POP_CONTINUATION_HOOK, + return vm_dispatch_hook (vp, SCM_VM_POP_CONTINUATION_HOOK, &SCM_FRAME_LOCAL (old_fp, 1), SCM_FRAME_NUM_LOCALS (old_fp, vp->sp) - 1); } -static void vm_dispatch_next_hook (SCM vm) +static void vm_dispatch_next_hook (struct scm_vm *vp) { - return vm_dispatch_hook (vm, SCM_VM_NEXT_HOOK, NULL, 0); + return vm_dispatch_hook (vp, SCM_VM_NEXT_HOOK, NULL, 0); } -static void vm_dispatch_abort_hook (SCM vm) +static void vm_dispatch_abort_hook (struct scm_vm *vp) { - struct scm_vm *vp = SCM_VM_DATA (vm); - return vm_dispatch_hook (vm, SCM_VM_ABORT_CONTINUATION_HOOK, + return vm_dispatch_hook (vp, SCM_VM_ABORT_CONTINUATION_HOOK, &SCM_FRAME_LOCAL (vp->fp, 1), SCM_FRAME_NUM_LOCALS (vp->fp, vp->sp) - 1); } -static void vm_dispatch_restore_continuation_hook (SCM vm) -{ - return vm_dispatch_hook (vm, SCM_VM_RESTORE_CONTINUATION_HOOK, NULL, 0); -} static void -vm_abort (SCM vm, SCM tag, size_t nstack, SCM *stack_args, SCM tail, SCM *sp, +vm_abort (struct scm_vm *vp, SCM tag, + size_t nstack, SCM *stack_args, SCM tail, SCM *sp, scm_i_jmp_buf *current_registers) SCM_NORETURN; static void -vm_abort (SCM vm, SCM tag, size_t nstack, SCM *stack_args, SCM tail, SCM *sp, +vm_abort (struct scm_vm *vp, SCM tag, + size_t nstack, SCM *stack_args, SCM tail, SCM *sp, scm_i_jmp_buf *current_registers) { size_t i; @@ -297,60 +364,76 @@ vm_abort (SCM vm, SCM tag, size_t nstack, SCM *stack_args, SCM tail, SCM *sp, for (; i < nstack + tail_len; i++, tail = scm_cdr (tail)) argv[i] = scm_car (tail); - /* FIXME: NULLSTACK (SCM_VM_DATA (vp)->sp - sp) */ - SCM_VM_DATA (vm)->sp = sp; + vp->sp = sp; - scm_c_abort (vm, tag, nstack + tail_len, argv, current_registers); + scm_c_abort (vp, tag, nstack + tail_len, argv, current_registers); } -static void -vm_reinstate_partial_continuation (SCM vm, SCM cont, size_t n, SCM *argv, - scm_t_dynstack *dynstack, - scm_i_jmp_buf *registers) +struct vm_reinstate_partial_continuation_data { struct scm_vm *vp; struct scm_vm_cont *cp; - SCM *argv_copy, *base; scm_t_ptrdiff reloc; - size_t i; +}; - argv_copy = alloca (n * sizeof(SCM)); - memcpy (argv_copy, argv, n * sizeof(SCM)); +static void * +vm_reinstate_partial_continuation_inner (void *data_ptr) +{ + struct vm_reinstate_partial_continuation_data *data = data_ptr; + struct scm_vm *vp = data->vp; + struct scm_vm_cont *cp = data->cp; + SCM *base; + scm_t_ptrdiff reloc; - vp = SCM_VM_DATA (vm); - cp = SCM_VM_CONT_DATA (cont); base = SCM_FRAME_LOCALS_ADDRESS (vp->fp); 
reloc = cp->reloc + (base - cp->stack_base); -#define RELOC(scm_p) \ - (((SCM *) (scm_p)) + reloc) - - if ((base - vp->stack_base) + cp->stack_size + n + 1 > vp->stack_size) - scm_misc_error ("vm-engine", - "not enough space to instate partial continuation", - scm_list_2 (vm, cont)); - memcpy (base, cp->stack_base, cp->stack_size * sizeof (SCM)); + vp->fp = cp->fp + reloc; + vp->ip = cp->ra; + /* now relocate frame pointers */ { SCM *fp; - for (fp = RELOC (cp->fp); - SCM_FRAME_LOWER_ADDRESS (fp) > base; + for (fp = vp->fp; + SCM_FRAME_LOWER_ADDRESS (fp) >= base; fp = SCM_FRAME_DYNAMIC_LINK (fp)) - SCM_FRAME_SET_DYNAMIC_LINK (fp, RELOC (SCM_FRAME_DYNAMIC_LINK (fp))); + SCM_FRAME_SET_DYNAMIC_LINK (fp, SCM_FRAME_DYNAMIC_LINK (fp) + reloc); } - vp->sp = base - 1 + cp->stack_size; - vp->fp = RELOC (cp->fp); - vp->ip = cp->ra; + data->reloc = reloc; + + return NULL; +} + +static void +vm_reinstate_partial_continuation (struct scm_vm *vp, SCM cont, + size_t n, SCM *argv, + scm_t_dynstack *dynstack, + scm_i_jmp_buf *registers) +{ + struct vm_reinstate_partial_continuation_data data; + struct scm_vm_cont *cp; + SCM *argv_copy; + scm_t_ptrdiff reloc; + size_t i; + + argv_copy = alloca (n * sizeof(SCM)); + memcpy (argv_copy, argv, n * sizeof(SCM)); + + cp = SCM_VM_CONT_DATA (cont); + + vm_push_sp (vp, SCM_FRAME_LOCALS_ADDRESS (vp->fp) + cp->stack_size + n - 1); + + data.vp = vp; + data.cp = cp; + GC_call_with_alloc_lock (vm_reinstate_partial_continuation_inner, &data); + reloc = data.reloc; /* Push the arguments. */ for (i = 0; i < n; i++) - { - vp->sp++; - *vp->sp = argv_copy[i]; - } + vp->sp[i + 1 - n] = argv_copy[i]; /* The prompt captured a slice of the dynamic stack. Here we wind those entries onto the current thread's stack. We also have to @@ -370,37 +453,6 @@ vm_reinstate_partial_continuation (SCM vm, SCM cont, size_t n, SCM *argv, scm_dynstack_wind_1 (dynstack, walk); } } -#undef RELOC -} - - -/* - * VM Internal functions - */ - -void -scm_i_vm_print (SCM x, SCM port, scm_print_state *pstate) -{ - const struct scm_vm *vm; - - vm = SCM_VM_DATA (x); - - scm_puts_unlocked ("#engine) - { - case SCM_VM_REGULAR_ENGINE: - scm_puts_unlocked ("regular-engine ", port); - break; - - case SCM_VM_DEBUG_ENGINE: - scm_puts_unlocked ("debug-engine ", port); - break; - - default: - scm_puts_unlocked ("unknown-engine ", port); - } - scm_uintprint (SCM_UNPACK (x), 16, port); - scm_puts_unlocked (">", port); } @@ -410,8 +462,8 @@ scm_i_vm_print (SCM x, SCM port, scm_print_state *pstate) static void vm_error (const char *msg, SCM arg) SCM_NORETURN; static void vm_error_bad_instruction (scm_t_uint32 inst) SCM_NORETURN SCM_NOINLINE; -static void vm_error_unbound (SCM proc, SCM sym) SCM_NORETURN SCM_NOINLINE; -static void vm_error_unbound_fluid (SCM proc, SCM fluid) SCM_NORETURN SCM_NOINLINE; +static void vm_error_unbound (SCM sym) SCM_NORETURN SCM_NOINLINE; +static void vm_error_unbound_fluid (SCM fluid) SCM_NORETURN SCM_NOINLINE; static void vm_error_not_a_variable (const char *func_name, SCM x) SCM_NORETURN SCM_NOINLINE; static void vm_error_apply_to_non_list (SCM x) SCM_NORETURN SCM_NOINLINE; static void vm_error_kwargs_length_not_even (SCM proc) SCM_NORETURN SCM_NOINLINE; @@ -420,12 +472,13 @@ static void vm_error_kwargs_unrecognized_keyword (SCM proc, SCM kw) SCM_NORETURN static void vm_error_too_many_args (int nargs) SCM_NORETURN SCM_NOINLINE; static void vm_error_wrong_num_args (SCM proc) SCM_NORETURN SCM_NOINLINE; static void vm_error_wrong_type_apply (SCM proc) SCM_NORETURN SCM_NOINLINE; -static void 
vm_error_stack_overflow (struct scm_vm *vp) SCM_NORETURN SCM_NOINLINE; static void vm_error_stack_underflow (void) SCM_NORETURN SCM_NOINLINE; static void vm_error_improper_list (SCM x) SCM_NORETURN SCM_NOINLINE; static void vm_error_not_a_pair (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE; static void vm_error_not_a_bytevector (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE; static void vm_error_not_a_struct (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE; +static void vm_error_not_a_vector (const char *subr, SCM v) SCM_NORETURN SCM_NOINLINE; +static void vm_error_out_of_range (const char *subr, SCM k) SCM_NORETURN SCM_NOINLINE; static void vm_error_no_values (void) SCM_NORETURN SCM_NOINLINE; static void vm_error_not_enough_values (void) SCM_NORETURN SCM_NOINLINE; static void vm_error_wrong_number_of_values (scm_t_uint32 expected) SCM_NORETURN SCM_NOINLINE; @@ -448,17 +501,17 @@ vm_error_bad_instruction (scm_t_uint32 inst) } static void -vm_error_unbound (SCM proc, SCM sym) +vm_error_unbound (SCM sym) { - scm_error_scm (scm_misc_error_key, proc, + scm_error_scm (scm_misc_error_key, SCM_BOOL_F, scm_from_latin1_string ("Unbound variable: ~s"), scm_list_1 (sym), SCM_BOOL_F); } static void -vm_error_unbound_fluid (SCM proc, SCM fluid) +vm_error_unbound_fluid (SCM fluid) { - scm_error_scm (scm_misc_error_key, proc, + scm_error_scm (scm_misc_error_key, SCM_BOOL_F, scm_from_latin1_string ("Unbound fluid: ~s"), scm_list_1 (fluid), SCM_BOOL_F); } @@ -520,20 +573,6 @@ vm_error_wrong_type_apply (SCM proc) scm_list_1 (proc), scm_list_1 (proc)); } -static void -vm_error_stack_overflow (struct scm_vm *vp) -{ - if (vp->stack_limit < vp->stack_base + vp->stack_size) - /* There are VM_STACK_RESERVE_SIZE bytes left. Make them available so - that `throw' below can run on this VM. */ - vp->stack_limit = vp->stack_base + vp->stack_size; - else - /* There is no space left on the stack. FIXME: Do something more - sensible here! 
*/ - abort (); - vm_error ("VM: Stack overflow", SCM_UNDEFINED); -} - static void vm_error_stack_underflow (void) { @@ -564,6 +603,19 @@ vm_error_not_a_struct (const char *subr, SCM x) scm_wrong_type_arg_msg (subr, 1, x, "struct"); } +static void +vm_error_not_a_vector (const char *subr, SCM x) +{ + scm_wrong_type_arg_msg (subr, 1, x, "vector"); +} + +static void +vm_error_out_of_range (const char *subr, SCM k) +{ + scm_to_size_t (k); + scm_out_of_range (subr, k); +} + static void vm_error_no_values (void) { @@ -715,18 +767,6 @@ scm_i_call_with_current_continuation (SCM proc) * VM */ -#define VM_MIN_STACK_SIZE (1024) -#define VM_DEFAULT_STACK_SIZE (256 * 1024) -static size_t vm_stack_size = VM_DEFAULT_STACK_SIZE; - -static void -initialize_default_stack_size (void) -{ - int size = scm_getenv_int ("GUILE_STACK_SIZE", vm_stack_size); - if (size >= VM_MIN_STACK_SIZE) - vm_stack_size = size; -} - #define VM_NAME vm_regular_engine #define VM_USE_HOOKS 0 #define FUNC_NAME "vm-regular-engine" @@ -743,44 +783,105 @@ initialize_default_stack_size (void) #undef VM_USE_HOOKS #undef VM_NAME -typedef SCM (*scm_t_vm_engine) (SCM vm, SCM program, SCM *argv, size_t nargs); +typedef SCM (*scm_t_vm_engine) (scm_i_thread *current_thread, struct scm_vm *vp, + scm_i_jmp_buf *registers, int resume); static const scm_t_vm_engine vm_engines[SCM_VM_NUM_ENGINES] = { vm_regular_engine, vm_debug_engine }; -#ifdef VM_ENABLE_PRECISE_STACK_GC_SCAN +static SCM* +allocate_stack (size_t size) +#define FUNC_NAME "make_vm" +{ + void *ret; + + if (size >= ((size_t) -1) / sizeof (SCM)) + abort (); -/* The GC "kind" for the VM stack. */ -static int vm_stack_gc_kind; + size *= sizeof (SCM); +#if HAVE_SYS_MMAN_H + ret = mmap (NULL, size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (ret == MAP_FAILED) + ret = NULL; +#else + ret = malloc (size); #endif -static SCM -make_vm (void) -#define FUNC_NAME "make_vm" + if (!ret) + { + perror ("allocate_stack failed"); + return NULL; + } + + return (SCM *) ret; +} +#undef FUNC_NAME + +static void +free_stack (SCM *stack, size_t size) { - int i; - struct scm_vm *vp; + size *= sizeof (SCM); - vp = scm_gc_malloc (sizeof (struct scm_vm), "vm"); +#if HAVE_SYS_MMAN_H + munmap (stack, size); +#else + free (stack); +#endif +} + +static SCM* +expand_stack (SCM *old_stack, size_t old_size, size_t new_size) +#define FUNC_NAME "expand_stack" +{ +#if defined MREMAP_MAYMOVE + void *new_stack; + + if (new_size >= ((size_t) -1) / sizeof (SCM)) + abort (); - vp->stack_size= vm_stack_size; + old_size *= sizeof (SCM); + new_size *= sizeof (SCM); -#ifdef VM_ENABLE_PRECISE_STACK_GC_SCAN - vp->stack_base = (SCM *) - GC_generic_malloc (vp->stack_size * sizeof (SCM), vm_stack_gc_kind); + new_stack = mremap (old_stack, old_size, new_size, MREMAP_MAYMOVE); + if (new_stack == MAP_FAILED) + return NULL; - /* Keep a pointer to VP so that `vm_stack_mark ()' can know what the stack - top is. 
*/ - *vp->stack_base = SCM_PACK_POINTER (vp); - vp->stack_base++; - vp->stack_size--; + return (SCM *) new_stack; #else - vp->stack_base = scm_gc_malloc (vp->stack_size * sizeof (SCM), - "stack-base"); + SCM *new_stack; + + new_stack = allocate_stack (new_size); + if (!new_stack) + return NULL; + + memcpy (new_stack, old_stack, old_size * sizeof (SCM)); + free_stack (old_stack, old_size); + + return new_stack; #endif +} +#undef FUNC_NAME + +static struct scm_vm * +make_vm (void) +#define FUNC_NAME "make_vm" +{ + int i; + struct scm_vm *vp; - vp->stack_limit = vp->stack_base + vp->stack_size - VM_STACK_RESERVE_SIZE; + vp = scm_gc_malloc (sizeof (struct scm_vm), "vm"); + + vp->stack_size = page_size / sizeof (SCM); + vp->stack_base = allocate_stack (vp->stack_size); + if (!vp->stack_base) + /* As in expand_stack, we don't have any way to throw an exception + if we can't allocate one measely page -- there's no stack to + handle it. For now, abort. */ + abort (); + vp->stack_limit = vp->stack_base + vp->stack_size; + vp->overflow_handler_stack = SCM_EOL; vp->ip = NULL; vp->sp = vp->stack_base - 1; vp->fp = NULL; @@ -788,126 +889,389 @@ make_vm (void) vp->trace_level = 0; for (i = 0; i < SCM_VM_NUM_HOOKS; i++) vp->hooks[i] = SCM_BOOL_F; - return scm_cell (scm_tc7_vm, (scm_t_bits)vp); + + return vp; } #undef FUNC_NAME -#ifdef VM_ENABLE_PRECISE_STACK_GC_SCAN +static void +return_unused_stack_to_os (struct scm_vm *vp) +{ +#if HAVE_SYS_MMAN_H + scm_t_uintptr start = (scm_t_uintptr) (vp->sp + 1); + scm_t_uintptr end = (scm_t_uintptr) vp->stack_limit; + /* The second condition is needed to protect against wrap-around. */ + if (vp->sp_max_since_gc < vp->stack_limit && vp->sp < vp->sp_max_since_gc) + end = (scm_t_uintptr) (vp->sp_max_since_gc + 1); + + start = ((start - 1U) | (page_size - 1U)) + 1U; /* round up */ + end = ((end - 1U) | (page_size - 1U)) + 1U; /* round up */ -/* Mark the VM stack region between its base and its current top. */ -static struct GC_ms_entry * -vm_stack_mark (GC_word *addr, struct GC_ms_entry *mark_stack_ptr, - struct GC_ms_entry *mark_stack_limit, GC_word env) + /* Return these pages to the OS. The next time they are paged in, + they will be zeroed. */ + if (start < end) + { + int ret = 0; + + do + ret = madvise ((void *) start, end - start, MADV_DONTNEED); + while (ret && errno == -EAGAIN); + + if (ret) + perror ("madvise failed"); + } + + vp->sp_max_since_gc = vp->sp; +#endif +} + +#define DEAD_SLOT_MAP_CACHE_SIZE 32U +struct dead_slot_map_cache_entry { - GC_word *word; - const struct scm_vm *vm; + scm_t_uint32 *ip; + const scm_t_uint8 *map; +}; - /* The first word of the VM stack should contain a pointer to the - corresponding VM. */ - vm = * ((struct scm_vm **) addr); +struct dead_slot_map_cache +{ + struct dead_slot_map_cache_entry entries[DEAD_SLOT_MAP_CACHE_SIZE]; +}; - if (vm == NULL - || (SCM *) addr != vm->stack_base - 1) - /* ADDR must be a pointer to a free-list element, which we must ignore - (see warning in ). */ - return mark_stack_ptr; +static const scm_t_uint8 * +find_dead_slot_map (scm_t_uint32 *ip, struct dead_slot_map_cache *cache) +{ + /* The lower two bits should be zero. FIXME: Use a better hash + function; we don't expose scm_raw_hashq currently. 
*/ + size_t slot = (((scm_t_uintptr) ip) >> 2) % DEAD_SLOT_MAP_CACHE_SIZE; + const scm_t_uint8 *map; - for (word = (GC_word *) vm->stack_base; word <= (GC_word *) vm->sp; word++) - mark_stack_ptr = GC_MARK_AND_PUSH ((* (GC_word **) word), - mark_stack_ptr, mark_stack_limit, - NULL); + if (cache->entries[slot].ip == ip) + map = cache->entries[slot].map; + else + { + map = scm_find_dead_slot_map_unlocked (ip); + cache->entries[slot].ip = ip; + cache->entries[slot].map = map; + } - return mark_stack_ptr; + return map; } -#endif /* VM_ENABLE_PRECISE_STACK_GC_SCAN */ +/* Mark the VM stack region between its base and its current top. */ +struct GC_ms_entry * +scm_i_vm_mark_stack (struct scm_vm *vp, struct GC_ms_entry *mark_stack_ptr, + struct GC_ms_entry *mark_stack_limit) +{ + SCM *sp, *fp; + /* The first frame will be marked conservatively (without a dead + slot map). This is because GC can happen at any point within the + hottest activation, due to multiple threads or per-instruction + hooks, and providing dead slot maps for all points in a program + would take a prohibitive amount of space. */ + const scm_t_uint8 *dead_slots = NULL; + scm_t_uintptr upper = (scm_t_uintptr) GC_greatest_plausible_heap_addr; + scm_t_uintptr lower = (scm_t_uintptr) GC_least_plausible_heap_addr; + struct dead_slot_map_cache cache; + + memset (&cache, 0, sizeof (cache)); + + for (fp = vp->fp, sp = vp->sp; fp; fp = SCM_FRAME_DYNAMIC_LINK (fp)) + { + for (; sp >= &SCM_FRAME_LOCAL (fp, 0); sp--) + { + SCM elt = *sp; + if (SCM_NIMP (elt) + && SCM_UNPACK (elt) >= lower && SCM_UNPACK (elt) <= upper) + { + if (dead_slots) + { + size_t slot = sp - &SCM_FRAME_LOCAL (fp, 0); + if (dead_slots[slot / 8U] & (1U << (slot % 8U))) + { + /* This value may become dead as a result of GC, + so we can't just leave it on the stack. */ + *sp = SCM_UNSPECIFIED; + continue; + } + } + + mark_stack_ptr = GC_mark_and_push ((void *) elt, + mark_stack_ptr, + mark_stack_limit, + NULL); + } + } + sp = SCM_FRAME_PREVIOUS_SP (fp); + /* Inner frames may have a dead slots map for precise marking. + Note that there may be other reasons to not have a dead slots + map, e.g. if all of the frame's slots below the callee frame + are live. */ + dead_slots = find_dead_slot_map (SCM_FRAME_RETURN_ADDRESS (fp), &cache); + } + return_unused_stack_to_os (vp); -SCM -scm_c_vm_run (SCM vm, SCM program, SCM *argv, int nargs) + return mark_stack_ptr; +} + +/* Free the VM stack, as this thread is exiting. 
*/ +void +scm_i_vm_free_stack (struct scm_vm *vp) { - struct scm_vm *vp = SCM_VM_DATA (vm); - SCM_CHECK_STACK; - return vm_engines[vp->engine](vm, program, argv, nargs); + free_stack (vp->stack_base, vp->stack_size); + vp->stack_base = vp->stack_limit = NULL; + vp->stack_size = 0; } -/* Scheme interface */ +struct vm_expand_stack_data +{ + struct scm_vm *vp; + size_t stack_size; + SCM *new_sp; +}; -SCM_DEFINE (scm_the_vm, "the-vm", 0, 0, 0, - (void), - "Return the current thread's VM.") -#define FUNC_NAME s_scm_the_vm +static void * +vm_expand_stack_inner (void *data_ptr) { - scm_i_thread *t = SCM_I_CURRENT_THREAD; + struct vm_expand_stack_data *data = data_ptr; + + struct scm_vm *vp = data->vp; + SCM *old_stack, *new_stack; + size_t new_size; + scm_t_ptrdiff reloc; - if (SCM_UNLIKELY (scm_is_false (t->vm))) - t->vm = make_vm (); + new_size = vp->stack_size; + while (new_size < data->stack_size) + new_size *= 2; + old_stack = vp->stack_base; - return t->vm; + new_stack = expand_stack (vp->stack_base, vp->stack_size, new_size); + if (!new_stack) + return NULL; + + vp->stack_base = new_stack; + vp->stack_size = new_size; + vp->stack_limit = vp->stack_base + new_size; + reloc = vp->stack_base - old_stack; + + if (reloc) + { + SCM *fp; + if (vp->fp) + vp->fp += reloc; + data->new_sp += reloc; + fp = vp->fp; + while (fp) + { + SCM *next_fp = SCM_FRAME_DYNAMIC_LINK (fp); + if (next_fp) + { + next_fp += reloc; + SCM_FRAME_SET_DYNAMIC_LINK (fp, next_fp); + } + fp = next_fp; + } + } + + return new_stack; } -#undef FUNC_NAME +static scm_t_ptrdiff +current_overflow_size (struct scm_vm *vp) +{ + if (scm_is_pair (vp->overflow_handler_stack)) + return scm_to_ptrdiff_t (scm_caar (vp->overflow_handler_stack)); + return -1; +} -SCM_DEFINE (scm_vm_p, "vm?", 1, 0, 0, - (SCM obj), - "") -#define FUNC_NAME s_scm_vm_p +static int +should_handle_stack_overflow (struct scm_vm *vp, scm_t_ptrdiff stack_size) { - return scm_from_bool (SCM_VM_P (obj)); + scm_t_ptrdiff overflow_size = current_overflow_size (vp); + return overflow_size >= 0 && stack_size >= overflow_size; } -#undef FUNC_NAME -SCM_DEFINE (scm_make_vm, "make-vm", 0, 0, 0, - (void), - "") -#define FUNC_NAME s_scm_make_vm, +static void +reset_stack_limit (struct scm_vm *vp) { - return make_vm (); + if (should_handle_stack_overflow (vp, vp->stack_size)) + vp->stack_limit = vp->stack_base + current_overflow_size (vp); + else + vp->stack_limit = vp->stack_base + vp->stack_size; } -#undef FUNC_NAME -SCM_DEFINE (scm_vm_ip, "vm:ip", 1, 0, 0, - (SCM vm), - "") -#define FUNC_NAME s_scm_vm_ip +struct overflow_handler_data { - SCM_VALIDATE_VM (1, vm); - return scm_from_unsigned_integer ((scm_t_bits) SCM_VM_DATA (vm)->ip); + struct scm_vm *vp; + SCM overflow_handler_stack; +}; + +static void +wind_overflow_handler (void *ptr) +{ + struct overflow_handler_data *data = ptr; + + data->vp->overflow_handler_stack = data->overflow_handler_stack; + + reset_stack_limit (data->vp); } -#undef FUNC_NAME -SCM_DEFINE (scm_vm_sp, "vm:sp", 1, 0, 0, - (SCM vm), - "") -#define FUNC_NAME s_scm_vm_sp +static void +unwind_overflow_handler (void *ptr) { - SCM_VALIDATE_VM (1, vm); - return scm_from_unsigned_integer ((scm_t_bits) SCM_VM_DATA (vm)->sp); + struct overflow_handler_data *data = ptr; + + data->vp->overflow_handler_stack = scm_cdr (data->overflow_handler_stack); + + reset_stack_limit (data->vp); } -#undef FUNC_NAME -SCM_DEFINE (scm_vm_fp, "vm:fp", 1, 0, 0, - (SCM vm), - "") -#define FUNC_NAME s_scm_vm_fp +static void +vm_expand_stack (struct scm_vm *vp, SCM *new_sp) { - 
SCM_VALIDATE_VM (1, vm); - return scm_from_unsigned_integer ((scm_t_bits) SCM_VM_DATA (vm)->fp); + scm_t_ptrdiff stack_size = new_sp + 1 - vp->stack_base; + + if (stack_size > vp->stack_size) + { + struct vm_expand_stack_data data; + + data.vp = vp; + data.stack_size = stack_size; + data.new_sp = new_sp; + + if (!GC_call_with_alloc_lock (vm_expand_stack_inner, &data)) + /* Throw an unwind-only exception. */ + scm_report_stack_overflow (); + + new_sp = data.new_sp; + } + + vp->sp_max_since_gc = vp->sp = new_sp; + + if (should_handle_stack_overflow (vp, stack_size)) + { + SCM more_stack, new_limit; + + struct overflow_handler_data data; + data.vp = vp; + data.overflow_handler_stack = vp->overflow_handler_stack; + + scm_dynwind_begin (SCM_F_DYNWIND_REWINDABLE); + + scm_dynwind_rewind_handler (unwind_overflow_handler, &data, + SCM_F_WIND_EXPLICITLY); + scm_dynwind_unwind_handler (wind_overflow_handler, &data, + SCM_F_WIND_EXPLICITLY); + + /* Call the overflow handler. */ + more_stack = scm_call_0 (scm_cdar (data.overflow_handler_stack)); + + /* If the overflow handler returns, its return value should be an + integral number of words from the outer stack limit to transfer + to the inner limit. */ + if (scm_to_ptrdiff_t (more_stack) <= 0) + scm_out_of_range (NULL, more_stack); + new_limit = scm_sum (scm_caar (data.overflow_handler_stack), more_stack); + if (scm_is_pair (scm_cdr (data.overflow_handler_stack))) + new_limit = scm_min (new_limit, + scm_caadr (data.overflow_handler_stack)); + + /* Ensure the new limit is in range. */ + scm_to_ptrdiff_t (new_limit); + + /* Increase the limit that we will restore. */ + scm_set_car_x (scm_car (data.overflow_handler_stack), new_limit); + + scm_dynwind_end (); + + /* Recurse */ + return vm_expand_stack (vp, new_sp); + } } -#undef FUNC_NAME + +static struct scm_vm * +thread_vm (scm_i_thread *t) +{ + if (SCM_UNLIKELY (!t->vp)) + t->vp = make_vm (); + + return t->vp; +} + +struct scm_vm * +scm_the_vm (void) +{ + return thread_vm (SCM_I_CURRENT_THREAD); +} + +SCM +scm_call_n (SCM proc, SCM *argv, size_t nargs) +{ + scm_i_thread *thread; + struct scm_vm *vp; + SCM *base; + ptrdiff_t base_frame_size; + /* Cached variables. */ + scm_i_jmp_buf registers; /* used for prompts */ + size_t i; + + thread = SCM_I_CURRENT_THREAD; + vp = thread_vm (thread); + + SCM_CHECK_STACK; + + /* Check that we have enough space: 3 words for the boot continuation, + and 3 + nargs for the procedure application. */ + base_frame_size = 3 + 3 + nargs; + vm_push_sp (vp, vp->sp + base_frame_size); + base = vp->sp + 1 - base_frame_size; + + /* Since it's possible to receive the arguments on the stack itself, + shuffle up the arguments first. */ + for (i = nargs; i > 0; i--) + base[6 + i - 1] = argv[i - 1]; + + /* Push the boot continuation, which calls PROC and returns its + result(s). */ + base[0] = SCM_PACK (vp->fp); /* dynamic link */ + base[1] = SCM_PACK (vp->ip); /* ra */ + base[2] = vm_boot_continuation; + vp->fp = &base[2]; + vp->ip = (scm_t_uint32 *) vm_boot_continuation_code; + + /* The pending call to PROC. */ + base[3] = SCM_PACK (vp->fp); /* dynamic link */ + base[4] = SCM_PACK (vp->ip); /* ra */ + base[5] = proc; + vp->fp = &base[5]; + + { + int resume = SCM_I_SETJMP (registers); + + if (SCM_UNLIKELY (resume)) + { + scm_gc_after_nonlocal_exit (); + /* Non-local return. 
*/ + vm_dispatch_abort_hook (vp); + } + + return vm_engines[vp->engine](thread, vp, ®isters, resume); + } +} + +/* Scheme interface */ #define VM_DEFINE_HOOK(n) \ { \ struct scm_vm *vp; \ - SCM_VALIDATE_VM (1, vm); \ - vp = SCM_VM_DATA (vm); \ + vp = scm_the_vm (); \ if (scm_is_false (vp->hooks[n])) \ vp->hooks[n] = scm_make_hook (SCM_I_MAKINUM (1)); \ return vp->hooks[n]; \ } -SCM_DEFINE (scm_vm_apply_hook, "vm-apply-hook", 1, 0, 0, - (SCM vm), +SCM_DEFINE (scm_vm_apply_hook, "vm-apply-hook", 0, 0, 0, + (void), "") #define FUNC_NAME s_scm_vm_apply_hook { @@ -915,8 +1279,8 @@ SCM_DEFINE (scm_vm_apply_hook, "vm-apply-hook", 1, 0, 0, } #undef FUNC_NAME -SCM_DEFINE (scm_vm_push_continuation_hook, "vm-push-continuation-hook", 1, 0, 0, - (SCM vm), +SCM_DEFINE (scm_vm_push_continuation_hook, "vm-push-continuation-hook", 0, 0, 0, + (void), "") #define FUNC_NAME s_scm_vm_push_continuation_hook { @@ -924,8 +1288,8 @@ SCM_DEFINE (scm_vm_push_continuation_hook, "vm-push-continuation-hook", 1, 0, 0, } #undef FUNC_NAME -SCM_DEFINE (scm_vm_pop_continuation_hook, "vm-pop-continuation-hook", 1, 0, 0, - (SCM vm), +SCM_DEFINE (scm_vm_pop_continuation_hook, "vm-pop-continuation-hook", 0, 0, 0, + (void), "") #define FUNC_NAME s_scm_vm_pop_continuation_hook { @@ -933,8 +1297,8 @@ SCM_DEFINE (scm_vm_pop_continuation_hook, "vm-pop-continuation-hook", 1, 0, 0, } #undef FUNC_NAME -SCM_DEFINE (scm_vm_next_hook, "vm-next-hook", 1, 0, 0, - (SCM vm), +SCM_DEFINE (scm_vm_next_hook, "vm-next-hook", 0, 0, 0, + (void), "") #define FUNC_NAME s_scm_vm_next_hook { @@ -942,8 +1306,8 @@ SCM_DEFINE (scm_vm_next_hook, "vm-next-hook", 1, 0, 0, } #undef FUNC_NAME -SCM_DEFINE (scm_vm_abort_continuation_hook, "vm-abort-continuation-hook", 1, 0, 0, - (SCM vm), +SCM_DEFINE (scm_vm_abort_continuation_hook, "vm-abort-continuation-hook", 0, 0, 0, + (void), "") #define FUNC_NAME s_scm_vm_abort_continuation_hook { @@ -951,32 +1315,21 @@ SCM_DEFINE (scm_vm_abort_continuation_hook, "vm-abort-continuation-hook", 1, 0, } #undef FUNC_NAME -SCM_DEFINE (scm_vm_restore_continuation_hook, "vm-restore-continuation-hook", 1, 0, 0, - (SCM vm), - "") -#define FUNC_NAME s_scm_vm_restore_continuation_hook -{ - VM_DEFINE_HOOK (SCM_VM_RESTORE_CONTINUATION_HOOK); -} -#undef FUNC_NAME - -SCM_DEFINE (scm_vm_trace_level, "vm-trace-level", 1, 0, 0, - (SCM vm), +SCM_DEFINE (scm_vm_trace_level, "vm-trace-level", 0, 0, 0, + (void), "") #define FUNC_NAME s_scm_vm_trace_level { - SCM_VALIDATE_VM (1, vm); - return scm_from_int (SCM_VM_DATA (vm)->trace_level); + return scm_from_int (scm_the_vm ()->trace_level); } #undef FUNC_NAME -SCM_DEFINE (scm_set_vm_trace_level_x, "set-vm-trace-level!", 2, 0, 0, - (SCM vm, SCM level), +SCM_DEFINE (scm_set_vm_trace_level_x, "set-vm-trace-level!", 1, 0, 0, + (SCM level), "") #define FUNC_NAME s_scm_set_vm_trace_level_x { - SCM_VALIDATE_VM (1, vm); - SCM_VM_DATA (vm)->trace_level = scm_to_int (level); + scm_the_vm ()->trace_level = scm_to_int (level); return SCM_UNSPECIFIED; } #undef FUNC_NAME @@ -1013,36 +1366,33 @@ vm_engine_to_symbol (int engine, const char *FUNC_NAME) } } -SCM_DEFINE (scm_vm_engine, "vm-engine", 1, 0, 0, - (SCM vm), +SCM_DEFINE (scm_vm_engine, "vm-engine", 0, 0, 0, + (void), "") #define FUNC_NAME s_scm_vm_engine { - SCM_VALIDATE_VM (1, vm); - return vm_engine_to_symbol (SCM_VM_DATA (vm)->engine, FUNC_NAME); + return vm_engine_to_symbol (scm_the_vm ()->engine, FUNC_NAME); } #undef FUNC_NAME void -scm_c_set_vm_engine_x (SCM vm, int engine) +scm_c_set_vm_engine_x (int engine) #define FUNC_NAME "set-vm-engine!" 
{ - SCM_VALIDATE_VM (1, vm); - if (engine < 0 || engine >= SCM_VM_NUM_ENGINES) SCM_MISC_ERROR ("Unknown VM engine: ~a", scm_list_1 (scm_from_int (engine))); - SCM_VM_DATA (vm)->engine = engine; + scm_the_vm ()->engine = engine; } #undef FUNC_NAME -SCM_DEFINE (scm_set_vm_engine_x, "set-vm-engine!", 2, 0, 0, - (SCM vm, SCM engine), +SCM_DEFINE (scm_set_vm_engine_x, "set-vm-engine!", 1, 0, 0, + (SCM engine), "") #define FUNC_NAME s_scm_set_vm_engine_x { - scm_c_set_vm_engine_x (vm, symbol_to_vm_engine (engine, FUNC_NAME)); + scm_c_set_vm_engine_x (symbol_to_vm_engine (engine, FUNC_NAME)); return SCM_UNSPECIFIED; } #undef FUNC_NAME @@ -1069,62 +1419,69 @@ SCM_DEFINE (scm_set_default_vm_engine_x, "set-default-vm-engine!", 1, 0, 0, } #undef FUNC_NAME -static void reinstate_vm (SCM vm) +/* FIXME: This function makes no sense, but we keep it to make sure we + have a way of switching to the debug or regular VM. */ +SCM_DEFINE (scm_call_with_vm, "call-with-vm", 1, 0, 1, + (SCM proc, SCM args), + "Apply @var{proc} to @var{args} in a dynamic extent in which\n" + "@var{vm} is the current VM.") +#define FUNC_NAME s_scm_call_with_vm { - scm_i_thread *t = SCM_I_CURRENT_THREAD; - t->vm = vm; + return scm_apply_0 (proc, args); } +#undef FUNC_NAME -SCM_DEFINE (scm_call_with_vm, "call-with-vm", 2, 0, 1, - (SCM vm, SCM proc, SCM args), - "Apply @var{proc} to @var{args} in a dynamic extent in which\n" - "@var{vm} is the current VM.\n\n" - "As an implementation restriction, if @var{vm} is not the same\n" - "as the current thread's VM, continuations captured within the\n" - "call to @var{proc} may not be reinstated once control leaves\n" - "@var{proc}.") -#define FUNC_NAME s_scm_call_with_vm +SCM_DEFINE (scm_call_with_stack_overflow_handler, + "call-with-stack-overflow-handler", 3, 0, 0, + (SCM limit, SCM thunk, SCM handler), + "Call @var{thunk} in an environment in which the stack limit has\n" + "been reduced to @var{limit} additional words. If the limit is\n" + "reached, @var{handler} (a thunk) will be invoked in the dynamic\n" + "environment of the error. For the extent of the call to\n" + "@var{handler}, the stack limit and handler are restored to the\n" + "values that were in place when\n" + "@code{call-with-stack-overflow-handler} was called.") +#define FUNC_NAME s_scm_call_with_stack_overflow_handler { - SCM prev_vm, ret; - SCM *argv; - int i, nargs; - scm_t_wind_flags flags; - scm_i_thread *t = SCM_I_CURRENT_THREAD; + struct scm_vm *vp; + scm_t_ptrdiff c_limit, stack_size; + struct overflow_handler_data data; + SCM new_limit, ret; - SCM_VALIDATE_VM (1, vm); - SCM_VALIDATE_PROC (2, proc); + vp = scm_the_vm (); + stack_size = vp->sp - vp->stack_base; - nargs = scm_ilength (args); - if (SCM_UNLIKELY (nargs < 0)) - scm_wrong_type_arg_msg (FUNC_NAME, 3, args, "list"); - - argv = alloca (nargs * sizeof(SCM)); - for (i = 0; i < nargs; i++) - { - argv[i] = SCM_CAR (args); - args = SCM_CDR (args); - } + c_limit = scm_to_ptrdiff_t (limit); + if (c_limit <= 0) + scm_out_of_range (FUNC_NAME, limit); - prev_vm = t->vm; + new_limit = scm_sum (scm_from_ptrdiff_t (stack_size), limit); + if (scm_is_pair (vp->overflow_handler_stack)) + new_limit = scm_min (new_limit, scm_caar (vp->overflow_handler_stack)); - /* Reentry can happen via invokation of a saved continuation, but - continuations only save the state of the VM that they are in at - capture-time, which might be different from this one. So, in the - case that the VMs are different, set up a non-rewindable frame to - prevent reinstating an incomplete continuation. 
*/ - flags = scm_is_eq (prev_vm, vm) ? 0 : SCM_F_WIND_EXPLICITLY; - if (flags) - { - scm_dynwind_begin (0); - scm_dynwind_unwind_handler_with_scm (reinstate_vm, prev_vm, flags); - t->vm = vm; - } + /* Hacky check that the current stack depth plus the limit is within + the range of a ptrdiff_t. */ + scm_to_ptrdiff_t (new_limit); - ret = scm_c_vm_run (vm, proc, argv, nargs); + data.vp = vp; + data.overflow_handler_stack = + scm_acons (limit, handler, vp->overflow_handler_stack); + + scm_dynwind_begin (SCM_F_DYNWIND_REWINDABLE); + + scm_dynwind_rewind_handler (wind_overflow_handler, &data, + SCM_F_WIND_EXPLICITLY); + scm_dynwind_unwind_handler (unwind_overflow_handler, &data, + SCM_F_WIND_EXPLICITLY); + + /* Reset vp->sp_max_since_gc so that the VM checks actually + trigger. */ + return_unused_stack_to_os (vp); + + ret = scm_call_0 (thunk); + + scm_dynwind_end (); - if (flags) - scm_dynwind_end (); - return ret; } #undef FUNC_NAME @@ -1134,11 +1491,10 @@ SCM_DEFINE (scm_call_with_vm, "call-with-vm", 2, 0, 1, * Initialize */ -SCM scm_load_compiled_with_vm (SCM file) +SCM +scm_load_compiled_with_vm (SCM file) { - SCM program = scm_load_thunk_from_file (file); - - return scm_c_vm_run (scm_the_vm (), program, NULL, 0); + return scm_call_0 (scm_load_thunk_from_file (file)); } @@ -1175,7 +1531,10 @@ scm_bootstrap_vm (void) (scm_t_extension_init_func)scm_init_vm_builtins, NULL); - initialize_default_stack_size (); + page_size = getpagesize (); + /* page_size should be a power of two. */ + if (page_size & (page_size - 1)) + abort (); sym_vm_run = scm_from_latin1_symbol ("vm-run"); sym_vm_error = scm_from_latin1_symbol ("vm-error"); @@ -1192,14 +1551,6 @@ scm_bootstrap_vm (void) vm_builtin_##builtin = scm_i_make_program (vm_builtin_##builtin##_code); FOR_EACH_VM_BUILTIN (DEFINE_BUILTIN); #undef DEFINE_BUILTIN - -#ifdef VM_ENABLE_PRECISE_STACK_GC_SCAN - vm_stack_gc_kind = - GC_new_kind (GC_new_free_list (), - GC_MAKE_PROC (GC_new_proc (vm_stack_mark), 0), - 0, 1); - -#endif } void
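The sketches below illustrate, outside of Guile, the main techniques this change introduces; each is a simplified stand-in with invented names, not the code being merged. First, the push protocol of vm_increase_sp/vm_push_sp: a push that stays under the high-water mark costs one compare and one store, and only a push past the limit takes the expansion slow path. Here plain realloc stands in for the mmap-backed stack and its frame relocation.

#include <stdio.h>
#include <stdlib.h>

/* A toy growable stack: sp points at the last pushed word, sp_max is the
   high-water mark, limit is one past the usable end.  All names here are
   invented for the example. */
struct toy_stack
{
  long *base, *sp, *sp_max, *limit;
};

/* Slow path: double the allocation until NEW_SP fits, then re-point
   everything at the (possibly moved) block. */
static void toy_expand (struct toy_stack *s, long *new_sp)
{
  size_t size = s->limit - s->base;
  size_t live = s->sp - s->base + 1;
  size_t need = new_sp - s->base + 1;
  long *fresh;

  while (size < need)
    size *= 2;
  fresh = realloc (s->base, size * sizeof *fresh);
  if (!fresh)
    abort ();
  s->base = fresh;
  s->sp = fresh + live - 1;
  s->sp_max = s->sp;
  s->limit = fresh + size;
}

/* Fast path: one compare and one store. */
static void toy_push (struct toy_stack *s, long v)
{
  long *new_sp = s->sp + 1;

  if (new_sp >= s->limit)
    {
      toy_expand (s, new_sp);
      new_sp = s->sp + 1;
    }
  if (new_sp > s->sp_max)
    s->sp_max = new_sp;          /* watermark, cf. sp_max_since_gc */
  s->sp = new_sp;
  *new_sp = v;
}

int main (void)
{
  struct toy_stack s;
  long i;

  s.base = malloc (16 * sizeof (long));
  s.sp = s.sp_max = s.base - 1;  /* empty, as in make_vm */
  s.limit = s.base + 16;
  for (i = 0; i < 1000; i++)
    toy_push (&s, i);
  printf ("top %ld, capacity %td words\n", *s.sp, s.limit - s.base);
  free (s.base);
  return 0;
}

The watermark (sp_max_since_gc in the diff) is what later lets return_unused_stack_to_os decide which pages above the live region are dead.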
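allocate_stack and expand_stack prefer mmap plus mremap with MREMAP_MAYMOVE, and fall back to a copying reallocation elsewhere. The real code keys this off HAVE_SYS_MMAN_H and MREMAP_MAYMOVE from configure; the sketch below uses __linux__ as a rough stand-in, and error handling is reduced to returning NULL.

/* Needed for mremap() on GNU/Linux; must precede the first #include. */
#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#ifdef __linux__
# include <sys/mman.h>
#endif

#if defined __linux__ && defined MREMAP_MAYMOVE
# define USE_MMAP 1
#else
# define USE_MMAP 0
#endif

/* Grab an anonymous, zero-filled region.  Returns NULL on failure. */
static void *region_alloc (size_t size)
{
#if USE_MMAP
  void *p = mmap (NULL, size, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? NULL : p;
#else
  return calloc (1, size);
#endif
}

/* Grow a region, possibly moving it.  The caller must fix up any
   pointers into the old block afterwards. */
static void *region_grow (void *old, size_t old_size, size_t new_size)
{
#if USE_MMAP
  void *p = mremap (old, old_size, new_size, MREMAP_MAYMOVE);
  return p == MAP_FAILED ? NULL : p;
#else
  void *p = calloc (1, new_size);
  if (!p)
    return NULL;
  memcpy (p, old, old_size);
  free (old);
  return p;
#endif
}

int main (void)
{
  size_t page = (size_t) sysconf (_SC_PAGESIZE);
  unsigned char *stack = region_alloc (page);

  if (!stack)
    return 1;
  stack[0] = 42;
  stack = region_grow (stack, page, 4 * page);
  if (!stack)
    return 1;
  printf ("first byte survived the move: %d\n", stack[0]);
  return 0;
}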
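Because both mremap and the copying fallback can move the block, vm_expand_stack_inner and vm_return_to_continuation_inner walk the chain of dynamic links and add the relocation offset to each saved frame pointer. The same walk, on a made-up frame layout in which each frame is just one word holding the previous frame pointer:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy frame layout: a frame pointer points at a stack word that holds the
   previous frame pointer (the "dynamic link"), or NULL for the outermost
   frame.  The layout is invented for the example; Guile frames carry more. */
typedef void **frame_ptr;

#define DYNAMIC_LINK(fp)        ((frame_ptr) *(fp))
#define SET_DYNAMIC_LINK(fp, v) (*(fp) = (void *) (v))

/* After the stack words have been copied (or mremap'ed) RELOC words away,
   shift every frame pointer in the chain by the same amount. */
static frame_ptr relocate_frames (frame_ptr fp, ptrdiff_t reloc)
{
  frame_ptr walk;

  if (fp)
    fp += reloc;
  for (walk = fp; walk;)
    {
      frame_ptr next = DYNAMIC_LINK (walk);
      if (next)
        {
          next += reloc;
          SET_DYNAMIC_LINK (walk, next);
        }
      walk = next;
    }
  return fp;
}

int main (void)
{
  void *stack[16] = { 0 };
  frame_ptr outer = &stack[0];
  frame_ptr inner = &stack[3];
  ptrdiff_t reloc = 8;              /* pretend the block moved by 8 words */
  frame_ptr fp;

  SET_DYNAMIC_LINK (outer, NULL);
  SET_DYNAMIC_LINK (inner, outer);

  /* "Move" the live words within the same array, then fix up the chain. */
  memmove (stack + reloc, stack, 8 * sizeof (void *));
  fp = relocate_frames (inner, reloc);

  printf ("inner frame at slot %td, its dynamic link at slot %td\n",
          (void **) fp - stack, (void **) DYNAMIC_LINK (fp) - stack);
  return 0;
}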
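return_unused_stack_to_os rounds the dead region to page boundaries and releases it with madvise(MADV_DONTNEED), so pages above the high-water mark stop costing RAM until they are touched again, at which point they read back as zeros. The helper below rounds conservatively (start up, end down); the patch can round the end up as well because everything above the watermark is dead anyway. One small observation on the original: its retry loop tests errno == -EAGAIN, which can never be true since errno values are positive, so the sketch tests EAGAIN.

#define _DEFAULT_SOURCE   /* for madvise and MAP_ANONYMOUS with glibc */

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Round ADDR up to the next multiple of PAGE_SIZE (a power of two). */
static uintptr_t round_up_to_page (uintptr_t addr, uintptr_t page_size)
{
  return ((addr - 1U) | (page_size - 1U)) + 1U;
}

/* Give the pages wholly inside [lo, hi) back to the kernel; if they are
   ever touched again they read back as zeros. */
static void discard_range (void *lo, void *hi, size_t page_size)
{
  uintptr_t start = round_up_to_page ((uintptr_t) lo, page_size);
  uintptr_t end = (uintptr_t) hi & ~(uintptr_t) (page_size - 1);

  if (start < end)
    {
      int ret;

      do
        ret = madvise ((void *) start, end - start, MADV_DONTNEED);
      while (ret && errno == EAGAIN);
      if (ret)
        perror ("madvise");
    }
}

int main (void)
{
  size_t page = (size_t) sysconf (_SC_PAGESIZE);
  size_t len = 16 * page;
  unsigned char *region = mmap (NULL, len, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  if (region == MAP_FAILED)
    return 1;
  memset (region, 0xff, len);                  /* fault every page in */
  discard_range (region + page + 100, region + len, page);
  /* The partially covered page is kept; whole pages come back zeroed. */
  printf ("%d %d\n", region[page + 200], region[2 * page]);   /* 255 0 */
  munmap (region, len);
  return 0;
}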
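For precise marking, scm_i_vm_mark_stack consults a per-return-address dead-slot bitmap and caches lookups in a 32-entry direct-mapped cache hashed on the instruction pointer, dropping the low two bits as the diff's own comment suggests. A compact model of both pieces; slow_lookup and its bitmap are stand-ins, not the scm_find_dead_slot_map_unlocked interface:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CACHE_SIZE 32U

struct cache_entry { const uint32_t *ip; const uint8_t *map; };
struct cache { struct cache_entry entries[CACHE_SIZE]; };

/* Stand-in for the real (and possibly slow) lookup in the debug info. */
static const uint8_t *slow_lookup (const uint32_t *ip)
{
  static const uint8_t example_map[] = { 0x05 };   /* slots 0 and 2 dead */
  (void) ip;
  return example_map;
}

/* Direct-mapped cache keyed on the return address.  Instructions are
   32-bit aligned, so the low two bits carry no information; shift them
   out before taking the modulus. */
static const uint8_t *lookup (const uint32_t *ip, struct cache *c)
{
  size_t slot = (((uintptr_t) ip) >> 2) % CACHE_SIZE;

  if (c->entries[slot].ip != ip)
    {
      c->entries[slot].ip = ip;
      c->entries[slot].map = slow_lookup (ip);
    }
  return c->entries[slot].map;
}

/* Bit test used while marking: slot N of the frame is dead if bit N is set. */
static int slot_is_dead (const uint8_t *map, size_t n)
{
  return map && (map[n / 8U] & (1U << (n % 8U))) != 0;
}

int main (void)
{
  struct cache c;
  static const uint32_t code[4];        /* fake instruction stream */
  const uint8_t *map;

  memset (&c, 0, sizeof c);
  map = lookup (&code[1], &c);
  printf ("slot 0 dead: %d, slot 1 dead: %d\n",
          slot_is_dead (map, 0), slot_is_dead (map, 1));   /* 1, 0 */
  return 0;
}

A collision simply overwrites the entry, exactly as a direct-mapped cache should; correctness only depends on the slow lookup, the cache is pure speed-up.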
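Finally, call-with-stack-overflow-handler computes its new limit by adding the requested headroom to the current stack depth and then clamping against any enclosing handler's limit. That arithmetic, isolated; the names and the -1 convention for "no enclosing handler" are invented here:

#include <stddef.h>
#include <stdio.h>

/* Mirror of the new_limit computation: current depth plus requested
   headroom, clamped by the limit already in force, if any. */
static ptrdiff_t effective_limit (ptrdiff_t current_depth,
                                  ptrdiff_t requested_headroom,
                                  ptrdiff_t enclosing_limit)
{
  ptrdiff_t limit = current_depth + requested_headroom;

  if (enclosing_limit >= 0 && enclosing_limit < limit)
    limit = enclosing_limit;
  return limit;
}

int main (void)
{
  /* 10000 words in use, the handler asks for 50000 more, but an outer
     handler already capped the stack at 40000 words. */
  printf ("%td\n", effective_limit (10000, 50000, 40000));   /* 40000 */
  return 0;
}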