X-Git-Url: https://git.hcoop.net/bpt/guile.git/blobdiff_plain/f689dd6982697f592b0dd5e63dc87e516657fb25..eb3d623da57e6d31a58d95f932345fb761f9b701:/libguile/vm.c

diff --git a/libguile/vm.c b/libguile/vm.c
index f9441ad20..0e5983575 100644
--- a/libguile/vm.c
+++ b/libguile/vm.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
+/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Free Software Foundation, Inc.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public License
@@ -28,6 +28,7 @@
 #include <alloca.h>
 #include <string.h>
 #include <stdint.h>
+#include <unistd.h>
 
 #ifdef HAVE_SYS_MMAN_H
 #include <sys/mman.h>
@@ -39,14 +40,14 @@
 #include "_scm.h"
 #include "control.h"
 #include "frames.h"
+#include "gc-inline.h"
 #include "instructions.h"
 #include "loader.h"
 #include "programs.h"
+#include "simpos.h"
 #include "vm.h"
 #include "vm-builtins.h"
 
-#include "private-gc.h"         /* scm_getenv_int */
-
 static int vm_default_engine = SCM_VM_REGULAR_ENGINE;
 
 /* Unfortunately we can't snarf these: snarfed things are only loaded up from
@@ -57,12 +58,46 @@ static SCM sym_keyword_argument_error;
 static SCM sym_regular;
 static SCM sym_debug;
 
+/* The page size. */
+static size_t page_size;
+
 /* The VM has a number of internal assertions that shouldn't normally be
    necessary, but might be if you think you found a bug in the VM. */
-#define VM_ENABLE_ASSERTIONS
+/* #define VM_ENABLE_ASSERTIONS */
+
+static void vm_expand_stack (struct scm_vm *vp, SCM *new_sp) SCM_NOINLINE;
+
+/* RESTORE is for the case where we know we have done a PUSH of equal or
+   greater stack size in the past.  Otherwise PUSH is the thing, which
+   may expand the stack. */
+enum vm_increase_sp_kind { VM_SP_PUSH, VM_SP_RESTORE };
+
+static inline void
+vm_increase_sp (struct scm_vm *vp, SCM *new_sp, enum vm_increase_sp_kind kind)
+{
+  if (new_sp <= vp->sp_max_since_gc)
+    {
+      vp->sp = new_sp;
+      return;
+    }
 
-/* #define VM_ENABLE_PARANOID_ASSERTIONS */
+  if (kind == VM_SP_PUSH && new_sp >= vp->stack_limit)
+    vm_expand_stack (vp, new_sp);
+  else
+    vp->sp_max_since_gc = vp->sp = new_sp;
+}
 
+static inline void
+vm_push_sp (struct scm_vm *vp, SCM *new_sp)
+{
+  vm_increase_sp (vp, new_sp, VM_SP_PUSH);
+}
+
+static inline void
+vm_restore_sp (struct scm_vm *vp, SCM *new_sp)
+{
+  vm_increase_sp (vp, new_sp, VM_SP_RESTORE);
+}
 
 /*
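Everything that grows the VM stack now funnels through vm_push_sp and
vm_restore_sp.  sp_max_since_gc is a high-water mark: any position at or
below it has already passed a limit check since the last GC, so the common
case is a single comparison.  A minimal sketch of a caller, assuming only
the declarations above; the operation and its slot count are invented for
illustration:

    /* Sketch only: a hypothetical operation needing N fresh stack slots. */
    static void
    example_push_n_slots (struct scm_vm *vp, size_t n)
    {
      size_t i;

      vm_push_sp (vp, vp->sp + n);   /* may grow and relocate the stack */
      for (i = 0; i < n; i++)
        vp->sp[-(scm_t_ptrdiff) i] = SCM_BOOL_F;   /* fill the new slots */
    }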
@@ -77,17 +112,22 @@ scm_i_vm_cont_print (SCM x, SCM port, scm_print_state *pstate)
   scm_puts_unlocked (">", port);
 }
 
-/* In theory, a number of vm instances can be active in the call trace, and we
-   only want to reify the continuations of those in the current continuation
-   root. I don't see a nice way to do this -- ideally it would involve dynwinds,
-   and previous values of the *the-vm* fluid within the current continuation
-   root. But we don't have access to continuation roots in the dynwind stack.
-   So, just punt for now, we just capture the continuation for the current VM.
+int
+scm_i_vm_cont_to_frame (SCM cont, struct scm_frame *frame)
+{
+  struct scm_vm_cont *data = SCM_VM_CONT_DATA (cont);
+
+  frame->stack_holder = data;
+  frame->fp_offset = (data->fp + data->reloc) - data->stack_base;
+  frame->sp_offset = (data->sp + data->reloc) - data->stack_base;
+  frame->ip = data->ra;
 
-   While I'm on the topic, ideally we could avoid copying the C stack if the
-   continuation root is inside VM code, and call/cc was invoked within that same
-   call to vm_run; but that's currently not implemented.
- */
+  return 1;
+}
+
+/* Ideally we could avoid copying the C stack if the continuation root
+   is inside VM code, and call/cc was invoked within that same call to
+   vm_run.  That's currently not implemented. */
 SCM
 scm_i_vm_capture_stack (SCM *stack_base, SCM *fp, SCM *sp, scm_t_uint32 *ra,
                         scm_t_dynstack *dynstack, scm_t_uint32 flags)
@@ -108,43 +148,84 @@ scm_i_vm_capture_stack (SCM *stack_base, SCM *fp, SCM *sp, scm_t_uint32 *ra,
   return scm_cell (scm_tc7_vm_cont, (scm_t_bits)p);
 }
 
+struct return_to_continuation_data
+{
+  struct scm_vm_cont *cp;
+  struct scm_vm *vp;
+};
+
+/* Called with the GC lock to prevent the stack marker from traversing a
+   stack in an inconsistent state. */
+static void *
+vm_return_to_continuation_inner (void *data_ptr)
+{
+  struct return_to_continuation_data *data = data_ptr;
+  struct scm_vm *vp = data->vp;
+  struct scm_vm_cont *cp = data->cp;
+  scm_t_ptrdiff reloc;
+
+  /* We know that there is enough space for the continuation, because we
+     captured it in the past.  However there may have been an expansion
+     since the capture, so we may have to re-link the frame
+     pointers. */
+  reloc = (vp->stack_base - (cp->stack_base - cp->reloc));
+  vp->fp = cp->fp + reloc;
+  memcpy (vp->stack_base, cp->stack_base, cp->stack_size * sizeof (SCM));
+  vm_restore_sp (vp, cp->sp + reloc);
+
+  if (reloc)
+    {
+      SCM *fp = vp->fp;
+      while (fp)
+        {
+          SCM *next_fp = SCM_FRAME_DYNAMIC_LINK (fp);
+          if (next_fp)
+            {
+              next_fp += reloc;
+              SCM_FRAME_SET_DYNAMIC_LINK (fp, next_fp);
+            }
+          fp = next_fp;
+        }
+    }
+
+  return NULL;
+}
+
 static void
 vm_return_to_continuation (struct scm_vm *vp, SCM cont, size_t n, SCM *argv)
 {
   struct scm_vm_cont *cp;
   SCM *argv_copy;
+  struct return_to_continuation_data data;
 
   argv_copy = alloca (n * sizeof(SCM));
   memcpy (argv_copy, argv, n * sizeof(SCM));
 
   cp = SCM_VM_CONT_DATA (cont);
 
-  if (vp->stack_size < cp->stack_size + n + 3)
-    scm_misc_error ("vm-engine", "not enough space to reinstate continuation",
-                    scm_list_1 (cont));
+  data.cp = cp;
+  data.vp = vp;
+  GC_call_with_alloc_lock (vm_return_to_continuation_inner, &data);
 
-  vp->sp = cp->sp;
-  vp->fp = cp->fp;
-  memcpy (vp->stack_base, cp->stack_base, cp->stack_size * sizeof (SCM));
+  /* Now we have the continuation properly copied over.  We just need to
+     copy the arguments.  It is not guaranteed that there is actually
+     space for the arguments, though, so we have to bump the SP first. */
+  vm_push_sp (vp, vp->sp + 3 + n);
 
+  /* Now copy on an empty frame and the return values, as the
+     continuation expects. */
   {
+    SCM *base = vp->sp + 1 - 3 - n;
     size_t i;
 
-    /* Push on an empty frame, as the continuation expects. */
     for (i = 0; i < 3; i++)
-      {
-        vp->sp++;
-        *vp->sp = SCM_BOOL_F;
-      }
+      base[i] = SCM_BOOL_F;
 
-    /* Push the return values. */
     for (i = 0; i < n; i++)
-      {
-        vp->sp++;
-        *vp->sp = argv_copy[i];
-      }
-    vp->ip = cp->ra;
+      base[i + 3] = argv_copy[i];
   }
+
+  vp->ip = cp->ra;
 }
 
 static struct scm_vm * thread_vm (scm_i_thread *t);
 
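Both continuation-restoring paths reduce to the same relocation arithmetic:
cp->reloc records the offset from the live stack to the heap copy at capture
time, so the live base back then was cp->stack_base - cp->reloc, and a saved
pointer moves by exactly as far as the base has moved since.  A sketch of
that arithmetic as a standalone helper -- hypothetical, reconstructed from
the reloc expression above, not code from the patch:

    static SCM *
    relocate_saved_pointer (struct scm_vm *vp, struct scm_vm_cont *cp, SCM *p)
    {
      SCM *base_at_capture = cp->stack_base - cp->reloc;
      scm_t_ptrdiff reloc = vp->stack_base - base_at_capture;
      return p + reloc;  /* e.g. vp->fp = relocate_saved_pointer (vp, cp, cp->fp) */
    }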
@@ -283,59 +364,76 @@ vm_abort (struct scm_vm *vp, SCM tag,
   for (; i < nstack + tail_len; i++, tail = scm_cdr (tail))
     argv[i] = scm_car (tail);
 
-  /* FIXME: NULLSTACK (SCM_VM_DATA (vp)->sp - sp) */
   vp->sp = sp;
 
   scm_c_abort (vp, tag, nstack + tail_len, argv, current_registers);
 }
 
-static void
-vm_reinstate_partial_continuation (struct scm_vm *vp, SCM cont,
-                                   size_t n, SCM *argv,
-                                   scm_t_dynstack *dynstack,
-                                   scm_i_jmp_buf *registers)
+struct vm_reinstate_partial_continuation_data
 {
+  struct scm_vm *vp;
   struct scm_vm_cont *cp;
-  SCM *argv_copy, *base;
   scm_t_ptrdiff reloc;
-  size_t i;
+};
 
-  argv_copy = alloca (n * sizeof(SCM));
-  memcpy (argv_copy, argv, n * sizeof(SCM));
+static void *
+vm_reinstate_partial_continuation_inner (void *data_ptr)
+{
+  struct vm_reinstate_partial_continuation_data *data = data_ptr;
+  struct scm_vm *vp = data->vp;
+  struct scm_vm_cont *cp = data->cp;
+  SCM *base;
+  scm_t_ptrdiff reloc;
 
-  cp = SCM_VM_CONT_DATA (cont);
   base = SCM_FRAME_LOCALS_ADDRESS (vp->fp);
   reloc = cp->reloc + (base - cp->stack_base);
 
-#define RELOC(scm_p)				\
-  (((SCM *) (scm_p)) + reloc)
-
-  if ((base - vp->stack_base) + cp->stack_size + n + 1 > vp->stack_size)
-    scm_misc_error ("vm-engine",
-                    "not enough space to instate partial continuation",
-                    scm_list_1 (cont));
-
   memcpy (base, cp->stack_base, cp->stack_size * sizeof (SCM));
 
+  vp->fp = cp->fp + reloc;
+  vp->ip = cp->ra;
+
   /* now relocate frame pointers */
   {
     SCM *fp;
-    for (fp = RELOC (cp->fp);
-         SCM_FRAME_LOWER_ADDRESS (fp) > base;
+    for (fp = vp->fp;
+         SCM_FRAME_LOWER_ADDRESS (fp) >= base;
          fp = SCM_FRAME_DYNAMIC_LINK (fp))
-      SCM_FRAME_SET_DYNAMIC_LINK (fp, RELOC (SCM_FRAME_DYNAMIC_LINK (fp)));
+      SCM_FRAME_SET_DYNAMIC_LINK (fp, SCM_FRAME_DYNAMIC_LINK (fp) + reloc);
   }
 
-  vp->sp = base - 1 + cp->stack_size;
-  vp->fp = RELOC (cp->fp);
-  vp->ip = cp->ra;
+  data->reloc = reloc;
+
+  return NULL;
+}
+
+static void
+vm_reinstate_partial_continuation (struct scm_vm *vp, SCM cont,
+                                   size_t n, SCM *argv,
+                                   scm_t_dynstack *dynstack,
+                                   scm_i_jmp_buf *registers)
+{
+  struct vm_reinstate_partial_continuation_data data;
+  struct scm_vm_cont *cp;
+  SCM *argv_copy;
+  scm_t_ptrdiff reloc;
+  size_t i;
+
+  argv_copy = alloca (n * sizeof(SCM));
+  memcpy (argv_copy, argv, n * sizeof(SCM));
+
+  cp = SCM_VM_CONT_DATA (cont);
+
+  vm_push_sp (vp, SCM_FRAME_LOCALS_ADDRESS (vp->fp) + cp->stack_size + n - 1);
+
+  data.vp = vp;
+  data.cp = cp;
+  GC_call_with_alloc_lock (vm_reinstate_partial_continuation_inner, &data);
+  reloc = data.reloc;
 
   /* Push the arguments. */
   for (i = 0; i < n; i++)
-    {
-      vp->sp++;
-      *vp->sp = argv_copy[i];
-    }
+    vp->sp[i + 1 - n] = argv_copy[i];
 
   /* The prompt captured a slice of the dynamic stack.  Here we wind
      those entries onto the current thread's stack.  We also have to
@@ -355,7 +453,6 @@ vm_reinstate_partial_continuation (struct scm_vm *vp, SCM cont,
       scm_dynstack_wind_1 (dynstack, walk);
     }
   }
-#undef RELOC
 }
 
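The "relocate frame pointers" loop above rewrites each dynamic link inside
the freshly copied slice; once a link points below BASE it belongs to the
frame the continuation is being grafted onto and the walk stops.  The same
walk as a standalone sketch, assuming the SCM_FRAME_* accessors from
frames.h; this helper is illustrative and not part of the patch:

    static void
    relocate_dynamic_links (SCM *fp, SCM *base, scm_t_ptrdiff reloc)
    {
      while (fp && SCM_FRAME_LOWER_ADDRESS (fp) >= base)
        {
          SCM *link = SCM_FRAME_DYNAMIC_LINK (fp);  /* still an old pointer */
          SCM_FRAME_SET_DYNAMIC_LINK (fp, link + reloc);
          fp = link + reloc;                        /* follow relocated link */
        }
    }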
@@ -365,8 +462,8 @@ vm_reinstate_partial_continuation (struct scm_vm *vp, SCM cont,
 
 static void vm_error (const char *msg, SCM arg) SCM_NORETURN;
 static void vm_error_bad_instruction (scm_t_uint32 inst) SCM_NORETURN SCM_NOINLINE;
-static void vm_error_unbound (SCM proc, SCM sym) SCM_NORETURN SCM_NOINLINE;
-static void vm_error_unbound_fluid (SCM proc, SCM fluid) SCM_NORETURN SCM_NOINLINE;
+static void vm_error_unbound (SCM sym) SCM_NORETURN SCM_NOINLINE;
+static void vm_error_unbound_fluid (SCM fluid) SCM_NORETURN SCM_NOINLINE;
 static void vm_error_not_a_variable (const char *func_name, SCM x) SCM_NORETURN SCM_NOINLINE;
 static void vm_error_apply_to_non_list (SCM x) SCM_NORETURN SCM_NOINLINE;
 static void vm_error_kwargs_length_not_even (SCM proc) SCM_NORETURN SCM_NOINLINE;
@@ -380,6 +477,8 @@ static void vm_error_improper_list (SCM x) SCM_NORETURN SCM_NOINLINE;
 static void vm_error_not_a_pair (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
 static void vm_error_not_a_bytevector (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
 static void vm_error_not_a_struct (const char *subr, SCM x) SCM_NORETURN SCM_NOINLINE;
+static void vm_error_not_a_vector (const char *subr, SCM v) SCM_NORETURN SCM_NOINLINE;
+static void vm_error_out_of_range (const char *subr, SCM k) SCM_NORETURN SCM_NOINLINE;
 static void vm_error_no_values (void) SCM_NORETURN SCM_NOINLINE;
 static void vm_error_not_enough_values (void) SCM_NORETURN SCM_NOINLINE;
 static void vm_error_wrong_number_of_values (scm_t_uint32 expected) SCM_NORETURN SCM_NOINLINE;
@@ -402,17 +501,17 @@ vm_error_bad_instruction (scm_t_uint32 inst)
 }
 
 static void
-vm_error_unbound (SCM proc, SCM sym)
+vm_error_unbound (SCM sym)
 {
-  scm_error_scm (scm_misc_error_key, proc,
+  scm_error_scm (scm_misc_error_key, SCM_BOOL_F,
                  scm_from_latin1_string ("Unbound variable: ~s"),
                  scm_list_1 (sym), SCM_BOOL_F);
 }
 
 static void
-vm_error_unbound_fluid (SCM proc, SCM fluid)
+vm_error_unbound_fluid (SCM fluid)
 {
-  scm_error_scm (scm_misc_error_key, proc,
+  scm_error_scm (scm_misc_error_key, SCM_BOOL_F,
                  scm_from_latin1_string ("Unbound fluid: ~s"),
                  scm_list_1 (fluid), SCM_BOOL_F);
 }
@@ -504,6 +603,19 @@ vm_error_not_a_struct (const char *subr, SCM x)
   scm_wrong_type_arg_msg (subr, 1, x, "struct");
 }
 
+static void
+vm_error_not_a_vector (const char *subr, SCM x)
+{
+  scm_wrong_type_arg_msg (subr, 1, x, "vector");
+}
+
+static void
+vm_error_out_of_range (const char *subr, SCM k)
+{
+  scm_to_size_t (k);
+  scm_out_of_range (subr, k);
+}
+
 static void
 vm_error_no_values (void)
 {
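The vm_error_* helpers are out of line and marked SCM_NORETURN SCM_NOINLINE
so the interpreter's hot paths stay small.  Note the shape of
vm_error_out_of_range: scm_to_size_t signals first if K is not even an
index-sized integer, and scm_out_of_range reports the plain range failure
otherwise.  A sketch of how a vector instruction might use the two new
helpers; the function body and accessor choice here are invented, only the
helpers come from the patch:

    static SCM
    example_vector_ref (SCM v, SCM idx)
    {
      if (!SCM_I_IS_VECTOR (v))
        vm_error_not_a_vector ("vector-ref", v);
      if (!SCM_I_INUMP (idx) || SCM_I_INUM (idx) < 0
          || SCM_I_INUM (idx) >= (scm_t_signed_bits) SCM_I_VECTOR_LENGTH (v))
        vm_error_out_of_range ("vector-ref", idx);
      return SCM_SIMPLE_VECTOR_REF (v, SCM_I_INUM (idx));
    }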
@@ -655,25 +767,6 @@ scm_i_call_with_current_continuation (SCM proc)
  * VM
  */
 
-/* Hard stack limit is 512M words: 2 gigabytes on 32-bit machines, 4 on
-   64-bit machines. */
-static const size_t hard_max_stack_size = 512 * 1024 * 1024;
-
-/* Initial stack size: 4 or 8 kB. */
-static const size_t initial_stack_size = 1024;
-
-/* Default soft stack limit is 1M words (4 or 8 megabytes). */
-static size_t default_max_stack_size = 1024 * 1024;
-
-static void
-initialize_default_stack_size (void)
-{
-  int size = scm_getenv_int ("GUILE_STACK_SIZE", (int) default_max_stack_size);
-  if (size >= initial_stack_size && (size_t) size < ((size_t) -1) / sizeof(SCM))
-    default_max_stack_size = size;
-}
-
-static void vm_expand_stack (struct scm_vm *vp) SCM_NOINLINE;
 #define VM_NAME vm_regular_engine
 #define VM_USE_HOOKS 0
 #define FUNC_NAME "vm-regular-engine"
@@ -711,13 +804,17 @@ allocate_stack (size_t size)
   ret = mmap (NULL, size, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
   if (ret == MAP_FAILED)
-    SCM_SYSERROR;
+    ret = NULL;
 #else
   ret = malloc (size);
-  if (!ret)
-    SCM_SYSERROR;
 #endif
 
+  if (!ret)
+    {
+      perror ("allocate_stack failed");
+      return NULL;
+    }
+
   return (SCM *) ret;
 }
 #undef FUNC_NAME
@@ -749,13 +846,16 @@ expand_stack (SCM *old_stack, size_t old_size, size_t new_size)
 
   new_stack = mremap (old_stack, old_size, new_size, MREMAP_MAYMOVE);
   if (new_stack == MAP_FAILED)
-    SCM_SYSERROR;
+    return NULL;
 
   return (SCM *) new_stack;
 #else
   SCM *new_stack;
 
   new_stack = allocate_stack (new_size);
+  if (!new_stack)
+    return NULL;
+
   memcpy (new_stack, old_stack, old_size * sizeof (SCM));
   free_stack (old_stack, old_size);
 
@@ -773,10 +873,15 @@ make_vm (void)
 
   vp = scm_gc_malloc (sizeof (struct scm_vm), "vm");
 
-  vp->stack_size = initial_stack_size;
+  vp->stack_size = page_size / sizeof (SCM);
   vp->stack_base = allocate_stack (vp->stack_size);
+  if (!vp->stack_base)
+    /* As in expand_stack, we don't have any way to throw an exception
+       if we can't allocate one measly page -- there's no stack to
+       handle it.  For now, abort. */
+    abort ();
   vp->stack_limit = vp->stack_base + vp->stack_size;
-  vp->max_stack_size = default_max_stack_size;
+  vp->overflow_handler_stack = SCM_EOL;
   vp->ip = NULL;
   vp->sp = vp->stack_base - 1;
   vp->fp = NULL;
@@ -789,26 +894,123 @@ make_vm (void)
 }
 #undef FUNC_NAME
 
+static void
+return_unused_stack_to_os (struct scm_vm *vp)
+{
+#if HAVE_SYS_MMAN_H
+  scm_t_uintptr start = (scm_t_uintptr) (vp->sp + 1);
+  scm_t_uintptr end = (scm_t_uintptr) vp->stack_limit;
+  /* The second condition is needed to protect against wrap-around. */
+  if (vp->sp_max_since_gc < vp->stack_limit && vp->sp < vp->sp_max_since_gc)
+    end = (scm_t_uintptr) (vp->sp_max_since_gc + 1);
+
+  start = ((start - 1U) | (page_size - 1U)) + 1U; /* round up */
+  end = ((end - 1U) | (page_size - 1U)) + 1U; /* round up */
+
+  /* Return these pages to the OS.  The next time they are paged in,
+     they will be zeroed. */
+  if (start < end)
+    {
+      int ret = 0;
+
+      do
+        ret = madvise ((void *) start, end - start, MADV_DONTNEED);
+      while (ret && errno == EAGAIN);
+
+      if (ret)
+        perror ("madvise failed");
+    }
+
+  vp->sp_max_since_gc = vp->sp;
+#endif
+}
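The round-up expression used twice above relies on page_size being a power
of two (scm_bootstrap_vm checks this at startup): or-ing with page_size - 1
sets all the low bits, and the surrounding -1/+1 keeps already-aligned
addresses in place.  As a standalone sketch with worked values:

    /* Round ADDR up to the next page boundary.  With page_size 0x1000:
       round_up_to_page (0x12345) => 0x13000
       round_up_to_page (0x13000) => 0x13000 (already aligned) */
    static scm_t_uintptr
    round_up_to_page (scm_t_uintptr addr)
    {
      return ((addr - 1U) | (page_size - 1U)) + 1U;
    }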
+
+#define DEAD_SLOT_MAP_CACHE_SIZE 32U
+struct dead_slot_map_cache_entry
+{
+  scm_t_uint32 *ip;
+  const scm_t_uint8 *map;
+};
+
+struct dead_slot_map_cache
+{
+  struct dead_slot_map_cache_entry entries[DEAD_SLOT_MAP_CACHE_SIZE];
+};
+
+static const scm_t_uint8 *
+find_dead_slot_map (scm_t_uint32 *ip, struct dead_slot_map_cache *cache)
+{
+  /* The lower two bits should be zero.  FIXME: Use a better hash
+     function; we don't expose scm_raw_hashq currently. */
+  size_t slot = (((scm_t_uintptr) ip) >> 2) % DEAD_SLOT_MAP_CACHE_SIZE;
+  const scm_t_uint8 *map;
+
+  if (cache->entries[slot].ip == ip)
+    map = cache->entries[slot].map;
+  else
+    {
+      map = scm_find_dead_slot_map_unlocked (ip);
+      cache->entries[slot].ip = ip;
+      cache->entries[slot].map = map;
+    }
+
+  return map;
+}
+
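The FIXME above is about hash quality: indexing the direct-mapped cache with
the low address bits means return addresses that sit close together collide.
One possible direction, not part of the patch: a multiplicative (Fibonacci)
hash that mixes high bits into the index.  The constant is 2^64 / phi,
truncated; a 32-bit build would use 0x9E3779B9 instead:

    static size_t
    hash_ip (scm_t_uint32 *ip)
    {
      scm_t_uintptr h = ((scm_t_uintptr) ip) >> 2;    /* low bits are zero */
      h *= (scm_t_uintptr) 0x9E3779B97F4A7C15ULL;     /* 2^64 / phi */
      return h >> (sizeof (scm_t_uintptr) * 8 - 5);   /* top 5 bits: 0..31 */
    }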
 /* Mark the VM stack region between its base and its current top. */
 struct GC_ms_entry *
 scm_i_vm_mark_stack (struct scm_vm *vp, struct GC_ms_entry *mark_stack_ptr,
                      struct GC_ms_entry *mark_stack_limit)
 {
   SCM *sp, *fp;
+  /* The first frame will be marked conservatively (without a dead
+     slot map).  This is because GC can happen at any point within the
+     hottest activation, due to multiple threads or per-instruction
+     hooks, and providing dead slot maps for all points in a program
+     would take a prohibitive amount of space. */
+  const scm_t_uint8 *dead_slots = NULL;
+  scm_t_uintptr upper = (scm_t_uintptr) GC_greatest_plausible_heap_addr;
+  scm_t_uintptr lower = (scm_t_uintptr) GC_least_plausible_heap_addr;
+  struct dead_slot_map_cache cache;
+
+  memset (&cache, 0, sizeof (cache));
 
   for (fp = vp->fp, sp = vp->sp; fp; fp = SCM_FRAME_DYNAMIC_LINK (fp))
     {
       for (; sp >= &SCM_FRAME_LOCAL (fp, 0); sp--)
         {
           SCM elt = *sp;
-          if (SCM_NIMP (elt))
-            mark_stack_ptr = GC_MARK_AND_PUSH ((GC_word *) elt,
-                                               mark_stack_ptr, mark_stack_limit,
-                                               NULL);
+          if (SCM_NIMP (elt)
+              && SCM_UNPACK (elt) >= lower && SCM_UNPACK (elt) <= upper)
+            {
+              if (dead_slots)
+                {
+                  size_t slot = sp - &SCM_FRAME_LOCAL (fp, 0);
+                  if (dead_slots[slot / 8U] & (1U << (slot % 8U)))
+                    {
+                      /* This value may become dead as a result of GC,
+                         so we can't just leave it on the stack. */
+                      *sp = SCM_UNSPECIFIED;
+                      continue;
+                    }
+                }
+
+              mark_stack_ptr = GC_mark_and_push ((void *) elt,
+                                                 mark_stack_ptr,
+                                                 mark_stack_limit,
+                                                 NULL);
+            }
         }
       sp = SCM_FRAME_PREVIOUS_SP (fp);
+      /* Inner frames may have a dead slots map for precise marking.
+         Note that there may be other reasons to not have a dead slots
+         map, e.g. if all of the frame's slots below the callee frame
+         are live. */
+      dead_slots = find_dead_slot_map (SCM_FRAME_RETURN_ADDRESS (fp), &cache);
     }
 
+  return_unused_stack_to_os (vp);
+
   return mark_stack_ptr;
 }
 
@@ -821,76 +1023,170 @@ scm_i_vm_free_stack (struct scm_vm *vp)
   vp->stack_size = 0;
 }
 
-static void
-vm_expand_stack (struct scm_vm *vp)
+struct vm_expand_stack_data
 {
-  scm_t_ptrdiff stack_size = vp->sp + 1 - vp->stack_base;
+  struct scm_vm *vp;
+  size_t stack_size;
+  SCM *new_sp;
+};
 
-  if (stack_size > hard_max_stack_size)
-    {
-      /* We have expanded the soft limit to the point that we reached a
-         hard limit.  There is nothing sensible to do. */
-      fprintf (stderr, "Hard stack size limit (%zu words) reached; aborting.\n",
-               hard_max_stack_size);
-      abort ();
-    }
+static void *
+vm_expand_stack_inner (void *data_ptr)
+{
+  struct vm_expand_stack_data *data = data_ptr;
 
-  if (stack_size > vp->stack_size)
+  struct scm_vm *vp = data->vp;
+  SCM *old_stack, *new_stack;
+  size_t new_size;
+  scm_t_ptrdiff reloc;
+
+  new_size = vp->stack_size;
+  while (new_size < data->stack_size)
+    new_size *= 2;
+  old_stack = vp->stack_base;
+
+  new_stack = expand_stack (vp->stack_base, vp->stack_size, new_size);
+  if (!new_stack)
+    return NULL;
+
+  vp->stack_base = new_stack;
+  vp->stack_size = new_size;
+  vp->stack_limit = vp->stack_base + new_size;
+  reloc = vp->stack_base - old_stack;
+
+  if (reloc)
     {
-      SCM *old_stack;
-      size_t new_size;
-      scm_t_ptrdiff reloc;
-
-      new_size = vp->stack_size;
-      while (new_size < stack_size)
-        new_size *= 2;
-      old_stack = vp->stack_base;
-      vp->stack_base = expand_stack (old_stack, vp->stack_size, new_size);
-      vp->stack_size = new_size;
-      vp->stack_limit = vp->stack_base + new_size;
-      reloc = vp->stack_base - old_stack;
-
-      if (reloc)
+      SCM *fp;
+      if (vp->fp)
+        vp->fp += reloc;
+      data->new_sp += reloc;
+      fp = vp->fp;
+      while (fp)
         {
-          SCM *fp;
-          vp->fp += reloc;
-          vp->sp += reloc;
-          fp = vp->fp;
-          while (fp)
+          SCM *next_fp = SCM_FRAME_DYNAMIC_LINK (fp);
+          if (next_fp)
             {
-              SCM *next_fp = SCM_FRAME_DYNAMIC_LINK (fp);
-              if (next_fp)
-                {
-                  next_fp += reloc;
-                  SCM_FRAME_SET_DYNAMIC_LINK (fp, next_fp);
-                }
-              fp = next_fp;
+              next_fp += reloc;
+              SCM_FRAME_SET_DYNAMIC_LINK (fp, next_fp);
             }
+          fp = next_fp;
         }
     }
 
-  if (stack_size >= vp->max_stack_size)
-    {
-      /* Expand the soft limit by 256K entries to give us space to
-         handle the error. */
-      vp->max_stack_size += 256 * 1024;
+  return new_stack;
+}
+
+static scm_t_ptrdiff
+current_overflow_size (struct scm_vm *vp)
+{
+  if (scm_is_pair (vp->overflow_handler_stack))
+    return scm_to_ptrdiff_t (scm_caar (vp->overflow_handler_stack));
+  return -1;
+}
+
+static int
+should_handle_stack_overflow (struct scm_vm *vp, scm_t_ptrdiff stack_size)
+{
+  scm_t_ptrdiff overflow_size = current_overflow_size (vp);
+  return overflow_size >= 0 && stack_size >= overflow_size;
+}
+
+static void
+reset_stack_limit (struct scm_vm *vp)
+{
+  if (should_handle_stack_overflow (vp, vp->stack_size))
+    vp->stack_limit = vp->stack_base + current_overflow_size (vp);
+  else
+    vp->stack_limit = vp->stack_base + vp->stack_size;
+}
+
+struct overflow_handler_data
+{
+  struct scm_vm *vp;
+  SCM overflow_handler_stack;
+};
+
+static void
+wind_overflow_handler (void *ptr)
+{
+  struct overflow_handler_data *data = ptr;
+
+  data->vp->overflow_handler_stack = data->overflow_handler_stack;
+
+  reset_stack_limit (data->vp);
+}
+
+static void
+unwind_overflow_handler (void *ptr)
+{
+  struct overflow_handler_data *data = ptr;
 
-      /* If it's still not big enough... it's quite improbable, but go
-         ahead and set to the full available stack size. */
-      if (vp->max_stack_size < stack_size)
-        vp->max_stack_size = vp->stack_size;
+  data->vp->overflow_handler_stack = scm_cdr (data->overflow_handler_stack);
+
+  reset_stack_limit (data->vp);
+}
+
+static void
+vm_expand_stack (struct scm_vm *vp, SCM *new_sp)
+{
+  scm_t_ptrdiff stack_size = new_sp + 1 - vp->stack_base;
 
-      /* But don't exceed the hard maximum. */
-      if (vp->max_stack_size > hard_max_stack_size)
-        vp->max_stack_size = hard_max_stack_size;
+  if (stack_size > vp->stack_size)
+    {
+      struct vm_expand_stack_data data;
 
-      /* Finally, reset the limit, to catch further overflows. */
-      vp->stack_limit = vp->stack_base + vp->max_stack_size;
+      data.vp = vp;
+      data.stack_size = stack_size;
+      data.new_sp = new_sp;
+
+      if (!GC_call_with_alloc_lock (vm_expand_stack_inner, &data))
+        /* Throw an unwind-only exception. */
+        scm_report_stack_overflow ();
 
-      vm_error ("VM: Stack overflow", SCM_UNDEFINED);
+      new_sp = data.new_sp;
     }
 
-  /* Otherwise continue, with the new enlarged stack. */
+  vp->sp_max_since_gc = vp->sp = new_sp;
+
+  if (should_handle_stack_overflow (vp, stack_size))
+    {
+      SCM more_stack, new_limit;
+
+      struct overflow_handler_data data;
+      data.vp = vp;
+      data.overflow_handler_stack = vp->overflow_handler_stack;
+
+      scm_dynwind_begin (SCM_F_DYNWIND_REWINDABLE);
+
+      scm_dynwind_rewind_handler (unwind_overflow_handler, &data,
+                                  SCM_F_WIND_EXPLICITLY);
+      scm_dynwind_unwind_handler (wind_overflow_handler, &data,
+                                  SCM_F_WIND_EXPLICITLY);
+
+      /* Call the overflow handler. */
+      more_stack = scm_call_0 (scm_cdar (data.overflow_handler_stack));
+
+      /* If the overflow handler returns, its return value should be an
+         integral number of words from the outer stack limit to transfer
+         to the inner limit. */
+      if (scm_to_ptrdiff_t (more_stack) <= 0)
+        scm_out_of_range (NULL, more_stack);
+      new_limit = scm_sum (scm_caar (data.overflow_handler_stack), more_stack);
+      if (scm_is_pair (scm_cdr (data.overflow_handler_stack)))
+        new_limit = scm_min (new_limit,
+                             scm_caadr (data.overflow_handler_stack));
+
+      /* Ensure the new limit is in range. */
+      scm_to_ptrdiff_t (new_limit);
+
+      /* Increase the limit that we will restore. */
+      scm_set_car_x (scm_car (data.overflow_handler_stack), new_limit);
+
+      scm_dynwind_end ();
+
+      /* Recurse */
+      return vm_expand_stack (vp, new_sp);
+    }
 }
 
 static struct scm_vm *
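For reference, the shape of vp->overflow_handler_stack as reconstructed from
the accessors above: scm_caar reads the innermost limit, scm_cdar the
innermost handler, and scm_caadr the next limit out.  A sketch only -- this
helper does not exist in the patch, it just names what the scm_acons call in
call-with-stack-overflow-handler below does:

    /* The handler stack is an alist, innermost entry first:
         ((limit-in-words . handler-thunk) ...)
       Limits are absolute word counts from vp->stack_base; the scm_min
       calls keep each inner limit at or below the next outer one. */
    static void
    push_overflow_handler (struct scm_vm *vp, SCM limit, SCM handler)
    {
      vp->overflow_handler_stack =
        scm_acons (limit, handler, vp->overflow_handler_stack);
    }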
@@ -924,13 +1220,10 @@ scm_call_n (SCM proc, SCM *argv, size_t nargs)
 
   SCM_CHECK_STACK;
 
-  /* Check that we have enough space: 3 words for the boot
-     continuation, 3 + nargs for the procedure application, and 3 for
-     setting up a new frame. */
-  base_frame_size = 3 + 3 + nargs + 3;
-  vp->sp += base_frame_size;
-  if (vp->sp >= vp->stack_limit)
-    vm_expand_stack (vp);
+  /* Check that we have enough space: 3 words for the boot continuation,
+     and 3 + nargs for the procedure application. */
+  base_frame_size = 3 + 3 + nargs;
+  vm_push_sp (vp, vp->sp + base_frame_size);
 
   base = vp->sp + 1 - base_frame_size;
 
   /* Since it's possible to receive the arguments on the stack itself,
@@ -951,14 +1244,16 @@ scm_call_n (SCM proc, SCM *argv, size_t nargs)
   base[4] = SCM_PACK (vp->ip); /* ra */
   base[5] = proc;
   vp->fp = &base[5];
-  vp->sp = &SCM_FRAME_LOCAL (vp->fp, nargs);
 
   {
     int resume = SCM_I_SETJMP (registers);
 
     if (SCM_UNLIKELY (resume))
-      /* Non-local return. */
-      vm_dispatch_abort_hook (vp);
+      {
+        scm_gc_after_nonlocal_exit ();
+
+        /* Non-local return. */
+        vm_dispatch_abort_hook (vp);
+      }
 
     return vm_engines[vp->engine](thread, vp, &registers, resume);
   }
 
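The six fixed words built by scm_call_n form two frames: slots 0..2 are the
boot continuation's frame and slots 3..5 the frame for PROC, with arguments
following.  A sketch of the layout; base[0] through base[3] and the argument
copy are not visible in these hunks, so those details are reconstructed and
may differ:

    /* base[0]   boot frame: dynamic link
       base[1]   boot frame: return address
       base[2]   boot frame: procedure (boot continuation)
       base[3]   call frame: dynamic link
       base[4]   call frame: return address  (SCM_PACK (vp->ip))
       base[5]   call frame: procedure       (proc; vp->fp = &base[5])
       base[6+i] argument i, for i in [0, nargs)  */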
@@ -1136,6 +1431,61 @@ SCM_DEFINE (scm_call_with_vm, "call-with-vm", 1, 0, 1,
 }
 #undef FUNC_NAME
 
+SCM_DEFINE (scm_call_with_stack_overflow_handler,
+            "call-with-stack-overflow-handler", 3, 0, 0,
+            (SCM limit, SCM thunk, SCM handler),
+            "Call @var{thunk} in an environment in which the stack limit has\n"
+            "been reduced to @var{limit} additional words.  If the limit is\n"
+            "reached, @var{handler} (a thunk) will be invoked in the dynamic\n"
+            "environment of the error.  For the extent of the call to\n"
+            "@var{handler}, the stack limit and handler are restored to the\n"
+            "values that were in place when\n"
+            "@code{call-with-stack-overflow-handler} was called.")
+#define FUNC_NAME s_scm_call_with_stack_overflow_handler
+{
+  struct scm_vm *vp;
+  scm_t_ptrdiff c_limit, stack_size;
+  struct overflow_handler_data data;
+  SCM new_limit, ret;
+
+  vp = scm_the_vm ();
+  stack_size = vp->sp - vp->stack_base;
+
+  c_limit = scm_to_ptrdiff_t (limit);
+  if (c_limit <= 0)
+    scm_out_of_range (FUNC_NAME, limit);
+
+  new_limit = scm_sum (scm_from_ptrdiff_t (stack_size), limit);
+  if (scm_is_pair (vp->overflow_handler_stack))
+    new_limit = scm_min (new_limit, scm_caar (vp->overflow_handler_stack));
+
+  /* Hacky check that the current stack depth plus the limit is within
+     the range of a ptrdiff_t. */
+  scm_to_ptrdiff_t (new_limit);
+
+  data.vp = vp;
+  data.overflow_handler_stack =
+    scm_acons (limit, handler, vp->overflow_handler_stack);
+
+  scm_dynwind_begin (SCM_F_DYNWIND_REWINDABLE);
+
+  scm_dynwind_rewind_handler (wind_overflow_handler, &data,
+                              SCM_F_WIND_EXPLICITLY);
+  scm_dynwind_unwind_handler (unwind_overflow_handler, &data,
+                              SCM_F_WIND_EXPLICITLY);
+
+  /* Reset vp->sp_max_since_gc so that the VM checks actually
+     trigger. */
+  return_unused_stack_to_os (vp);
+
+  ret = scm_call_0 (thunk);
+
+  scm_dynwind_end ();
+
+  return ret;
+}
+#undef FUNC_NAME
+
 
 /*
  * Initialize
@@ -1181,7 +1531,10 @@ scm_bootstrap_vm (void)
                             (scm_t_extension_init_func)scm_init_vm_builtins,
                             NULL);
 
-  initialize_default_stack_size ();
+  page_size = getpagesize ();
+  /* page_size should be a power of two. */
+  if (page_size & (page_size - 1))
+    abort ();
 
   sym_vm_run = scm_from_latin1_symbol ("vm-run");
   sym_vm_error = scm_from_latin1_symbol ("vm-error");
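To close, a sketch of driving the new API from C.  The thunk and handler
bodies are invented, and the module and variable names passed to
scm_c_private_ref are placeholders; a handler that returns (rather than
aborting to a prompt) must return a positive number of words to transfer to
the inner limit:

    static SCM
    deep_computation (void)               /* hypothetical thunk */
    {
      return scm_call_0 (scm_c_private_ref ("my mod", "deep-loop"));
    }

    static SCM
    on_overflow (void)                    /* hypothetical handler */
    {
      return scm_from_int (64 * 1024);    /* grant 64K more words */
    }

    static SCM
    run_bounded (void)
    {
      return scm_call_with_stack_overflow_handler
        (scm_from_int (1024 * 1024),      /* limit, in words */
         scm_c_make_gsubr ("deep-computation", 0, 0, 0, deep_computation),
         scm_c_make_gsubr ("on-overflow", 0, 0, 0, on_overflow));
    }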