-/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013, 2014 Free Software Foundation, Inc.
+/* Copyright (C) 2001, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
static SCM sym_regular;
static SCM sym_debug;
+/* The page size. */
+static size_t page_size;
+
/* The VM has a number of internal assertions that shouldn't normally be
necessary, but might be if you think you found a bug in the VM. */
/* #define VM_ENABLE_ASSERTIONS */
scm_puts_unlocked (">", port);
}
+/* Unpack the captured VM continuation CONT into FRAME.  The saved fp
+   and sp are exposed as offsets from the continuation's own stack base
+   (after applying the continuation's relocation), so the frame stays
+   meaningful regardless of where the copied stack lives in memory.
+   Always returns 1, signalling to the caller that a frame is
+   available.  */
+int
+scm_i_vm_cont_to_frame (SCM cont, struct scm_frame *frame)
+{
+ struct scm_vm_cont *data = SCM_VM_CONT_DATA (cont);
+
+ frame->stack_holder = data;
+ frame->fp_offset = (data->fp + data->reloc) - data->stack_base;
+ frame->sp_offset = (data->sp + data->reloc) - data->stack_base;
+ frame->ip = data->ra;
+
+ return 1;
+}
+
/* Ideally we could avoid copying the C stack if the continuation root
is inside VM code, and call/cc was invoked within that same call to
vm_run. That's currently not implemented. */
{
SCM *fp;
for (fp = vp->fp;
- SCM_FRAME_LOWER_ADDRESS (fp) > base;
+ SCM_FRAME_LOWER_ADDRESS (fp) >= base;
fp = SCM_FRAME_DYNAMIC_LINK (fp))
SCM_FRAME_SET_DYNAMIC_LINK (fp, SCM_FRAME_DYNAMIC_LINK (fp) + reloc);
}
static void vm_error (const char *msg, SCM arg) SCM_NORETURN;
static void vm_error_bad_instruction (scm_t_uint32 inst) SCM_NORETURN SCM_NOINLINE;
-static void vm_error_unbound (SCM proc, SCM sym) SCM_NORETURN SCM_NOINLINE;
-static void vm_error_unbound_fluid (SCM proc, SCM fluid) SCM_NORETURN SCM_NOINLINE;
+static void vm_error_unbound (SCM sym) SCM_NORETURN SCM_NOINLINE;
+static void vm_error_unbound_fluid (SCM fluid) SCM_NORETURN SCM_NOINLINE;
static void vm_error_not_a_variable (const char *func_name, SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_apply_to_non_list (SCM x) SCM_NORETURN SCM_NOINLINE;
static void vm_error_kwargs_length_not_even (SCM proc) SCM_NORETURN SCM_NOINLINE;
}
+/* Signal a misc-error reporting that SYM is an unbound variable.  The
+   PROC argument is dropped: the error's subr slot is now always
+   SCM_BOOL_F, since the raising procedure added no useful context.  */
static void
-vm_error_unbound (SCM proc, SCM sym)
+vm_error_unbound (SCM sym)
{
- scm_error_scm (scm_misc_error_key, proc,
+ scm_error_scm (scm_misc_error_key, SCM_BOOL_F,
scm_from_latin1_string ("Unbound variable: ~s"),
scm_list_1 (sym), SCM_BOOL_F);
}
+/* Signal a misc-error reporting that FLUID is unbound (has no value in
+   the current dynamic state).  As with vm_error_unbound, the PROC
+   argument is dropped and SCM_BOOL_F is passed as the subr.  */
static void
-vm_error_unbound_fluid (SCM proc, SCM fluid)
+vm_error_unbound_fluid (SCM fluid)
{
- scm_error_scm (scm_misc_error_key, proc,
+ scm_error_scm (scm_misc_error_key, SCM_BOOL_F,
scm_from_latin1_string ("Unbound fluid: ~s"),
scm_list_1 (fluid), SCM_BOOL_F);
}
* VM
*/
-/* The page size. */
-static size_t page_size;
-
-/* Initial stack size. Defaults to one page. */
-static size_t initial_stack_size;
-
-/* Default soft stack limit is 1M words (4 or 8 megabytes). */
-static size_t default_max_stack_size = 1024 * 1024;
-
-static void
-initialize_default_stack_size (void)
-{
- initial_stack_size = page_size / sizeof (SCM);
-
- {
- int size;
- size = scm_getenv_int ("GUILE_STACK_SIZE", (int) default_max_stack_size);
- if (size >= initial_stack_size
- && (size_t) size < ((size_t) -1) / sizeof(SCM))
- default_max_stack_size = size;
- }
-}
-
#define VM_NAME vm_regular_engine
#define VM_USE_HOOKS 0
#define FUNC_NAME "vm-regular-engine"
vp = scm_gc_malloc (sizeof (struct scm_vm), "vm");
- vp->stack_size = initial_stack_size;
+ vp->stack_size = page_size / sizeof (SCM);
vp->stack_base = allocate_stack (vp->stack_size);
if (!vp->stack_base)
/* As in expand_stack, we don't have any way to throw an exception
handle it. For now, abort. */
abort ();
vp->stack_limit = vp->stack_base + vp->stack_size;
- vp->max_stack_size = default_max_stack_size;
+ vp->overflow_handler_stack = SCM_EOL;
vp->ip = NULL;
vp->sp = vp->stack_base - 1;
vp->fp = NULL;
{
/* This value may become dead as a result of GC,
so we can't just leave it on the stack. */
- *sp = SCM_UNBOUND;
+ *sp = SCM_UNSPECIFIED;
continue;
}
}
return new_stack;
}
+/* Return the stack-size limit (in words) installed by the innermost
+   stack-overflow handler on VP, or -1 if no handler is in effect.  The
+   handler stack is an alist of (limit . handler) pairs, innermost
+   first, so the current limit is the car of the first pair.  */
+static scm_t_ptrdiff
+current_overflow_size (struct scm_vm *vp)
+{
+ if (scm_is_pair (vp->overflow_handler_stack))
+ return scm_to_ptrdiff_t (scm_caar (vp->overflow_handler_stack));
+ return -1;
+}
+
+/* Nonzero if a stack of STACK_SIZE words would trip the innermost
+   overflow handler installed on VP; zero when no handler is installed
+   (current_overflow_size returns -1) or the size is under the limit.  */
+static int
+should_handle_stack_overflow (struct scm_vm *vp, scm_t_ptrdiff stack_size)
+{
+ scm_t_ptrdiff overflow_size = current_overflow_size (vp);
+ return overflow_size >= 0 && stack_size >= overflow_size;
+}
+
+/* Recompute VP's stack_limit: clamp it to the innermost overflow
+   handler's limit when one applies, otherwise allow the full allocated
+   stack.  Called whenever the handler stack is wound or unwound so
+   that the VM's fast stack-overflow check observes the new limit.  */
+static void
+reset_stack_limit (struct scm_vm *vp)
+{
+ if (should_handle_stack_overflow (vp, vp->stack_size))
+ vp->stack_limit = vp->stack_base + current_overflow_size (vp);
+ else
+ vp->stack_limit = vp->stack_base + vp->stack_size;
+}
+
+/* Closure passed to the dynwind wind/unwind handlers below: the VM
+   whose handler stack is being manipulated, and the handler-stack
+   value to install (wind) or pop from (unwind).  */
+struct overflow_handler_data
+{
+ struct scm_vm *vp;
+ SCM overflow_handler_stack;
+};
+
+/* Dynwind enter handler: install the saved overflow-handler stack on
+   the VM and refresh the stack limit accordingly.  PTR is a
+   struct overflow_handler_data *.  */
+static void
+wind_overflow_handler (void *ptr)
+{
+ struct overflow_handler_data *data = ptr;
+
+ data->vp->overflow_handler_stack = data->overflow_handler_stack;
+
+ reset_stack_limit (data->vp);
+}
+
+/* Dynwind leave handler: pop the innermost (limit . handler) pair off
+   the saved overflow-handler stack and refresh the stack limit.  PTR
+   is a struct overflow_handler_data *.  */
+static void
+unwind_overflow_handler (void *ptr)
+{
+ struct overflow_handler_data *data = ptr;
+
+ data->vp->overflow_handler_stack = scm_cdr (data->overflow_handler_stack);
+
+ reset_stack_limit (data->vp);
+}
+
+/* Grow VP's stack so that NEW_SP is within bounds (copying under the
+   GC alloc lock), then, if the new stack size crosses the innermost
+   overflow handler's limit, call that handler inside a rewindable
+   dynwind that temporarily restores the outer limit.  If the handler
+   returns a positive word count, the inner limit is raised by that
+   amount (capped by the next outer handler's limit) and the check is
+   retried by tail-recursing.  NOTE(review): this is a diff fragment;
+   declarations of `data' and `stack_size' live in unshown context
+   lines of the same function.  */
static void
vm_expand_stack (struct scm_vm *vp, SCM *new_sp)
{
data.new_sp = new_sp;
if (!GC_call_with_alloc_lock (vm_expand_stack_inner, &data))
+ /* Throw an unwind-only exception. */
scm_report_stack_overflow ();
new_sp = data.new_sp;
vp->sp_max_since_gc = vp->sp = new_sp;
- if (stack_size >= vp->max_stack_size)
+ if (should_handle_stack_overflow (vp, stack_size))
{
- /* Expand the soft limit by 256K entries to give us space to
- handle the error. */
- vp->max_stack_size += 256 * 1024;
+ SCM more_stack, new_limit;
- /* If it's still not big enough... it's quite improbable, but go
- ahead and set to the full available stack size. */
- if (vp->max_stack_size < stack_size)
- vp->max_stack_size = vp->stack_size;
+ struct overflow_handler_data data;
+ data.vp = vp;
+ data.overflow_handler_stack = vp->overflow_handler_stack;
- /* Finally, reset the limit, to catch further overflows. */
- vp->stack_limit = vp->stack_base + vp->max_stack_size;
+ scm_dynwind_begin (SCM_F_DYNWIND_REWINDABLE);
- /* FIXME: Use scm_report_stack_overflow, but in a mode that allows
- pre-unwind handlers to run. */
- vm_error ("VM: Stack overflow", SCM_UNDEFINED);
- }
+ scm_dynwind_rewind_handler (unwind_overflow_handler, &data,
+ SCM_F_WIND_EXPLICITLY);
+ scm_dynwind_unwind_handler (wind_overflow_handler, &data,
+ SCM_F_WIND_EXPLICITLY);
+
+ /* Call the overflow handler. */
+ more_stack = scm_call_0 (scm_cdar (data.overflow_handler_stack));
+
+ /* If the overflow handler returns, its return value should be an
+ integral number of words from the outer stack limit to transfer
+ to the inner limit. */
+ if (scm_to_ptrdiff_t (more_stack) <= 0)
+ scm_out_of_range (NULL, more_stack);
+ new_limit = scm_sum (scm_caar (data.overflow_handler_stack), more_stack);
+ if (scm_is_pair (scm_cdr (data.overflow_handler_stack)))
+ new_limit = scm_min (new_limit,
+ scm_caadr (data.overflow_handler_stack));
+
+ /* Ensure the new limit is in range. */
+ scm_to_ptrdiff_t (new_limit);
+
+ /* Increase the limit that we will restore. */
+ scm_set_car_x (scm_car (data.overflow_handler_stack), new_limit);
- /* Otherwise continue, with the new enlarged stack. */
+ scm_dynwind_end ();
+
+ /* Recurse */
+ return vm_expand_stack (vp, new_sp);
+ }
}
static struct scm_vm *
int resume = SCM_I_SETJMP (registers);
if (SCM_UNLIKELY (resume))
- /* Non-local return. */
- vm_dispatch_abort_hook (vp);
+ {
+ scm_gc_after_nonlocal_exit ();
+ /* Non-local return. */
+ vm_dispatch_abort_hook (vp);
+ }
return vm_engines[vp->engine](thread, vp, ®isters, resume);
}
}
#undef FUNC_NAME
+/* Scheme-visible entry point: run THUNK with an additional stack limit
+   of LIMIT words, invoking HANDLER (a thunk) if the limit is hit.  The
+   (limit . handler) pair is pushed onto the VM's handler stack inside
+   a rewindable dynwind so that re-entry via continuations restores the
+   correct limit.  */
+SCM_DEFINE (scm_call_with_stack_overflow_handler,
+ "call-with-stack-overflow-handler", 3, 0, 0,
+ (SCM limit, SCM thunk, SCM handler),
+ "Call @var{thunk} in an environment in which the stack limit has\n"
+ "been reduced to @var{limit} additional words. If the limit is\n"
+ "reached, @var{handler} (a thunk) will be invoked in the dynamic\n"
+ "environment of the error. For the extent of the call to\n"
+ "@var{handler}, the stack limit and handler are restored to the\n"
+ "values that were in place when\n"
+ "@code{call-with-stack-overflow-handler} was called.")
+#define FUNC_NAME s_scm_call_with_stack_overflow_handler
+{
+ struct scm_vm *vp;
+ scm_t_ptrdiff c_limit, stack_size;
+ struct overflow_handler_data data;
+ SCM new_limit, ret;
+
+ vp = scm_the_vm ();
+ stack_size = vp->sp - vp->stack_base;
+
+ /* LIMIT must be a positive word count.  */
+ c_limit = scm_to_ptrdiff_t (limit);
+ if (c_limit <= 0)
+ scm_out_of_range (FUNC_NAME, limit);
+
+ /* The effective limit is relative to the current depth, clamped so it
+   never exceeds an enclosing handler's limit.  */
+ new_limit = scm_sum (scm_from_ptrdiff_t (stack_size), limit);
+ if (scm_is_pair (vp->overflow_handler_stack))
+ new_limit = scm_min (new_limit, scm_caar (vp->overflow_handler_stack));
+
+ /* Hacky check that the current stack depth plus the limit is within
+ the range of a ptrdiff_t. */
+ scm_to_ptrdiff_t (new_limit);
+
+ data.vp = vp;
+ data.overflow_handler_stack =
+ scm_acons (limit, handler, vp->overflow_handler_stack);
+
+ scm_dynwind_begin (SCM_F_DYNWIND_REWINDABLE);
+
+ scm_dynwind_rewind_handler (wind_overflow_handler, &data,
+ SCM_F_WIND_EXPLICITLY);
+ scm_dynwind_unwind_handler (unwind_overflow_handler, &data,
+ SCM_F_WIND_EXPLICITLY);
+
+ /* Reset vp->sp_max_since_gc so that the VM checks actually
+ trigger. */
+ return_unused_stack_to_os (vp);
+
+ ret = scm_call_0 (thunk);
+
+ scm_dynwind_end ();
+
+ return ret;
+}
+#undef FUNC_NAME
+
\f
/*
* Initialize
if (page_size & (page_size - 1))
abort ();
- initialize_default_stack_size ();
-
sym_vm_run = scm_from_latin1_symbol ("vm-run");
sym_vm_error = scm_from_latin1_symbol ("vm-error");
sym_keyword_argument_error = scm_from_latin1_symbol ("keyword-argument-error");