-/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
+/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002 Free Software Foundation, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* whether to permit this exception to apply to your modifications.
* If you do not wish that, delete this exception notice. */
-/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
- gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
/* #define DEBUGINFO */
#include <errno.h>
#include <string.h>
+#ifdef __ia64__
+#include <ucontext.h>
+extern unsigned long * __libc_ia64_register_backing_store_base;
+#endif
+
#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
\f
+#define CELL_P(x) (SCM_ITAG3 (x) == scm_tc3_cons)
+
unsigned int scm_gc_running_p = 0;
\f
#if (SCM_DEBUG_CELL_ACCESSES == 1)
-scm_bits_t scm_tc16_allocated;
-
-/* Set this to != 0 if every cell that is accessed shall be checked:
+/* Set this to != 0 if every cell that is accessed shall be checked:
*/
unsigned int scm_debug_cell_accesses_p = 1;
if (debug_cells_gc_interval)
{
static unsigned int counter = 0;
-
+
if (counter != 0)
{
--counter;
* INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
* trigger a GC.
*
- * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
- * reclaimed by a GC triggered by must_malloc. If less than this is
+ * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must
+ * be reclaimed by a GC triggered by a malloc. If less than this is
* reclaimed, the trigger threshold is raised. [I don't know what a
* good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
* work around a oscillation that caused almost constant GC.]
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
-# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
+# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_t_cell))
# else
-# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
+# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_t_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
-/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
+/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_t_cell * span)
aligned inner bounds for allocated storage */
#ifdef PROT386
# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
-# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
-# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
+# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_t_cell)*(span)-1L) & ((long)(p)+sizeof(scm_t_cell)*(span)-1L))
+# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_t_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
-#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
+#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_t_cell) - 1) & SCM_UNPACK (x)) == 0)
#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
/* scm_freelists
*/
-typedef struct scm_freelist_t {
+typedef struct scm_t_freelist {
/* collected cells */
SCM cells;
/* number of cells left to collect before cluster is full */
* belonging to this list.
*/
unsigned long heap_size;
-} scm_freelist_t;
+} scm_t_freelist;
SCM scm_freelist = SCM_EOL;
-scm_freelist_t scm_master_freelist = {
+scm_t_freelist scm_master_freelist = {
SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
-scm_freelist_t scm_master_freelist2 = {
+scm_t_freelist scm_master_freelist2 = {
SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
};
/* scm_mtrigger
- * is the number of bytes of must_malloc allocation needed to trigger gc.
+ * is the number of bytes of malloc allocation needed to trigger gc.
*/
unsigned long scm_mtrigger;
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
-typedef struct scm_heap_seg_data_t
+typedef struct scm_t_heap_seg_data
{
/* lower and upper bounds of the segment */
SCM_CELLPTR bounds[2];
/* address of the head-of-freelist pointer for this segment's cells.
All segments usually point to the same one, scm_freelist. */
- scm_freelist_t *freelist;
+ scm_t_freelist *freelist;
/* number of cells per object in this segment */
int span;
-} scm_heap_seg_data_t;
+} scm_t_heap_seg_data;
-static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_freelist_t *);
+static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);
typedef enum { return_on_error, abort_on_error } policy_on_error;
-static void alloc_some_heap (scm_freelist_t *, policy_on_error);
+static void alloc_some_heap (scm_t_freelist *, policy_on_error);
#define SCM_HEAP_SIZE \
#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
-#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
+#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))
/* mark space allocation */
-typedef struct scm_mark_space_t
+typedef struct scm_t_mark_space
{
- scm_c_bvec_limb_t *bvec_space;
- struct scm_mark_space_t *next;
-} scm_mark_space_t;
+ scm_t_c_bvec_limb *bvec_space;
+ struct scm_t_mark_space *next;
+} scm_t_mark_space;
-static scm_mark_space_t *current_mark_space;
-static scm_mark_space_t **mark_space_ptr;
+static scm_t_mark_space *current_mark_space;
+static scm_t_mark_space **mark_space_ptr;
static ptrdiff_t current_mark_space_offset;
-static scm_mark_space_t *mark_space_head;
+static scm_t_mark_space *mark_space_head;
-static scm_c_bvec_limb_t *
+static scm_t_c_bvec_limb *
get_bvec ()
#define FUNC_NAME "get_bvec"
{
- scm_c_bvec_limb_t *res;
+ scm_t_c_bvec_limb *res;
if (!current_mark_space)
{
- SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
+ SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
if (!current_mark_space)
SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
if (!(current_mark_space->bvec_space))
{
SCM_SYSCALL (current_mark_space->bvec_space =
- (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
+ (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
if (!(current_mark_space->bvec_space))
SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
static void
clear_mark_space ()
{
- scm_mark_space_t *ms;
+ scm_t_mark_space *ms;
for (ms = mark_space_head; ms; ms = ms->next)
memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
-/* Return the number of the heap segment containing CELL. */
-static long
-which_seg (SCM cell)
-{
- long i;
-
- for (i = 0; i < scm_n_heap_segs; i++)
- if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
- && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
- return i;
- fprintf (stderr, "which_seg: can't find segment containing cell %lux\n",
- (unsigned long) SCM_UNPACK (cell));
- abort ();
-}
-
+static long int heap_segment (SCM obj); /* forw decl: non-debugging func */
static void
-map_free_list (scm_freelist_t *master, SCM freelist)
+map_free_list (scm_t_freelist *master, SCM freelist)
{
long last_seg = -1, count = 0;
SCM f;
for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
{
- long this_seg = which_seg (f);
+ long int this_seg = heap_segment (f);
- if (this_seg != last_seg)
+ if (this_seg == -1)
+ {
+ fprintf (stderr,
+ "map_free_list: can't find segment containing cell %lux\n",
+ (unsigned long int) SCM_UNPACK (f));
+ abort ();
+ }
+ else if (this_seg != last_seg)
{
if (last_seg != -1)
fprintf (stderr, " %5ld %d-cells in segment %ld\n",
"@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
- long i;
+ size_t i;
+
fprintf (stderr, "%ld segments total (%d:%ld",
(long) scm_n_heap_segs,
scm_heap_table[0].span,
(long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));
- for (i = 1; i < scm_n_heap_segs; i++)
+
+ for (i = 1; i != scm_n_heap_segs; i++)
fprintf (stderr, ", %d:%ld",
scm_heap_table[i].span,
(long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
}
static void
-free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
+free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
{
SCM clusters;
long i = 0, len, n = 0;
}
#undef FUNC_NAME
-#endif
+#endif /* defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) */
#ifdef GUILE_DEBUG_FREELIST
}
#undef FUNC_NAME
-
-SCM
-scm_debug_newcell (void)
-{
- SCM new;
-
- scm_newcell_count++;
- if (scm_debug_check_freelist)
- {
- scm_check_freelist (scm_freelist);
- scm_gc();
- }
-
- /* The rest of this is supposed to be identical to the SCM_NEWCELL
- macro. */
- if (SCM_NULLP (scm_freelist))
- {
- new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
- SCM_GC_SET_ALLOCATED (new);
- }
- else
- {
- new = scm_freelist;
- scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
- SCM_GC_SET_ALLOCATED (new);
- }
-
- return new;
-}
-
-SCM
-scm_debug_newcell2 (void)
-{
- SCM new;
-
- scm_newcell2_count++;
- if (scm_debug_check_freelist)
- {
- scm_check_freelist (scm_freelist2);
- scm_gc ();
- }
-
- /* The rest of this is supposed to be identical to the SCM_NEWCELL
- macro. */
- if (SCM_NULLP (scm_freelist2))
- {
- new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
- SCM_GC_SET_ALLOCATED (new);
- }
- else
- {
- new = scm_freelist2;
- scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
- SCM_GC_SET_ALLOCATED (new);
- }
-
- return new;
-}
-
#endif /* GUILE_DEBUG_FREELIST */
\f
static unsigned long
-master_cells_allocated (scm_freelist_t *master)
+master_cells_allocated (scm_t_freelist *master)
{
/* the '- 1' below is to ignore the cluster spine cells. */
long objects = master->clusters_allocated * (master->cluster_size - 1);
local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
- answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
- scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
- scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
- scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
- scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
- scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
- scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
- scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
- scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
- scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
- scm_cons (sym_heap_segments, heap_segs),
- SCM_UNDEFINED);
+ answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
+ scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
+ scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
+ scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
+ scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
+ scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
+ scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
+ scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
+ scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
+ scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
+ scm_cons (sym_heap_segments, heap_segs),
+ SCM_UNDEFINED);
SCM_ALLOW_INTS;
return answer;
}
*/
static void
-adjust_min_yield (scm_freelist_t *freelist)
+adjust_min_yield (scm_t_freelist *freelist)
{
/* min yield is adjusted upwards so that next predicted total yield
* (allocated cells actually freed by GC) becomes
*/
SCM
-scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
+scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
{
SCM cell;
++scm_ints_disabled;
*/
void
-scm_alloc_cluster (scm_freelist_t *master)
+scm_alloc_cluster (scm_t_freelist *master)
{
SCM freelist, cell;
cell = scm_gc_for_newcell (master, &freelist);
#endif
-scm_c_hook_t scm_before_gc_c_hook;
-scm_c_hook_t scm_before_mark_c_hook;
-scm_c_hook_t scm_before_sweep_c_hook;
-scm_c_hook_t scm_after_sweep_c_hook;
-scm_c_hook_t scm_after_gc_c_hook;
-
+scm_t_c_hook scm_before_gc_c_hook;
+scm_t_c_hook scm_before_mark_c_hook;
+scm_t_c_hook scm_before_sweep_c_hook;
+scm_t_c_hook scm_after_sweep_c_hook;
+scm_t_c_hook scm_after_gc_c_hook;
+
+#ifdef __ia64__
+# define SCM_MARK_BACKING_STORE() do { \
+ ucontext_t ctx; \
+ SCM_STACKITEM * top, * bot; \
+ getcontext (&ctx); \
+ scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext, \
+ ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext) \
+ / sizeof (SCM_STACKITEM))); \
+ bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base; \
+ top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp; \
+ scm_mark_locations (bot, top - bot); } while (0)
+#else
+# define SCM_MARK_BACKING_STORE()
+#endif
void
scm_igc (const char *what)
/* During the critical section, only the current thread may run. */
SCM_CRITICAL_SECTION_START;
- /* fprintf (stderr, "gc: %s\n", what); */
-
if (!scm_stack_base || scm_block_gc)
{
--scm_gc_running_p;
++scm_gc_heap_lock;
- /* flush dead entries from the continuation stack */
- {
- long x;
- long bound;
- SCM * elts;
- elts = SCM_VELTS (scm_continuation_stack);
- bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
- x = SCM_INUM (scm_continuation_stack_ptr);
- while (x < bound)
- {
- elts[x] = SCM_BOOL_F;
- ++x;
- }
- }
-
scm_c_hook_run (&scm_before_mark_c_hook, 0);
clear_mark_space ();
scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
}
+ SCM_MARK_BACKING_STORE();
#else /* USE_THREADS */
/* mark the registered roots */
{
- long i;
+ size_t i;
for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) {
SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
- for (; ! SCM_NULLP (l); l = SCM_CDR (l)) {
+ for (; !SCM_NULLP (l); l = SCM_CDR (l)) {
SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
scm_gc_mark (*p);
}
{
register long i;
register SCM ptr;
- scm_bits_t cell_type;
+ scm_t_bits cell_type;
#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
/* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
-#endif
+#endif
ptr = p;
#ifdef MARK_DEPENDENCIES
return;
gc_mark_nimp:
-
+
#ifdef MARK_DEPENDENCIES
if (SCM_EQ_P (ptr, p))
return;
SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
/* In non-debug mode, do at least some cheap testing. */
- if (!SCM_CELLP (ptr))
+ if (!CELL_P (ptr))
SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif
#ifndef MARK_DEPENDENCIES
-
+
if (SCM_GCMARKP (ptr))
return;
-
+
SCM_SETGCMARK (ptr);
#endif
RECURSE (SCM_SETTER (ptr));
ptr = SCM_PROCEDURE (ptr);
goto_gc_mark_loop;
- case scm_tcs_cons_gloc:
+ case scm_tcs_struct:
{
- /* Dirk:FIXME:: The following code is super ugly: ptr may be a
- * struct or a gloc. If it is a gloc, the cell word #0 of ptr
- * is the address of a scm_tc16_variable smob. If it is a
- * struct, the cell word #0 of ptr is a pointer to a struct
- * vtable data region. (The fact that these are accessed in
- * the same way restricts the possibilites to change the data
- * layout of structs or heap cells.) To discriminate between
- * the two, it is guaranteed that the scm_vtable_index_vcell
- * element of the prospective vtable is always zero. For a
- * gloc, this location has the CDR of the variable smob, which
- * is guaranteed to be non-zero.
- */
- scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
- scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
- if (vtable_data [scm_vtable_index_vcell] != 0)
+ /* XXX - use less explicit code. */
+ scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_struct;
+ scm_t_bits * vtable_data = (scm_t_bits *) word0;
+ SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
+ long len = SCM_SYMBOL_LENGTH (layout);
+ char * fields_desc = SCM_SYMBOL_CHARS (layout);
+ scm_t_bits * struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);
+
+ if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
{
- /* ptr is a gloc */
- SCM gloc_car = SCM_PACK (word0);
- RECURSE (gloc_car);
- ptr = SCM_CDR (ptr);
- goto gc_mark_loop;
- }
- else
- {
- /* ptr is a struct */
- SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
- long len = SCM_SYMBOL_LENGTH (layout);
- char * fields_desc = SCM_SYMBOL_CHARS (layout);
- scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);
-
- if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
- {
- RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
- RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
- }
- if (len)
- {
- long x;
-
- for (x = 0; x < len - 2; x += 2, ++struct_data)
- if (fields_desc[x] == 'p')
- RECURSE (SCM_PACK (*struct_data));
- if (fields_desc[x] == 'p')
- {
- if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
- for (x = *struct_data++; x; --x, ++struct_data)
- RECURSE (SCM_PACK (*struct_data));
- else
- RECURSE (SCM_PACK (*struct_data));
- }
- }
- /* mark vtable */
- ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
- goto_gc_mark_loop;
+ RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
+ RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
+ }
+ if (len)
+ {
+ long x;
+
+ for (x = 0; x < len - 2; x += 2, ++struct_data)
+ if (fields_desc[x] == 'p')
+ RECURSE (SCM_PACK (*struct_data));
+ if (fields_desc[x] == 'p')
+ {
+ if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
+ for (x = *struct_data++; x; --x, ++struct_data)
+ RECURSE (SCM_PACK (*struct_data));
+ else
+ RECURSE (SCM_PACK (*struct_data));
+ }
}
+ /* mark vtable */
+ ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
+ goto_gc_mark_loop;
}
break;
case scm_tcs_closures:
case scm_tc7_string:
break;
- case scm_tc7_substring:
- ptr = SCM_CDR (ptr);
- goto_gc_mark_loop;
-
case scm_tc7_wvect:
- SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
+ SCM_SET_WVECT_GC_CHAIN (ptr, scm_weak_vectors);
scm_weak_vectors = ptr;
if (SCM_IS_WHVEC_ANY (ptr))
{
case scm_tc7_symbol:
ptr = SCM_PROP_SLOTS (ptr);
goto_gc_mark_loop;
+ case scm_tc7_variable:
+ ptr = SCM_CELL_OBJECT_1 (ptr);
+ goto_gc_mark_loop;
case scm_tcs_subrs:
break;
case scm_tc7_port:
switch (SCM_TYP16 (ptr))
{ /* should be faster than going through scm_smobs */
case scm_tc_free_cell:
- /* printf("found free_cell %X ", ptr); fflush(stdout); */
+ /* We have detected a free cell. This can happen if non-object data
+ * on the C stack points into guile's heap and is scanned during
+ * conservative marking. */
+ break;
case scm_tc16_big:
case scm_tc16_real:
case scm_tc16_complex:
#undef FNAME
-/* Mark a Region Conservatively
- */
-
-void
-scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
+/* Determine whether the given value does actually represent a cell in some
+ * heap segment. If this is the case, the number of the heap segment is
+ * returned. Otherwise, -1 is returned. Binary search is used in order to
+ * determine the heap segment that contains the cell.  */
+/* FIXME: To be used within scm_mark_locations and scm_cellp, this function
+ * should be an inline function.  */
+static long int
+heap_segment (SCM obj)
{
- unsigned long m;
-
- for (m = 0; m < n; ++m)
+ if (!CELL_P (obj))
+ return -1;
+ else
{
- SCM obj = * (SCM *) &x[m];
- if (SCM_CELLP (obj))
+ SCM_CELLPTR ptr = SCM2PTR (obj);
+ unsigned long int i = 0;
+ unsigned long int j = scm_n_heap_segs - 1;
+
+ if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
+ return -1;
+ else if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
+ return -1;
+ else
{
- SCM_CELLPTR ptr = SCM2PTR (obj);
- long i = 0;
- long j = scm_n_heap_segs - 1;
- if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
- && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
+ while (i < j)
{
- while (i <= j)
+ if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[1]))
+ {
+ break;
+ }
+ else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
+ {
+ i = j;
+ break;
+ }
+ else
{
- long seg_id;
- seg_id = -1;
- if ((i == j)
- || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
- seg_id = i;
- else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
- seg_id = j;
- else
+ unsigned long int k = (i + j) / 2;
+
+ if (k == i)
+ return -1;
+ else if (SCM_PTR_LT (ptr, scm_heap_table[k].bounds[1]))
{
- long k;
- k = (i + j) / 2;
- if (k == i)
- break;
- if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
- {
- j = k;
- ++i;
- if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
- continue;
- else
- break;
- }
- else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
- {
- i = k;
- --j;
- if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
- continue;
- else
- break;
- }
+ j = k;
+ ++i;
+ if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
+ return -1;
+ }
+ else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
+ {
+ i = k;
+ --j;
+ if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
+ return -1;
}
-
- if (SCM_GC_IN_CARD_HEADERP (ptr))
- break;
-
- if (scm_heap_table[seg_id].span == 1
- || DOUBLECELL_ALIGNED_P (obj))
- scm_gc_mark (obj);
-
- break;
}
}
+
+ if (!DOUBLECELL_ALIGNED_P (obj) && scm_heap_table[i].span == 2)
+ return -1;
+ else if (SCM_GC_IN_CARD_HEADERP (ptr))
+ return -1;
+ else
+ return i;
}
}
}
+/* Mark a region conservatively: treat each of the N words starting at
+ * X as a potential SCM reference and mark the object it designates,
+ * but only when heap_segment confirms that the word really points to
+ * a cell in some heap segment.  Used for scanning the C stack and
+ * saved register state, where arbitrary data may masquerade as
+ * heap pointers.  */
+void
+scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
+{
+  unsigned long m;
+
+  for (m = 0; m < n; ++m)
+    {
+      SCM obj = * (SCM *) &x[m];
+      /* heap_segment returns -1 for anything that is not a valid cell
+       * in some segment; such words are simply skipped.  */
+      long int segment = heap_segment (obj);
+      if (segment >= 0)
+	scm_gc_mark (obj);
+    }
+}
+
+
/* The function scm_cellp determines whether an SCM value can be regarded as a
- * pointer to a cell on the heap. Binary search is used in order to determine
- * the heap segment that contains the cell.
+ * pointer to a cell on the heap.
*/
int
scm_cellp (SCM value)
{
-  if (SCM_CELLP (value)) {
-    scm_cell * ptr = SCM2PTR (value);
-    unsigned long i = 0;
-    unsigned long j = scm_n_heap_segs - 1;
-
-    if (SCM_GC_IN_CARD_HEADERP (ptr))
-      return 0;
-
-    while (i < j) {
-      long k = (i + j) / 2;
-      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
-	j = k;
-      } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
-	i = k + 1;
-      }
-    }
-
-    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
-	&& SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
-	&& (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
-	&& !SCM_GC_IN_CARD_HEADERP (ptr)
-	)
-      return 1;
-    else
-      return 0;
-  } else
-    return 0;
+  /* All the heavy lifting (tag check, binary-search segment lookup,
+   * alignment and card-header checks) now lives in heap_segment; a
+   * non-negative result means VALUE designates a valid heap cell.  */
+  long int segment = heap_segment (value);
+  return (segment >= 0);
}
static void
-gc_sweep_freelist_start (scm_freelist_t *freelist)
+gc_sweep_freelist_start (scm_t_freelist *freelist)
{
freelist->cells = SCM_EOL;
freelist->left_to_collect = freelist->cluster_size;
}
static void
-gc_sweep_freelist_finish (scm_freelist_t *freelist)
+gc_sweep_freelist_finish (scm_t_freelist *freelist)
{
long collected;
*freelist->clustertail = freelist->cells;
#define NEXT_DATA_CELL(ptr, span) \
do { \
- scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
+ scm_t_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
(ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
: nxt__); \
{
register SCM_CELLPTR ptr;
register SCM nfreelist;
- register scm_freelist_t *freelist;
+ register scm_t_freelist *freelist;
register unsigned long m;
register int span;
- long i;
+ size_t i;
size_t seg_size;
m = 0;
switch SCM_TYP7 (scmptr)
{
- case scm_tcs_cons_gloc:
+ case scm_tcs_struct:
{
- /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
- * struct or a gloc. See the corresponding comment in
- * scm_gc_mark.
+ /* Structs need to be freed in a special order.
+ * This is handled by GC C hooks in struct.c.
*/
- scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
- - scm_tc3_cons_gloc);
- /* access as struct */
- scm_bits_t * vtable_data = (scm_bits_t *) word0;
- if (vtable_data[scm_vtable_index_vcell] == 0)
- {
- /* Structs need to be freed in a special order.
- * This is handled by GC C hooks in struct.c.
- */
- SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
- scm_structs_to_free = scmptr;
- continue;
- }
- /* fall through so that scmptr gets collected */
+ SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
+ scm_structs_to_free = scmptr;
}
- break;
+ continue;
case scm_tcs_cons_imcar:
case scm_tcs_cons_nimcar:
case scm_tcs_closures:
case scm_tc7_pws:
break;
case scm_tc7_wvect:
- m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
- scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
- break;
case scm_tc7_vector:
{
unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
if (length > 0)
{
- m += length * sizeof (scm_bits_t);
- scm_must_free (SCM_VECTOR_BASE (scmptr));
+ scm_gc_free (SCM_VECTOR_BASE (scmptr),
+ length * sizeof (scm_t_bits),
+ "vector");
}
break;
}
#ifdef CCLO
case scm_tc7_cclo:
- m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
- scm_must_free (SCM_CCLO_BASE (scmptr));
+ scm_gc_free (SCM_CCLO_BASE (scmptr),
+ SCM_CCLO_LENGTH (scmptr) * sizeof (SCM),
+ "compiled closure");
break;
#endif
#ifdef HAVE_ARRAYS
unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
if (length > 0)
{
- m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
- scm_must_free (SCM_BITVECTOR_BASE (scmptr));
+ scm_gc_free (SCM_BITVECTOR_BASE (scmptr),
+ (sizeof (long)
+ * ((length+SCM_LONG_BIT-1) / SCM_LONG_BIT)),
+ "vector");
}
}
break;
case scm_tc7_fvect:
case scm_tc7_dvect:
case scm_tc7_cvect:
- m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
- scm_must_free (SCM_UVECTOR_BASE (scmptr));
+ scm_gc_free (SCM_UVECTOR_BASE (scmptr),
+ (SCM_UVECTOR_LENGTH (scmptr)
+ * scm_uniform_element_size (scmptr)),
+ "vector");
break;
#endif
- case scm_tc7_substring:
- break;
case scm_tc7_string:
- m += SCM_STRING_LENGTH (scmptr) + 1;
- scm_must_free (SCM_STRING_CHARS (scmptr));
+ scm_gc_free (SCM_STRING_CHARS (scmptr),
+ SCM_STRING_LENGTH (scmptr) + 1, "string");
break;
case scm_tc7_symbol:
- m += SCM_SYMBOL_LENGTH (scmptr) + 1;
- scm_must_free (SCM_SYMBOL_CHARS (scmptr));
+ scm_gc_free (SCM_SYMBOL_CHARS (scmptr),
+ SCM_SYMBOL_LENGTH (scmptr) + 1, "symbol");
break;
+ case scm_tc7_variable:
+ break;
case scm_tcs_subrs:
/* the various "subrs" (primitives) are never freed */
continue;
if SCM_OPENP (scmptr)
{
int k = SCM_PTOBNUM (scmptr);
+ size_t mm;
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
if (!(k < scm_numptob))
SCM_MISC_ERROR ("undefined port type", SCM_EOL);
/* Yes, I really do mean scm_ptobs[k].free */
/* rather than ftobs[k].close. .close */
/* is for explicit CLOSE-PORT by user */
- m += (scm_ptobs[k].free) (scmptr);
+ mm = scm_ptobs[k].free (scmptr);
+
+ if (mm != 0)
+ {
+#if SCM_ENABLE_DEPRECATED == 1
+ scm_c_issue_deprecation_warning
+ ("Returning non-0 from a port free function is "
+ "deprecated. Use scm_gc_free et al instead.");
+ scm_c_issue_deprecation_warning_fmt
+ ("(You just returned non-0 while freeing a %s.)",
+ SCM_PTOBNAME (k));
+ m += mm;
+#else
+ abort ();
+#endif
+ }
+
SCM_SETSTREAM (scmptr, 0);
scm_remove_from_port_table (scmptr);
scm_gc_ports_collected++;
break;
#ifdef SCM_BIGDIG
case scm_tc16_big:
- m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
- scm_must_free (SCM_BDIGITS (scmptr));
+ scm_gc_free (SCM_BDIGITS (scmptr),
+ ((SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG
+ / SCM_CHAR_BIT)), "bignum");
break;
#endif /* def SCM_BIGDIG */
case scm_tc16_complex:
- m += sizeof (scm_complex_t);
- scm_must_free (SCM_COMPLEX_MEM (scmptr));
+ scm_gc_free (SCM_COMPLEX_MEM (scmptr), 2*sizeof (double),
+ "complex");
break;
default:
{
SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
if (scm_smobs[k].free)
- m += (scm_smobs[k].free) (scmptr);
+ {
+ size_t mm;
+ mm = scm_smobs[k].free (scmptr);
+ if (mm != 0)
+ {
+#if SCM_ENABLE_DEPRECATED == 1
+ scm_c_issue_deprecation_warning
+ ("Returning non-0 from a smob free function is "
+ "deprecated. Use scm_gc_free et al instead.");
+ scm_c_issue_deprecation_warning_fmt
+ ("(You just returned non-0 while freeing a %s.)",
+ SCM_SMOBNAME (k));
+ m += mm;
+#else
+ abort();
+#endif
+ }
+ }
break;
}
}
scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
scm_gc_yield -= scm_cells_allocated;
-
+
if (scm_mallocated < m)
- /* The byte count of allocated objects has underflowed. This is
- probably because you forgot to report the sizes of objects you
- have allocated, by calling scm_done_malloc or some such. When
- the GC freed them, it subtracted their size from
- scm_mallocated, which underflowed. */
- abort ();
+ {
+ /* The byte count of allocated objects has underflowed. This is
+ probably because you forgot to report the sizes of objects you
+ have allocated, by calling scm_done_malloc or some such. When
+ the GC freed them, it subtracted their size from
+ scm_mallocated, which underflowed. */
+ fprintf (stderr,
+ "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
+ "This is probably because the GC hasn't been correctly informed\n"
+ "about object sizes\n");
+ abort ();
+ }
scm_mallocated -= m;
scm_gc_malloc_collected = m;
\f
-/* {Front end to malloc}
- *
- * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
- * scm_done_free
- *
- * These functions provide services comparable to malloc, realloc, and
- * free. They should be used when allocating memory that will be under
- * control of the garbage collector, i.e., if the memory may be freed
- * during garbage collection.
+/* Function for non-cell memory management.
*/
-/* scm_must_malloc
- * Return newly malloced storage or throw an error.
- *
- * The parameter WHAT is a string for error reporting.
- * If the threshold scm_mtrigger will be passed by this
- * allocation, or if the first call to malloc fails,
- * garbage collect -- on the presumption that some objects
- * using malloced storage may be collected.
- *
- * The limit scm_mtrigger may be raised by this allocation.
- */
void *
-scm_must_malloc (size_t size, const char *what)
+scm_malloc (size_t size)
{
  void *ptr;
-  unsigned long nm = scm_mallocated + size;
-  if (nm < size)
-    /* The byte count of allocated objects has overflowed.  This is
-       probably because you forgot to report the correct size of freed
-       memory in some of your smob free methods. */
-    abort ();
+  /* Allocate SIZE bytes like malloc, but run a garbage collection and
+   * retry once before giving up; a zero-sized request yields NULL.
+   * On final failure scm_memory_error signals an error (so control is
+   * not expected to fall off the end of this function).  */
+  if (size == 0)
+    return NULL;
+
+  SCM_SYSCALL (ptr = malloc (size));
+  if (ptr)
+    return ptr;
+
+  /* A collection may release malloced storage held by dead objects,
+   * so a second attempt can succeed where the first did not.  */
+  scm_igc ("malloc");
+  SCM_SYSCALL (ptr = malloc (size));
+  if (ptr)
+    return ptr;
+
+  scm_memory_error ("malloc");
+}
+
+/* Like realloc, but retries once after a garbage collection; on final
+ * failure scm_memory_error signals an error instead of returning
+ * NULL.  MEM may be a pointer from scm_malloc/scm_realloc.  */
+void *
+scm_realloc (void *mem, size_t size)
+{
+  void *ptr;
-  if (nm <= scm_mtrigger)
+  SCM_SYSCALL (ptr = realloc (mem, size));
+  if (ptr)
+    return ptr;
+
+  scm_igc ("realloc");
+  SCM_SYSCALL (ptr = realloc (mem, size));
+  if (ptr)
+    return ptr;
+
+  scm_memory_error ("realloc");
+}
+
+/* Copy the first N bytes of STR into freshly scm_malloc'd storage and
+ * NUL-terminate the result.  The caller owns the returned string.  */
+char *
+scm_strndup (const char *str, size_t n)
+{
+  char *dst = scm_malloc (n+1);
+  memcpy (dst, str, n);
+  dst[n] = 0;
+  return dst;
+}
+
+/* Duplicate the NUL-terminated string STR using scm_malloc; thin
+ * wrapper around scm_strndup.  */
+char *
+scm_strdup (const char *str)
+{
+  return scm_strndup (str, strlen (str));
+}
+
+/* Record that SIZE bytes of malloced memory (MEM, described by WHAT
+ * for error reporting) are now under the garbage collector's byte
+ * accounting.  Crossing the scm_mtrigger threshold triggers a
+ * collection, after which the trigger is raised when too little was
+ * reclaimed.  */
+void
+scm_gc_register_collectable_memory (void *mem, size_t size, const char *what)
+{
+  scm_mallocated += size;
+
+  if (scm_mallocated > scm_mtrigger)
    {
-      SCM_SYSCALL (ptr = malloc (size));
-      if (NULL != ptr)
+      scm_igc (what);
+      /* Less than SCM_MTRIGGER_HYSTERESIS bytes were reclaimed by the
+       * collection: raise the trigger so we do not end up collecting
+       * on nearly every allocation.  */
+      if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
	{
-	  scm_mallocated = nm;
-#ifdef GUILE_DEBUG_MALLOC
-	  scm_malloc_register (ptr, what);
-#endif
-	  return ptr;
+	  if (scm_mallocated > scm_mtrigger)
+	    scm_mtrigger = scm_mallocated + scm_mallocated / 2;
+	  else
+	    scm_mtrigger += scm_mtrigger / 2;
	}
    }
+#ifdef GUILE_DEBUG_MALLOC
+  if (mem)
+    scm_malloc_register (mem, what);
+#endif
+}
- nm = scm_mallocated + size;
-
- if (nm < size)
- /* The byte count of allocated objects has overflowed. This is
- probably because you forgot to report the correct size of freed
- memory in some of your smob free methods. */
- abort ();
+/* Remove SIZE bytes (MEM, described by WHAT) from the collector's
+ * byte accounting; the memory itself is NOT freed here — see
+ * scm_gc_free for the combined operation.  */
+void
+scm_gc_unregister_collectable_memory (void *mem, size_t size, const char *what)
+{
+  scm_mallocated -= size;
-  SCM_SYSCALL (ptr = malloc (size));
-  if (NULL != ptr)
-    {
-      scm_mallocated = nm;
-      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
-	if (nm > scm_mtrigger)
-	  scm_mtrigger = nm + nm / 2;
-	else
-	  scm_mtrigger += scm_mtrigger / 2;
-      }
#ifdef GUILE_DEBUG_MALLOC
-      scm_malloc_register (ptr, what);
+  if (mem)
+    scm_malloc_unregister (mem);
#endif
+}
- return ptr;
- }
+void *
+scm_gc_malloc (size_t size, const char *what)
+{
+ /* XXX - The straightforward implementation below has the problem
+ that it might call the GC twice, once in scm_malloc and then
+ again in scm_gc_register_collectable_memory. We don't really
+ want the second GC since it will not find new garbage.
+ */
- scm_memory_error (what);
+ void *ptr = scm_malloc (size);
+ scm_gc_register_collectable_memory (ptr, size, what);
+ return ptr;
}
-
-/* scm_must_realloc
- * is similar to scm_must_malloc.
- */
void *
-scm_must_realloc (void *where,
- size_t old_size,
- size_t size,
- const char *what)
+scm_gc_realloc (void *mem, size_t old_size, size_t new_size, const char *what)
{
- void *ptr;
- unsigned long nm;
+ /* XXX - see scm_gc_malloc. */
- if (size <= old_size)
- return where;
+ void *ptr = scm_realloc (mem, new_size);
+ scm_gc_unregister_collectable_memory (mem, old_size, what);
+ scm_gc_register_collectable_memory (ptr, new_size, what);
+ return ptr;
+}
- nm = scm_mallocated + size - old_size;
+void
+scm_gc_free (void *mem, size_t size, const char *what)
+{
+ scm_gc_unregister_collectable_memory (mem, size, what);
+ free (mem);
+}
- if (nm < (size - old_size))
- /* The byte count of allocated objects has overflowed. This is
- probably because you forgot to report the correct size of freed
- memory in some of your smob free methods. */
- abort ();
+char *
+scm_gc_strndup (const char *str, size_t n, const char *what)
+{
+ char *dst = scm_gc_malloc (n+1, what);
+ memcpy (dst, str, n);
+ dst[n] = 0;
+ return dst;
+}
- if (nm <= scm_mtrigger)
- {
- SCM_SYSCALL (ptr = realloc (where, size));
- if (NULL != ptr)
- {
- scm_mallocated = nm;
-#ifdef GUILE_DEBUG_MALLOC
- scm_malloc_reregister (where, ptr, what);
-#endif
- return ptr;
- }
- }
+char *
+scm_gc_strdup (const char *str, const char *what)
+{
+ return scm_gc_strndup (str, strlen (str), what);
+}
- scm_igc (what);
+#if SCM_ENABLE_DEPRECATED == 1
- nm = scm_mallocated + size - old_size;
+/* {Deprecated front end to malloc}
+ *
+ * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
+ * scm_done_free
+ *
+ * These functions provide services comparable to malloc, realloc, and
+ * free. They should be used when allocating memory that will be under
+ * control of the garbage collector, i.e., if the memory may be freed
+ * during garbage collection.
+ *
+ * They are deprecated because they weren't really used the way
+ * outlined above, and making sure to return the right amount from
+ * smob free routines was sometimes difficult when dealing with nested
+ * data structures. We basically want everybody to review their code
+ * and use the more symmetrical scm_gc_malloc/scm_gc_free functions
+ * instead. In some cases, where scm_must_malloc has been used
+ * incorrectly (i.e. for non-GC-able memory), use scm_malloc/free.
+ */
- if (nm < (size - old_size))
- /* The byte count of allocated objects has overflowed. This is
- probably because you forgot to report the correct size of freed
- memory in some of your smob free methods. */
- abort ();
+void *
+scm_must_malloc (size_t size, const char *what)
+{
+ scm_c_issue_deprecation_warning
+ ("scm_must_malloc is deprecated. "
+ "Use scm_gc_malloc and scm_gc_free instead.");
- SCM_SYSCALL (ptr = realloc (where, size));
- if (NULL != ptr)
- {
- scm_mallocated = nm;
- if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
- if (nm > scm_mtrigger)
- scm_mtrigger = nm + nm / 2;
- else
- scm_mtrigger += scm_mtrigger / 2;
- }
-#ifdef GUILE_DEBUG_MALLOC
- scm_malloc_reregister (where, ptr, what);
-#endif
- return ptr;
- }
+ return scm_gc_malloc (size, what);
+}
- scm_memory_error (what);
+void *
+scm_must_realloc (void *where,
+ size_t old_size,
+ size_t size,
+ const char *what)
+{
+ scm_c_issue_deprecation_warning
+ ("scm_must_realloc is deprecated. "
+ "Use scm_gc_realloc and scm_gc_free instead.");
+
+ return scm_gc_realloc (where, old_size, size, what);
}
char *
scm_must_strndup (const char *str, size_t length)
{
- char * dst = scm_must_malloc (length + 1, "scm_must_strndup");
- memcpy (dst, str, length);
- dst[length] = 0;
- return dst;
+ scm_c_issue_deprecation_warning
+ ("scm_must_strndup is deprecated. "
+ "Use scm_gc_strndup and scm_gc_free instead.");
+
+ return scm_gc_strndup (str, length, "string");
}
char *
scm_must_strdup (const char *str)
{
- return scm_must_strndup (str, strlen (str));
+ scm_c_issue_deprecation_warning
+ ("scm_must_strdup is deprecated. "
+ "Use scm_gc_strdup and scm_gc_free instead.");
+
+ return scm_gc_strdup (str, "string");
}
void
scm_must_free (void *obj)
#define FUNC_NAME "scm_must_free"
{
+ scm_c_issue_deprecation_warning
+ ("scm_must_free is deprecated. "
+ "Use scm_gc_malloc and scm_gc_free instead.");
+
#ifdef GUILE_DEBUG_MALLOC
scm_malloc_unregister (obj);
#endif
#undef FUNC_NAME
-/* Announce that there has been some malloc done that will be freed
- * during gc. A typical use is for a smob that uses some malloced
- * memory but can not get it from scm_must_malloc (for whatever
- * reason). When a new object of this smob is created you call
- * scm_done_malloc with the size of the object. When your smob free
- * function is called, be sure to include this size in the return
- * value.
- *
- * If you can't actually free the memory in the smob free function,
- * for whatever reason (like reference counting), you still can (and
- * should) report the amount of memory freed when you actually free it.
- * Do it by calling scm_done_malloc with the _negated_ size. Clever,
- * eh? Or even better, call scm_done_free. */
-
void
scm_done_malloc (long size)
{
- if (size < 0) {
- if (scm_mallocated < size)
- /* The byte count of allocated objects has underflowed. This is
- probably because you forgot to report the sizes of objects you
- have allocated, by calling scm_done_malloc or some such. When
- the GC freed them, it subtracted their size from
- scm_mallocated, which underflowed. */
- abort ();
- } else {
- unsigned long nm = scm_mallocated + size;
- if (nm < size)
- /* The byte count of allocated objects has overflowed. This is
- probably because you forgot to report the correct size of freed
- memory in some of your smob free methods. */
- abort ();
- }
-
- scm_mallocated += size;
+ scm_c_issue_deprecation_warning
+ ("scm_done_malloc is deprecated. "
+ "Use scm_gc_register_collectable_memory instead.");
- if (scm_mallocated > scm_mtrigger)
- {
- scm_igc ("foreign mallocs");
- if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
- {
- if (scm_mallocated > scm_mtrigger)
- scm_mtrigger = scm_mallocated + scm_mallocated / 2;
- else
- scm_mtrigger += scm_mtrigger / 2;
- }
- }
+ scm_gc_register_collectable_memory (NULL, size, "foreign mallocs");
}
void
scm_done_free (long size)
{
- if (size >= 0) {
- if (scm_mallocated < size)
- /* The byte count of allocated objects has underflowed. This is
- probably because you forgot to report the sizes of objects you
- have allocated, by calling scm_done_malloc or some such. When
- the GC freed them, it subtracted their size from
- scm_mallocated, which underflowed. */
- abort ();
- } else {
- unsigned long nm = scm_mallocated + size;
- if (nm < size)
- /* The byte count of allocated objects has overflowed. This is
- probably because you forgot to report the correct size of freed
- memory in some of your smob free methods. */
- abort ();
- }
+ scm_c_issue_deprecation_warning
+ ("scm_done_free is deprecated. "
+ "Use scm_gc_unregister_collectable_memory instead.");
- scm_mallocated -= size;
+ scm_gc_unregister_collectable_memory (NULL, size, "foreign mallocs");
}
+#endif /* SCM_ENABLE_DEPRECATED == 1 */
\f
/* {Heap Segments}
*/
SCM_CELLPTR scm_heap_org;
-scm_heap_seg_data_t * scm_heap_table = 0;
+scm_t_heap_seg_data * scm_heap_table = 0;
static size_t heap_segment_table_size = 0;
size_t scm_n_heap_segs = 0;
} while (0)
static size_t
-init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_freelist_t *freelist)
+init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
{
register SCM_CELLPTR ptr;
SCM_CELLPTR seg_end;
- long new_seg_index;
+ size_t new_seg_index;
ptrdiff_t n_new_cells;
int span = freelist->span;
seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
/* Find the right place and insert the segment record.
- *
*/
- for (new_seg_index = 0;
- ( (new_seg_index < scm_n_heap_segs)
- && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
- new_seg_index++)
- ;
+ new_seg_index = 0;
+ while (new_seg_index < scm_n_heap_segs
+ && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org))
+ new_seg_index++;
{
int i;
NEXT_DATA_CELL (ptr, span);
while (ptr < seg_end)
{
- scm_cell *nxt = ptr;
- scm_cell *prv = NULL;
- scm_cell *last_card = NULL;
+ scm_t_cell *nxt = ptr;
+ scm_t_cell *prv = NULL;
+ scm_t_cell *last_card = NULL;
int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
NEXT_DATA_CELL(nxt, span);
while (n_data_cells--)
{
- scm_cell *card = SCM_GC_CELL_CARD (ptr);
+ scm_t_cell *card = SCM_GC_CELL_CARD (ptr);
SCM scmptr = PTR2SCM (ptr);
nxt = ptr;
NEXT_DATA_CELL (nxt, span);
/* sanity check */
{
- scm_cell *ref = seg_end;
+ scm_t_cell *ref = seg_end;
NEXT_DATA_CELL (ref, span);
if (ref != ptr)
/* [cmm] looks like the segment size doesn't divide cleanly by
}
static size_t
-round_to_cluster_size (scm_freelist_t *freelist, size_t len)
+round_to_cluster_size (scm_t_freelist *freelist, size_t len)
{
size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
}
static void
-alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
+alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
#define FUNC_NAME "alloc_some_heap"
{
SCM_CELLPTR ptr;
* init_heap_seg only if the allocation of the segment itself succeeds.
*/
size_t new_table_size = scm_n_heap_segs + 1;
- size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
- scm_heap_seg_data_t *new_heap_table;
+ size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
+ scm_t_heap_seg_data *new_heap_table;
- SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
+ SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
realloc ((char *)scm_heap_table, size)));
if (!new_heap_table)
{
#endif
if (len < min_cells)
len = min_cells + freelist->cluster_size;
- len *= sizeof (scm_cell);
+ len *= sizeof (scm_t_cell);
/* force new sampling */
freelist->collected = LONG_MAX;
}
* parameters. Therefore, you can be sure that the compiler will keep those
* scheme values alive (on the stack or in a register) up to the point where
* scm_remember_upto_here* is called. In other words, place the call to
- * scm_remember_upt_here* _behind_ the last code in your function, that
+ * scm_remember_upto_here* _behind_ the last code in your function, that
* depends on the scheme object to exist.
*
- * Example: We want to make sure, that the string object str does not get
- * garbage collected during the execution of 'some_function', because
- * otherwise the characters belonging to str would be freed and
+ * Example: We want to make sure that the string object str does not get
+ * garbage collected during the execution of 'some_function' in the code
+ * below, because otherwise the characters belonging to str would be freed and
* 'some_function' might access freed memory. To make sure that the compiler
* keeps str alive on the stack or in a register such that it is visible to
* the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
/* Empty. Protects any number of objects from garbage collection. */
}
-
-#if (SCM_DEBUG_DEPRECATED == 0)
-
-void
-scm_remember (SCM *ptr)
-{
- scm_c_issue_deprecation_warning ("`scm_remember' is deprecated. "
- "Use the `scm_remember_upto_here*' family of functions instead.");
-}
-
-SCM
-scm_protect_object (SCM obj)
-{
- scm_c_issue_deprecation_warning ("`scm_protect_object' is deprecated. "
- "Use `scm_gc_protect_object' instead.");
- return scm_gc_protect_object (obj);
-}
-
-SCM
-scm_unprotect_object (SCM obj)
-{
- scm_c_issue_deprecation_warning ("`scm_unprotect_object' is deprecated. "
- "Use `scm_gc_unprotect_object' instead.");
- return scm_gc_unprotect_object (obj);
-}
-
-#endif /* SCM_DEBUG_DEPRECATED == 0 */
-
/*
These crazy functions prevent garbage collection
of arguments after the first argument by
{
SCM handle;
SCM key = scm_long2num ((long) p);
-
+
/* This critical section barrier will be replaced by a mutex. */
SCM_REDEFER_INTS;
scm_gc_unregister_root (p);
}
-int terminating;
+int scm_i_terminating;
/* called on process termination. */
#ifdef HAVE_ATEXIT
#endif
#endif
{
- terminating = 1;
+ scm_i_terminating = 1;
scm_flush_all_ports ();
}
\f
static int
-make_initial_segment (size_t init_heap_size, scm_freelist_t *freelist)
+make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
{
size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);
\f
static void
-init_freelist (scm_freelist_t *freelist,
+init_freelist (scm_t_freelist *freelist,
int span,
long cluster_size,
int min_yield)
size_t init_heap_size_2;
size_t j;
-#if (SCM_DEBUG_CELL_ACCESSES == 1)
- scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
-#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
-
j = SCM_NUM_PROTECTS;
while (j)
scm_sys_protects[--j] = SCM_BOOL_F;
j = SCM_HEAP_SEG_SIZE;
scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
- scm_heap_table = ((scm_heap_seg_data_t *)
- scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
+ scm_heap_table = ((scm_t_heap_seg_data *)
+ scm_malloc (sizeof (scm_t_heap_seg_data) * 2));
heap_segment_table_size = 2;
mark_space_ptr = &mark_space_head;
scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
/* Initialise the list of ports. */
- scm_port_table = (scm_port_t **)
- malloc (sizeof (scm_port_t *) * scm_port_table_room);
+ scm_port_table = (scm_t_port **)
+ malloc (sizeof (scm_t_port *) * scm_port_table_room);
if (!scm_port_table)
return 1;
* the execution of the next gc. Then, guile would keep executing the
* after-gc-hook over and over again, and would never come to do other
* things.
- *
+ *
* To overcome this problem, if cell access debugging with additional
* garbage collections is enabled, the after-gc-hook is never run by the
* garbage collecter. When running guile with cell access debugging and the
return NULL;
}
+#if SCM_ENABLE_DEPRECATED == 1
+
+/* If an allocated cell is detected during garbage collection, this
+ * means that some code has just obtained the object but was preempted
+ * before the initialization of the object was completed. This means
+ * that some entries of the allocated cell may already contain SCM
+ * objects. Therefore, allocated cells are scanned conservatively.
+ */
+
+scm_t_bits scm_tc16_allocated;
+
+static SCM
+allocated_mark (SCM cell)
+{
+ unsigned long int cell_segment = heap_segment (cell);
+ unsigned int span = scm_heap_table[cell_segment].span;
+ unsigned int i;
+
+ for (i = 1; i != span * 2; ++i)
+ {
+ SCM obj = SCM_CELL_OBJECT (cell, i);
+ long int obj_segment = heap_segment (obj);
+ if (obj_segment >= 0)
+ scm_gc_mark (obj);
+ }
+ return SCM_BOOL_F;
+}
+
+SCM
+scm_deprecated_newcell (void)
+{
+ scm_c_issue_deprecation_warning
+ ("SCM_NEWCELL is deprecated. Use `scm_cell' instead.\n");
+
+ return scm_cell (scm_tc16_allocated, 0);
+}
+
+SCM
+scm_deprecated_newcell2 (void)
+{
+ scm_c_issue_deprecation_warning
+ ("SCM_NEWCELL2 is deprecated. Use `scm_double_cell' instead.\n");
+
+ return scm_double_cell (scm_tc16_allocated, 0, 0, 0);
+}
+
+#endif /* SCM_ENABLE_DEPRECATED == 1 */
void
scm_init_gc ()
{
SCM after_gc_thunk;
+#if SCM_ENABLE_DEPRECATED == 1
+ scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
+ scm_set_smob_mark (scm_tc16_allocated, allocated_mark);
+#endif
+
scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
scm_c_define ("after-gc-hook", scm_after_gc_hook);