-/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
+/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
/* #define DEBUGINFO */
+/* SECTION: This code is compiled once.
+ */
+
+#ifndef MARK_DEPENDENCIES
+
\f
#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
#include "libguile/_scm.h"
+#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
-#include "libguile/weaks.h"
-#include "libguile/guardians.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
+#include "libguile/weaks.h"
+#include "libguile/hashtab.h"
+#include "libguile/tags.h"
#include "libguile/validate.h"
+#include "libguile/deprecation.h"
#include "libguile/gc.h"
#ifdef GUILE_DEBUG_MALLOC
#endif
\f
+
+unsigned int scm_gc_running_p = 0;
+
+\f
+
+#if (SCM_DEBUG_CELL_ACCESSES == 1)
+
+scm_t_bits scm_tc16_allocated;
+
+/* Set this to != 0 if every cell that is accessed shall be checked:
+ */
+unsigned int scm_debug_cell_accesses_p = 1;
+
+/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
+ * the number of cell accesses after which a gc shall be called.
+ */
+static unsigned int debug_cells_gc_interval = 0;
+
+
+/* If an allocated cell is detected during garbage collection, this means that
+ * some code has just obtained the object but was preempted before the
+ * initialization of the object was completed.  This means that some entries
+ * of the allocated cell may already contain SCM objects. Therefore,
+ * allocated cells are scanned conservatively. */
+static SCM
+allocated_mark (SCM allocated)
+{
+ scm_gc_mark_cell_conservatively (allocated);
+ return SCM_BOOL_F;
+}
+
+
+/* Assert that the given object is a valid reference to a valid cell. This
+ * test involves to determine whether the object is a cell pointer, whether
+ * this pointer actually points into a heap segment and whether the cell
+ * pointed to is not a free cell. Further, additional garbage collections may
+ * get executed after a user defined number of cell accesses. This helps to
+ * find places in the C code where references are dropped for extremely short
+ * periods.
+ */
+void
+scm_assert_cell_valid (SCM cell)
+{
+ static unsigned int already_running = 0;
+
+ if (scm_debug_cell_accesses_p && !already_running)
+ {
+ already_running = 1; /* set to avoid recursion */
+
+ if (!scm_cellp (cell))
+ {
+ fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lux\n",
+ (unsigned long) SCM_UNPACK (cell));
+ abort ();
+ }
+ else if (!scm_gc_running_p)
+ {
+ /* Dirk::FIXME:: During garbage collection there occur references to
+	     free cells.  This is all right during conservative marking, but
+ should not happen otherwise (I think). The case of free cells
+ accessed during conservative marking is handled in function
+ scm_mark_locations. However, there still occur accesses to free
+ cells during gc. I don't understand why this happens. If it is
+ a bug and gets fixed, the following test should also work while
+ gc is running.
+ */
+ if (SCM_FREE_CELL_P (cell))
+ {
+ fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lux\n",
+ (unsigned long) SCM_UNPACK (cell));
+ abort ();
+ }
+
+ /* If desired, perform additional garbage collections after a user
+ * defined number of cell accesses.
+ */
+ if (debug_cells_gc_interval)
+ {
+ static unsigned int counter = 0;
+
+ if (counter != 0)
+ {
+ --counter;
+ }
+ else
+ {
+ counter = debug_cells_gc_interval;
+ scm_igc ("scm_assert_cell_valid");
+ }
+ }
+ }
+ already_running = 0; /* re-enable */
+ }
+}
+
+
+SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
+ (SCM flag),
+ "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
+ "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
+ "but no additional calls to garbage collection are issued.\n"
+ "If @var{flag} is a number, cell access checking is enabled,\n"
+ "with an additional garbage collection after the given\n"
+ "number of cell accesses.\n"
+ "This procedure only exists when the compile-time flag\n"
+ "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
+#define FUNC_NAME s_scm_set_debug_cell_accesses_x
+{
+ if (SCM_FALSEP (flag)) {
+ scm_debug_cell_accesses_p = 0;
+ } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
+ debug_cells_gc_interval = 0;
+ scm_debug_cell_accesses_p = 1;
+ } else if (SCM_INUMP (flag)) {
+ long int f = SCM_INUM (flag);
+ if (f <= 0) SCM_OUT_OF_RANGE (1, flag);
+ debug_cells_gc_interval = f;
+ scm_debug_cell_accesses_p = 1;
+ } else {
+ SCM_WRONG_TYPE_ARG (1, flag);
+ }
+ return SCM_UNSPECIFIED;
+}
+#undef FUNC_NAME
+
+#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
+
+\f
+
/* {heap tuning parameters}
*
* These are parameters for controlling memory allocation. The heap
*
* If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
* will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
- * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
+ * heap is needed. SCM_HEAP_SEG_SIZE must fit into type size_t. This code
* is in scm_init_storage() and alloc_some_heap() in sys.c
*
* If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
* large heaps, especially if code behaviour is varying its
* maximum consumption between different freelists.
*/
-#define SCM_INIT_HEAP_SIZE_1 (45000L * sizeof (scm_cell))
-#define SCM_CLUSTER_SIZE_1 2000L
-#define SCM_MIN_YIELD_1 40
-#define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
-#define SCM_CLUSTER_SIZE_2 1000L
+#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
+#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
+#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
+size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
+ / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
+int scm_default_min_yield_1 = 40;
+
+#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
+size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
+ / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
/* The following value may seem large, but note that if we get to GC at
* all, this means that we have a numerically intensive application
*/
-#define SCM_MIN_YIELD_2 40
+int scm_default_min_yield_2 = 40;
-#define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */
+size_t scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
-#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
+#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
-/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
- bounds for allocated storage */
+/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
+ aligned inner bounds for allocated storage */
#ifdef PROT386
/*in 386 protected mode we must only adjust the offset */
# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
-#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
-#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
-#ifdef GUILE_NEW_GC_SCHEME
-#define SCM_HEAP_SIZE \
- (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
-#else
-#define SCM_HEAP_SIZE (scm_freelist.heap_size + scm_freelist2.heap_size)
-#endif
-#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
+#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
+
+#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
+#define CLUSTER_SIZE_IN_BYTES(freelist) \
+ (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
\f
/* scm_freelists
*/
-typedef struct scm_freelist_t {
+typedef struct scm_t_freelist {
/* collected cells */
SCM cells;
-#ifdef GUILE_NEW_GC_SCHEME
/* number of cells left to collect before cluster is full */
unsigned int left_to_collect;
/* number of clusters which have been allocated */
SCM clusters;
SCM *clustertail;
/* this is the number of objects in each cluster, including the spine cell */
- int cluster_size;
+ unsigned int cluster_size;
/* indicates that we should grow heap instead of GC:ing
*/
int grow_heap_p;
/* defines min_yield as percent of total heap size
*/
int min_yield_fraction;
-#endif
/* number of cells per object on this list */
int span;
/* number of collected cells during last GC */
- long collected;
+ unsigned long collected;
/* number of collected cells during penultimate GC */
- long collected_1;
+ unsigned long collected_1;
/* total number of cells in heap segments
* belonging to this list.
*/
- long heap_size;
-} scm_freelist_t;
+ unsigned long heap_size;
+} scm_t_freelist;
-#ifdef GUILE_NEW_GC_SCHEME
SCM scm_freelist = SCM_EOL;
-scm_freelist_t scm_master_freelist = {
- SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
+scm_t_freelist scm_master_freelist = {
+ SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
-scm_freelist_t scm_master_freelist2 = {
- SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
+scm_t_freelist scm_master_freelist2 = {
+ SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
};
-#else
-scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
-scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
-#endif
/* scm_mtrigger
* is the number of bytes of must_malloc allocation needed to trigger gc.
*/
unsigned long scm_mtrigger;
-
/* scm_gc_heap_lock
* If set, don't expand the heap. Set only during gc, during which no allocation
* is supposed to take place anyway.
* Don't pause for collection if this is set -- just
* expand the heap.
*/
-
int scm_block_gc = 1;
-/* If fewer than MIN_GC_YIELD cells are recovered during a garbage
- * collection (GC) more space is allocated for the heap.
- */
-#define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
-
/* During collection, this accumulates objects holding
* weak references.
*/
SCM scm_weak_vectors;
+/* During collection, this accumulates structures which are to be freed.
+ */
+SCM scm_structs_to_free;
+
/* GC Statistics Keeping
*/
unsigned long scm_cells_allocated = 0;
-long scm_mallocated = 0;
+unsigned long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
-#ifdef GUILE_NEW_GC_SCHEME
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
-#endif
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
-unsigned long scm_gc_rt;
unsigned long scm_gc_time_taken = 0;
+static unsigned long t_before_gc;
+static unsigned long t_before_sweep;
+unsigned long scm_gc_mark_time_taken = 0;
+unsigned long scm_gc_sweep_time_taken = 0;
+unsigned long scm_gc_times = 0;
+unsigned long scm_gc_cells_swept = 0;
+double scm_gc_cells_marked_acc = 0.;
+double scm_gc_cells_swept_acc = 0.;
SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
+SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
+SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
+SCM_SYMBOL (sym_times, "gc-times");
+SCM_SYMBOL (sym_cells_marked, "cells-marked");
+SCM_SYMBOL (sym_cells_swept, "cells-swept");
-typedef struct scm_heap_seg_data_t
+typedef struct scm_t_heap_seg_data
{
/* lower and upper bounds of the segment */
SCM_CELLPTR bounds[2];
/* address of the head-of-freelist pointer for this segment's cells.
All segments usually point to the same one, scm_freelist. */
- scm_freelist_t *freelist;
+ scm_t_freelist *freelist;
- /* number of SCM words per object in this segment */
+ /* number of cells per object in this segment */
int span;
+} scm_t_heap_seg_data;
- /* If SEG_DATA->valid is non-zero, the conservative marking
- functions will apply SEG_DATA->valid to the purported pointer and
- SEG_DATA, and mark the object iff the function returns non-zero.
- At the moment, I don't think anyone uses this. */
- int (*valid) ();
-} scm_heap_seg_data_t;
+static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);
+typedef enum { return_on_error, abort_on_error } policy_on_error;
+static void alloc_some_heap (scm_t_freelist *, policy_on_error);
-static void scm_mark_weak_vector_spines (void);
-static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
-static void alloc_some_heap (scm_freelist_t *);
+#define SCM_HEAP_SIZE \
+ (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
+#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
-\f
-/* Debugging functions. */
+#define BVEC_GROW_SIZE 256
+#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
+#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))
-#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
+/* mark space allocation */
-/* Return the number of the heap segment containing CELL. */
-static int
-which_seg (SCM cell)
+typedef struct scm_t_mark_space
+{
+ scm_t_c_bvec_limb *bvec_space;
+ struct scm_t_mark_space *next;
+} scm_t_mark_space;
+
+static scm_t_mark_space *current_mark_space;
+static scm_t_mark_space **mark_space_ptr;
+static ptrdiff_t current_mark_space_offset;
+static scm_t_mark_space *mark_space_head;
+
+static scm_t_c_bvec_limb *
+get_bvec ()
+#define FUNC_NAME "get_bvec"
{
- int i;
+ scm_t_c_bvec_limb *res;
- for (i = 0; i < scm_n_heap_segs; i++)
- if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
- && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
- return i;
- fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
- SCM_UNPACK (cell));
- abort ();
-}
+ if (!current_mark_space)
+ {
+ SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
+ if (!current_mark_space)
+ SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
+ current_mark_space->bvec_space = NULL;
+ current_mark_space->next = NULL;
-#ifdef GUILE_NEW_GC_SCHEME
-static void
-map_free_list (scm_freelist_t *master, SCM freelist)
-{
- int last_seg = -1, count = 0;
- SCM f;
+ *mark_space_ptr = current_mark_space;
+ mark_space_ptr = &(current_mark_space->next);
+
+ return get_bvec ();
+ }
- for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
+ if (!(current_mark_space->bvec_space))
{
- int this_seg = which_seg (f);
+ SCM_SYSCALL (current_mark_space->bvec_space =
+ (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
+ if (!(current_mark_space->bvec_space))
+ SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
- if (this_seg != last_seg)
- {
- if (last_seg != -1)
- fprintf (stderr, " %5d %d-cells in segment %d\n",
- count, master->span, last_seg);
- last_seg = this_seg;
- count = 0;
- }
- count++;
+ current_mark_space_offset = 0;
+
+ return get_bvec ();
}
- if (last_seg != -1)
- fprintf (stderr, " %5d %d-cells in segment %d\n",
- count, master->span, last_seg);
+
+ if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
+ {
+ current_mark_space = NULL;
+
+ return get_bvec ();
+ }
+
+ res = current_mark_space->bvec_space + current_mark_space_offset;
+ current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
+
+ return res;
}
-#else
+#undef FUNC_NAME
+
+
+static void
+clear_mark_space ()
+{
+ scm_t_mark_space *ms;
+
+ for (ms = mark_space_head; ms; ms = ms->next)
+ memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
+}
+
+
+\f
+/* Debugging functions. */
+
+#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
+
static void
-map_free_list (scm_freelist_t *freelist)
+map_free_list (scm_t_freelist *master, SCM freelist)
 {
-  int last_seg = -1, count = 0;
+  long last_seg = -1, count = 0;
   SCM f;
-  for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f))
+  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
     {
-      int this_seg = which_seg (f);
+      long int this_seg = heap_segment (f);
-      if (this_seg != last_seg)
+      if (this_seg == -1)
+	{
+	  fprintf (stderr,
+		   "map_free_list: can't find segment containing cell %lux\n",
+		   (unsigned long int) SCM_UNPACK (f));
+	  abort ();
+	}
+      else if (this_seg != last_seg)
	{
	  if (last_seg != -1)
-	    fprintf (stderr, "  %5d %d-cells in segment %d\n",
-		     count, freelist->span, last_seg);
+	    fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
+		     (long) count, master->span, (long) last_seg);
	  last_seg = this_seg;
	  count = 0;
	}
      count++;
    }
  if (last_seg != -1)
-    fprintf (stderr, "  %5d %d-cells in segment %d\n",
-	     count, freelist->span, last_seg);
+    fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
+	     (long) count, master->span, (long) last_seg);
}
-#endif
SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
(),
- "Print debugging information about the free-list.\n"
- "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
+ "Print debugging information about the free-list.\n"
+ "@code{map-free-list} is only included in\n"
+ "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
- int i;
- fprintf (stderr, "%d segments total (%d:%d",
- scm_n_heap_segs,
+ size_t i;
+
+ fprintf (stderr, "%ld segments total (%d:%ld",
+ (long) scm_n_heap_segs,
scm_heap_table[0].span,
- scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
- for (i = 1; i < scm_n_heap_segs; i++)
- fprintf (stderr, ", %d:%d",
+ (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));
+
+ for (i = 1; i != scm_n_heap_segs; i++)
+ fprintf (stderr, ", %d:%ld",
scm_heap_table[i].span,
- scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
+ (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
fprintf (stderr, ")\n");
-#ifdef GUILE_NEW_GC_SCHEME
map_free_list (&scm_master_freelist, scm_freelist);
map_free_list (&scm_master_freelist2, scm_freelist2);
-#else
- map_free_list (&scm_freelist);
- map_free_list (&scm_freelist2);
-#endif
fflush (stderr);
return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
-#ifdef GUILE_NEW_GC_SCHEME
-static int last_cluster;
-static int last_size;
+static long last_cluster;
+static long last_size;
-static int
-free_list_length (char *title, int i, SCM freelist)
+static long
+free_list_length (char *title, long i, SCM freelist)
{
SCM ls;
- int n = 0;
- for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
- if (SCM_UNPACK_CAR (ls) == scm_tc_free_cell)
+ long n = 0;
+ for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
+ if (SCM_FREE_CELL_P (ls))
++n;
else
{
- fprintf (stderr, "bad cell in %s at position %d\n", title, n);
+ fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n);
abort ();
}
if (n != last_size)
if (i > 0)
{
if (last_cluster == i - 1)
- fprintf (stderr, "\t%d\n", last_size);
+ fprintf (stderr, "\t%ld\n", (long) last_size);
else
- fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
+ fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
}
if (i >= 0)
- fprintf (stderr, "%s %d", title, i);
+ fprintf (stderr, "%s %ld", title, (long) i);
else
- fprintf (stderr, "%s\t%d\n", title, n);
+ fprintf (stderr, "%s\t%ld\n", title, (long) n);
last_cluster = i;
last_size = n;
}
}
static void
-free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
+free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
{
SCM clusters;
- int i = 0, len, n = 0;
+ long i = 0, len, n = 0;
fprintf (stderr, "%s\n\n", title);
n += free_list_length ("free list", -1, freelist);
for (clusters = master->clusters;
n += len;
}
if (last_cluster == i - 1)
- fprintf (stderr, "\t%d\n", last_size);
+ fprintf (stderr, "\t%ld\n", (long) last_size);
else
- fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
- fprintf (stderr, "\ntotal %d objects\n\n", n);
+ fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
+ fprintf (stderr, "\ntotal %ld objects\n\n", (long) n);
}
SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
(),
- "Print debugging information about the free-list.\n"
- "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
+ "Print debugging information about the free-list.\n"
+ "@code{free-list-length} is only included in\n"
+ "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
-#endif
#endif
#ifdef GUILE_DEBUG_FREELIST
+/* Non-zero if freelist debugging is in effect. Set this via
+ `gc-set-debug-check-freelist!'. */
+static int scm_debug_check_freelist = 0;
+
/* Number of calls to SCM_NEWCELL since startup. */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;
/* Search freelist for anything that isn't marked as a free cell.
Abort if we find something. */
-#ifdef GUILE_NEW_GC_SCHEME
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
-  int i = 0;
+  long i = 0;

-  for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
-    if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
+  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
+    if (!SCM_FREE_CELL_P (f))
      {
-	fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
-		 scm_newcell_count, i);
-	fflush (stderr);
+	fprintf (stderr, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
+		 scm_newcell_count, (unsigned long) i);
	abort ();
      }
}
-#else
-static void
-scm_check_freelist (scm_freelist_t *freelist)
-{
- SCM f;
- int i = 0;
-
- for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
- if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
- {
- fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
- scm_newcell_count, i);
- fflush (stderr);
- abort ();
- }
-}
-#endif
-
-static int scm_debug_check_freelist = 0;
SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
(SCM flag),
- "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
- "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
- "compile-time flag was selected.\n")
+ "If @var{flag} is @code{#t}, check the freelist for consistency\n"
+ "on each cell allocation. This procedure only exists when the\n"
+ "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
+ /* [cmm] I did a double-take when I read this code the first time.
+ well, FWIW. */
SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
-#ifdef GUILE_NEW_GC_SCHEME
-
SCM
scm_debug_newcell (void)
{
/* The rest of this is supposed to be identical to the SCM_NEWCELL
macro. */
- if (SCM_IMP (scm_freelist))
- new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
+ if (SCM_NULLP (scm_freelist))
+ {
+ new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
+ SCM_GC_SET_ALLOCATED (new);
+ }
else
{
new = scm_freelist;
- scm_freelist = SCM_CDR (scm_freelist);
- SCM_SETCAR (new, scm_tc16_allocated);
+ scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
+ SCM_GC_SET_ALLOCATED (new);
}
return new;
/* The rest of this is supposed to be identical to the SCM_NEWCELL
macro. */
- if (SCM_IMP (scm_freelist2))
- new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
- else
- {
- new = scm_freelist2;
- scm_freelist2 = SCM_CDR (scm_freelist2);
- SCM_SETCAR (new, scm_tc16_allocated);
- }
-
- return new;
-}
-
-#else /* GUILE_NEW_GC_SCHEME */
-
-SCM
-scm_debug_newcell (void)
-{
- SCM new;
-
- scm_newcell_count++;
- if (scm_debug_check_freelist)
- {
- scm_check_freelist (&scm_freelist);
- scm_gc();
- }
-
- /* The rest of this is supposed to be identical to the SCM_NEWCELL
- macro. */
- if (SCM_IMP (scm_freelist.cells))
- new = scm_gc_for_newcell (&scm_freelist);
- else
+ if (SCM_NULLP (scm_freelist2))
{
- new = scm_freelist.cells;
- scm_freelist.cells = SCM_CDR (scm_freelist.cells);
- SCM_SETCAR (new, scm_tc16_allocated);
- ++scm_cells_allocated;
+ new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
+ SCM_GC_SET_ALLOCATED (new);
}
-
- return new;
-}
-
-SCM
-scm_debug_newcell2 (void)
-{
- SCM new;
-
- scm_newcell2_count++;
- if (scm_debug_check_freelist) {
- scm_check_freelist (&scm_freelist2);
- scm_gc();
- }
-
- /* The rest of this is supposed to be identical to the SCM_NEWCELL2
- macro. */
- if (SCM_IMP (scm_freelist2.cells))
- new = scm_gc_for_newcell (&scm_freelist2);
else
{
- new = scm_freelist2.cells;
- scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
- SCM_SETCAR (new, scm_tc16_allocated);
- scm_cells_allocated += 2;
+ new = scm_freelist2;
+ scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
+ SCM_GC_SET_ALLOCATED (new);
}
return new;
}
-#endif /* GUILE_NEW_GC_SCHEME */
#endif /* GUILE_DEBUG_FREELIST */
\f
-#ifdef GUILE_NEW_GC_SCHEME
static unsigned long
-master_cells_allocated (scm_freelist_t *master)
+master_cells_allocated (scm_t_freelist *master)
{
- int objects = master->clusters_allocated * (master->cluster_size - 1);
+ /* the '- 1' below is to ignore the cluster spine cells. */
+ long objects = master->clusters_allocated * (master->cluster_size - 1);
if (SCM_NULLP (master->clusters))
objects -= master->left_to_collect;
return master->span * objects;
static unsigned long
freelist_length (SCM freelist)
{
- int n;
- for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
+ long n;
+ for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
++n;
return n;
}
- scm_master_freelist.span * freelist_length (scm_freelist)
- scm_master_freelist2.span * freelist_length (scm_freelist2));
}
-#endif
/* {Scheme Interface to GC}
*/
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
(),
- "Returns an association list of statistics about Guile's current use of storage. ")
+ "Return an association list of statistics about Guile's current\n"
+ "use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
- int i;
- int n;
+ long i;
+ long n;
SCM heap_segs;
- long int local_scm_mtrigger;
- long int local_scm_mallocated;
- long int local_scm_heap_size;
- long int local_scm_cells_allocated;
- long int local_scm_gc_time_taken;
+ unsigned long int local_scm_mtrigger;
+ unsigned long int local_scm_mallocated;
+ unsigned long int local_scm_heap_size;
+ unsigned long int local_scm_cells_allocated;
+ unsigned long int local_scm_gc_time_taken;
+ unsigned long int local_scm_gc_times;
+ unsigned long int local_scm_gc_mark_time_taken;
+ unsigned long int local_scm_gc_sweep_time_taken;
+ double local_scm_gc_cells_swept;
+ double local_scm_gc_cells_marked;
SCM answer;
SCM_DEFER_INTS;
- scm_block_gc = 1;
+
+ ++scm_block_gc;
+
retry:
heap_segs = SCM_EOL;
n = scm_n_heap_segs;
heap_segs);
if (scm_n_heap_segs != n)
goto retry;
- scm_block_gc = 0;
+
+ --scm_block_gc;
/* Below, we cons to produce the resulting list. We want a snapshot of
* the heap situation before consing.
local_scm_mtrigger = scm_mtrigger;
local_scm_mallocated = scm_mallocated;
local_scm_heap_size = SCM_HEAP_SIZE;
-#ifdef GUILE_NEW_GC_SCHEME
local_scm_cells_allocated = compute_cells_allocated ();
-#else
- local_scm_cells_allocated = scm_cells_allocated;
-#endif
local_scm_gc_time_taken = scm_gc_time_taken;
-
- answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
- scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
- scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
- scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
- scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
- scm_cons (sym_heap_segments, heap_segs),
- SCM_UNDEFINED);
+ local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
+ local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
+ local_scm_gc_times = scm_gc_times;
+ local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
+ local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
+
+ answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
+ scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
+ scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
+ scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
+ scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
+ scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
+ scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
+ scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
+ scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
+ scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
+ scm_cons (sym_heap_segments, heap_segs),
+ SCM_UNDEFINED);
SCM_ALLOW_INTS;
return answer;
}
#undef FUNC_NAME
-void
-scm_gc_start (const char *what)
+static void
+gc_start_stats (const char *what SCM_UNUSED)
{
- scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
+ t_before_gc = scm_c_get_internal_run_time ();
+ scm_gc_cells_swept = 0;
scm_gc_cells_collected = 0;
-#ifdef GUILE_NEW_GC_SCHEME
scm_gc_yield_1 = scm_gc_yield;
scm_gc_yield = (scm_cells_allocated
+ master_cells_allocated (&scm_master_freelist)
+ master_cells_allocated (&scm_master_freelist2));
-#endif
scm_gc_malloc_collected = 0;
scm_gc_ports_collected = 0;
}
-void
-scm_gc_end ()
+
+static void
+gc_end_stats ()
{
- scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
- scm_gc_time_taken += scm_gc_rt;
- scm_system_async_mark (scm_gc_async);
+ unsigned long t = scm_c_get_internal_run_time ();
+ scm_gc_time_taken += (t - t_before_gc);
+ scm_gc_sweep_time_taken += (t - t_before_sweep);
+ ++scm_gc_times;
+
+ scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
+ scm_gc_cells_swept_acc += scm_gc_cells_swept;
}
/* {C Interface For When GC is Triggered}
*/
-#ifdef GUILE_NEW_GC_SCHEME
-
static void
-adjust_min_yield (scm_freelist_t *freelist)
+adjust_min_yield (scm_t_freelist *freelist)
{
/* min yield is adjusted upwards so that next predicted total yield
* (allocated cells actually freed by GC) becomes
if (freelist->min_yield_fraction)
{
/* Pick largest of last two yields. */
- int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
+ long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
- (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
- fprintf (stderr, " after GC = %d, delta = %d\n",
- scm_cells_allocated,
- delta);
+ fprintf (stderr, " after GC = %lu, delta = %ld\n",
+ (long) scm_cells_allocated,
+ (long) delta);
#endif
if (delta > 0)
freelist->min_yield += delta;
}
}
+
/* When we get POSIX threads support, the master will be global and
* common while the freelist will be individual for each thread.
*/
SCM
-scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
+scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
{
SCM cell;
++scm_ints_disabled;
{
if (SCM_NULLP (master->clusters))
{
- if (master->grow_heap_p)
+ if (master->grow_heap_p || scm_block_gc)
{
+ /* In order to reduce gc frequency, try to allocate a new heap
+ * segment first, even if gc might find some free cells. If we
+ * can't obtain a new heap segment, we will try gc later.
+ */
master->grow_heap_p = 0;
- alloc_some_heap (master);
+ alloc_some_heap (master, return_on_error);
}
- else
+ if (SCM_NULLP (master->clusters))
{
+ /* The heap was not grown, either because it wasn't scheduled to
+ * grow, or because there was not enough memory available. In
+ * both cases we have to try gc to get some free cells.
+ */
#ifdef DEBUGINFO
- fprintf (stderr, "allocated = %d, ",
- scm_cells_allocated
+ fprintf (stderr, "allocated = %lu, ",
+ (long) (scm_cells_allocated
+ master_cells_allocated (&scm_master_freelist)
- + master_cells_allocated (&scm_master_freelist2));
+ + master_cells_allocated (&scm_master_freelist2)));
#endif
scm_igc ("cells");
adjust_min_yield (master);
+ if (SCM_NULLP (master->clusters))
+ {
+ /* gc could not free any cells. Now, we _must_ allocate a
+ * new heap segment, because there is no other possibility
+ * to provide a new cell for the caller.
+ */
+ alloc_some_heap (master, abort_on_error);
+ }
}
}
cell = SCM_CAR (master->clusters);
++master->clusters_allocated;
}
while (SCM_NULLP (cell));
+
+#ifdef GUILE_DEBUG_FREELIST
+ scm_check_freelist (cell);
+#endif
+
--scm_ints_disabled;
- *freelist = SCM_CDR (cell);
- SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
+ *freelist = SCM_FREE_CELL_CDR (cell);
return cell;
}
+
#if 0
/* This is a support routine which can be used to reserve a cluster
* for some special use, such as debugging. It won't be useful until
*/
void
-scm_alloc_cluster (scm_freelist_t *master)
+scm_alloc_cluster (scm_t_freelist *master)
{
SCM freelist, cell;
cell = scm_gc_for_newcell (master, &freelist);
}
#endif
-#else /* GUILE_NEW_GC_SCHEME */
-
-void
-scm_gc_for_alloc (scm_freelist_t *freelist)
-{
- SCM_REDEFER_INTS;
- scm_igc ("cells");
-#ifdef GUILE_DEBUG_FREELIST
- fprintf (stderr, "Collected: %d, min_yield: %d\n",
- freelist->collected, MIN_GC_YIELD (freelist));
-#endif
- if ((freelist->collected < MIN_GC_YIELD (freelist))
- || SCM_IMP (freelist->cells))
- alloc_some_heap (freelist);
- SCM_REALLOW_INTS;
-}
-
-SCM
-scm_gc_for_newcell (scm_freelist_t *freelist)
-{
- SCM fl;
- scm_gc_for_alloc (freelist);
- fl = freelist->cells;
- freelist->cells = SCM_CDR (fl);
- SCM_SETCAR (fl, scm_tc16_allocated);
- return fl;
-}
+scm_t_c_hook scm_before_gc_c_hook;
+scm_t_c_hook scm_before_mark_c_hook;
+scm_t_c_hook scm_before_sweep_c_hook;
+scm_t_c_hook scm_after_sweep_c_hook;
+scm_t_c_hook scm_after_gc_c_hook;
-#endif /* GUILE_NEW_GC_SCHEME */
void
scm_igc (const char *what)
{
- int j;
+ long j;
+ ++scm_gc_running_p;
+ scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
fprintf (stderr,
SCM_NULLP (scm_freelist)
? "*"
: (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
-#ifdef USE_THREADS
/* During the critical section, only the current thread may run. */
- SCM_THREAD_CRITICAL_SECTION_START;
-#endif
+ SCM_CRITICAL_SECTION_START;
/* fprintf (stderr, "gc: %s\n", what); */
- scm_gc_start (what);
-
if (!scm_stack_base || scm_block_gc)
{
- scm_gc_end ();
+ --scm_gc_running_p;
return;
}
- if (scm_mallocated < 0)
- /* The byte count of allocated objects has underflowed. This is
- probably because you forgot to report the sizes of objects you
- have allocated, by calling scm_done_malloc or some such. When
- the GC freed them, it subtracted their size from
- scm_mallocated, which underflowed. */
- abort ();
+ gc_start_stats (what);
if (scm_gc_heap_lock)
/* We've invoked the collector while a GC is already in progress.
++scm_gc_heap_lock;
- scm_weak_vectors = SCM_EOL;
-
- scm_guardian_gc_init ();
-
- /* unprotect any struct types with no instances */
-#if 0
- {
- SCM type_list;
- SCM * pos;
-
- pos = &scm_type_obj_list;
- type_list = scm_type_obj_list;
- while (type_list != SCM_EOL)
- if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
- {
- pos = SCM_CDRLOC (type_list);
- type_list = SCM_CDR (type_list);
- }
- else
- {
- *pos = SCM_CDR (type_list);
- type_list = SCM_CDR (type_list);
- }
- }
-#endif
-
/* flush dead entries from the continuation stack */
{
- int x;
- int bound;
+ long x;
+ long bound;
SCM * elts;
elts = SCM_VELTS (scm_continuation_stack);
- bound = SCM_LENGTH (scm_continuation_stack);
+ bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
x = SCM_INUM (scm_continuation_stack_ptr);
while (x < bound)
{
}
}
+ scm_c_hook_run (&scm_before_mark_c_hook, 0);
+
+ clear_mark_space ();
+
#ifndef USE_THREADS
- /* Protect from the C stack. This must be the first marking
- * done because it provides information about what objects
- * are "in-use" by the C code. "in-use" objects are those
- * for which the values from SCM_LENGTH and SCM_CHARS must remain
- * usable. This requirement is stricter than a liveness
- * requirement -- in particular, it constrains the implementation
- * of scm_vector_set_length_x.
- */
+ /* Mark objects on the C stack. */
SCM_FLUSH_REGISTER_WINDOWS;
/* This assumes that all registers are saved into the jmp_buf */
setjmp (scm_save_regs_gc_mark);
scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
- ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
+ ( (size_t) (sizeof (SCM_STACKITEM) - 1 +
sizeof scm_save_regs_gc_mark)
/ sizeof (SCM_STACKITEM)));
{
- /* stack_len is long rather than scm_sizet in order to guarantee that
- &stack_len is long aligned */
+ unsigned long stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
-#ifdef nosve
- long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
-#else
- long stack_len = scm_stack_size (scm_stack_base);
-#endif
- scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
-#else
-#ifdef nosve
- long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
+ scm_mark_locations (scm_stack_base, stack_len);
#else
- long stack_len = scm_stack_size (scm_stack_base);
-#endif
- scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
+ scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
}
#endif /* USE_THREADS */
- /* FIXME: insert a phase to un-protect string-data preserved
- * in scm_vector_set_length_x.
- */
-
j = SCM_NUM_PROTECTS;
while (j--)
scm_gc_mark (scm_sys_protects[j]);
+ /* mark the registered roots */
+ {
+ size_t i;
+ for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) {
+ SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
+ for (; !SCM_NULLP (l); l = SCM_CDR (l)) {
+ SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
+ scm_gc_mark (*p);
+ }
+ }
+ }
+
/* FIXME: we should have a means to register C functions to be run
* in different phases of GC
*/
scm_gc_mark (scm_root->handle);
#endif
- scm_mark_weak_vector_spines ();
+ t_before_sweep = scm_c_get_internal_run_time ();
+ scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
- scm_guardian_zombify ();
+ scm_c_hook_run (&scm_before_sweep_c_hook, 0);
scm_gc_sweep ();
+ scm_c_hook_run (&scm_after_sweep_c_hook, 0);
+
--scm_gc_heap_lock;
- scm_gc_end ();
+ gc_end_stats ();
-#ifdef USE_THREADS
- SCM_THREAD_CRITICAL_SECTION_END;
-#endif
+ SCM_CRITICAL_SECTION_END;
+ scm_c_hook_run (&scm_after_gc_c_hook, 0);
+ --scm_gc_running_p;
}
\f
+
/* {Mark/Sweep}
*/
+#define MARK scm_gc_mark
+#define FNAME "scm_gc_mark"
+#endif /*!MARK_DEPENDENCIES*/
/* Mark an object precisely.
*/
void
-scm_gc_mark (SCM p)
+MARK (SCM p)
+#define FUNC_NAME FNAME
{
register long i;
register SCM ptr;
+ scm_t_bits cell_type;
+#ifndef MARK_DEPENDENCIES
+# define RECURSE scm_gc_mark
+#else
+ /* go through the usual marking, but not for self-cycles. */
+# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
+#endif
ptr = p;
+#ifdef MARK_DEPENDENCIES
+ goto gc_mark_loop_first_time;
+#endif
+
+/* A simple hack for debugging. Choose the second branch to get a
+ meaningful backtrace for crashes inside the GC.
+*/
+#if 1
+#define goto_gc_mark_loop goto gc_mark_loop
+#define goto_gc_mark_nimp goto gc_mark_nimp
+#else
+#define goto_gc_mark_loop RECURSE(ptr); return
+#define goto_gc_mark_nimp RECURSE(ptr); return
+#endif
+
gc_mark_loop:
if (SCM_IMP (ptr))
return;
gc_mark_nimp:
- if (SCM_NCELLP (ptr))
- scm_wta (ptr, "rogue pointer in heap", NULL);
+
+#ifdef MARK_DEPENDENCIES
+ if (SCM_EQ_P (ptr, p))
+ return;
+
+ scm_gc_mark (ptr);
+ return;
- switch (SCM_TYP7 (ptr))
+gc_mark_loop_first_time:
+#endif
+
+#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
+ /* We are in debug mode. Check the ptr exhaustively. */
+ if (!scm_cellp (ptr))
+ SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
+#else
+ /* In non-debug mode, do at least some cheap testing. */
+ if (!SCM_CELLP (ptr))
+ SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
+#endif
+
+#ifndef MARK_DEPENDENCIES
+
+ if (SCM_GCMARKP (ptr))
+ return;
+
+ SCM_SETGCMARK (ptr);
+
+#endif
+
+ cell_type = SCM_GC_CELL_TYPE (ptr);
+ switch (SCM_ITAG7 (cell_type))
{
case scm_tcs_cons_nimcar:
- if (SCM_GCMARKP (ptr))
- break;
- SCM_SETGCMARK (ptr);
- if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
+ if (SCM_IMP (SCM_CDR (ptr)))
{
ptr = SCM_CAR (ptr);
- goto gc_mark_nimp;
+ goto_gc_mark_nimp;
}
- scm_gc_mark (SCM_CAR (ptr));
- ptr = SCM_GCCDR (ptr);
- goto gc_mark_nimp;
+ RECURSE (SCM_CAR (ptr));
+ ptr = SCM_CDR (ptr);
+ goto_gc_mark_nimp;
case scm_tcs_cons_imcar:
- if (SCM_GCMARKP (ptr))
- break;
- SCM_SETGCMARK (ptr);
- ptr = SCM_GCCDR (ptr);
- goto gc_mark_loop;
+ ptr = SCM_CDR (ptr);
+ goto_gc_mark_loop;
case scm_tc7_pws:
- if (SCM_GCMARKP (ptr))
- break;
- SCM_SETGCMARK (ptr);
- scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
- ptr = SCM_GCCDR (ptr);
- goto gc_mark_loop;
+ RECURSE (SCM_SETTER (ptr));
+ ptr = SCM_PROCEDURE (ptr);
+ goto_gc_mark_loop;
case scm_tcs_cons_gloc:
- if (SCM_GCMARKP (ptr))
- break;
- SCM_SETGCMARK (ptr);
{
- /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
- * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
- * to a heap cell. If it is a struct, the cell word #0 of ptr is a
- * pointer to a struct vtable data region. The fact that these are
- * accessed in the same way restricts the possibilites to change the
- * data layout of structs or heap cells.
+ /* Dirk:FIXME:: The following code is super ugly: ptr may be a
+ * struct or a gloc. If it is a gloc, the cell word #0 of ptr
+ * is the address of a scm_tc16_variable smob. If it is a
+ * struct, the cell word #0 of ptr is a pointer to a struct
+ * vtable data region. (The fact that these are accessed in
+ * the same way restricts the possibilities to change the data
+ * layout of structs or heap cells.) To discriminate between
+ * the two, it is guaranteed that the scm_vtable_index_vcell
+ * element of the prospective vtable is always zero. For a
+ * gloc, this location has the CDR of the variable smob, which
+ * is guaranteed to be non-zero.
*/
- scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
- scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
- switch (vtable_data [scm_vtable_index_vcell])
+ scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
+ scm_t_bits * vtable_data = (scm_t_bits *) word0; /* access as struct */
+ if (vtable_data [scm_vtable_index_vcell] != 0)
{
- default:
- {
- /* ptr is a gloc */
- SCM gloc_car = SCM_PACK (word0);
- scm_gc_mark (gloc_car);
- ptr = SCM_GCCDR (ptr);
- goto gc_mark_loop;
- }
- case 1: /* ! */
- case 0: /* ! */
- {
- /* ptr is a struct */
- SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
- int len = SCM_LENGTH (layout);
- char * fields_desc = SCM_CHARS (layout);
- /* We're using SCM_GCCDR here like STRUCT_DATA, except
- that it removes the mark */
- scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));
-
- if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
- {
- scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
- scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
- }
- if (len)
- {
- int x;
-
- for (x = 0; x < len - 2; x += 2, ++struct_data)
- if (fields_desc[x] == 'p')
- scm_gc_mark (SCM_PACK (*struct_data));
- if (fields_desc[x] == 'p')
- {
- if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
- for (x = *struct_data; x; --x)
- scm_gc_mark (SCM_PACK (*++struct_data));
- else
- scm_gc_mark (SCM_PACK (*struct_data));
- }
- }
- if (vtable_data [scm_vtable_index_vcell] == 0)
- {
- vtable_data [scm_vtable_index_vcell] = 1;
- ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
- goto gc_mark_loop;
- }
- }
+ /* ptr is a gloc */
+ SCM gloc_car = SCM_PACK (word0);
+ RECURSE (gloc_car);
+ ptr = SCM_CDR (ptr);
+ goto gc_mark_loop;
+ }
+ else
+ {
+ /* ptr is a struct */
+ SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
+ long len = SCM_SYMBOL_LENGTH (layout);
+ char * fields_desc = SCM_SYMBOL_CHARS (layout);
+ scm_t_bits * struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);
+
+ if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
+ {
+ RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
+ RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
+ }
+ if (len)
+ {
+ long x;
+
+ for (x = 0; x < len - 2; x += 2, ++struct_data)
+ if (fields_desc[x] == 'p')
+ RECURSE (SCM_PACK (*struct_data));
+ if (fields_desc[x] == 'p')
+ {
+ if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
+ for (x = *struct_data++; x; --x, ++struct_data)
+ RECURSE (SCM_PACK (*struct_data));
+ else
+ RECURSE (SCM_PACK (*struct_data));
+ }
+ }
+ /* mark vtable */
+ ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
+ goto_gc_mark_loop;
}
}
break;
case scm_tcs_closures:
- if (SCM_GCMARKP (ptr))
- break;
- SCM_SETGCMARK (ptr);
- if (SCM_IMP (SCM_CDR (ptr)))
+ if (SCM_IMP (SCM_ENV (ptr)))
{
ptr = SCM_CLOSCAR (ptr);
- goto gc_mark_nimp;
+ goto_gc_mark_nimp;
}
- scm_gc_mark (SCM_CLOSCAR (ptr));
- ptr = SCM_GCCDR (ptr);
- goto gc_mark_nimp;
+ RECURSE (SCM_CLOSCAR (ptr));
+ ptr = SCM_ENV (ptr);
+ goto_gc_mark_nimp;
case scm_tc7_vector:
- case scm_tc7_lvector:
-#ifdef CCLO
- case scm_tc7_cclo:
-#endif
- if (SCM_GC8MARKP (ptr))
- break;
- SCM_SETGC8MARK (ptr);
- i = SCM_LENGTH (ptr);
+ i = SCM_VECTOR_LENGTH (ptr);
if (i == 0)
break;
while (--i > 0)
if (SCM_NIMP (SCM_VELTS (ptr)[i]))
- scm_gc_mark (SCM_VELTS (ptr)[i]);
+ RECURSE (SCM_VELTS (ptr)[i]);
ptr = SCM_VELTS (ptr)[0];
- goto gc_mark_loop;
- case scm_tc7_contin:
- if SCM_GC8MARKP
- (ptr) break;
- SCM_SETGC8MARK (ptr);
- if (SCM_VELTS (ptr))
- scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
- (scm_sizet)
- (SCM_LENGTH (ptr) +
- (sizeof (SCM_STACKITEM) + -1 +
- sizeof (scm_contregs)) /
- sizeof (SCM_STACKITEM)));
- break;
+ goto_gc_mark_loop;
+#ifdef CCLO
+ case scm_tc7_cclo:
+ {
+ size_t i = SCM_CCLO_LENGTH (ptr);
+ size_t j;
+ for (j = 1; j != i; ++j)
+ {
+ SCM obj = SCM_CCLO_REF (ptr, j);
+ if (!SCM_IMP (obj))
+ RECURSE (obj);
+ }
+ ptr = SCM_CCLO_REF (ptr, 0);
+ goto_gc_mark_loop;
+ }
+#endif
#ifdef HAVE_ARRAYS
case scm_tc7_bvect:
case scm_tc7_byvect:
#endif
#endif
case scm_tc7_string:
- SCM_SETGC8MARK (ptr);
break;
case scm_tc7_substring:
- if (SCM_GC8MARKP(ptr))
- break;
- SCM_SETGC8MARK (ptr);
ptr = SCM_CDR (ptr);
- goto gc_mark_loop;
+ goto_gc_mark_loop;
case scm_tc7_wvect:
- if (SCM_GC8MARKP(ptr))
- break;
- SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
+ SCM_SET_WVECT_GC_CHAIN (ptr, scm_weak_vectors);
scm_weak_vectors = ptr;
- SCM_SETGC8MARK (ptr);
if (SCM_IS_WHVEC_ANY (ptr))
{
- int x;
- int len;
+ long x;
+ long len;
int weak_keys;
int weak_values;
- len = SCM_LENGTH (ptr);
+ len = SCM_VECTOR_LENGTH (ptr);
weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
* won't prematurely drop table entries.
*/
if (!weak_keys)
- scm_gc_mark (SCM_CAR (kvpair));
+ RECURSE (SCM_CAR (kvpair));
if (!weak_values)
- scm_gc_mark (SCM_GCCDR (kvpair));
+ RECURSE (SCM_CDR (kvpair));
alist = next_alist;
}
if (SCM_NIMP (alist))
- scm_gc_mark (alist);
+ RECURSE (alist);
}
}
break;
- case scm_tc7_msymbol:
- if (SCM_GC8MARKP(ptr))
- break;
- SCM_SETGC8MARK (ptr);
- scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
- ptr = SCM_SYMBOL_PROPS (ptr);
- goto gc_mark_loop;
- case scm_tc7_ssymbol:
- if (SCM_GC8MARKP(ptr))
- break;
- SCM_SETGC8MARK (ptr);
- break;
+ case scm_tc7_symbol:
+ ptr = SCM_PROP_SLOTS (ptr);
+ goto_gc_mark_loop;
case scm_tcs_subrs:
break;
case scm_tc7_port:
i = SCM_PTOBNUM (ptr);
+#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
if (!(i < scm_numptob))
- goto def;
- if (SCM_GC8MARKP (ptr))
- break;
- SCM_SETGC8MARK (ptr);
+ SCM_MISC_ERROR ("undefined port type", SCM_EOL);
+#endif
if (SCM_PTAB_ENTRY(ptr))
- scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
+ RECURSE (SCM_FILENAME (ptr));
if (scm_ptobs[i].mark)
{
ptr = (scm_ptobs[i].mark) (ptr);
- goto gc_mark_loop;
+ goto_gc_mark_loop;
}
else
return;
break;
case scm_tc7_smob:
- if (SCM_GC8MARKP (ptr))
- break;
- SCM_SETGC8MARK (ptr);
- switch (SCM_GCTYP16 (ptr))
+ switch (SCM_TYP16 (ptr))
{ /* should be faster than going through scm_smobs */
case scm_tc_free_cell:
- /* printf("found free_cell %X ", ptr); fflush(stdout); */
- case scm_tc16_allocated:
+ /* We have detected a free cell. This can happen if non-object data
+ * on the C stack points into guile's heap and is scanned during
+ * conservative marking. */
+#if (SCM_DEBUG_CELL_ACCESSES == 0)
+ /* If cell debugging is disabled, there is a second situation in
+ * which a free cell can be encountered, namely if with preemptive
+ * threading one thread has just obtained a fresh cell and was
+ * preempted before the cell initialization was completed. In this
+ * case, some entries of the cell may already contain objects.
+ * Thus, if cell debugging is disabled, free cells are scanned
+ * conservatively. */
+ scm_gc_mark_cell_conservatively (ptr);
+#else /* SCM_DEBUG_CELL_ACCESSES == 1 */
+ /* With cell debugging enabled, a freshly obtained but not fully
+ * initialized cell is guaranteed to be of type scm_tc16_allocated.
+ * Thus, no conservative scanning for free cells is necessary, but
+ * instead cells of type scm_tc16_allocated have to be scanned
+ * conservatively. This is done in the mark function of the
+ * scm_tc16_allocated smob type. */
+#endif
+ break;
case scm_tc16_big:
case scm_tc16_real:
case scm_tc16_complex:
break;
default:
i = SCM_SMOBNUM (ptr);
+#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
if (!(i < scm_numsmob))
- goto def;
+ SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
+#endif
if (scm_smobs[i].mark)
{
ptr = (scm_smobs[i].mark) (ptr);
- goto gc_mark_loop;
+ goto_gc_mark_loop;
}
else
return;
}
break;
default:
- def:scm_wta (ptr, "unknown type in ", "gc_mark");
+ SCM_MISC_ERROR ("unknown type", SCM_EOL);
}
+#undef RECURSE
}
+#undef FUNC_NAME
+#ifndef MARK_DEPENDENCIES
-/* Mark a Region Conservatively
- */
-
-void
-scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
-{
- register long m = n;
- register int i, j;
- register SCM_CELLPTR ptr;
-
- while (0 <= --m)
- if (SCM_CELLP (* (SCM *) &x[m]))
- {
- ptr = SCM2PTR (* (SCM *) &x[m]);
- i = 0;
- j = scm_n_heap_segs - 1;
- if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
- && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
- {
- while (i <= j)
- {
- int seg_id;
- seg_id = -1;
- if ( (i == j)
- || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
- seg_id = i;
- else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
- seg_id = j;
- else
- {
- int k;
- k = (i + j) / 2;
- if (k == i)
- break;
- if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
- {
- j = k;
- ++i;
- if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
- continue;
- else
- break;
- }
- else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
- {
- i = k;
- --j;
- if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
- continue;
- else
- break;
- }
- }
- if (!scm_heap_table[seg_id].valid
- || scm_heap_table[seg_id].valid (ptr,
- &scm_heap_table[seg_id]))
- if (scm_heap_table[seg_id].span == 1
- || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
- scm_gc_mark (* (SCM *) &x[m]);
- break;
- }
-
- }
- }
-}
-
-
-/* The following is a C predicate which determines if an SCM value can be
- regarded as a pointer to a cell on the heap. The code is duplicated
- from scm_mark_locations. */
-
+#undef MARK
+#undef FNAME
-int
-scm_cellp (SCM value)
+/* And here we define `scm_gc_mark_dependencies', by including this
+ * same file in itself.
+ */
+#define MARK scm_gc_mark_dependencies
+#define FNAME "scm_gc_mark_dependencies"
+#define MARK_DEPENDENCIES
+#include "gc.c"
+#undef MARK_DEPENDENCIES
+#undef MARK
+#undef FNAME
+
+
+/* Determine whether the given value actually represents a cell in some
+ * heap segment. If this is the case, the number of the heap segment is
+ * returned. Otherwise, -1 is returned. Binary search is used in order to
+ * determine the heap segment that contains the cell. */
+/* FIXME: To be used within scm_gc_mark_cell_conservatively,
+ * scm_mark_locations and scm_cellp this function should be an inline
+ * function. */
+static long int
+heap_segment (SCM obj)
{
- register int i, j;
- register SCM_CELLPTR ptr;
-
- if (SCM_CELLP (value))
+ if (!SCM_CELLP (obj))
+ return -1;
+ else
{
- ptr = SCM2PTR (value);
- i = 0;
- j = scm_n_heap_segs - 1;
- if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
- && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
+ SCM_CELLPTR ptr = SCM2PTR (obj);
+ unsigned long int i = 0;
+ unsigned long int j = scm_n_heap_segs - 1;
+
+ if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
+ return -1;
+ else if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
+ return -1;
+ else
{
- while (i <= j)
+ while (i < j)
{
- int seg_id;
- seg_id = -1;
- if ( (i == j)
- || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
- seg_id = i;
+ if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[1]))
+ {
+ break;
+ }
else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
- seg_id = j;
+ {
+ i = j;
+ break;
+ }
else
{
- int k;
- k = (i + j) / 2;
+ unsigned long int k = (i + j) / 2;
+
if (k == i)
- break;
- if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
+ return -1;
+ else if (SCM_PTR_LT (ptr, scm_heap_table[k].bounds[1]))
{
j = k;
++i;
- if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
- continue;
- else
- break;
+ if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
+ return -1;
}
else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
{
i = k;
--j;
- if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
- continue;
- else
- break;
+ if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
+ return -1;
}
}
- if (!scm_heap_table[seg_id].valid
- || scm_heap_table[seg_id].valid (ptr,
- &scm_heap_table[seg_id]))
- if (scm_heap_table[seg_id].span == 1
- || SCM_DOUBLE_CELLP (value))
- scm_gc_mark (value);
- break;
}
+ if (!DOUBLECELL_ALIGNED_P (obj) && scm_heap_table[i].span == 2)
+ return -1;
+ else if (SCM_GC_IN_CARD_HEADERP (ptr))
+ return -1;
+ else
+ return i;
}
}
- return 0;
}
-static void
-scm_mark_weak_vector_spines ()
+/* Mark the entries of a cell conservatively. The given cell is known to be
+ * on the heap. Still we have to determine its heap segment in order to
+ * figure out whether it is a single or a double cell. Then, each of the
+ * cell's elements is checked and potentially marked. */
+void
+scm_gc_mark_cell_conservatively (SCM cell)
{
- SCM w;
+ unsigned long int cell_segment = heap_segment (cell);
+ unsigned int span = scm_heap_table[cell_segment].span;
+ unsigned int i;
- for (w = scm_weak_vectors; !SCM_NULLP (w); w = SCM_WVECT_GC_CHAIN (w))
+ for (i = 1; i != span * 2; ++i)
{
- if (SCM_IS_WHVEC_ANY (w))
- {
- SCM *ptr;
- SCM obj;
- int j;
- int n;
-
- obj = w;
- ptr = SCM_VELTS (w);
- n = SCM_LENGTH (w);
- for (j = 0; j < n; ++j)
- {
- SCM alist;
+ SCM obj = SCM_CELL_OBJECT (cell, i);
+ long int obj_segment = heap_segment (obj);
+ if (obj_segment >= 0)
+ scm_gc_mark (obj);
+ }
+}
- alist = ptr[j];
- while ( SCM_CONSP (alist)
- && !SCM_GCMARKP (alist)
- && SCM_CONSP (SCM_CAR (alist)))
- {
- SCM_SETGCMARK (alist);
- SCM_SETGCMARK (SCM_CAR (alist));
- alist = SCM_GCCDR (alist);
- }
- }
- }
+
+/* Mark a region conservatively */
+void
+scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
+{
+ unsigned long m;
+
+ for (m = 0; m < n; ++m)
+ {
+ SCM obj = * (SCM *) &x[m];
+ long int segment = heap_segment (obj);
+ if (segment >= 0)
+ scm_gc_mark (obj);
}
}
-#ifdef GUILE_NEW_GC_SCHEME
+/* The function scm_cellp determines whether an SCM value can be regarded as a
+ * pointer to a cell on the heap.
+ */
+int
+scm_cellp (SCM value)
+{
+ long int segment = heap_segment (value);
+ return (segment >= 0);
+}
+
+
static void
-gc_sweep_freelist_start (scm_freelist_t *freelist)
+gc_sweep_freelist_start (scm_t_freelist *freelist)
{
freelist->cells = SCM_EOL;
freelist->left_to_collect = freelist->cluster_size;
}
static void
-gc_sweep_freelist_finish (scm_freelist_t *freelist)
+gc_sweep_freelist_finish (scm_t_freelist *freelist)
{
- int collected;
+ long collected;
*freelist->clustertail = freelist->cells;
- if (SCM_NNULLP (freelist->cells))
+ if (!SCM_NULLP (freelist->cells))
{
SCM c = freelist->cells;
- SCM_SETCAR (c, SCM_CDR (c));
- SCM_SETCDR (c, SCM_EOL);
+ SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
+ SCM_SET_CELL_WORD_1 (c, SCM_EOL);
freelist->collected +=
freelist->span * (freelist->cluster_size - freelist->left_to_collect);
}
collected = SCM_MAX (freelist->collected_1, freelist->collected);
freelist->grow_heap_p = (collected < freelist->min_yield);
}
-#endif
+
+#define NEXT_DATA_CELL(ptr, span) \
+ do { \
+ scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
+ (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
+ CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
+ : nxt__); \
+ } while (0)
void
scm_gc_sweep ()
+#define FUNC_NAME "scm_gc_sweep"
{
register SCM_CELLPTR ptr;
register SCM nfreelist;
- register scm_freelist_t *freelist;
- register long m;
+ register scm_t_freelist *freelist;
+ register unsigned long m;
register int span;
- long i;
- scm_sizet seg_size;
+ size_t i;
+ size_t seg_size;
m = 0;
-#ifdef GUILE_NEW_GC_SCHEME
gc_sweep_freelist_start (&scm_master_freelist);
gc_sweep_freelist_start (&scm_master_freelist2);
-#else
- /* Reset all free list pointers. We'll reconstruct them completely
- while scanning. */
- for (i = 0; i < scm_n_heap_segs; i++)
- scm_heap_table[i].freelist->cells = SCM_EOL;
-#endif
for (i = 0; i < scm_n_heap_segs; i++)
{
-#ifdef GUILE_NEW_GC_SCHEME
- register unsigned int left_to_collect;
-#else
- register scm_sizet n = 0;
-#endif
- register scm_sizet j;
+ register long left_to_collect;
+ register size_t j;
/* Unmarked cells go onto the front of the freelist this heap
segment points to. Rather than updating the real freelist
simply don't assign nfreelist back into the real freelist. */
freelist = scm_heap_table[i].freelist;
nfreelist = freelist->cells;
-#ifdef GUILE_NEW_GC_SCHEME
left_to_collect = freelist->left_to_collect;
-#endif
span = scm_heap_table[i].span;
ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
+
+ /* use only data cells in seg_size */
+ seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
+
+ scm_gc_cells_swept += seg_size;
+
for (j = seg_size + span; j -= span; ptr += span)
{
- SCM scmptr = PTR2SCM (ptr);
+ SCM scmptr;
- switch SCM_TYP7 (scmptr)
+ if (SCM_GC_IN_CARD_HEADERP (ptr))
{
+ SCM_CELLPTR nxt;
+
+ /* cheat here */
+ nxt = ptr;
+ NEXT_DATA_CELL (nxt, span);
+ j += span;
+
+ ptr = nxt - span;
+ continue;
+ }
+
+ scmptr = PTR2SCM (ptr);
+
+ if (SCM_GCMARKP (scmptr))
+ continue;
+
+ switch SCM_TYP7 (scmptr)
+ {
case scm_tcs_cons_gloc:
{
/* Dirk:FIXME:: Again, super ugly code: scmptr may be a
* struct or a gloc. See the corresponding comment in
* scm_gc_mark.
*/
- scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
- scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
- if (SCM_GCMARKP (scmptr))
- {
- if (vtable_data [scm_vtable_index_vcell] == 1)
- vtable_data [scm_vtable_index_vcell] = 0;
- goto cmrkcontinue;
- }
- else
+ scm_t_bits word0 = (SCM_CELL_WORD_0 (scmptr)
+ - scm_tc3_cons_gloc);
+ /* access as struct */
+ scm_t_bits * vtable_data = (scm_t_bits *) word0;
+ if (vtable_data[scm_vtable_index_vcell] == 0)
{
- if (vtable_data [scm_vtable_index_vcell] == 0
- || vtable_data [scm_vtable_index_vcell] == 1)
- {
- scm_struct_free_t free
- = (scm_struct_free_t) vtable_data[scm_struct_i_free];
- m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
- }
+ /* Structs need to be freed in a special order.
+ * This is handled by GC C hooks in struct.c.
+ */
+ SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
+ scm_structs_to_free = scmptr;
+ continue;
}
+ /* fall through so that scmptr gets collected */
}
break;
case scm_tcs_cons_imcar:
case scm_tcs_cons_nimcar:
case scm_tcs_closures:
case scm_tc7_pws:
- if (SCM_GCMARKP (scmptr))
- goto cmrkcontinue;
break;
case scm_tc7_wvect:
- if (SCM_GC8MARKP (scmptr))
- {
- goto c8mrkcontinue;
- }
- else
- {
- m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
- scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
- break;
- }
-
case scm_tc7_vector:
- case scm_tc7_lvector:
+ {
+ unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
+ if (length > 0)
+ {
+ m += length * sizeof (scm_t_bits);
+ scm_must_free (SCM_VECTOR_BASE (scmptr));
+ }
+ break;
+ }
#ifdef CCLO
case scm_tc7_cclo:
-#endif
- if (SCM_GC8MARKP (scmptr))
- goto c8mrkcontinue;
-
- m += (SCM_LENGTH (scmptr) * sizeof (SCM));
- freechars:
- scm_must_free (SCM_CHARS (scmptr));
- /* SCM_SETCHARS(scmptr, 0);*/
+ m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
+ scm_must_free (SCM_CCLO_BASE (scmptr));
break;
+#endif
#ifdef HAVE_ARRAYS
case scm_tc7_bvect:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
- m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
- goto freechars;
+ {
+ unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
+ if (length > 0)
+ {
+ m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
+ scm_must_free (SCM_BITVECTOR_BASE (scmptr));
+ }
+ }
+ break;
case scm_tc7_byvect:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
- m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
- goto freechars;
case scm_tc7_ivect:
case scm_tc7_uvect:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
- m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
- goto freechars;
case scm_tc7_svect:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
- m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
- goto freechars;
#ifdef HAVE_LONG_LONGS
case scm_tc7_llvect:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
- m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
- goto freechars;
#endif
case scm_tc7_fvect:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
- m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
- goto freechars;
case scm_tc7_dvect:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
- m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
- goto freechars;
case scm_tc7_cvect:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
- m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
- goto freechars;
+ m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
+ scm_must_free (SCM_UVECTOR_BASE (scmptr));
+ break;
#endif
case scm_tc7_substring:
- if (SCM_GC8MARKP (scmptr))
- goto c8mrkcontinue;
break;
case scm_tc7_string:
- if (SCM_GC8MARKP (scmptr))
- goto c8mrkcontinue;
- m += SCM_HUGE_LENGTH (scmptr) + 1;
- goto freechars;
- case scm_tc7_msymbol:
- if (SCM_GC8MARKP (scmptr))
- goto c8mrkcontinue;
- m += (SCM_LENGTH (scmptr) + 1
- + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
- scm_must_free ((char *)SCM_SLOTS (scmptr));
+ m += SCM_STRING_LENGTH (scmptr) + 1;
+ scm_must_free (SCM_STRING_CHARS (scmptr));
break;
- case scm_tc7_contin:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
- m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
- if (SCM_VELTS (scmptr))
- goto freechars;
- case scm_tc7_ssymbol:
- if SCM_GC8MARKP(scmptr)
- goto c8mrkcontinue;
+ case scm_tc7_symbol:
+ m += SCM_SYMBOL_LENGTH (scmptr) + 1;
+ scm_must_free (SCM_SYMBOL_CHARS (scmptr));
break;
case scm_tcs_subrs:
+ /* the various "subrs" (primitives) are never freed */
continue;
case scm_tc7_port:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
if SCM_OPENP (scmptr)
{
int k = SCM_PTOBNUM (scmptr);
+#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
if (!(k < scm_numptob))
- goto sweeperr;
+ SCM_MISC_ERROR ("undefined port type", SCM_EOL);
+#endif
/* Keep "revealed" ports alive. */
if (scm_revealed_count (scmptr) > 0)
continue;
SCM_SETSTREAM (scmptr, 0);
scm_remove_from_port_table (scmptr);
scm_gc_ports_collected++;
- SCM_SETAND_CAR (scmptr, ~SCM_OPN);
+ SCM_CLR_PORT_OPEN_FLAG (scmptr);
}
break;
case scm_tc7_smob:
- switch SCM_GCTYP16 (scmptr)
+ switch SCM_TYP16 (scmptr)
{
case scm_tc_free_cell:
case scm_tc16_real:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
break;
#ifdef SCM_BIGDIG
case scm_tc16_big:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
- goto freechars;
+ scm_must_free (SCM_BDIGITS (scmptr));
+ break;
#endif /* def SCM_BIGDIG */
case scm_tc16_complex:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
- m += 2 * sizeof (double);
- goto freechars;
+ m += sizeof (scm_t_complex);
+ scm_must_free (SCM_COMPLEX_MEM (scmptr));
+ break;
default:
- if SCM_GC8MARKP (scmptr)
- goto c8mrkcontinue;
-
{
int k;
k = SCM_SMOBNUM (scmptr);
+#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
if (!(k < scm_numsmob))
- goto sweeperr;
- m += (scm_smobs[k].free) (scmptr);
+ SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
+#endif
+ if (scm_smobs[k].free)
+ m += (scm_smobs[k].free) (scmptr);
break;
}
}
break;
default:
- sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
+ SCM_MISC_ERROR ("unknown type", SCM_EOL);
}
-#if 0
- if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
- exit (2);
-#endif
-#ifndef GUILE_NEW_GC_SCHEME
- n += span;
-#else
+
if (!--left_to_collect)
{
- SCM_SETCAR (scmptr, nfreelist);
+ SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
*freelist->clustertail = scmptr;
freelist->clustertail = SCM_CDRLOC (scmptr);
left_to_collect = freelist->cluster_size;
}
else
-#endif
{
/* Stick the new cell on the front of nfreelist. It's
critical that we mark this cell as freed; otherwise, the
conservative collector might trace it as some other type
of object. */
SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
- SCM_SETCDR (scmptr, nfreelist);
+ SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
nfreelist = scmptr;
}
-
- continue;
- c8mrkcontinue:
- SCM_CLRGC8MARK (scmptr);
- continue;
- cmrkcontinue:
- SCM_CLRGCMARK (scmptr);
}
+
#ifdef GC_FREE_SEGMENTS
if (n == seg_size)
{
/* Update the real freelist pointer to point to the head of
the list of free cells we've built for this segment. */
freelist->cells = nfreelist;
-#ifdef GUILE_NEW_GC_SCHEME
freelist->left_to_collect = left_to_collect;
-#endif
}
-#ifndef GUILE_NEW_GC_SCHEME
- freelist->collected += n;
-#endif
-
#ifdef GUILE_DEBUG_FREELIST
-#ifdef GUILE_NEW_GC_SCHEME
- scm_check_freelist (freelist == &scm_master_freelist
- ? scm_freelist
- : scm_freelist2);
-#else
- scm_check_freelist (freelist);
-#endif
scm_map_free_list ();
#endif
}
-#ifdef GUILE_NEW_GC_SCHEME
gc_sweep_freelist_finish (&scm_master_freelist);
gc_sweep_freelist_finish (&scm_master_freelist2);
be GC-protected instead. */
scm_freelist = SCM_EOL;
scm_freelist2 = SCM_EOL;
-#endif
-
- /* Scan weak vectors. */
- {
- SCM *ptr, w;
- for (w = scm_weak_vectors; !SCM_NULLP (w); w = SCM_WVECT_GC_CHAIN (w))
- {
- if (!SCM_IS_WHVEC_ANY (w))
- {
- register long j, n;
-
- ptr = SCM_VELTS (w);
- n = SCM_LENGTH (w);
- for (j = 0; j < n; ++j)
- if (SCM_FREEP (ptr[j]))
- ptr[j] = SCM_BOOL_F;
- }
- else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
- {
- SCM obj = w;
- register long n = SCM_LENGTH (w);
- register long j;
- ptr = SCM_VELTS (w);
-
- for (j = 0; j < n; ++j)
- {
- SCM * fixup;
- SCM alist;
- int weak_keys;
- int weak_values;
-
- weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
- weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
-
- fixup = ptr + j;
- alist = *fixup;
-
- while ( SCM_CONSP (alist)
- && SCM_CONSP (SCM_CAR (alist)))
- {
- SCM key;
- SCM value;
-
- key = SCM_CAAR (alist);
- value = SCM_CDAR (alist);
- if ( (weak_keys && SCM_FREEP (key))
- || (weak_values && SCM_FREEP (value)))
- {
- *fixup = SCM_CDR (alist);
- }
- else
- fixup = SCM_CDRLOC (alist);
- alist = SCM_CDR (alist);
- }
- }
- }
- }
- }
scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
-#ifdef GUILE_NEW_GC_SCHEME
scm_gc_yield -= scm_cells_allocated;
-#endif
+
+ if (scm_mallocated < m)
+ /* The byte count of allocated objects has underflowed. This is
+ probably because you forgot to report the sizes of objects you
+ have allocated, by calling scm_done_malloc or some such. When
+ the GC freed them, it subtracted their size from
+ scm_mallocated, which underflowed. */
+ abort ();
+
scm_mallocated -= m;
scm_gc_malloc_collected = m;
}
+#undef FUNC_NAME
\f
-
/* {Front end to malloc}
*
- * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
+ * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
+ * scm_done_free
*
- * These functions provide services comperable to malloc, realloc, and
- * free. They are for allocating malloced parts of scheme objects.
- * The primary purpose of the front end is to impose calls to gc.
+ * These functions provide services comparable to malloc, realloc, and
+ * free. They should be used when allocating memory that will be under
+ * control of the garbage collector, i.e., if the memory may be freed
+ * during garbage collection.
*/
-
/* scm_must_malloc
* Return newly malloced storage or throw an error.
*
* The limit scm_mtrigger may be raised by this allocation.
*/
void *
-scm_must_malloc (scm_sizet size, const char *what)
+scm_must_malloc (size_t size, const char *what)
{
void *ptr;
unsigned long nm = scm_mallocated + size;
+ if (nm < size)
+ /* The byte count of allocated objects has overflowed. This is
+ probably because you forgot to report the correct size of freed
+ memory in some of your smob free methods. */
+ abort ();
+
if (nm <= scm_mtrigger)
{
SCM_SYSCALL (ptr = malloc (size));
scm_igc (what);
nm = scm_mallocated + size;
+
+ if (nm < size)
+ /* The byte count of allocated objects has overflowed. This is
+ probably because you forgot to report the correct size of freed
+ memory in some of your smob free methods. */
+ abort ();
+
SCM_SYSCALL (ptr = malloc (size));
if (NULL != ptr)
{
return ptr;
}
- scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
- return 0; /* never reached */
+ scm_memory_error (what);
}
*/
void *
scm_must_realloc (void *where,
- scm_sizet old_size,
- scm_sizet size,
+ size_t old_size,
+ size_t size,
const char *what)
{
void *ptr;
- scm_sizet nm = scm_mallocated + size - old_size;
+ unsigned long nm;
+
+ if (size <= old_size)
+ return where;
+
+ nm = scm_mallocated + size - old_size;
+
+ if (nm < (size - old_size))
+ /* The byte count of allocated objects has overflowed. This is
+ probably because you forgot to report the correct size of freed
+ memory in some of your smob free methods. */
+ abort ();
if (nm <= scm_mtrigger)
{
scm_igc (what);
nm = scm_mallocated + size - old_size;
+
+ if (nm < (size - old_size))
+ /* The byte count of allocated objects has overflowed. This is
+ probably because you forgot to report the correct size of freed
+ memory in some of your smob free methods. */
+ abort ();
+
SCM_SYSCALL (ptr = realloc (where, size));
if (NULL != ptr)
{
return ptr;
}
- scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
- return 0; /* never reached */
+ scm_memory_error (what);
+}
+
+char *
+scm_must_strndup (const char *str, size_t length)
+{
+ char * dst = scm_must_malloc (length + 1, "scm_must_strndup");
+ memcpy (dst, str, length);
+ dst[length] = 0;
+ return dst;
+}
+
+char *
+scm_must_strdup (const char *str)
+{
+ return scm_must_strndup (str, strlen (str));
}
void
scm_must_free (void *obj)
+#define FUNC_NAME "scm_must_free"
{
#ifdef GUILE_DEBUG_MALLOC
scm_malloc_unregister (obj);
if (obj)
free (obj);
else
- scm_wta (SCM_INUM0, "already free", "");
+ SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
}
+#undef FUNC_NAME
+
/* Announce that there has been some malloc done that will be freed
* during gc. A typical use is for a smob that uses some malloced
* reason). When a new object of this smob is created you call
* scm_done_malloc with the size of the object. When your smob free
* function is called, be sure to include this size in the return
- * value. */
+ * value.
+ *
+ * If you can't actually free the memory in the smob free function,
+ * for whatever reason (like reference counting), you still can (and
+ * should) report the amount of memory freed when you actually free it.
+ * Do it by calling scm_done_malloc with the _negated_ size. Clever,
+ * eh? Or even better, call scm_done_free. */
void
scm_done_malloc (long size)
{
+ if (size < 0) {
+ if (scm_mallocated < size)
+ /* The byte count of allocated objects has underflowed. This is
+ probably because you forgot to report the sizes of objects you
+ have allocated, by calling scm_done_malloc or some such. When
+ the GC freed them, it subtracted their size from
+ scm_mallocated, which underflowed. */
+ abort ();
+ } else {
+ unsigned long nm = scm_mallocated + size;
+ if (nm < size)
+ /* The byte count of allocated objects has overflowed. This is
+ probably because you forgot to report the correct size of freed
+ memory in some of your smob free methods. */
+ abort ();
+ }
+
scm_mallocated += size;
if (scm_mallocated > scm_mtrigger)
}
}
+void
+scm_done_free (long size)
+{
+ if (size >= 0) {
+ if (scm_mallocated < size)
+ /* The byte count of allocated objects has underflowed. This is
+ probably because you forgot to report the sizes of objects you
+ have allocated, by calling scm_done_malloc or some such. When
+ the GC freed them, it subtracted their size from
+ scm_mallocated, which underflowed. */
+ abort ();
+ } else {
+ unsigned long nm = scm_mallocated + size;
+ if (nm < size)
+ /* The byte count of allocated objects has overflowed. This is
+ probably because you forgot to report the correct size of freed
+ memory in some of your smob free methods. */
+ abort ();
+ }
+
+ scm_mallocated -= size;
+}
-\f
+\f
/* {Heap Segments}
*
* Each heap segment is an array of objects of a particular size.
*/
int scm_expmem = 0;
-scm_sizet scm_max_segment_size;
+size_t scm_max_segment_size;
/* scm_heap_org
* is the lowest base address of any heap segment.
*/
SCM_CELLPTR scm_heap_org;
-scm_heap_seg_data_t * scm_heap_table = 0;
-int scm_n_heap_segs = 0;
+scm_t_heap_seg_data * scm_heap_table = 0;
+static size_t heap_segment_table_size = 0;
+size_t scm_n_heap_segs = 0;
/* init_heap_seg
- * initializes a new heap segment and return the number of objects it contains.
+ * initializes a new heap segment and returns the number of objects it contains.
*
- * The segment origin, segment size in bytes, and the span of objects
- * in cells are input parameters. The freelist is both input and output.
+ * The segment origin and segment size in bytes are input parameters.
+ * The freelist is both input and output.
*
- * This function presume that the scm_heap_table has already been expanded
- * to accomodate a new segment record.
+ * This function presumes that the scm_heap_table has already been expanded
+ * to accommodate a new segment record and that the markbit space was reserved
+ * for all the cards in this segment.
*/
+#define INIT_CARD(card, span) \
+ do { \
+ SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
+ if ((span) == 2) \
+ SCM_GC_SET_CARD_DOUBLECELL (card); \
+ } while (0)
-static scm_sizet
-init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
+static size_t
+init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
{
register SCM_CELLPTR ptr;
SCM_CELLPTR seg_end;
- int new_seg_index;
- int n_new_cells;
+ size_t new_seg_index;
+ ptrdiff_t n_new_cells;
int span = freelist->span;
if (seg_org == NULL)
return 0;
- ptr = CELL_UP (seg_org, span);
+ /* Align the begin ptr up.
+ */
+ ptr = SCM_GC_CARD_UP (seg_org);
/* Compute the ceiling on valid object pointers w/in this segment.
*/
- seg_end = CELL_DN ((char *) seg_org + size, span);
+ seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
/* Find the right place and insert the segment record.
- *
*/
- for (new_seg_index = 0;
- ( (new_seg_index < scm_n_heap_segs)
- && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
- new_seg_index++)
- ;
+ new_seg_index = 0;
+ while (new_seg_index < scm_n_heap_segs
+ && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org))
+ new_seg_index++;
{
int i;
++scm_n_heap_segs;
- scm_heap_table[new_seg_index].valid = 0;
scm_heap_table[new_seg_index].span = span;
scm_heap_table[new_seg_index].freelist = freelist;
scm_heap_table[new_seg_index].bounds[0] = ptr;
scm_heap_table[new_seg_index].bounds[1] = seg_end;
-
- /* Compute the least valid object pointer w/in this segment
- */
- ptr = CELL_UP (ptr, span);
-
-
/*n_new_cells*/
n_new_cells = seg_end - ptr;
-#ifdef GUILE_NEW_GC_SCHEME
-
freelist->heap_size += n_new_cells;
/* Partition objects in this segment into clusters */
{
SCM clusters;
SCM *clusterp = &clusters;
- int n_cluster_cells = span * freelist->cluster_size;
- while (n_new_cells > span) /* at least one spine + one freecell */
+ NEXT_DATA_CELL (ptr, span);
+ while (ptr < seg_end)
{
- /* Determine end of cluster
- */
- if (n_new_cells >= n_cluster_cells)
- {
- seg_end = ptr + n_cluster_cells;
- n_new_cells -= n_cluster_cells;
- }
- else
- /* [cmm] looks like the segment size doesn't divide cleanly by
- cluster size. bad cmm! */
- abort();
+ scm_cell *nxt = ptr;
+ scm_cell *prv = NULL;
+ scm_cell *last_card = NULL;
+ int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
+ NEXT_DATA_CELL(nxt, span);
/* Allocate cluster spine
*/
*clusterp = PTR2SCM (ptr);
- SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
+ SCM_SETCAR (*clusterp, PTR2SCM (nxt));
clusterp = SCM_CDRLOC (*clusterp);
- ptr += span;
+ ptr = nxt;
- while (ptr < seg_end)
+ while (n_data_cells--)
{
+ scm_cell *card = SCM_GC_CELL_CARD (ptr);
SCM scmptr = PTR2SCM (ptr);
+ nxt = ptr;
+ NEXT_DATA_CELL (nxt, span);
+ prv = ptr;
+
+ if (card != last_card)
+ {
+ INIT_CARD (card, span);
+ last_card = card;
+ }
SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
- SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
- ptr += span;
+ SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
+
+ ptr = nxt;
}
- SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
+ SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
}
+ /* sanity check */
+ {
+ scm_cell *ref = seg_end;
+ NEXT_DATA_CELL (ref, span);
+ if (ref != ptr)
+ /* [cmm] looks like the segment size doesn't divide cleanly by
+ cluster size. bad cmm! */
+ abort();
+ }
+
/* Patch up the last cluster pointer in the segment
* to join it to the input freelist.
*/
freelist->clusters = clusters;
}
-#else /* GUILE_NEW_GC_SCHEME */
-
- /* Prepend objects in this segment to the freelist.
- */
- while (ptr < seg_end)
- {
- SCM scmptr = PTR2SCM (ptr);
-
- SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
- SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
- ptr += span;
- }
-
- ptr -= span;
-
- /* Patch up the last freelist pointer in the segment
- * to join it to the input freelist.
- */
- SCM_SETCDR (PTR2SCM (ptr), freelist->cells);
- freelist->cells = PTR2SCM (CELL_UP (seg_org, span));
-
- freelist->heap_size += n_new_cells;
-
-#endif /* GUILE_NEW_GC_SCHEME */
-
#ifdef DEBUGINFO
fprintf (stderr, "H");
#endif
return size;
}
-#ifndef GUILE_NEW_GC_SCHEME
-#define round_to_cluster_size(freelist, len) len
-#else
-
-static scm_sizet
-round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
+static size_t
+round_to_cluster_size (scm_t_freelist *freelist, size_t len)
{
- scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
+ size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
return
(len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
+ ALIGNMENT_SLACK (freelist);
}
-#endif
-
static void
-alloc_some_heap (scm_freelist_t *freelist)
+alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
+#define FUNC_NAME "alloc_some_heap"
{
- scm_heap_seg_data_t * tmptable;
SCM_CELLPTR ptr;
- long len;
+ size_t len;
- /* Critical code sections (such as the garbage collector)
- * aren't supposed to add heap segments.
- */
if (scm_gc_heap_lock)
- scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
-
- /* Expand the heap tables to have room for the new segment.
- * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
- * only if the allocation of the segment itself succeeds.
- */
- len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
-
- SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
- realloc ((char *)scm_heap_table, len)));
- if (!tmptable)
- scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
- else
- scm_heap_table = tmptable;
+ {
+ /* Critical code sections (such as the garbage collector) aren't
+ * supposed to add heap segments.
+ */
+ fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
+ abort ();
+ }
+ if (scm_n_heap_segs == heap_segment_table_size)
+ {
+ /* We have to expand the heap segment table to have room for the new
+ * segment. Do not yet increment scm_n_heap_segs -- that is done by
+ * init_heap_seg only if the allocation of the segment itself succeeds.
+ */
+ size_t new_table_size = scm_n_heap_segs + 1;
+ size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
+ scm_t_heap_seg_data *new_heap_table;
+
+ SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
+ realloc ((char *)scm_heap_table, size)));
+ if (!new_heap_table)
+ {
+ if (error_policy == abort_on_error)
+ {
+ fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
+ abort ();
+ }
+ else
+ {
+ return;
+ }
+ }
+ else
+ {
+ scm_heap_table = new_heap_table;
+ heap_segment_table_size = new_table_size;
+ }
+ }
/* Pick a size for the new heap segment.
* The rule for picking the size of a segment is explained in
* gc.h
*/
-#ifdef GUILE_NEW_GC_SCHEME
{
/* Assure that the new segment is predicted to be large enough.
*
* This gives dh > (f * h - y) / (1 - f)
*/
int f = freelist->min_yield_fraction;
- long h = SCM_HEAP_SIZE;
- long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
+ unsigned long h = SCM_HEAP_SIZE;
+ size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
- fprintf (stderr, "(%d < %d)", len, min_cells);
+ fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
#endif
if (len < min_cells)
len = min_cells + freelist->cluster_size;
if (len > scm_max_segment_size)
len = scm_max_segment_size;
-#else
- if (scm_expmem)
- {
- len = (scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell));
- if ((scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell))
- != len)
- len = 0;
- }
- else
- len = SCM_HEAP_SEG_SIZE;
-#endif /* GUILE_NEW_GC_SCHEME */
{
- scm_sizet smallest;
+ size_t smallest;
-#ifndef GUILE_NEW_GC_SCHEME
- smallest = (freelist->span * sizeof (scm_cell));
-#else
smallest = CLUSTER_SIZE_IN_BYTES (freelist);
-#endif
if (len < smallest)
len = smallest;
while ((len >= SCM_MIN_HEAP_SEG_SIZE)
&& (len >= smallest))
{
- scm_sizet rounded_len = round_to_cluster_size (freelist, len);
+ size_t rounded_len = round_to_cluster_size (freelist, len);
SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
if (ptr)
{
}
}
- scm_wta (SCM_UNDEFINED, "could not grow", "heap");
-}
-
-
-SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
- (SCM name),
- "")
-#define FUNC_NAME s_scm_unhash_name
-{
- int x;
- int bound;
- SCM_VALIDATE_SYMBOL (1,name);
- SCM_DEFER_INTS;
- bound = scm_n_heap_segs;
- for (x = 0; x < bound; ++x)
+ if (error_policy == abort_on_error)
{
- SCM_CELLPTR p;
- SCM_CELLPTR pbound;
- p = scm_heap_table[x].bounds[0];
- pbound = scm_heap_table[x].bounds[1];
- while (p < pbound)
- {
- SCM cell = PTR2SCM (p);
- if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
- {
- /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
- * struct cell. See the corresponding comment in scm_gc_mark.
- */
- scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
- SCM gloc_car = SCM_PACK (word0); /* access as gloc */
- SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
- if ((SCM_TRUE_P (name) || SCM_EQ_P (SCM_CAR (gloc_car), name))
- && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
- {
- SCM_SET_CELL_OBJECT_0 (cell, name);
- }
- }
- ++p;
- }
+ fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
+ abort ();
}
- SCM_ALLOW_INTS;
- return name;
}
#undef FUNC_NAME
-
\f
/* {GC Protection Helper Functions}
*/
+/*
+ * If within a function you need to protect one or more scheme objects from
+ * garbage collection, pass them as parameters to one of the
+ * scm_remember_upto_here* functions below. These functions don't do
+ * anything, but since the compiler does not know that they are actually
+ * no-ops, it will generate code that calls these functions with the given
+ * parameters. Therefore, you can be sure that the compiler will keep those
+ * scheme values alive (on the stack or in a register) up to the point where
+ * scm_remember_upto_here* is called. In other words, place the call to
+ * scm_remember_upto_here* _behind_ the last code in your function that
+ * depends on the existence of the scheme object.
+ *
+ * Example: We want to make sure that the string object str does not get
+ * garbage collected during the execution of 'some_function', because
+ * otherwise the characters belonging to str would be freed and
+ * 'some_function' might access freed memory. To make sure that the compiler
+ * keeps str alive on the stack or in a register such that it is visible to
+ * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
+ * call to 'some_function'. Note that this would not be necessary if str was
+ * used anyway after the call to 'some_function'.
+ * char *chars = SCM_STRING_CHARS (str);
+ * some_function (chars);
+ * scm_remember_upto_here_1 (str); // str will be alive up to this point.
+ */
+
+void
+scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
+{
+ /* Empty. Protects a single object from garbage collection. */
+}
+
+void
+scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
+{
+ /* Empty. Protects two objects from garbage collection. */
+}
+
+void
+scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
+{
+ /* Empty. Protects any number of objects from garbage collection. */
+}
+
+
+#if (SCM_DEBUG_DEPRECATED == 0)
+
void
scm_remember (SCM *ptr)
-{ /* empty */ }
+{
+ scm_c_issue_deprecation_warning ("`scm_remember' is deprecated. "
+ "Use the `scm_remember_upto_here*' family of functions instead.");
+}
+SCM
+scm_protect_object (SCM obj)
+{
+ scm_c_issue_deprecation_warning ("`scm_protect_object' is deprecated. "
+ "Use `scm_gc_protect_object' instead.");
+ return scm_gc_protect_object (obj);
+}
+
+SCM
+scm_unprotect_object (SCM obj)
+{
+ scm_c_issue_deprecation_warning ("`scm_unprotect_object' is deprecated. "
+ "Use `scm_gc_unprotect_object' instead.");
+ return scm_gc_unprotect_object (obj);
+}
+
+#endif /* SCM_DEBUG_DEPRECATED == 0 */
/*
These crazy functions prevent garbage collection
}
-/* Protect OBJ from the garbage collector. OBJ will not be freed,
- even if all other references are dropped, until someone applies
- scm_unprotect_object to it. This function returns OBJ.
+/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
+ other references are dropped, until the object is unprotected by calling
+ scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
+ i. e. it is possible to protect the same object several times, but it is
+ necessary to unprotect the object the same number of times to actually get
+ the object unprotected. It is an error to unprotect an object more often
+ than it has been protected before. The function scm_protect_object returns
+ OBJ.
+*/
- Calls to scm_protect_object nest. For every object OBJ, there is a
- counter which scm_protect_object(OBJ) increments and
- scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
- an object's counter is greater than zero, the garbage collector
- will not free it.
+/* Implementation note: For every object X, there is a counter which
+ scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
+*/
- Of course, that's not how it's implemented. scm_protect_object and
- scm_unprotect_object just maintain a list of references to things.
- Since the GC knows about this list, all objects it mentions stay
- alive. scm_protect_object adds its argument to the list;
- scm_unprotect_object removes the first occurrence of its argument
- to the list. */
SCM
-scm_protect_object (SCM obj)
+scm_gc_protect_object (SCM obj)
{
- scm_protects = scm_cons (obj, scm_protects);
+ SCM handle;
+
+ /* This critical section barrier will be replaced by a mutex. */
+ SCM_REDEFER_INTS;
+
+ handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
+ SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
+
+ SCM_REALLOW_INTS;
return obj;
}
See scm_protect_object for more information. */
SCM
-scm_unprotect_object (SCM obj)
+scm_gc_unprotect_object (SCM obj)
{
- SCM *tail_ptr = &scm_protects;
+ SCM handle;
- while (SCM_CONSP (*tail_ptr))
- if (SCM_EQ_P (SCM_CAR (*tail_ptr), obj))
- {
- *tail_ptr = SCM_CDR (*tail_ptr);
- break;
- }
- else
- tail_ptr = SCM_CDRLOC (*tail_ptr);
+ /* This critical section barrier will be replaced by a mutex. */
+ SCM_REDEFER_INTS;
+
+ handle = scm_hashq_get_handle (scm_protects, obj);
+
+ if (SCM_FALSEP (handle))
+ {
+ fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
+ abort ();
+ }
+ else
+ {
+ SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
+ if (SCM_EQ_P (count, SCM_MAKINUM (0)))
+ scm_hashq_remove_x (scm_protects, obj);
+ else
+ SCM_SETCDR (handle, count);
+ }
+
+ SCM_REALLOW_INTS;
return obj;
}
+void
+scm_gc_register_root (SCM *p)
+{
+ SCM handle;
+ SCM key = scm_long2num ((long) p);
+
+ /* This critical section barrier will be replaced by a mutex. */
+ SCM_REDEFER_INTS;
+
+ handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
+ SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
+
+ SCM_REALLOW_INTS;
+}
+
+void
+scm_gc_unregister_root (SCM *p)
+{
+ SCM handle;
+ SCM key = scm_long2num ((long) p);
+
+ /* This critical section barrier will be replaced by a mutex. */
+ SCM_REDEFER_INTS;
+
+ handle = scm_hashv_get_handle (scm_gc_registered_roots, key);
+
+ if (SCM_FALSEP (handle))
+ {
+ fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
+ abort ();
+ }
+ else
+ {
+ SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
+ if (SCM_EQ_P (count, SCM_MAKINUM (0)))
+ scm_hashv_remove_x (scm_gc_registered_roots, key);
+ else
+ SCM_SETCDR (handle, count);
+ }
+
+ SCM_REALLOW_INTS;
+}
+
+void
+scm_gc_register_roots (SCM *b, unsigned long n)
+{
+ SCM *p = b;
+ for (; p < b + n; ++p)
+ scm_gc_register_root (p);
+}
+
+void
+scm_gc_unregister_roots (SCM *b, unsigned long n)
+{
+ SCM *p = b;
+ for (; p < b + n; ++p)
+ scm_gc_unregister_root (p);
+}
+
int terminating;
/* called on process termination. */
\f
static int
-make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
+make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
{
- scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
+ size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);
+
if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
rounded_size,
freelist))
else
scm_expmem = 1;
-#ifdef GUILE_NEW_GC_SCHEME
if (freelist->min_yield_fraction)
freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
/ 100);
freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
-#endif
return 0;
}
\f
-#ifdef GUILE_NEW_GC_SCHEME
static void
-init_freelist (scm_freelist_t *freelist,
+init_freelist (scm_t_freelist *freelist,
int span,
- int cluster_size,
+ long cluster_size,
int min_yield)
{
freelist->clusters = SCM_EOL;
freelist->heap_size = 0;
}
+
+/* Get an integer from an environment variable. */
+static int
+scm_i_getenv_int (const char *var, int def)
+{
+ char *end, *val = getenv (var);
+ long res;
+ if (!val)
+ return def;
+ res = strtol (val, &end, 10);
+ if (end == val)
+ return def;
+ return res;
+}
+
+
int
-scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
- scm_sizet init_heap_size_2, int gc_trigger_2,
- scm_sizet max_segment_size)
-#else
-int
-scm_init_storage (scm_sizet init_heap_size_1, scm_sizet init_heap_size_2)
-#endif
+scm_init_storage ()
{
- scm_sizet j;
+ unsigned long gc_trigger_1;
+ unsigned long gc_trigger_2;
+ size_t init_heap_size_1;
+ size_t init_heap_size_2;
+ size_t j;
- if (!init_heap_size_1)
- init_heap_size_1 = SCM_INIT_HEAP_SIZE_1;
- if (!init_heap_size_2)
- init_heap_size_2 = SCM_INIT_HEAP_SIZE_2;
+#if (SCM_DEBUG_CELL_ACCESSES == 1)
+ scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
+ scm_set_smob_mark (scm_tc16_allocated, allocated_mark);
+#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
j = SCM_NUM_PROTECTS;
while (j)
scm_sys_protects[--j] = SCM_BOOL_F;
scm_block_gc = 1;
-#ifdef GUILE_NEW_GC_SCHEME
scm_freelist = SCM_EOL;
scm_freelist2 = SCM_EOL;
- init_freelist (&scm_master_freelist,
- 1, SCM_CLUSTER_SIZE_1,
- gc_trigger_1 ? gc_trigger_1 : SCM_MIN_YIELD_1);
- init_freelist (&scm_master_freelist2,
- 2, SCM_CLUSTER_SIZE_2,
- gc_trigger_2 ? gc_trigger_2 : SCM_MIN_YIELD_2);
- scm_max_segment_size
- = max_segment_size ? max_segment_size : SCM_MAX_SEGMENT_SIZE;
-#else
- scm_freelist.cells = SCM_EOL;
- scm_freelist.span = 1;
- scm_freelist.collected = 0;
- scm_freelist.heap_size = 0;
-
- scm_freelist2.cells = SCM_EOL;
- scm_freelist2.span = 2;
- scm_freelist2.collected = 0;
- scm_freelist2.heap_size = 0;
-#endif
+ gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
+ init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
+ gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
+ init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
+ scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
scm_expmem = 0;
j = SCM_HEAP_SEG_SIZE;
scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
- scm_heap_table = ((scm_heap_seg_data_t *)
- scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
+ scm_heap_table = ((scm_t_heap_seg_data *)
+ scm_must_malloc (sizeof (scm_t_heap_seg_data) * 2, "hplims"));
+ heap_segment_table_size = 2;
+
+ mark_space_ptr = &mark_space_head;
-#ifdef GUILE_NEW_GC_SCHEME
+ init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
+ init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
make_initial_segment (init_heap_size_2, &scm_master_freelist2))
return 1;
-#else
- if (make_initial_segment (init_heap_size_1, &scm_freelist) ||
- make_initial_segment (init_heap_size_2, &scm_freelist2))
- return 1;
-#endif
+ /* scm_hplims[0] can change. do not remove scm_heap_org */
scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
- /* scm_hplims[0] can change. do not remove scm_heap_org */
- scm_weak_vectors = SCM_EOL;
+ scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
+ scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
+ scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
+ scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
+ scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
/* Initialise the list of ports. */
- scm_port_table = (scm_port **)
- malloc (sizeof (scm_port *) * scm_port_table_room);
- if (!scm_port_table)
+ scm_t_portable = (scm_t_port **)
+ malloc (sizeof (scm_t_port *) * scm_t_portable_room);
+ if (!scm_t_portable)
return 1;
#ifdef HAVE_ATEXIT
#endif
#endif
- scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
- SCM_SETCDR (scm_undefineds, scm_undefineds);
-
- scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
- scm_nullstr = scm_makstr (0L, 0);
- scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
- scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
- scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
- scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
scm_stand_in_procs = SCM_EOL;
scm_permobjs = SCM_EOL;
- scm_protects = SCM_EOL;
- scm_asyncs = SCM_EOL;
- scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
- scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
-#ifdef SCM_BIGDIG
- scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
-#endif
+ scm_protects = scm_c_make_hash_table (31);
+ scm_gc_registered_roots = scm_c_make_hash_table (31);
+
return 0;
}
+
\f
+/* Scheme-level hook that is run after a garbage collection (invoked via
+ * gc_async_thunk, see below).  */
+SCM scm_after_gc_hook;
+
+/* System async that, when marked at the end of a gc, eventually causes the
+ * evaluator to run gc_async_thunk.  */
+static SCM gc_async;
+
+/* The function gc_async_thunk causes the execution of the after-gc-hook. It
+ * is run after the gc, as soon as the asynchronous events are handled by the
+ * evaluator.  Always returns SCM_UNSPECIFIED.
+ */
+static SCM
+gc_async_thunk (void)
+{
+ scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
+ return SCM_UNSPECIFIED;
+}
+
+
+/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
+ * the garbage collection. The only purpose of this function is to mark the
+ * gc_async (which will eventually lead to the execution of the
+ * gc_async_thunk).  Always returns NULL; the hook arguments are unused.
+ */
+static void *
+mark_gc_async (void * hook_data SCM_UNUSED,
+ void *func_data SCM_UNUSED,
+ void *data SCM_UNUSED)
+{
+ /* If cell access debugging is enabled, the user may choose to perform
+ * additional garbage collections after an arbitrary number of cell
+ * accesses. We don't want the scheme level after-gc-hook to be performed
+ * for each of these garbage collections for the following reason: The
+ * execution of the after-gc-hook causes cell accesses itself. Thus, if the
+ * after-gc-hook was performed with every gc, and if the gc was performed
+ * after a very small number of cell accesses, then the number of cell
+ * accesses during the execution of the after-gc-hook will suffice to cause
+ * the execution of the next gc. Then, guile would keep executing the
+ * after-gc-hook over and over again, and would never come to do other
+ * things.
+ *
+ * To overcome this problem, if cell access debugging with additional
+ * garbage collections is enabled, the after-gc-hook is never run by the
+ * garbage collector. When running guile with cell access debugging and the
+ * execution of the after-gc-hook is desired, then it is necessary to run
+ * the hook explicitly from the user code. This has the effect that from
+ * the scheme level point of view it seems that garbage collection is
+ * performed with a much lower frequency than it actually is. Obviously,
+ * this will not work for code that depends on a fixed one to one
+ * relationship between the execution counts of the C level garbage
+ * collection hooks and the execution count of the scheme level
+ * after-gc-hook.
+ */
+#if (SCM_DEBUG_CELL_ACCESSES == 1)
+ if (debug_cells_gc_interval == 0)
+ scm_system_async_mark (gc_async);
+#else
+ scm_system_async_mark (gc_async);
+#endif
+
+ return NULL;
+}
+
+
+/* Initialize the gc subsystem: create and export the Scheme-visible
+ * after-gc-hook, wrap gc_async_thunk into the gc_async system async, and
+ * register mark_gc_async on the C-level scm_after_gc_c_hook so that the
+ * Scheme hook is (normally) triggered after each collection.
+ */
void
scm_init_gc ()
{
+ SCM after_gc_thunk;
+
+ scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
+ scm_c_define ("after-gc-hook", scm_after_gc_hook);
+
+ after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
+ gc_async_thunk);
+ gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
+
+ scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
+
+#ifndef SCM_MAGIC_SNARFER
#include "libguile/gc.x"
+#endif
}
+#endif /*MARK_DEPENDENCIES*/
+
/*
Local Variables:
c-file-style: "gnu"