/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this software; see the file COPYING. If not, write to
* the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
+/* #define DEBUGINFO */
+
\f
#include <stdio.h>
-#include "_scm.h"
-#include "stime.h"
-#include "stackchk.h"
-#include "struct.h"
-#include "genio.h"
-#include "weaks.h"
-#include "guardians.h"
-#include "smob.h"
-#include "unif.h"
-#include "async.h"
-
-#include "validate.h"
-#include "gc.h"
+#include "libguile/_scm.h"
+#include "libguile/stime.h"
+#include "libguile/stackchk.h"
+#include "libguile/struct.h"
+#include "libguile/smob.h"
+#include "libguile/unif.h"
+#include "libguile/async.h"
+#include "libguile/ports.h"
+#include "libguile/root.h"
+#include "libguile/strings.h"
+#include "libguile/vectors.h"
+#include "libguile/weaks.h"
+#include "libguile/hashtab.h"
+
+#include "libguile/validate.h"
+#include "libguile/gc.h"
+
+#ifdef GUILE_DEBUG_MALLOC
+#include "libguile/debug-malloc.h"
+#endif
#ifdef HAVE_MALLOC_H
#include <malloc.h>
\f
/* {heap tuning parameters}
- *
+ *
* These are parameters for controlling memory allocation. The heap
* is the area out of which scm_cons, and object headers are allocated.
*
* will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
* heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
* is in scm_init_storage() and alloc_some_heap() in sys.c
- *
+ *
* If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
* SCM_EXPHEAP(scm_heap_size) when more heap is needed.
*
* is needed.
*
* INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
- * trigger a GC.
+ * trigger a GC.
*
* SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
* reclaimed by a GC triggered by must_malloc. If less than this is
* reclaimed, the trigger threshold is raised. [I don't know what a
* good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
- * work around a oscillation that caused almost constant GC.]
+ * work around an oscillation that caused almost constant GC.]
*/
-#define SCM_INIT_HEAP_SIZE_1 (40000L * sizeof (scm_cell))
+/*
+ * Heap size 45000 and 40% min yield gives quick startup and no extra
+ * heap allocation. Having higher values on min yield may lead to
+ * large heaps, especially if code behaviour is varying its
+ * maximum consumption between different freelists.
+ */
+#define SCM_INIT_HEAP_SIZE_1 (45000L * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_1 2000L
-#define SCM_GC_TRIGGER_1 -50
+#define SCM_MIN_YIELD_1 40
#define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_2 1000L
/* The following value may seem large, but note that if we get to GC at
* all, this means that we have a numerically intensive application
*/
-#define SCM_GC_TRIGGER_2 -50
+#define SCM_MIN_YIELD_2 40
#define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */
#ifdef PROT386
/*in 386 protected mode we must only adjust the offset */
-# define CELL_UP(p) MK_FP(FP_SEG(p), ~7&(FP_OFF(p)+7))
-# define CELL_DN(p) MK_FP(FP_SEG(p), ~7&FP_OFF(p))
+# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
+# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
-# define CELL_UP(p) (SCM_CELLPTR)(~1L & ((long)(p)+1L))
-# define CELL_DN(p) (SCM_CELLPTR)(~1L & (long)(p))
+# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
+# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
-# define CELL_UP(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & ((long)(p)+sizeof(scm_cell)-1L))
-# define CELL_DN(p) (SCM_CELLPTR)(~(sizeof(scm_cell)-1L) & (long)(p))
+# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
+# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
+#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
+#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
+#define SCM_HEAP_SIZE \
+ (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
+#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
\f
/* scm_freelists
*/
-#ifdef GUILE_NEW_GC_SCHEME
+typedef struct scm_freelist_t {
+ /* collected cells */
+ SCM cells;
+ /* number of cells left to collect before cluster is full */
+ unsigned int left_to_collect;
+ /* number of clusters which have been allocated */
+ unsigned int clusters_allocated;
+ /* a list of freelists, each of size cluster_size,
+ * except the last one which may be shorter
+ */
+ SCM clusters;
+ SCM *clustertail;
+ /* this is the number of objects in each cluster, including the spine cell */
+ int cluster_size;
+ /* indicates that we should grow heap instead of GC:ing
+ */
+ int grow_heap_p;
+ /* minimum yield on this list in order not to grow the heap
+ */
+ long min_yield;
+ /* defines min_yield as percent of total heap size
+ */
+ int min_yield_fraction;
+ /* number of cells per object on this list */
+ int span;
+ /* number of collected cells during last GC */
+ long collected;
+ /* number of collected cells during penultimate GC */
+ long collected_1;
+ /* total number of cells in heap segments
+ * belonging to this list.
+ */
+ long heap_size;
+} scm_freelist_t;
+
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
- SCM_EOL, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
+ SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
- SCM_EOL, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
+ SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
-#else
-scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
-scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
-#endif
/* scm_mtrigger
* is the number of bytes of must_malloc allocation needed to trigger gc.
*/
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
-/* unsigned long scm_gc_cells_collected; */
+unsigned long scm_gc_cells_collected;
+unsigned long scm_gc_yield;
+static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_rt;
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
-
-struct scm_heap_seg_data
+typedef struct scm_heap_seg_data_t
{
/* lower and upper bounds of the segment */
SCM_CELLPTR bounds[2];
All segments usually point to the same one, scm_freelist. */
scm_freelist_t *freelist;
- /* number of SCM words per object in this segment */
+ /* number of cells per object in this segment */
int span;
-
- /* If SEG_DATA->valid is non-zero, the conservative marking
- functions will apply SEG_DATA->valid to the purported pointer and
- SEG_DATA, and mark the object iff the function returns non-zero.
- At the moment, I don't think anyone uses this. */
- int (*valid) ();
-};
-
+} scm_heap_seg_data_t;
-static void scm_mark_weak_vector_spines (void);
static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
static void alloc_some_heap (scm_freelist_t *);
int i;
for (i = 0; i < scm_n_heap_segs; i++)
- if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
- && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
+ if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
+ && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
return i;
fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
SCM_UNPACK (cell));
}
-#ifdef GUILE_NEW_GC_SCHEME
static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
int last_seg = -1, count = 0;
SCM f;
-
+
for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
{
int this_seg = which_seg (f);
fprintf (stderr, " %5d %d-cells in segment %d\n",
count, master->span, last_seg);
}
-#else
-static void
-map_free_list (scm_freelist_t *freelist)
-{
- int last_seg = -1, count = 0;
- SCM f;
-
- for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f))
- {
- int this_seg = which_seg (f);
- if (this_seg != last_seg)
- {
- if (last_seg != -1)
- fprintf (stderr, " %5d %d-cells in segment %d\n",
- count, freelist->span, last_seg);
- last_seg = this_seg;
- count = 0;
- }
- count++;
- }
- if (last_seg != -1)
- fprintf (stderr, " %5d %d-cells in segment %d\n",
- count, freelist->span, last_seg);
-}
-#endif
-
-SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
+SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
(),
"Print debugging information about the free-list.\n"
"`map-free-list' is only included in --enable-guile-debug builds of Guile.")
scm_heap_table[i].span,
scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
fprintf (stderr, ")\n");
-#ifdef GUILE_NEW_GC_SCHEME
map_free_list (&scm_master_freelist, scm_freelist);
map_free_list (&scm_master_freelist2, scm_freelist2);
-#else
- map_free_list (&scm_freelist);
- map_free_list (&scm_freelist2);
-#endif
fflush (stderr);
return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
-#ifdef GUILE_NEW_GC_SCHEME
static int last_cluster;
static int last_size;
SCM ls;
int n = 0;
for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
- if (SCM_UNPACK_CAR (ls) == scm_tc_free_cell)
+ if (SCM_CELL_TYPE (ls) == scm_tc_free_cell)
++n;
else
{
fprintf (stderr, "\ntotal %d objects\n\n", n);
}
-SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
+SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
(),
"Print debugging information about the free-list.\n"
"`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
- free_list_lengths ("1-words", &scm_master_freelist, scm_freelist);
- free_list_lengths ("2-words", &scm_master_freelist2, scm_freelist2);
+ free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
+ free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
-#endif
#endif
/* Search freelist for anything that isn't marked as a free cell.
Abort if we find something. */
-#ifdef GUILE_NEW_GC_SCHEME
static void
scm_check_freelist (SCM freelist)
{
abort ();
}
}
-#else
-static void
-scm_check_freelist (scm_freelist_t *freelist)
-{
- SCM f;
- int i = 0;
-
- for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
- if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
- {
- fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
- scm_newcell_count, i);
- fflush (stderr);
- abort ();
- }
-}
-#endif
static int scm_debug_check_freelist = 0;
-SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
+SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
(SCM flag),
"If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
"This procedure only exists because the GUILE_DEBUG_FREELIST \n"
#undef FUNC_NAME
-#ifdef GUILE_NEW_GC_SCHEME
-
SCM
scm_debug_newcell (void)
{
return new;
}
-#else /* GUILE_NEW_GC_SCHEME */
-
-SCM
-scm_debug_newcell (void)
-{
- SCM new;
-
- scm_newcell_count++;
- if (scm_debug_check_freelist)
- {
- scm_check_freelist (&scm_freelist);
- scm_gc();
- }
+#endif /* GUILE_DEBUG_FREELIST */
- /* The rest of this is supposed to be identical to the SCM_NEWCELL
- macro. */
- if (SCM_IMP (scm_freelist.cells))
- new = scm_gc_for_newcell (&scm_freelist);
- else
- {
- new = scm_freelist.cells;
- scm_freelist.cells = SCM_CDR (scm_freelist.cells);
- SCM_SETCAR (new, scm_tc16_allocated);
- ++scm_cells_allocated;
- }
+\f
- return new;
+/* Return the number of cells (not objects) that MASTER has handed out
+ * in clusters so far.  Each cluster contains cluster_size cells, one of
+ * which is the spine cell, hence cluster_size - 1 objects per cluster.
+ */
+static unsigned long
+master_cells_allocated (scm_freelist_t *master)
+{
+  int objects = master->clusters_allocated * (master->cluster_size - 1);
+  /* If no complete clusters remain, the current cluster is only
+   * partially filled; left_to_collect cells of it have not actually
+   * been handed out, so discount them.  */
+  if (SCM_NULLP (master->clusters))
+    objects -= master->left_to_collect;
+  return master->span * objects;
}
-SCM
-scm_debug_newcell2 (void)
+static unsigned long
+freelist_length (SCM freelist)
{
- SCM new;
-
- scm_newcell2_count++;
- if (scm_debug_check_freelist) {
- scm_check_freelist (&scm_freelist2);
- scm_gc();
- }
-
- /* The rest of this is supposed to be identical to the SCM_NEWCELL2
- macro. */
- if (SCM_IMP (scm_freelist2.cells))
- new = scm_gc_for_newcell (&scm_freelist2);
- else
- {
- new = scm_freelist2.cells;
- scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
- SCM_SETCAR (new, scm_tc16_allocated);
- scm_cells_allocated += 2;
- }
-
- return new;
+ int n;
+ for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
+ ++n;
+ return n;
}
-#endif /* GUILE_NEW_GC_SCHEME */
-#endif /* GUILE_DEBUG_FREELIST */
-
-\f
+/* Compute the number of cells currently in use: the running total of
+ * allocated cells, plus cells handed out by the two masters, minus the
+ * cells still sitting unused on the private freelists (weighted by
+ * each master's span, since the counts above are in cells, not objects).
+ */
+static unsigned long
+compute_cells_allocated (void)
+{
+  return (scm_cells_allocated
+	  + master_cells_allocated (&scm_master_freelist)
+	  + master_cells_allocated (&scm_master_freelist2)
+	  - scm_master_freelist.span * freelist_length (scm_freelist)
+	  - scm_master_freelist2.span * freelist_length (scm_freelist2));
+}
/* {Scheme Interface to GC}
*/
-SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
+SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
(),
"Returns an association list of statistics about Guile's current use of storage. ")
#define FUNC_NAME s_scm_gc_stats
goto retry;
scm_block_gc = 0;
- /// ? ?? ?
+ /* Below, we cons to produce the resulting list. We want a snapshot of
+ * the heap situation before consing.
+ */
local_scm_mtrigger = scm_mtrigger;
local_scm_mallocated = scm_mallocated;
-#ifdef GUILE_NEW_GC_SCHEME
- local_scm_heap_size = scm_master_freelist.heap_size; /*fixme*/
-#else
- local_scm_heap_size = scm_freelist.heap_size; /*fixme*/
-#endif
- local_scm_cells_allocated = scm_cells_allocated;
+ local_scm_heap_size = SCM_HEAP_SIZE;
+ local_scm_cells_allocated = compute_cells_allocated ();
local_scm_gc_time_taken = scm_gc_time_taken;
answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
#undef FUNC_NAME
-void
+void
scm_gc_start (const char *what)
{
scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
- /* scm_gc_cells_collected = 0; */
+ scm_gc_cells_collected = 0;
+ scm_gc_yield_1 = scm_gc_yield;
+ scm_gc_yield = (scm_cells_allocated
+ + master_cells_allocated (&scm_master_freelist)
+ + master_cells_allocated (&scm_master_freelist2));
scm_gc_malloc_collected = 0;
scm_gc_ports_collected = 0;
}
-void
+void
scm_gc_end ()
{
scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
}
-SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
+SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
(SCM obj),
"Return an integer that for the lifetime of @var{obj} is uniquely\n"
"returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
- return scm_ulong2num ((unsigned long) obj);
+ return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME
-SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
+SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
(),
"Scans all of SCM objects and reclaims for further use those that are\n"
"no longer accessible.")
/* {C Interface For When GC is Triggered}
*/
-#ifdef GUILE_NEW_GC_SCHEME
+/* Adjust FREELIST's min_yield upwards after a GC so that the next
+ * predicted total yield reaches min_yield_fraction of the heap size.
+ */
+static void
+adjust_min_yield (scm_freelist_t *freelist)
+{
+  /* min yield is adjusted upwards so that next predicted total yield
+   * (allocated cells actually freed by GC) becomes
+   * `min_yield_fraction' of total heap size.  Note, however, that
+   * the absolute value of min_yield will correspond to `collected'
+   * on one master (the one which currently is triggering GC).
+   *
+   * The reason why we look at total yield instead of cells collected
+   * on one list is that we want to take other freelists into account.
+   * On this freelist, we know that (local) yield = collected cells,
+   * but that's probably not the case on the other lists.
+   *
+   * (We might consider computing a better prediction, for example
+   * by computing an average over multiple GCs.)
+   */
+  if (freelist->min_yield_fraction)
+    {
+      /* Pick largest of last two yields. */
+      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
+		   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
+#ifdef DEBUGINFO
+      /* scm_cells_allocated is unsigned long: %lu, not %d (UB otherwise). */
+      fprintf (stderr, " after GC = %lu, delta = %d\n",
+	       scm_cells_allocated,
+	       delta);
+#endif
+      if (delta > 0)
+	freelist->min_yield += delta;
+    }
+}
/* When we get POSIX threads support, the master will be global and
* common while the freelist will be individual for each thread.
alloc_some_heap (master);
}
else
- scm_igc ("cells");
+ {
+#ifdef DEBUGINFO
+ fprintf (stderr, "allocated = %d, ",
+ scm_cells_allocated
+ + master_cells_allocated (&scm_master_freelist)
+ + master_cells_allocated (&scm_master_freelist2));
+#endif
+ scm_igc ("cells");
+ adjust_min_yield (master);
+ }
}
cell = SCM_CAR (master->clusters);
master->clusters = SCM_CDR (master->clusters);
+ ++master->clusters_allocated;
}
while (SCM_NULLP (cell));
--scm_ints_disabled;
*freelist = SCM_CDR (cell);
- SCM_SETCAR (cell, scm_tc16_allocated);
+ SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
return cell;
}
}
#endif
-#else /* GUILE_NEW_GC_SCHEME */
-
-void
-scm_gc_for_alloc (scm_freelist_t *freelist)
-{
- SCM_REDEFER_INTS;
- scm_igc ("cells");
-#ifdef GUILE_DEBUG_FREELIST
- fprintf (stderr, "Collected: %d, min_yield: %d\n",
- freelist->collected, MIN_GC_YIELD (freelist));
-#endif
- if ((freelist->collected < MIN_GC_YIELD (freelist))
- || SCM_IMP (freelist->cells))
- alloc_some_heap (freelist);
- SCM_REALLOW_INTS;
-}
-
-
-SCM
-scm_gc_for_newcell (scm_freelist_t *freelist)
-{
- SCM fl;
- scm_gc_for_alloc (freelist);
- fl = freelist->cells;
- freelist->cells = SCM_CDR (fl);
- SCM_SETCAR (fl, scm_tc16_allocated);
- return fl;
-}
+SCM scm_after_gc_hook;
-#endif /* GUILE_NEW_GC_SCHEME */
+scm_c_hook_t scm_before_gc_c_hook;
+scm_c_hook_t scm_before_mark_c_hook;
+scm_c_hook_t scm_before_sweep_c_hook;
+scm_c_hook_t scm_after_sweep_c_hook;
+scm_c_hook_t scm_after_gc_c_hook;
void
scm_igc (const char *what)
{
int j;
+ scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
fprintf (stderr,
SCM_NULLP (scm_freelist)
++scm_gc_heap_lock;
- scm_weak_vectors = SCM_EOL;
-
- scm_guardian_gc_init ();
-
/* unprotect any struct types with no instances */
#if 0
{
}
}
+ scm_c_hook_run (&scm_before_mark_c_hook, 0);
+
#ifndef USE_THREADS
-
+
/* Protect from the C stack. This must be the first marking
* done because it provides information about what objects
* are "in-use" by the C code. "in-use" objects are those
/ sizeof (SCM_STACKITEM)));
{
- /* stack_len is long rather than scm_sizet in order to guarantee that
- &stack_len is long aligned */
+ scm_sizet stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
-#ifdef nosve
- long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
+ scm_mark_locations (scm_stack_base, stack_len);
#else
- long stack_len = scm_stack_size (scm_stack_base);
-#endif
- scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
-#else
-#ifdef nosve
- long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
-#else
- long stack_len = scm_stack_size (scm_stack_base);
-#endif
- scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
+ scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
}
/* FIXME: we should have a means to register C functions to be run
* in different phases of GC
- */
+ */
scm_mark_subr_table ();
-
+
#ifndef USE_THREADS
scm_gc_mark (scm_root->handle);
#endif
-
- scm_mark_weak_vector_spines ();
- scm_guardian_zombify ();
+ scm_c_hook_run (&scm_before_sweep_c_hook, 0);
scm_gc_sweep ();
+ scm_c_hook_run (&scm_after_sweep_c_hook, 0);
+
--scm_gc_heap_lock;
scm_gc_end ();
#ifdef USE_THREADS
SCM_THREAD_CRITICAL_SECTION_END;
#endif
+ scm_c_hook_run (&scm_after_gc_c_hook, 0);
}
\f
-/* {Mark/Sweep}
+/* {Mark/Sweep}
*/
/* Mark an object precisely.
*/
-void
+void
scm_gc_mark (SCM p)
{
register long i;
if (SCM_GCMARKP (ptr))
break;
SCM_SETGCMARK (ptr);
- scm_gc_mark (SCM_CELL_WORD (ptr, 2));
+ scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
ptr = SCM_GCCDR (ptr);
goto gc_mark_loop;
case scm_tcs_cons_gloc:
break;
SCM_SETGCMARK (ptr);
{
- SCM vcell;
- vcell = SCM_CAR (ptr) - 1L;
- switch (SCM_UNPACK (SCM_CDR (vcell)))
+ /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
+ * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
+ * to a heap cell. If it is a struct, the cell word #0 of ptr is a
+ * pointer to a struct vtable data region. The fact that these are
+ * accessed in the same way restricts the possibilities to change the
+ * data layout of structs or heap cells.
+ */
+ scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
+ scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
+ switch (vtable_data [scm_vtable_index_vcell])
{
default:
- scm_gc_mark (vcell);
- ptr = SCM_GCCDR (ptr);
- goto gc_mark_loop;
+ {
+ /* ptr is a gloc */
+ SCM gloc_car = SCM_PACK (word0);
+ scm_gc_mark (gloc_car);
+ ptr = SCM_GCCDR (ptr);
+ goto gc_mark_loop;
+ }
case 1: /* ! */
case 0: /* ! */
{
- SCM layout;
- SCM * vtable_data;
- int len;
- char * fields_desc;
- register SCM * mem;
- register int x;
-
- vtable_data = (SCM *)vcell;
- layout = vtable_data[scm_vtable_index_layout];
- len = SCM_LENGTH (layout);
- fields_desc = SCM_CHARS (layout);
+ /* ptr is a struct */
+ SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
+ int len = SCM_LENGTH (layout);
+ char * fields_desc = SCM_CHARS (layout);
/* We're using SCM_GCCDR here like STRUCT_DATA, except
that it removes the mark */
- mem = (SCM *)SCM_GCCDR (ptr);
-
- if (SCM_UNPACK (vtable_data[scm_struct_i_flags]) & SCM_STRUCTF_ENTITY)
+ scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));
+
+ if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
{
- scm_gc_mark (mem[scm_struct_i_procedure]);
- scm_gc_mark (mem[scm_struct_i_setter]);
+ scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
+ scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
}
if (len)
{
- for (x = 0; x < len - 2; x += 2, ++mem)
+ int x;
+
+ for (x = 0; x < len - 2; x += 2, ++struct_data)
if (fields_desc[x] == 'p')
- scm_gc_mark (*mem);
+ scm_gc_mark (SCM_PACK (*struct_data));
if (fields_desc[x] == 'p')
{
- int j;
if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
- for (j = (long int) *mem; x; --x)
- scm_gc_mark (*++mem);
+ for (x = *struct_data; x; --x)
+ scm_gc_mark (SCM_PACK (*++struct_data));
else
- scm_gc_mark (*mem);
+ scm_gc_mark (SCM_PACK (*struct_data));
}
}
- if (!SCM_CDR (vcell))
+ if (vtable_data [scm_vtable_index_vcell] == 0)
{
- SCM_SETGCMARK (vcell);
- ptr = vtable_data[scm_vtable_index_vtable];
+ vtable_data [scm_vtable_index_vcell] = 1;
+ ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
goto gc_mark_loop;
}
}
len = SCM_LENGTH (ptr);
weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
-
+
for (x = 0; x < len; ++x)
{
SCM alist;
kvpair = SCM_CAR (alist);
next_alist = SCM_CDR (alist);
- /*
+ /*
* Do not do this:
* SCM_SETGCMARK (alist);
* SCM_SETGCMARK (kvpair);
/* Mark a Region Conservatively
*/
-void
+void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
register long m = n;
register SCM_CELLPTR ptr;
while (0 <= --m)
- if (SCM_CELLP (*(SCM **) (& x[m])))
+ if (SCM_CELLP (* (SCM *) &x[m]))
{
- ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
+ ptr = SCM2PTR (* (SCM *) &x[m]);
i = 0;
j = scm_n_heap_segs - 1;
if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
break;
}
}
- if ( !scm_heap_table[seg_id].valid
- || scm_heap_table[seg_id].valid (ptr,
- &scm_heap_table[seg_id]))
- scm_gc_mark (*(SCM *) & x[m]);
+ if (scm_heap_table[seg_id].span == 1
+ || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
+ scm_gc_mark (* (SCM *) &x[m]);
break;
}
}
-/* The following is a C predicate which determines if an SCM value can be
- regarded as a pointer to a cell on the heap. The code is duplicated
- from scm_mark_locations. */
-
-
+/* The function scm_cellp determines whether an SCM value can be regarded as a
+ * pointer to a cell on the heap. Binary search is used in order to determine
+ * the heap segment that contains the cell.
+ */
int
scm_cellp (SCM value)
{
- register int i, j;
- register SCM_CELLPTR ptr;
-
- if SCM_CELLP (*(SCM **) (& value))
- {
- ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
- i = 0;
- j = scm_n_heap_segs - 1;
- if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
- && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
- {
- while (i <= j)
- {
- int seg_id;
- seg_id = -1;
- if ( (i == j)
- || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
- seg_id = i;
- else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
- seg_id = j;
- else
- {
- int k;
- k = (i + j) / 2;
- if (k == i)
- break;
- if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
- {
- j = k;
- ++i;
- if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
- continue;
- else
- break;
- }
- else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
- {
- i = k;
- --j;
- if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
- continue;
- else
- break;
- }
- }
- if ( !scm_heap_table[seg_id].valid
- || scm_heap_table[seg_id].valid (ptr,
- &scm_heap_table[seg_id]))
- return 1;
- break;
- }
-
- }
+ if (SCM_CELLP (value)) {
+ scm_cell * ptr = SCM2PTR (value);
+ unsigned int i = 0;
+ unsigned int j = scm_n_heap_segs - 1;
+
+ while (i < j) {
+ int k = (i + j) / 2;
+ if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
+ j = k;
+ } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
+ i = k + 1;
+ }
}
- return 0;
-}
-
-static void
-scm_mark_weak_vector_spines ()
-{
- SCM w;
-
- for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
- {
- if (SCM_IS_WHVEC_ANY (w))
- {
- SCM *ptr;
- SCM obj;
- int j;
- int n;
-
- obj = w;
- ptr = SCM_VELTS (w);
- n = SCM_LENGTH (w);
- for (j = 0; j < n; ++j)
- {
- SCM alist;
-
- alist = ptr[j];
- while ( SCM_CONSP (alist)
- && !SCM_GCMARKP (alist)
- && SCM_CONSP (SCM_CAR (alist)))
- {
- SCM_SETGCMARK (alist);
- SCM_SETGCMARK (SCM_CAR (alist));
- alist = SCM_GCCDR (alist);
- }
- }
- }
+ if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
+ && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
+ && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
+ return 1;
+ } else {
+ return 0;
}
+ } else {
+ return 0;
+ }
}
-#ifdef GUILE_NEW_GC_SCHEME
static void
gc_sweep_freelist_start (scm_freelist_t *freelist)
{
freelist->cells = SCM_EOL;
freelist->left_to_collect = freelist->cluster_size;
+ freelist->clusters_allocated = 0;
freelist->clusters = SCM_EOL;
freelist->clustertail = &freelist->clusters;
+ freelist->collected_1 = freelist->collected;
freelist->collected = 0;
}
static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
+ int collected;
*freelist->clustertail = freelist->cells;
if (SCM_NNULLP (freelist->cells))
{
freelist->collected +=
freelist->span * (freelist->cluster_size - freelist->left_to_collect);
}
-
- freelist->grow_heap_p = (freelist->collected < freelist->gc_trigger);
+ scm_gc_cells_collected += freelist->collected;
+
+ /* Although freelist->min_yield is used to test freelist->collected
+ * (which is the local GC yield for freelist), it is adjusted so
+ * that *total* yield is freelist->min_yield_fraction of total heap
+ * size. This means that a too low yield is compensated by more
+ * heap on the list which is currently doing most work, which is
+ * just what we want.
+ */
+ collected = SCM_MAX (freelist->collected_1, freelist->collected);
+ freelist->grow_heap_p = (collected < freelist->min_yield);
}
-#endif
-void
+void
scm_gc_sweep ()
{
register SCM_CELLPTR ptr;
-#ifdef SCM_POINTERS_MUNGED
- register SCM scmptr;
-#else
-#undef scmptr
-#define scmptr (SCM)ptr
-#endif
register SCM nfreelist;
register scm_freelist_t *freelist;
register long m;
m = 0;
-#ifdef GUILE_NEW_GC_SCHEME
gc_sweep_freelist_start (&scm_master_freelist);
gc_sweep_freelist_start (&scm_master_freelist2);
-#else
- /* Reset all free list pointers. We'll reconstruct them completely
- while scanning. */
- for (i = 0; i < scm_n_heap_segs; i++)
- scm_heap_table[i].freelist->cells = SCM_EOL;
-#endif
-
+
for (i = 0; i < scm_n_heap_segs; i++)
{
-#ifdef GUILE_NEW_GC_SCHEME
register unsigned int left_to_collect;
-#else
- register scm_sizet n = 0;
-#endif
register scm_sizet j;
/* Unmarked cells go onto the front of the freelist this heap
simply don't assign nfreelist back into the real freelist. */
freelist = scm_heap_table[i].freelist;
nfreelist = freelist->cells;
-#ifdef GUILE_NEW_GC_SCHEME
left_to_collect = freelist->left_to_collect;
-#endif
span = scm_heap_table[i].span;
- ptr = CELL_UP (scm_heap_table[i].bounds[0]);
- seg_size = CELL_DN (scm_heap_table[i].bounds[1]) - ptr;
+ ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
+ seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
for (j = seg_size + span; j -= span; ptr += span)
{
-#ifdef SCM_POINTERS_MUNGED
- scmptr = PTR2SCM (ptr);
-#endif
+ SCM scmptr = PTR2SCM (ptr);
+
switch SCM_TYP7 (scmptr)
{
case scm_tcs_cons_gloc:
- if (SCM_GCMARKP (scmptr))
- {
- if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
- SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
- goto cmrkcontinue;
- }
{
- SCM vcell;
- vcell = SCM_CAR (scmptr) - 1L;
-
- if ((SCM_CDR (vcell) == 0) || (SCM_UNPACK (SCM_CDR (vcell)) == 1))
+ /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
+ * struct or a gloc. See the corresponding comment in
+ * scm_gc_mark.
+ */
+ scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
+ scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
+ if (SCM_GCMARKP (scmptr))
+ {
+ if (vtable_data [scm_vtable_index_vcell] == 1)
+ vtable_data [scm_vtable_index_vcell] = 0;
+ goto cmrkcontinue;
+ }
+ else
{
- scm_struct_free_t free
- = (scm_struct_free_t) ((SCM*) vcell)[scm_struct_i_free];
- m += free ((SCM *) vcell, (SCM *) SCM_GCCDR (scmptr));
+ if (vtable_data [scm_vtable_index_vcell] == 0
+ || vtable_data [scm_vtable_index_vcell] == 1)
+ {
+ scm_struct_free_t free
+ = (scm_struct_free_t) vtable_data[scm_struct_i_free];
+ m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
+ }
}
}
break;
case scm_tc7_msymbol:
if (SCM_GC8MARKP (scmptr))
goto c8mrkcontinue;
- m += ( SCM_LENGTH (scmptr)
- + 1
- + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
+ m += (SCM_LENGTH (scmptr) + 1
+ + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
scm_must_free ((char *)SCM_SLOTS (scmptr));
break;
case scm_tc7_contin:
k = SCM_SMOBNUM (scmptr);
if (!(k < scm_numsmob))
goto sweeperr;
- m += (scm_smobs[k].free) ((SCM) scmptr);
+ m += (scm_smobs[k].free) (scmptr);
break;
}
}
if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
exit (2);
#endif
-#ifndef GUILE_NEW_GC_SCHEME
- n += span;
-#else
if (!--left_to_collect)
{
SCM_SETCAR (scmptr, nfreelist);
*freelist->clustertail = scmptr;
freelist->clustertail = SCM_CDRLOC (scmptr);
-
+
nfreelist = SCM_EOL;
freelist->collected += span * freelist->cluster_size;
left_to_collect = freelist->cluster_size;
}
else
-#endif
{
/* Stick the new cell on the front of nfreelist. It's
critical that we mark this cell as freed; otherwise, the
conservative collector might trace it as some other type
of object. */
- SCM_SETCAR (scmptr, scm_tc_free_cell);
+ SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
SCM_SETCDR (scmptr, nfreelist);
nfreelist = scmptr;
}
-
+
continue;
c8mrkcontinue:
SCM_CLRGC8MARK (scmptr);
/* Update the real freelist pointer to point to the head of
the list of free cells we've built for this segment. */
freelist->cells = nfreelist;
-#ifdef GUILE_NEW_GC_SCHEME
freelist->left_to_collect = left_to_collect;
-#endif
}
-#ifndef GUILE_NEW_GC_SCHEME
- freelist->collected += n;
- scm_cells_allocated += freelist->heap_size - freelist->collected;
-#endif
-
#ifdef GUILE_DEBUG_FREELIST
-#ifdef GUILE_NEW_GC_SCHEME
scm_check_freelist (freelist == &scm_master_freelist
? scm_freelist
: scm_freelist2);
-#else
- scm_check_freelist (freelist);
-#endif
scm_map_free_list ();
#endif
}
-
-#ifdef GUILE_NEW_GC_SCHEME
+
gc_sweep_freelist_finish (&scm_master_freelist);
gc_sweep_freelist_finish (&scm_master_freelist2);
-
+
/* When we move to POSIX threads private freelists should probably
be GC-protected instead. */
scm_freelist = SCM_EOL;
scm_freelist2 = SCM_EOL;
-#endif
-
- /* Scan weak vectors. */
- {
- SCM *ptr, w;
- for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
- {
- if (!SCM_IS_WHVEC_ANY (w))
- {
- register long j, n;
-
- ptr = SCM_VELTS (w);
- n = SCM_LENGTH (w);
- for (j = 0; j < n; ++j)
- if (SCM_FREEP (ptr[j]))
- ptr[j] = SCM_BOOL_F;
- }
- else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
- {
- SCM obj = w;
- register long n = SCM_LENGTH (w);
- register long j;
-
- ptr = SCM_VELTS (w);
- for (j = 0; j < n; ++j)
- {
- SCM * fixup;
- SCM alist;
- int weak_keys;
- int weak_values;
-
- weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
- weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
-
- fixup = ptr + j;
- alist = *fixup;
-
- while ( SCM_CONSP (alist)
- && SCM_CONSP (SCM_CAR (alist)))
- {
- SCM key;
- SCM value;
-
- key = SCM_CAAR (alist);
- value = SCM_CDAR (alist);
- if ( (weak_keys && SCM_FREEP (key))
- || (weak_values && SCM_FREEP (value)))
- {
- *fixup = SCM_CDR (alist);
- }
- else
- fixup = SCM_CDRLOC (alist);
- alist = SCM_CDR (alist);
- }
- }
- }
- }
- }
+ scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
+ scm_gc_yield -= scm_cells_allocated;
scm_mallocated -= m;
scm_gc_malloc_collected = m;
}
* The primary purpose of the front end is to impose calls to gc.
*/
+
/* scm_must_malloc
* Return newly malloced storage or throw an error.
*
* The parameter WHAT is a string for error reporting.
- * If the threshold scm_mtrigger will be passed by this
+ * If the threshold scm_mtrigger will be passed by this
* allocation, or if the first call to malloc fails,
* garbage collect -- on the presumption that some objects
* using malloced storage may be collected.
if (NULL != ptr)
{
scm_mallocated = nm;
+#ifdef GUILE_DEBUG_MALLOC
+ scm_malloc_register (ptr, what);
+#endif
return ptr;
}
}
else
scm_mtrigger += scm_mtrigger / 2;
}
+#ifdef GUILE_DEBUG_MALLOC
+ scm_malloc_register (ptr, what);
+#endif
+
return ptr;
}
if (NULL != ptr)
{
scm_mallocated = nm;
+#ifdef GUILE_DEBUG_MALLOC
+ scm_malloc_reregister (where, ptr, what);
+#endif
return ptr;
}
}
else
scm_mtrigger += scm_mtrigger / 2;
}
+#ifdef GUILE_DEBUG_MALLOC
+ scm_malloc_reregister (where, ptr, what);
+#endif
return ptr;
}
return 0; /* never reached */
}
-void
+void
scm_must_free (void *obj)
{
+#ifdef GUILE_DEBUG_MALLOC
+ scm_malloc_unregister (obj);
+#endif
if (obj)
free (obj);
else
}
-#ifdef GUILE_NEW_GC_SCHEME
-static void
-adjust_gc_trigger (scm_freelist_t *freelist)
-{
- /* Adjust GC trigger based on total heap size */
- if (freelist->gc_trigger_fraction)
- freelist->gc_trigger = ((scm_master_freelist.heap_size
- + scm_master_freelist2.heap_size)
- * freelist->gc_trigger_fraction
- / 100);
-}
-#endif
-
-
\f
/* {Heap Segments}
*/
SCM_CELLPTR scm_heap_org;
-struct scm_heap_seg_data * scm_heap_table = 0;
+scm_heap_seg_data_t * scm_heap_table = 0;
int scm_n_heap_segs = 0;
/* init_heap_seg
*/
-static scm_sizet
+static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
register SCM_CELLPTR ptr;
-#ifdef SCM_POINTERS_MUNGED
- register SCM scmptr;
-#else
-#undef scmptr
-#define scmptr ptr
-#endif
SCM_CELLPTR seg_end;
int new_seg_index;
int n_new_cells;
int span = freelist->span;
-
+
if (seg_org == NULL)
return 0;
- ptr = seg_org;
-
- size = (size / sizeof (scm_cell) / span) * span * sizeof (scm_cell);
+ ptr = CELL_UP (seg_org, span);
- /* Compute the ceiling on valid object pointers w/in this segment.
+ /* Compute the ceiling on valid object pointers w/in this segment.
*/
- seg_end = CELL_DN ((char *) ptr + size);
+ seg_end = CELL_DN ((char *) seg_org + size, span);
- /* Find the right place and insert the segment record.
+ /* Find the right place and insert the segment record.
*
*/
for (new_seg_index = 0;
for (i = scm_n_heap_segs; i > new_seg_index; --i)
scm_heap_table[i] = scm_heap_table[i - 1];
}
-
+
++scm_n_heap_segs;
- scm_heap_table[new_seg_index].valid = 0;
scm_heap_table[new_seg_index].span = span;
scm_heap_table[new_seg_index].freelist = freelist;
- scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
- scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;
+ scm_heap_table[new_seg_index].bounds[0] = ptr;
+ scm_heap_table[new_seg_index].bounds[1] = seg_end;
- /* Compute the least valid object pointer w/in this segment
+ /* Compute the least valid object pointer w/in this segment
*/
- ptr = CELL_UP (ptr);
+ ptr = CELL_UP (ptr, span);
/*n_new_cells*/
n_new_cells = seg_end - ptr;
-#ifdef GUILE_NEW_GC_SCHEME
-
freelist->heap_size += n_new_cells;
- /* Partition objects in this segment into clusters
- */
+ /* Partition objects in this segment into clusters */
{
SCM clusters;
SCM *clusterp = &clusters;
n_new_cells -= n_cluster_cells;
}
else
- {
- seg_end = ptr + n_new_cells;
- n_new_cells = 0;
- }
+ /* [cmm] looks like the segment size doesn't divide cleanly by
+ cluster size. bad cmm! */
+ abort();
/* Allocate cluster spine
*/
SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
clusterp = SCM_CDRLOC (*clusterp);
ptr += span;
-
+
while (ptr < seg_end)
{
-#ifdef SCM_POINTERS_MUNGED
- scmptr = PTR2SCM (ptr);
-#endif
- SCM_SETCAR (scmptr, scm_tc_free_cell);
+ SCM scmptr = PTR2SCM (ptr);
+
+ SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
ptr += span;
}
SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
}
-
+
/* Patch up the last cluster pointer in the segment
* to join it to the input freelist.
*/
freelist->clusters = clusters;
}
- adjust_gc_trigger (&scm_master_freelist);
- adjust_gc_trigger (&scm_master_freelist2);
-
-#else /* GUILE_NEW_GC_SCHEME */
-
- /* Prepend objects in this segment to the freelist.
- */
- while (ptr < seg_end)
- {
-#ifdef SCM_POINTERS_MUNGED
- scmptr = PTR2SCM (ptr);
-#endif
- SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
- SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
- ptr += span;
- }
-
- ptr -= span;
-
- /* Patch up the last freelist pointer in the segment
- * to join it to the input freelist.
- */
- SCM_SETCDR (PTR2SCM (ptr), freelist->cells);
- freelist->cells = PTR2SCM (CELL_UP (seg_org));
-
- freelist->heap_size += n_new_cells;
-
-#endif /* GUILE_NEW_GC_SCHEME */
-
#ifdef DEBUGINFO
fprintf (stderr, "H");
#endif
return size;
-#ifdef scmptr
-#undef scmptr
-#endif
}
+static scm_sizet
+round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
+{
+ scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
-static void
+ return
+ (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
+ + ALIGNMENT_SLACK (freelist);
+}
+
+static void
alloc_some_heap (scm_freelist_t *freelist)
{
- struct scm_heap_seg_data * tmptable;
+ scm_heap_seg_data_t * tmptable;
SCM_CELLPTR ptr;
- scm_sizet len;
-
+ long len;
+
/* Critical code sections (such as the garbage collector)
* aren't supposed to add heap segments.
*/
* Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
* only if the allocation of the segment itself succeeds.
*/
- len = (1 + scm_n_heap_segs) * sizeof (struct scm_heap_seg_data);
+ len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
- SCM_SYSCALL (tmptable = ((struct scm_heap_seg_data *)
+ SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
realloc ((char *)scm_heap_table, len)));
if (!tmptable)
scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
/* Pick a size for the new heap segment.
- * The rule for picking the size of a segment is explained in
+ * The rule for picking the size of a segment is explained in
* gc.h
*/
-#ifdef GUILE_NEW_GC_SCHEME
{
- /* Assure that the new segment is large enough for the new trigger */
- int slack = freelist->gc_trigger - freelist->collected;
- int min_cells = 100 * slack / (99 - freelist->gc_trigger_fraction);
+ /* Assure that the new segment is predicted to be large enough.
+ *
+ * New yield should at least equal GC fraction of new heap size, i.e.
+ *
+ * y + dh > f * (h + dh)
+ *
+ * y : yield
+ * f : min yield fraction
+ * h : heap size
+ * dh : size of new heap segment
+ *
+ * This gives dh > (f * h - y) / (1 - f)
+ */
+ int f = freelist->min_yield_fraction;
+ long h = SCM_HEAP_SIZE;
+ long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
fprintf (stderr, "(%d < %d)", len, min_cells);
#endif
if (len < min_cells)
- len = min_cells + 1;
+ len = min_cells + freelist->cluster_size;
len *= sizeof (scm_cell);
+ /* force new sampling */
+ freelist->collected = LONG_MAX;
}
-
+
if (len > scm_max_segment_size)
len = scm_max_segment_size;
-#else
- if (scm_expmem)
- {
- len = (scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell));
- if ((scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell))
- != len)
- len = 0;
- }
- else
- len = SCM_HEAP_SEG_SIZE;
-#endif /* GUILE_NEW_GC_SCHEME */
{
scm_sizet smallest;
- smallest = (freelist->span * sizeof (scm_cell));
+ smallest = CLUSTER_SIZE_IN_BYTES (freelist);
+
if (len < smallest)
- len = (freelist->span * sizeof (scm_cell));
+ len = smallest;
/* Allocate with decaying ambition. */
while ((len >= SCM_MIN_HEAP_SEG_SIZE)
&& (len >= smallest))
{
- SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (len));
+ scm_sizet rounded_len = round_to_cluster_size (freelist, len);
+ SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
if (ptr)
{
- init_heap_seg (ptr, len, freelist);
+ init_heap_seg (ptr, rounded_len, freelist);
return;
}
len /= 2;
}
-
-SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
+SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
(SCM name),
"")
#define FUNC_NAME s_scm_unhash_name
{
SCM_CELLPTR p;
SCM_CELLPTR pbound;
- p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
- pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
+ p = scm_heap_table[x].bounds[0];
+ pbound = scm_heap_table[x].bounds[1];
while (p < pbound)
{
- SCM incar;
- incar = p->car;
- if (1 == (7 & (int)incar))
+ SCM cell = PTR2SCM (p);
+ if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
{
- --incar;
- if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
- && (SCM_CDR (incar) != 0)
- && (SCM_UNPACK (SCM_CDR (incar)) != 1))
+ /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
+ * struct cell. See the corresponding comment in scm_gc_mark.
+ */
+ scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
+ SCM gloc_car = SCM_PACK (word0); /* access as gloc */
+ SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
+ if ((SCM_TRUE_P (name) || SCM_EQ_P (SCM_CAR (gloc_car), name))
+ && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
{
- p->car = name;
+ SCM_SET_CELL_OBJECT_0 (cell, name);
}
}
++p;
counter which scm_protect_object(OBJ) increments and
scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
an object's counter is greater than zero, the garbage collector
- will not free it.
-
- Of course, that's not how it's implemented. scm_protect_object and
- scm_unprotect_object just maintain a list of references to things.
- Since the GC knows about this list, all objects it mentions stay
- alive. scm_protect_object adds its argument to the list;
- scm_unprotect_object removes the first occurrence of its argument
- to the list. */
+ will not free it. */
+
SCM
scm_protect_object (SCM obj)
{
- scm_protects = scm_cons (obj, scm_protects);
+ SCM handle;
+
+ /* This critical section barrier will be replaced by a mutex. */
+ SCM_DEFER_INTS;
+
+ handle = scm_hashq_get_handle (scm_protects, obj);
+ if (SCM_IMP (handle))
+ scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (1));
+ else
+ SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
+
+ SCM_ALLOW_INTS;
+
return obj;
}
SCM
scm_unprotect_object (SCM obj)
{
- SCM *tail_ptr = &scm_protects;
+ SCM handle;
+
+ /* This critical section barrier will be replaced by a mutex. */
+ SCM_DEFER_INTS;
+
+ handle = scm_hashq_get_handle (scm_protects, obj);
- while (SCM_CONSP (*tail_ptr))
- if (SCM_CAR (*tail_ptr) == obj)
- {
- *tail_ptr = SCM_CDR (*tail_ptr);
- break;
- }
- else
- tail_ptr = SCM_CDRLOC (*tail_ptr);
+ if (SCM_NIMP (handle))
+ {
+ int count = SCM_INUM (SCM_CDR (handle)) - 1;
+ if (count <= 0)
+ scm_hashq_remove_x (scm_protects, obj);
+ else
+ SCM_SETCDR (handle, SCM_MAKINUM (count));
+ }
+
+ SCM_ALLOW_INTS;
return obj;
}
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
- if (!init_heap_seg ((SCM_CELLPTR) malloc (init_heap_size),
- init_heap_size,
+ scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
+ if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
+ rounded_size,
freelist))
{
- init_heap_size = SCM_HEAP_SEG_SIZE;
- if (!init_heap_seg ((SCM_CELLPTR) malloc (init_heap_size),
- init_heap_size,
+ rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
+ if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
+ rounded_size,
freelist))
return 1;
}
else
scm_expmem = 1;
- freelist->grow_heap_p = (freelist->heap_size < freelist->gc_trigger);
-
+ if (freelist->min_yield_fraction)
+ freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
+ / 100);
+ freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
+
return 0;
}
\f
-#ifdef GUILE_NEW_GC_SCHEME
static void
init_freelist (scm_freelist_t *freelist,
int span,
int cluster_size,
- int gc_trigger)
+ int min_yield)
{
freelist->clusters = SCM_EOL;
freelist->cluster_size = cluster_size + 1;
- if (gc_trigger < 0)
- freelist->gc_trigger_fraction = - gc_trigger;
- else
- {
- freelist->gc_trigger = gc_trigger;
- freelist->gc_trigger_fraction = 0;
- }
+ freelist->left_to_collect = 0;
+ freelist->clusters_allocated = 0;
+ freelist->min_yield = 0;
+ freelist->min_yield_fraction = min_yield;
freelist->span = span;
freelist->collected = 0;
+ freelist->collected_1 = 0;
freelist->heap_size = 0;
}
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
scm_sizet init_heap_size_2, int gc_trigger_2,
scm_sizet max_segment_size)
-#else
-int
-scm_init_storage (scm_sizet init_heap_size, scm_sizet init_heap2_size)
-#endif
{
scm_sizet j;
scm_sys_protects[--j] = SCM_BOOL_F;
scm_block_gc = 1;
-#ifdef GUILE_NEW_GC_SCHEME
scm_freelist = SCM_EOL;
scm_freelist2 = SCM_EOL;
init_freelist (&scm_master_freelist,
1, SCM_CLUSTER_SIZE_1,
- gc_trigger_1 ? gc_trigger_1 : SCM_GC_TRIGGER_1);
+ gc_trigger_1 ? gc_trigger_1 : SCM_MIN_YIELD_1);
init_freelist (&scm_master_freelist2,
2, SCM_CLUSTER_SIZE_2,
- gc_trigger_2 ? gc_trigger_2 : SCM_GC_TRIGGER_2);
+ gc_trigger_2 ? gc_trigger_2 : SCM_MIN_YIELD_2);
scm_max_segment_size
= max_segment_size ? max_segment_size : SCM_MAX_SEGMENT_SIZE;
-#else
- scm_freelist.cells = SCM_EOL;
- scm_freelist.span = 1;
- scm_freelist.collected = 0;
- scm_freelist.heap_size = 0;
-
- scm_freelist2.cells = SCM_EOL;
- scm_freelist2.span = 2;
- scm_freelist2.collected = 0;
- scm_freelist2.heap_size = 0;
-#endif
scm_expmem = 0;
j = SCM_HEAP_SEG_SIZE;
scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
- scm_heap_table = ((struct scm_heap_seg_data *)
- scm_must_malloc (sizeof (struct scm_heap_seg_data) * 2, "hplims"));
+ scm_heap_table = ((scm_heap_seg_data_t *)
+ scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
-#ifdef GUILE_NEW_GC_SCHEME
if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
make_initial_segment (init_heap_size_2, &scm_master_freelist2))
return 1;
-#else
- if (make_initial_segment (init_heap_size_1, &scm_freelist) ||
- make_initial_segment (init_heap_size_2, &scm_freelist2))
- return 1;
-#endif
-
- scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0]);
/* scm_hplims[0] can change. do not remove scm_heap_org */
- scm_weak_vectors = SCM_EOL;
+ scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
+
+ scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
+ scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
+ scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
+ scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
+ scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
/* Initialise the list of ports. */
scm_port_table = (scm_port **)
scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
scm_nullstr = scm_makstr (0L, 0);
scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
- scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
- scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
- scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
+ scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
+ scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
+ scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
scm_stand_in_procs = SCM_EOL;
scm_permobjs = SCM_EOL;
- scm_protects = SCM_EOL;
+ scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
scm_asyncs = SCM_EOL;
- scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
- scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
+ scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
+ scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif
void
scm_init_gc ()
{
-#include "gc.x"
+ scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
+ scm_protect_object (scm_after_gc_hook);
+#include "libguile/gc.x"
}
+
+/*
+ Local Variables:
+ c-file-style: "gnu"
+ End:
+*/