-/* Copyright (C) 1995,1996 Free Software Foundation, Inc.
+/* Copyright (C) 1995,1996, 1997 Free Software Foundation, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
*
* You should have received a copy of the GNU General Public License
* along with this software; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+ * Boston, MA 02111-1307 USA
*
* As a special exception, the Free Software Foundation gives permission
* for additional uses of the text contained in its release of GUILE.
*
* If you write modifications of your own for GUILE, it is your choice
* whether to permit this exception to apply to your modifications.
- * If you do not wish that, delete this exception notice.
- */
+ * If you do not wish that, delete this exception notice. */
\f
#include <stdio.h>
#include "_scm.h"
*
* INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
* trigger a GC.
+ *
+ * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
+ * reclaimed by a GC triggered by must_malloc. If less than this is
+ * reclaimed, the trigger threshold is raised. [I don't know what a
+ * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
+ * work around an oscillation that caused almost constant GC.]
*/
#define SCM_INIT_HEAP_SIZE (32768L*sizeof(scm_cell))
#endif
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size*2)
#define SCM_INIT_MALLOC_LIMIT 100000
+#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
bounds for allocated storage */
struct scm_heap_seg_data
{
- SCM_CELLPTR bounds[2]; /* lower and upper */
- SCM *freelistp; /* the value of this may be shared */
- int ncells; /* per object in this segment */
+ /* lower and upper bounds of the segment */
+ SCM_CELLPTR bounds[2];
+
+ /* address of the head-of-freelist pointer for this segment's cells.
+ All segments usually point to the same one, scm_freelist. */
+ SCM *freelistp;
+
+ /* number of SCM words per object in this segment */
+ int ncells;
+
+ /* If SEG_DATA->valid is non-zero, the conservative marking
+ functions will apply SEG_DATA->valid to the purported pointer and
+ SEG_DATA, and mark the object iff the function returns non-zero.
+ At the moment, I don't think anyone uses this. */
int (*valid) ();
};
\f
+/* Debugging functions. */
+
+#ifdef DEBUG_FREELIST
+
+/* Return the number of the heap segment containing CELL. */
+/* Return the index of the heap segment (in scm_heap_table) whose
+   bounds contain CELL.  If CELL lies in no known segment, report it
+   on stderr and abort — on the freelist-debugging paths that call
+   this, an out-of-segment cell means the freelist is corrupt.
+   NOTE(review): printing CELL with %lx assumes SCM is
+   representable as unsigned long — confirm on the target platform.  */
+static int
+which_seg (SCM cell)
+{
+  int i;
+
+  /* Linear scan: each segment records its lower/upper bounds in
+     bounds[0]/bounds[1] (see struct scm_heap_seg_data).  */
+  for (i = 0; i < scm_n_heap_segs; i++)
+    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
+	&& SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
+      return i;
+  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
+	   cell);
+  abort ();
+}
+
+
+/* map-free-list: debugging primitive.  Walk the freelist from
+   scm_freelist and print to stderr, for each maximal run of
+   consecutive free cells lying in the same heap segment, the run
+   length and the segment number.  Returns an unspecified value.  */
+SCM_PROC (s_map_free_list, "map-free-list", 0, 0, 0, scm_map_free_list);
+SCM
+scm_map_free_list ()
+{
+  /* last_seg == -1 means "no segment seen yet", so the first run is
+     not printed until a segment change (or the loop ends).  */
+  int last_seg = -1, count = 0;
+  SCM f;
+
+  fprintf (stderr, "%d segments total\n", scm_n_heap_segs);
+  for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f))
+    {
+      int this_seg = which_seg (f);
+
+      if (this_seg != last_seg)
+	{
+	  /* Crossed into a different segment: flush the finished run.  */
+	  if (last_seg != -1)
+	    fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
+	  last_seg = this_seg;
+	  count = 0;
+	}
+      count++;
+    }
+  /* Flush the final run, if the freelist was non-empty.  */
+  if (last_seg != -1)
+    fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
+
+  fflush (stderr);
+
+  return SCM_UNSPECIFIED;
+}
+
+
+/* Number of calls to SCM_NEWCELL since startup.  */
+static unsigned long scm_newcell_count;
+
+/* Search freelist for anything that isn't marked as a free cell.
+   Abort if we find something.  Every cell on the freelist should
+   have scm_tc_free_cell in its CAR (the sweeper stores it there when
+   it frees a cell), so any other value means the freelist has been
+   corrupted; the diagnostic reports how many cells had been
+   allocated (scm_newcell_count) and the offending list position.  */
+static void
+scm_check_freelist ()
+{
+  SCM f;
+  int i = 0;
+
+  for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
+    if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
+      {
+	fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
+		 scm_newcell_count, i);
+	fflush (stderr);
+	abort ();
+      }
+}
+
+/* When non-zero, validate the whole freelist on every allocation.
+   Off by default because the check is O(freelist length); presumably
+   meant to be flipped from a debugger — TODO confirm.  */
+static int scm_debug_check_freelist = 0;
+
+/* Debugging stand-in for the SCM_NEWCELL macro: count the
+   allocation, optionally check the freelist, then allocate exactly
+   as the macro would — pop the head of scm_freelist into *INTO, or
+   fall back to scm_gc_for_newcell () when the freelist is empty.  */
+void
+scm_debug_newcell (SCM *into)
+{
+  scm_newcell_count++;
+  if (scm_debug_check_freelist)
+    scm_check_freelist ();
+
+  /* The rest of this is supposed to be identical to the SCM_NEWCELL
+     macro.  */
+  if (SCM_IMP (scm_freelist))
+    *into = scm_gc_for_newcell ();
+  else
+    {
+      *into = scm_freelist;
+      scm_freelist = SCM_CDR (scm_freelist);
+      ++scm_cells_allocated;
+    }
+}
+
+#endif /* DEBUG_FREELIST */
+
+\f
/* {Scheme Interface to GC}
*/
{
scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
scm_gc_time_taken = scm_gc_time_taken + scm_gc_rt;
- scm_take_signal (SCM_GC_SIGNAL);
+ scm_system_async_mark (scm_gc_async);
}
SCM_THREAD_CRITICAL_SECTION_START;
#endif
+ // fprintf (stderr, "gc: %s\n", what);
+
scm_gc_start (what);
if (!scm_stack_base || scm_block_gc)
{
/* This assumes that all registers are saved into the jmp_buf */
setjmp (scm_save_regs_gc_mark);
scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
- ( (scm_sizet) sizeof scm_save_regs_gc_mark
- / sizeof (SCM_STACKITEM)));
+ ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
+ sizeof scm_save_regs_gc_mark)
+ / sizeof (SCM_STACKITEM)));
{
/* stack_len is long rather than scm_sizet in order to guarantee that
gc_mark_nimp:
if (SCM_NCELLP (ptr))
- scm_wta (ptr, "rogue pointer in ", "heap");
+ scm_wta (ptr, "rogue pointer in heap", NULL);
switch (SCM_TYP7 (ptr))
{
register int x;
vtable_data = (SCM *)vcell;
- layout = vtable_data[scm_struct_i_layout];
+ layout = vtable_data[scm_vtable_index_layout];
len = SCM_LENGTH (layout);
fields_desc = SCM_CHARS (layout);
/* We're using SCM_GCCDR here like STRUCT_DATA, except
if (!SCM_CDR (vcell))
{
SCM_SETGCMARK (vcell);
- ptr = vtable_data[scm_struct_i_vtable];
+ ptr = vtable_data[scm_vtable_index_vtable];
goto gc_mark_loop;
}
}
if SCM_GC8MARKP
(ptr) break;
SCM_SETGC8MARK (ptr);
- scm_mark_locations (SCM_VELTS (ptr),
- (scm_sizet) (SCM_LENGTH (ptr) + sizeof (scm_contregs) / sizeof (SCM_STACKITEM)));
+ if (SCM_VELTS (ptr))
+ scm_mark_locations (SCM_VELTS (ptr),
+ (scm_sizet)
+ (SCM_LENGTH (ptr) +
+ (sizeof (SCM_STACKITEM) + -1 +
+ sizeof (scm_contregs)) /
+ sizeof (SCM_STACKITEM)));
break;
case scm_tc7_bvect:
case scm_tc7_byvect:
#endif
case scm_tc7_string:
- case scm_tc7_mb_string:
SCM_SETGC8MARK (ptr);
break;
case scm_tc7_substring:
- case scm_tc7_mb_substring:
if (SCM_GC8MARKP(ptr))
break;
SCM_SETGC8MARK (ptr);
sizeof (SCM *) * (scm_weak_size *= 2)));
if (scm_weak_vectors == NULL)
{
- scm_gen_puts (scm_regular_string,
- "weak vector table",
- scm_cur_errp);
- scm_gen_puts (scm_regular_string,
- "\nFATAL ERROR DURING CRITICAL SCM_CODE SECTION\n",
- scm_cur_errp);
+ scm_puts ("weak vector table", scm_cur_errp);
+ scm_puts ("\nFATAL ERROR DURING CRITICAL SCM_CODE SECTION\n",
+ scm_cur_errp);
exit(SCM_EXIT_FAILURE);
}
}
n = 0;
m = 0;
- i = 0;
- while (i < scm_n_heap_segs)
+ /* Reset all free list pointers. We'll reconstruct them completely
+ while scanning. */
+ for (i = 0; i < scm_n_heap_segs; i++)
+ *scm_heap_table[i].freelistp = SCM_EOL;
+
+ for (i = 0; i < scm_n_heap_segs; i++)
{
+ /* Unmarked cells go onto the front of the freelist this heap
+ segment points to. Rather than updating the real freelist
+ pointer as we go along, we accumulate the new head in
+ nfreelist. Then, if it turns out that the entire segment is
+ free, we free (i.e., malloc's free) the whole segment, and
+ simply don't assign nfreelist back into the real freelist. */
hp_freelist = scm_heap_table[i].freelistp;
- nfreelist = SCM_EOL;
+ nfreelist = *hp_freelist;
+
span = scm_heap_table[i].ncells;
ptr = CELL_UP (scm_heap_table[i].bounds[0]);
seg_size = CELL_DN (scm_heap_table[i].bounds[1]) - ptr;
- ++i;
for (j = seg_size + span; j -= span; ptr += span)
{
#ifdef SCM_POINTERS_MUNGED
m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
goto freechars;
case scm_tc7_substring:
- case scm_tc7_mb_substring:
if (SCM_GC8MARKP (scmptr))
goto c8mrkcontinue;
break;
case scm_tc7_string:
- case scm_tc7_mb_string:
if (SCM_GC8MARKP (scmptr))
goto c8mrkcontinue;
m += SCM_HUGE_LENGTH (scmptr) + 1;
if SCM_GC8MARKP (scmptr)
goto c8mrkcontinue;
m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
- goto freechars;
+ if (SCM_VELTS (scmptr))
+ goto freechars;
case scm_tc7_ssymbol:
if SCM_GC8MARKP(scmptr)
goto c8mrkcontinue;
if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
exit (2);
#endif
+ /* Stick the new cell on the front of nfreelist. It's
+ critical that we mark this cell as freed; otherwise, the
+ conservative collector might trace it as some other type
+ of object. */
SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
SCM_SETCDR (scmptr, nfreelist);
nfreelist = scmptr;
-#if 0
- if ((nfreelist < scm_heap_table[0].bounds[0]) ||
- (nfreelist >= scm_heap_table[0].bounds[1]))
- exit (1);
-#endif
+
continue;
c8mrkcontinue:
SCM_CLRGC8MARK (scmptr);
if (n == seg_size)
{
scm_heap_size -= seg_size;
- free ((char *) scm_heap_table[i - 1].bounds[0]);
- scm_heap_table[i - 1].bounds[0] = 0;
- for (j = i; j < scm_n_heap_segs; j++)
+ free ((char *) scm_heap_table[i].bounds[0]);
+ scm_heap_table[i].bounds[0] = 0;
+ for (j = i + 1; j < scm_n_heap_segs; j++)
scm_heap_table[j - 1] = scm_heap_table[j];
scm_n_heap_segs -= 1;
- i -= 1; /* need to scan segment just moved. */
+ i--; /* We need to scan the segment just moved. */
}
else
#endif /* ifdef GC_FREE_SEGMENTS */
+ /* Update the real freelist pointer to point to the head of
+ the list of free cells we've built for this segment. */
*hp_freelist = nfreelist;
+#ifdef DEBUG_FREELIST
+ scm_check_freelist ();
+ scm_map_free_list ();
+#endif
+
scm_gc_cells_collected += n;
n = 0;
}
/* {Front end to malloc}
*
- * scm_must_malloc, scm_must_realloc, scm_must_free
+ * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
*
* These functions provide services comperable to malloc, realloc, and
* free. They are for allocating malloced parts of scheme objects.
return ptr;
}
}
+
scm_igc (what);
nm = scm_mallocated + size;
SCM_SYSCALL (ptr = (char *) malloc (size));
if (NULL != ptr)
{
scm_mallocated = nm;
- if (nm > scm_mtrigger)
- scm_mtrigger = nm + nm / 2;
+ if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
+ if (nm > scm_mtrigger)
+ scm_mtrigger = nm + nm / 2;
+ else
+ scm_mtrigger += scm_mtrigger / 2;
+ }
return ptr;
}
goto malerr;
if (NULL != ptr)
{
scm_mallocated = nm;
- if (nm > scm_mtrigger)
- scm_mtrigger = nm + nm / 2;
+ if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
+ if (nm > scm_mtrigger)
+ scm_mtrigger = nm + nm / 2;
+ else
+ scm_mtrigger += scm_mtrigger / 2;
+ }
return ptr;
}
goto ralerr;
else
scm_wta (SCM_INUM0, "already free", "");
}
-\f
+
+/* Announce that there has been some malloc done that will be freed
+ * during gc. A typical use is for a smob that uses some malloced
+ * memory but can not get it from scm_must_malloc (for whatever
+ * reason). When a new object of this smob is created you call
+ * scm_done_malloc with the size of the object. When your smob free
+ * function is called, be sure to include this size in the return
+ * value. */
+
+/* Record SIZE bytes of malloc done outside scm_must_malloc, and run
+   a GC if that pushes the total over the trigger.  SIZE is counted
+   against scm_mallocated exactly as scm_must_malloc would count it,
+   so the caller's smob free function must return the same SIZE.  */
+void
+scm_done_malloc (size)
+     long size;
+{
+  scm_mallocated += size;
+
+  if (scm_mallocated > scm_mtrigger)
+    {
+      scm_igc ("foreign mallocs");
+      /* Same hysteresis rule as scm_must_malloc: if the GC failed to
+	 reclaim at least SCM_MTRIGGER_HYSTERESIS bytes, raise the
+	 trigger so we don't GC again almost immediately.  */
+      if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
+	{
+	  if (scm_mallocated > scm_mtrigger)
+	    scm_mtrigger = scm_mallocated + scm_mallocated / 2;
+	  else
+	    scm_mtrigger += scm_mtrigger / 2;
+	}
+    }
+}
+\f
/* {Heap Segments}
*
* A table of segment records is kept that records the upper and
* lower extents of the segment; this is used during the conservative
* phase of gc to identify probably gc roots (because they point
- * into valid segments at reasonable offsets).
- */
+ * into valid segments at reasonable offsets). */
/* scm_expmem
* is true if the first segment was smaller than INIT_HEAP_SEG.
}
+/* Protect OBJ from the garbage collector. OBJ will not be freed,
+ even if all other references are dropped, until someone applies
+ scm_unprotect_object to it. This function returns OBJ.
+
+ Note that calls to scm_protect_object do not nest. You can call
+ scm_protect_object any number of times on a given object, and the
+ next call to scm_unprotect_object will unprotect it completely.
+
+ Basically, scm_protect_object and scm_unprotect_object just
+ maintain a list of references to things. Since the GC knows about
+ this list, all objects it mentions stay alive. scm_protect_object
+   adds its argument to the list; scm_unprotect_object removes its
+ argument from the list. */
+/* Protect OBJ from the collector by consing it onto the GC-known
+   scm_protects list; duplicates are allowed (each call adds another
+   entry).  Returns OBJ.  */
+SCM
+scm_protect_object (obj)
+     SCM obj;
+{
+  /* This function really should use address hashing tables, but I
+     don't know how to use them yet.  For now we just use a list.  */
+  scm_protects = scm_cons (obj, scm_protects);
+
+  return obj;
+}
+
+
+/* Remove any protection for OBJ established by a prior call to
+   scm_protect_object.  This function returns OBJ.
+
+   See scm_protect_object for more information.  */
+/* Drop OBJ's entries from the scm_protects list via scm_delq_x
+   (destructive delete by eq?), making OBJ collectable again if
+   nothing else references it.  Returns OBJ.  */
+SCM
+scm_unprotect_object (obj)
+     SCM obj;
+{
+  scm_protects = scm_delq_x (obj, scm_protects);
+
+  return obj;
+}
+
+
\f
int
scm_init_storage (init_heap_size)
scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL, SCM_UNDEFINED);
scm_stand_in_procs = SCM_EOL;
scm_permobjs = SCM_EOL;
+ scm_protects = SCM_EOL;
scm_asyncs = SCM_EOL;
scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));