-/* Copyright (C) 1995,1996 Free Software Foundation, Inc.
+/* Copyright (C) 1995,1996, 1997 Free Software Foundation, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
*
* You should have received a copy of the GNU General Public License
* along with this software; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+ * Boston, MA 02111-1307 USA
*
* As a special exception, the Free Software Foundation gives permission
* for additional uses of the text contained in its release of GUILE.
*
* If you write modifications of your own for GUILE, it is your choice
* whether to permit this exception to apply to your modifications.
- * If you do not wish that, delete this exception notice.
- */
+ * If you do not wish that, delete this exception notice. */
\f
#include <stdio.h>
#include "_scm.h"
*
* INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
* trigger a GC.
+ *
+ * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
+ * reclaimed by a GC triggered by must_malloc. If less than this is
+ * reclaimed, the trigger threshold is raised. [I don't know what a
+ * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
+ * work around an oscillation that caused almost constant GC.]
*/
#define SCM_INIT_HEAP_SIZE (32768L*sizeof(scm_cell))
#endif
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size*2)
#define SCM_INIT_MALLOC_LIMIT 100000
+#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
bounds for allocated storage */
{
scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
scm_gc_time_taken = scm_gc_time_taken + scm_gc_rt;
- scm_take_signal (SCM_GC_SIGNAL);
+ scm_system_async_mark (scm_gc_async);
}
SCM_THREAD_CRITICAL_SECTION_START;
#endif
+ /* fprintf (stderr, "gc: %s\n", what); */
+
scm_gc_start (what);
if (!scm_stack_base || scm_block_gc)
{
gc_mark_nimp:
if (SCM_NCELLP (ptr))
- scm_wta (ptr, "rogue pointer in ", "heap");
+ scm_wta (ptr, "rogue pointer in heap", NULL);
switch (SCM_TYP7 (ptr))
{
register int x;
vtable_data = (SCM *)vcell;
- layout = vtable_data[scm_struct_i_layout];
+ layout = vtable_data[scm_vtable_index_layout];
len = SCM_LENGTH (layout);
fields_desc = SCM_CHARS (layout);
/* We're using SCM_GCCDR here like STRUCT_DATA, except
if (!SCM_CDR (vcell))
{
SCM_SETGCMARK (vcell);
- ptr = vtable_data[scm_struct_i_vtable];
+ ptr = vtable_data[scm_vtable_index_vtable];
goto gc_mark_loop;
}
}
if SCM_GC8MARKP
(ptr) break;
SCM_SETGC8MARK (ptr);
- scm_mark_locations (SCM_VELTS (ptr),
- (scm_sizet)
- (SCM_LENGTH (ptr) +
- (sizeof (SCM_STACKITEM) + -1 + sizeof (scm_contregs)) /
- sizeof (SCM_STACKITEM)));
+ if (SCM_VELTS (ptr))
+ scm_mark_locations (SCM_VELTS (ptr),
+ (scm_sizet)
+ (SCM_LENGTH (ptr) +
+ (sizeof (SCM_STACKITEM) + -1 +
+ sizeof (scm_contregs)) /
+ sizeof (SCM_STACKITEM)));
break;
case scm_tc7_bvect:
case scm_tc7_byvect:
#endif
case scm_tc7_string:
- case scm_tc7_mb_string:
SCM_SETGC8MARK (ptr);
break;
case scm_tc7_substring:
- case scm_tc7_mb_substring:
if (SCM_GC8MARKP(ptr))
break;
SCM_SETGC8MARK (ptr);
sizeof (SCM *) * (scm_weak_size *= 2)));
if (scm_weak_vectors == NULL)
{
- scm_gen_puts (scm_regular_string,
- "weak vector table",
- scm_cur_errp);
- scm_gen_puts (scm_regular_string,
- "\nFATAL ERROR DURING CRITICAL SCM_CODE SECTION\n",
- scm_cur_errp);
+ scm_puts ("weak vector table", scm_cur_errp);
+ scm_puts ("\nFATAL ERROR DURING CRITICAL SCM_CODE SECTION\n",
+ scm_cur_errp);
exit(SCM_EXIT_FAILURE);
}
}
m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
goto freechars;
case scm_tc7_substring:
- case scm_tc7_mb_substring:
if (SCM_GC8MARKP (scmptr))
goto c8mrkcontinue;
break;
case scm_tc7_string:
- case scm_tc7_mb_string:
if (SCM_GC8MARKP (scmptr))
goto c8mrkcontinue;
m += SCM_HUGE_LENGTH (scmptr) + 1;
if SCM_GC8MARKP (scmptr)
goto c8mrkcontinue;
m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
- goto freechars;
+ if (SCM_VELTS (scmptr))
+ goto freechars;
case scm_tc7_ssymbol:
if SCM_GC8MARKP(scmptr)
goto c8mrkcontinue;
/* {Front end to malloc}
*
- * scm_must_malloc, scm_must_realloc, scm_must_free
+ * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
*
* These functions provide services comperable to malloc, realloc, and
* free. They are for allocating malloced parts of scheme objects.
return ptr;
}
}
+
scm_igc (what);
nm = scm_mallocated + size;
SCM_SYSCALL (ptr = (char *) malloc (size));
if (NULL != ptr)
{
scm_mallocated = nm;
- if (nm > scm_mtrigger)
- scm_mtrigger = nm + nm / 2;
+ if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
+ if (nm > scm_mtrigger)
+ scm_mtrigger = nm + nm / 2;
+ else
+ scm_mtrigger += scm_mtrigger / 2;
+ }
return ptr;
}
goto malerr;
if (NULL != ptr)
{
scm_mallocated = nm;
- if (nm > scm_mtrigger)
- scm_mtrigger = nm + nm / 2;
+ if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
+ if (nm > scm_mtrigger)
+ scm_mtrigger = nm + nm / 2;
+ else
+ scm_mtrigger += scm_mtrigger / 2;
+ }
return ptr;
}
goto ralerr;
else
scm_wta (SCM_INUM0, "already free", "");
}
-\f
+
+/* Announce that there has been some malloc done that will be freed
+ * during gc. A typical use is for a smob that uses some malloced
+ * memory but can not get it from scm_must_malloc (for whatever
+ * reason). When a new object of this smob is created you call
+ * scm_done_malloc with the size of the object. When your smob free
+ * function is called, be sure to include this size in the return
+ * value.
+ *
+ * SIZE is the number of bytes of foreign malloc to account for; it
+ * is added to the global scm_mallocated tally so that foreign
+ * allocations contribute to the GC trigger like scm_must_malloc
+ * allocations do. */
+
+void
+scm_done_malloc (size)
+ long size;
+{
+ scm_mallocated += size;
+
+ if (scm_mallocated > scm_mtrigger)
+ {
+ scm_igc ("foreign mallocs");
+ /* If the collection failed to reclaim at least
+ * SCM_MTRIGGER_HYSTERESIS bytes below the trigger, raise the
+ * trigger threshold so we do not re-enter GC almost immediately
+ * (guards against constant-GC oscillation). */
+ if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
+ {
+ if (scm_mallocated > scm_mtrigger)
+ scm_mtrigger = scm_mallocated + scm_mallocated / 2;
+ else
+ scm_mtrigger += scm_mtrigger / 2;
+ }
+ }
+}
+\f
/* {Heap Segments}
*
* A table of segment records is kept that records the upper and
* lower extents of the segment; this is used during the conservative
* phase of gc to identify probably gc roots (because they point
- * into valid segments at reasonable offsets).
- */
+ * into valid segments at reasonable offsets). */
/* scm_expmem
* is true if the first segment was smaller than INIT_HEAP_SEG.
}
+/* Protect OBJ from the garbage collector. OBJ will not be freed,
+ even if all other references are dropped, until someone applies
+ scm_unprotect_object to it. This function returns OBJ.
+
+ Note that calls to scm_protect_object do not nest. You can call
+ scm_protect_object any number of times on a given object, and the
+ next call to scm_unprotect_object will unprotect it completely.
+
+ Basically, scm_protect_object and scm_unprotect_object just
+ maintain a list of references to things. Since the GC knows about
+ this list, all objects it mentions stay alive. scm_protect_object
+ adds its argument to the list; scm_unprotect_object removes its
+ argument from the list. */
+SCM
+scm_protect_object (obj)
+ SCM obj;
+{
+ /* This function really should use address hashing tables, but I
+ don't know how to use them yet. For now we just use a list. */
+ scm_protects = scm_cons (obj, scm_protects);
+
+ return obj;
+}
+
+
+/* Remove any protection for OBJ established by a prior call to
+ scm_protect_object. This function returns OBJ.
+
+ See scm_protect_object for more information. */
+SCM
+scm_unprotect_object (obj)
+ SCM obj;
+{
+ scm_protects = scm_delq_x (obj, scm_protects);
+
+ return obj;
+}
+
+
\f
int
scm_init_storage (init_heap_size)
scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL, SCM_UNDEFINED);
scm_stand_in_procs = SCM_EOL;
scm_permobjs = SCM_EOL;
+ scm_protects = SCM_EOL;
scm_asyncs = SCM_EOL;
scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));