X-Git-Url: http://git.hcoop.net/bpt/guile.git/blobdiff_plain/42db06f0c00b9deac9db0d048d7be8a986bbf91e..c69dfa6575e6de1063f91a1a9a94808e404f06d0:/libguile/gc.c

diff --git a/libguile/gc.c b/libguile/gc.c
index 3ab6104c2..14a1190bf 100644
--- a/libguile/gc.c
+++ b/libguile/gc.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 1995,1996 Free Software Foundation, Inc.
+/* Copyright (C) 1995,1996, 1997 Free Software Foundation, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -12,7 +12,8 @@
  *
  * You should have received a copy of the GNU General Public License
  * along with this software; see the file COPYING.  If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+ * Boston, MA 02111-1307 USA
  *
  * As a special exception, the Free Software Foundation gives permission
  * for additional uses of the text contained in its release of GUILE.
@@ -36,8 +37,7 @@
  *
  * If you write modifications of your own for GUILE, it is your choice
  * whether to permit this exception to apply to your modifications.
- * If you do not wish that, delete this exception notice.
- */
+ * If you do not wish that, delete this exception notice.  */
 
 #include <stdio.h>
 #include "_scm.h"
@@ -60,6 +60,14 @@
 #include <unistd.h>
 #endif
 
+#ifdef __STDC__
+#include <stdarg.h>
+#define var_start(x, y) va_start(x, y)
+#else
+#include <varargs.h>
+#define var_start(x, y) va_start(x)
+#endif
+
 
 /* {heap tuning parameters}
  *
@@ -87,6 +95,12 @@
  *
  * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
  * trigger a GC.
+ *
+ * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
+ * reclaimed by a GC triggered by must_malloc.  If less than this is
+ * reclaimed, the trigger threshold is raised.  [I don't know what a
+ * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
+ * work around a oscillation that caused almost constant GC.]
  */
 
 #define SCM_INIT_HEAP_SIZE (32768L*sizeof(scm_cell))
@@ -102,6 +116,7 @@
 #endif
 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size*2)
 #define SCM_INIT_MALLOC_LIMIT 100000
+#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
 
 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
    bounds for allocated storage */
@@ -178,9 +193,20 @@ SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
 
 struct scm_heap_seg_data
 {
-  SCM_CELLPTR bounds[2];   /* lower and upper */
-  SCM *freelistp;          /* the value of this may be shared */
-  int ncells;              /* per object in this segment */
+  /* lower and upper bounds of the segment */
+  SCM_CELLPTR bounds[2];
+
+  /* address of the head-of-freelist pointer for this segment's cells.
+     All segments usually point to the same one, scm_freelist.  */
+  SCM *freelistp;
+
+  /* number of SCM words per object in this segment */
+  int ncells;
+
+  /* If SEG_DATA->valid is non-zero, the conservative marking
+     functions will apply SEG_DATA->valid to the purported pointer and
+     SEG_DATA, and mark the object iff the function returns non-zero.
+     At the moment, I don't think anyone uses this.  */
   int (*valid) ();
 };
 
@@ -193,6 +219,100 @@ static void alloc_some_heap SCM_P ((int, SCM *));
 
 
 
+/* Debugging functions.  */
+
+#ifdef DEBUG_FREELIST
+
+/* Return the number of the heap segment containing CELL.  */
+static int
+which_seg (SCM cell)
+{
+  int i;
+
+  for (i = 0; i < scm_n_heap_segs; i++)
+    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
+        && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
+      return i;
+  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
+           cell);
+  abort ();
+}
+
+
+SCM_PROC (s_map_free_list, "map-free-list", 0, 0, 0, scm_map_free_list);
+SCM
+scm_map_free_list ()
+{
+  int last_seg = -1, count = 0;
+  SCM f;
+
+  fprintf (stderr, "%d segments total\n", scm_n_heap_segs);
+  for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f))
+    {
+      int this_seg = which_seg (f);
+
+      if (this_seg != last_seg)
+        {
+          if (last_seg != -1)
+            fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
+          last_seg = this_seg;
+          count = 0;
+        }
+      count++;
+    }
+  if (last_seg != -1)
+    fprintf (stderr, " %5d cells in segment %d\n", count, last_seg);
+
+  fflush (stderr);
+
+  return SCM_UNSPECIFIED;
+}
+
+
+/* Number of calls to SCM_NEWCELL since startup.  */
+static unsigned long scm_newcell_count;
+
+/* Search freelist for anything that isn't marked as a free cell.
+   Abort if we find something.  */
+static void
+scm_check_freelist ()
+{
+  SCM f;
+  int i = 0;
+
+  for (f = scm_freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
+    if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
+      {
+        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
+                 scm_newcell_count, i);
+        fflush (stderr);
+        abort ();
+      }
+}
+
+static int scm_debug_check_freelist = 0;
+void
+scm_debug_newcell (SCM *into)
+{
+  scm_newcell_count++;
+  if (scm_debug_check_freelist)
+    scm_check_freelist ();
+
+  /* The rest of this is supposed to be identical to the SCM_NEWCELL
+     macro.  */
+  if (SCM_IMP (scm_freelist))
+    *into = scm_gc_for_newcell ();
+  else
+    {
+      *into = scm_freelist;
+      scm_freelist = SCM_CDR (scm_freelist);
+      ++scm_cells_allocated;
+    }
+}
+
+#endif /* DEBUG_FREELIST */
+
+
 
 /* {Scheme Interface to GC}
  */
@@ -257,7 +377,7 @@ scm_gc_end ()
 {
   scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
   scm_gc_time_taken = scm_gc_time_taken + scm_gc_rt;
-  scm_take_signal (SCM_GC_SIGNAL);
+  scm_system_async_mark (scm_gc_async);
 }
 
 
@@ -321,6 +441,8 @@ scm_igc (what)
   SCM_THREAD_CRITICAL_SECTION_START;
 #endif
 
+  // fprintf (stderr, "gc: %s\n", what);
+
   scm_gc_start (what);
   if (!scm_stack_base || scm_block_gc)
     {
@@ -342,7 +464,7 @@ scm_igc (what)
       while (type_list != SCM_EOL)
         if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
           {
-            pos = &SCM_CDR (type_list);
+            pos = SCM_CDRLOC (type_list);
             type_list = SCM_CDR (type_list);
           }
         else
@@ -382,8 +504,9 @@ scm_igc (what)
   /* This assumes that all registers are saved into the jmp_buf */
   setjmp (scm_save_regs_gc_mark);
   scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
-                      ( (scm_sizet) sizeof scm_save_regs_gc_mark
-                       / sizeof (SCM_STACKITEM)));
+                      ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
+                                     sizeof scm_save_regs_gc_mark)
+                       / sizeof (SCM_STACKITEM)));
 
   {
     /* stack_len is long rather than scm_sizet in order to guarantee that
@@ -459,7 +582,7 @@ gc_mark_loop:
 
 gc_mark_nimp:
   if (SCM_NCELLP (ptr))
-    scm_wta (ptr, "rogue pointer in ", "heap");
+    scm_wta (ptr, "rogue pointer in heap", NULL);
 
   switch (SCM_TYP7 (ptr))
     {
@@ -505,10 +628,12 @@ gc_mark_nimp:
        register int x;
 
        vtable_data = (SCM *)vcell;
-       layout = vtable_data[scm_struct_i_layout];
+       layout = vtable_data[scm_vtable_index_layout];
        len = SCM_LENGTH (layout);
        fields_desc = SCM_CHARS (layout);
-       mem = (SCM *)SCM_GCCDR (ptr);  /* like struct_data but removes mark */
+       /* We're using SCM_GCCDR here like STRUCT_DATA, except
+          that it removes the mark */
+       mem = (SCM *)SCM_GCCDR (ptr);
        if (len)
          {
@@ -527,7 +652,7 @@ gc_mark_nimp:
        if (!SCM_CDR (vcell))
          {
            SCM_SETGCMARK (vcell);
-           ptr = vtable_data[scm_struct_i_vtable];
+           ptr = vtable_data[scm_vtable_index_vtable];
            goto gc_mark_loop;
          }
      }
@@ -566,8 +691,13 @@ gc_mark_nimp:
      if SCM_GC8MARKP (ptr)
        break;
      SCM_SETGC8MARK (ptr);
-     scm_mark_locations (SCM_VELTS (ptr),
-                         (scm_sizet) (SCM_LENGTH (ptr) + sizeof (regs) / sizeof (SCM_STACKITEM)));
+     if (SCM_VELTS (ptr))
+       scm_mark_locations (SCM_VELTS (ptr),
+                           (scm_sizet)
+                           (SCM_LENGTH (ptr) +
+                            (sizeof (SCM_STACKITEM) + -1 +
+                             sizeof (scm_contregs)) /
+                            sizeof (SCM_STACKITEM)));
      break;
    case scm_tc7_bvect:
    case scm_tc7_byvect:
@@ -582,12 +712,10 @@ gc_mark_nimp:
 #endif
 
    case scm_tc7_string:
-   case scm_tc7_mb_string:
      SCM_SETGC8MARK (ptr);
      break;
 
    case scm_tc7_substring:
-   case scm_tc7_mb_substring:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
@@ -605,12 +733,9 @@ gc_mark_nimp:
                                            sizeof (SCM *) * (scm_weak_size *= 2)));
          if (scm_weak_vectors == NULL)
            {
-             scm_gen_puts (scm_regular_string,
-                           "weak vector table",
-                           scm_cur_errp);
-             scm_gen_puts (scm_regular_string,
-                           "\nFATAL ERROR DURING CRITICAL SCM_CODE SECTION\n",
-                           scm_cur_errp);
+             scm_puts ("weak vector table", scm_cur_errp);
+             scm_puts ("\nFATAL ERROR DURING CRITICAL SCM_CODE SECTION\n",
+                       scm_cur_errp);
              exit(SCM_EXIT_FAILURE);
            }
        }
@@ -707,7 +832,7 @@ gc_mark_nimp:
    case scm_tc_free_cell:
      /* printf("found free_cell %X ", ptr); fflush(stdout); */
      SCM_SETGC8MARK (ptr);
-     SCM_CDR (ptr) = SCM_EOL;
+     SCM_SETCDR (ptr, SCM_EOL);
      break;
    case scm_tcs_bignums:
    case scm_tc16_flo:
@@ -798,14 +923,10 @@ scm_mark_locations (x, n)
    regarded as a pointer to a cell on the heap.  The code is duplicated
    from scm_mark_locations.  */
 
-#ifdef __STDC__
-int
-scm_cellp (SCM value)
-#else
+
 int
 scm_cellp (value)
      SCM value;
-#endif
 {
   register int i, j;
   register SCM_CELLPTR ptr;
@@ -925,16 +1046,26 @@ scm_gc_sweep ()
   n = 0;
   m = 0;
-  i = 0;
-  while (i < scm_n_heap_segs)
+
+  /* Reset all free list pointers.  We'll reconstruct them completely
+     while scanning.  */
+  for (i = 0; i < scm_n_heap_segs; i++)
+    *scm_heap_table[i].freelistp = SCM_EOL;
+
+  for (i = 0; i < scm_n_heap_segs; i++)
     {
+      /* Unmarked cells go onto the front of the freelist this heap
+         segment points to.  Rather than updating the real freelist
+         pointer as we go along, we accumulate the new head in
+         nfreelist.  Then, if it turns out that the entire segment is
+         free, we free (i.e., malloc's free) the whole segment, and
+         simply don't assign nfreelist back into the real freelist.  */
      hp_freelist = scm_heap_table[i].freelistp;
-      nfreelist = SCM_EOL;
+      nfreelist = *hp_freelist;
+
      span = scm_heap_table[i].ncells;
      ptr = CELL_UP (scm_heap_table[i].bounds[0]);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1]) - ptr;
-      ++i;
      for (j = seg_size + span; j -= span; ptr += span)
        {
 #ifdef SCM_POINTERS_MUNGED
@@ -946,7 +1077,7 @@ scm_gc_sweep ()
          if (SCM_GCMARKP (scmptr))
            {
              if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
-               SCM_CDR (SCM_CAR (scmptr) - 1) = (SCM)0;
+               SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
              goto cmrkcontinue;
            }
          {
@@ -955,12 +1086,10 @@ scm_gc_sweep ()
            if ((SCM_CDR (vcell) == 0) || (SCM_CDR (vcell) == 1))
              {
-                SCM * mem;
-                SCM amt;
-                mem = (SCM *)SCM_CDR (scmptr);
-                amt = mem[- scm_struct_n_extra_words];
-                free (mem - scm_struct_n_extra_words);
-                m += amt * sizeof (SCM);
+                SCM *p = (SCM *) SCM_GCCDR (scmptr);
+                m += p[scm_struct_i_n_words] * sizeof (SCM);
+                /* I feel like I'm programming in BCPL here...  */
+                free ((char *) p[scm_struct_i_ptr]);
              }
          }
          break;
@@ -1039,12 +1168,10 @@ scm_gc_sweep ()
          m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
          goto freechars;
        case scm_tc7_substring:
-       case scm_tc7_mb_substring:
          if (SCM_GC8MARKP (scmptr))
            goto c8mrkcontinue;
          break;
        case scm_tc7_string:
-       case scm_tc7_mb_string:
          if (SCM_GC8MARKP (scmptr))
            goto c8mrkcontinue;
          m += SCM_HUGE_LENGTH (scmptr) + 1;
@@ -1060,8 +1187,9 @@ scm_gc_sweep ()
        case scm_tc7_contin:
          if SCM_GC8MARKP (scmptr)
            goto c8mrkcontinue;
-         m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (regs);
-         goto freechars;
+         m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
+         if (SCM_VELTS (scmptr))
+           goto freechars;
        case scm_tc7_ssymbol:
          if SCM_GC8MARKP(scmptr)
            goto c8mrkcontinue;
@@ -1086,7 +1214,7 @@ scm_gc_sweep ()
              SCM_SETSTREAM (scmptr, 0);
              scm_remove_from_port_table (scmptr);
              scm_gc_ports_collected++;
-             SCM_CAR (scmptr) &= ~SCM_OPN;
+             SCM_SETAND_CAR (scmptr, ~SCM_OPN);
            }
          break;
        case scm_tc7_smob:
@@ -1142,14 +1270,14 @@ scm_gc_sweep ()
          if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
            exit (2);
 #endif
-         SCM_CAR (scmptr) = (SCM) scm_tc_free_cell;
-         SCM_CDR (scmptr) = nfreelist;
+         /* Stick the new cell on the front of nfreelist.  It's
+            critical that we mark this cell as freed; otherwise, the
+            conservative collector might trace it as some other type
+            of object.  */
+         SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
+         SCM_SETCDR (scmptr, nfreelist);
          nfreelist = scmptr;
-#if 0
-         if ((nfreelist < scm_heap_table[0].bounds[0]) ||
-             (nfreelist >= scm_heap_table[0].bounds[1]))
-           exit (1);
-#endif
+
          continue;
        c8mrkcontinue:
          SCM_CLRGC8MARK (scmptr);
@@ -1161,17 +1289,24 @@ scm_gc_sweep ()
      if (n == seg_size)
        {
          scm_heap_size -= seg_size;
-         free ((char *) scm_heap_table[i - 1].bounds[0]);
-         scm_heap_table[i - 1].bounds[0] = 0;
-         for (j = i; j < scm_n_heap_segs; j++)
+         free ((char *) scm_heap_table[i].bounds[0]);
+         scm_heap_table[i].bounds[0] = 0;
+         for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
-         i -= 1;  /* need to scan segment just moved.  */
+         i--;  /* We need to scan the segment just moved.  */
        }
      else
 #endif /* ifdef GC_FREE_SEGMENTS */
+       /* Update the real freelist pointer to point to the head of
+          the list of free cells we've built for this segment.  */
        *hp_freelist = nfreelist;
+
+#ifdef DEBUG_FREELIST
+      scm_check_freelist ();
+      scm_map_free_list ();
+#endif
+
      scm_gc_cells_collected += n;
      n = 0;
    }
@@ -1223,7 +1358,7 @@ scm_gc_sweep ()
                  *fixup = SCM_CDR (alist);
                }
              else
-               fixup = &SCM_CDR (alist);
+               fixup = SCM_CDRLOC (alist);
              alist = SCM_CDR (alist);
            }
        }
@@ -1240,7 +1375,7 @@ scm_gc_sweep ()
 
 /* {Front end to malloc}
  *
- * scm_must_malloc, scm_must_realloc, scm_must_free
+ * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
  *
  * These functions provide services comperable to malloc, realloc, and
  * free.  They are for allocating malloced parts of scheme objects.
@@ -1278,14 +1413,19 @@ scm_must_malloc (len, what)
          return ptr;
        }
    }
+
  scm_igc (what);
  nm = scm_mallocated + size;
  SCM_SYSCALL (ptr = (char *) malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
-      if (nm > scm_mtrigger)
-       scm_mtrigger = nm + nm / 2;
+      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
+       if (nm > scm_mtrigger)
+         scm_mtrigger = nm + nm / 2;
+       else
+         scm_mtrigger += scm_mtrigger / 2;
+      }
      return ptr;
    }
  goto malerr;
@@ -1323,8 +1463,12 @@ scm_must_realloc (where, olen, len, what)
  if (NULL != ptr)
    {
      scm_mallocated = nm;
-      if (nm > scm_mtrigger)
-       scm_mtrigger = nm + nm / 2;
+      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
+       if (nm > scm_mtrigger)
+         scm_mtrigger = nm + nm / 2;
+       else
+         scm_mtrigger += scm_mtrigger / 2;
+      }
      return ptr;
    }
  goto ralerr;
@@ -1339,9 +1483,36 @@ scm_must_free (obj)
  else
    scm_wta (SCM_INUM0, "already free", "");
 }
-
+/* Announce that there has been some malloc done that will be freed
+ * during gc.  A typical use is for a smob that uses some malloced
+ * memory but can not get it from scm_must_malloc (for whatever
+ * reason).  When a new object of this smob is created you call
+ * scm_done_malloc with the size of the object.  When your smob free
+ * function is called, be sure to include this size in the return
+ * value. */
+
+void
+scm_done_malloc (size)
+     long size;
+{
+  scm_mallocated += size;
+  if (scm_mallocated > scm_mtrigger)
+    {
+      scm_igc ("foreign mallocs");
+      if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
+        {
+          if (scm_mallocated > scm_mtrigger)
+            scm_mtrigger = scm_mallocated + scm_mallocated / 2;
+          else
+            scm_mtrigger += scm_mtrigger / 2;
+        }
+    }
+}
+
+
+
 
 /* {Heap Segments}
  *
@@ -1350,8 +1521,7 @@ scm_must_free (obj)
  * A table of segment records is kept that records the upper and
  * lower extents of the segment;  this is used during the conservative
  * phase of gc to identify probably gc roots (because they point
- * into valid segments at reasonable offsets).
- */
+ * into valid segments at reasonable offsets).  */
 
 /* scm_expmem
  * is true if the first segment was smaller than INIT_HEAP_SEG.
@@ -1449,8 +1619,8 @@ init_heap_seg (seg_org, size, ncells, freelistp)
 #ifdef SCM_POINTERS_MUNGED
      scmptr = PTR2SCM (ptr);
 #endif
-     SCM_CAR (scmptr) = (SCM) scm_tc_free_cell;
-     SCM_CDR (scmptr) = PTR2SCM (ptr + ncells);
+     SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
+     SCM_SETCDR (scmptr, PTR2SCM (ptr + ncells));
      ptr += ncells;
    }
 
@@ -1459,7 +1629,7 @@ init_heap_seg (seg_org, size, ncells, freelistp)
  /* Patch up the last freelist pointer in the segment
   * to join it to the input freelist.
   */
-  SCM_CDR (PTR2SCM (ptr)) = *freelistp;
+  SCM_SETCDR (PTR2SCM (ptr), *freelistp);
  *freelistp = PTR2SCM (CELL_UP (seg_org));
 
  scm_heap_size += (ncells * n_new_objects);
@@ -1586,6 +1756,7 @@ scm_remember (ptr)
      SCM * ptr;
 {}
 
+
 #ifdef __STDC__
 SCM
 scm_return_first (SCM elt, ...)
@@ -1611,6 +1782,45 @@ scm_permanent_object (obj)
 }
 
 
+/* Protect OBJ from the garbage collector.  OBJ will not be freed,
+   even if all other references are dropped, until someone applies
+   scm_unprotect_object to it.  This function returns OBJ.
+
+   Note that calls to scm_protect_object do not nest.  You can call
+   scm_protect_object any number of times on a given object, and the
+   next call to scm_unprotect_object will unprotect it completely.
+
+   Basically, scm_protect_object and scm_unprotect_object just
+   maintain a list of references to things.  Since the GC knows about
+   this list, all objects it mentions stay alive.  scm_protect_object
+   adds its argument to the list; scm_unprotect_object remove its
+   argument from the list.  */
+SCM
+scm_protect_object (obj)
+     SCM obj;
+{
+  /* This function really should use address hashing tables, but I
+     don't know how to use them yet.  For now we just use a list.  */
+  scm_protects = scm_cons (obj, scm_protects);
+
+  return obj;
+}
+
+
+/* Remove any protection for OBJ established by a prior call to
+   scm_protect_obj.  This function returns OBJ.
+
+   See scm_protect_obj for more information.  */
+SCM
+scm_unprotect_object (obj)
+     SCM obj;
+{
+  scm_protects = scm_delq_x (obj, scm_protects);
+
+  return obj;
+}
+
+
 
 int
 scm_init_storage (init_heap_size)
@@ -1654,7 +1864,7 @@ scm_init_storage (init_heap_size)
 
   scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
-  SCM_CDR (scm_undefineds) = scm_undefineds;
+  SCM_SETCDR (scm_undefineds, scm_undefineds);
 
   scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
   scm_nullstr = scm_makstr (0L, 0);
@@ -1662,7 +1872,9 @@ scm_init_storage (init_heap_size)
   scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL, SCM_UNDEFINED);
   scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
   scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL, SCM_UNDEFINED);
+  scm_stand_in_procs = SCM_EOL;
   scm_permobjs = SCM_EOL;
+  scm_protects = SCM_EOL;
   scm_asyncs = SCM_EOL;
   scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
   scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));