-/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003 Free Software Foundation, Inc.
+/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006, 2008 Free Software Foundation, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-
/* #define DEBUGINFO */
-#if HAVE_CONFIG_H
+#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
+#include "libguile/gen-scmconfig.h"
+
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"
+#include "libguile/dynwind.h"
+
+#include "libguile/boehm-gc.h"
#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#include <unistd.h>
#endif
-
-
-unsigned int scm_gc_running_p = 0;
-
/* Lock this mutex before doing lazy sweeping.
*/
-scm_t_rec_mutex scm_i_sweep_mutex;
+scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Set this to != 0 if every cell that is accessed shall be checked:
*/
else
{
counter = scm_debug_cells_gc_interval;
- scm_igc ("scm_assert_cell_valid");
+ scm_gc ();
}
}
}
*/
if (scm_expensive_debug_cell_accesses_p)
scm_i_expensive_validation_check (cell);
-
+#if (SCM_DEBUG_MARKING_API == 0)
if (!SCM_GC_MARK_P (cell))
{
fprintf (stderr,
(unsigned long) SCM_UNPACK (cell));
abort ();
}
-
+#endif /* SCM_DEBUG_MARKING_API */
+
scm_i_cell_validation_already_running = 0; /* re-enable */
}
}
#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
\f
-
-scm_t_key scm_i_freelist;
-scm_t_key scm_i_freelist2;
+/* Hooks. */
+scm_t_c_hook scm_before_gc_c_hook;
+scm_t_c_hook scm_before_mark_c_hook;
+scm_t_c_hook scm_before_sweep_c_hook;
+scm_t_c_hook scm_after_sweep_c_hook;
+scm_t_c_hook scm_after_gc_c_hook;
/* scm_mtrigger
*/
unsigned long scm_mtrigger;
-/* scm_gc_heap_lock
- * If set, don't expand the heap. Set only during gc, during which no allocation
- * is supposed to take place anyway.
- */
-int scm_gc_heap_lock = 0;
-
-/* GC Blocking
- * Don't pause for collection if this is set -- just
- * expand the heap.
- */
-int scm_block_gc = 1;
-
-/* During collection, this accumulates objects holding
- * weak references.
- */
-SCM scm_weak_vectors;
-
/* GC Statistics Keeping
*/
-unsigned long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;
-unsigned long scm_gc_cells_collected;
-unsigned long scm_gc_cells_collected_1 = 0; /* previous GC yield */
-unsigned long scm_gc_malloc_collected;
-unsigned long scm_gc_ports_collected;
-unsigned long scm_gc_time_taken = 0;
-static unsigned long t_before_gc;
-unsigned long scm_gc_mark_time_taken = 0;
-unsigned long scm_gc_times = 0;
-unsigned long scm_gc_cells_swept = 0;
-double scm_gc_cells_marked_acc = 0.;
-double scm_gc_cells_swept_acc = 0.;
-int scm_gc_cell_yield_percentage =0;
-int scm_gc_malloc_yield_percentage = 0;
-unsigned long protected_obj_count = 0;
+unsigned long scm_gc_ports_collected = 0;
+
+
+static unsigned long protected_obj_count = 0;
SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
-SCM_SYMBOL (sym_heap_size, "cell-heap-size");
+SCM_SYMBOL (sym_heap_size, "heap-size");
+SCM_SYMBOL (sym_heap_free_size, "heap-free-size");
+SCM_SYMBOL (sym_heap_total_allocated, "heap-total-allocated");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
+SCM_SYMBOL (sym_cells_marked_conservatively, "cells-marked-conservatively");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
SCM_SYMBOL (sym_cell_yield, "cell-yield");
SCM_SYMBOL (sym_protected_objects, "protected-objects");
-
-
+SCM_SYMBOL (sym_total_cells_allocated, "total-cells-allocated");
/* Number of calls to SCM_NEWCELL since startup. */
/* {Scheme Interface to GC}
*/
+/* Hash-fold helper for `gc-live-object-stats': prepend (KEY . VAL) to the
+   alist ACC.  An integer KEY is a type tag and is replaced by the
+   human-readable name from `scm_i_tag_name', or by "tag %d" when the tag
+   is unknown.  CLOSURE is unused.  */
+static SCM
+tag_table_to_type_alist (void *closure, SCM key, SCM val, SCM acc)
+{
+  if (scm_is_integer (key))
+    {
+      int c_tag = scm_to_int (key);
+
+      char const * name = scm_i_tag_name (c_tag);
+      if (name != NULL)
+        {
+          key = scm_from_locale_string (name);
+        }
+      else
+        {
+          char s[100];
+          /* snprintf: an unexpectedly wide tag value must not overflow S.  */
+          snprintf (s, sizeof s, "tag %d", c_tag);
+          key = scm_from_locale_string (s);
+        }
+    }
+
+  return scm_cons (scm_cons (key, val), acc);
+}
+
+/* NOTE(review): TAB is freshly created and nothing populates it before
+   the fold below, so this currently always returns the empty list.
+   Presumably the old heap-segment census that used to fill the table was
+   removed with the segment code -- FIXME: repopulate or deprecate.  */
+SCM_DEFINE (scm_gc_live_object_stats, "gc-live-object-stats", 0, 0, 0,
+	    (),
+	    "Return an alist of statistics of the current live objects. ")
+#define FUNC_NAME s_scm_gc_live_object_stats
+{
+  SCM tab = scm_make_hash_table (scm_from_int (57));
+  SCM alist;
+
+  alist
+    = scm_internal_hash_fold (&tag_table_to_type_alist, NULL, SCM_EOL, tab);
+
+  return alist;
+}
+#undef FUNC_NAME
+
extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
(),
"use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
- long i = 0;
- SCM heap_segs = SCM_EOL ;
- unsigned long int local_scm_mtrigger;
- unsigned long int local_scm_mallocated;
- unsigned long int local_scm_heap_size;
- int local_scm_gc_cell_yield_percentage;
- int local_scm_gc_malloc_yield_percentage;
- unsigned long int local_scm_cells_allocated;
- unsigned long int local_scm_gc_time_taken;
- unsigned long int local_scm_gc_times;
- unsigned long int local_scm_gc_mark_time_taken;
- unsigned long int local_protected_obj_count;
- double local_scm_gc_cells_swept;
- double local_scm_gc_cells_marked;
SCM answer;
- unsigned long *bounds = 0;
- int table_size = scm_i_heap_segment_table_size;
- SCM_DEFER_INTS;
-
- /*
- temporarily store the numbers, so as not to cause GC.
- */
-
- bounds = malloc (sizeof (int) * table_size * 2);
- if (!bounds)
- abort();
- for (i = table_size; i--; )
- {
- bounds[2*i] = (unsigned long)scm_i_heap_segment_table[i]->bounds[0];
- bounds[2*i+1] = (unsigned long)scm_i_heap_segment_table[i]->bounds[1];
- }
+  /* Heap figures are read straight from libgc.  NOTE(review):
+     BYTES_SINCE_GC is fetched but never reported in the alist below --
+     either include it in the result or drop the call.  */
+  size_t heap_size, free_bytes, bytes_since_gc, total_bytes;
+  size_t gc_times;
+  heap_size = GC_get_heap_size ();
+  free_bytes = GC_get_free_bytes ();
+  bytes_since_gc = GC_get_bytes_since_gc ();
+  total_bytes = GC_get_total_bytes ();
+  gc_times = GC_gc_no;
- /* Below, we cons to produce the resulting list. We want a snapshot of
- * the heap situation before consing.
- */
- local_scm_mtrigger = scm_mtrigger;
- local_scm_mallocated = scm_mallocated;
- local_scm_heap_size = SCM_HEAP_SIZE;
-
- local_scm_cells_allocated = scm_cells_allocated;
-
- local_scm_gc_time_taken = scm_gc_time_taken;
- local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
- local_scm_gc_times = scm_gc_times;
- local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
- local_scm_gc_cell_yield_percentage= scm_gc_cell_yield_percentage;
- local_protected_obj_count = protected_obj_count;
- local_scm_gc_cells_swept =
- (double) scm_gc_cells_swept_acc
- + (double) scm_gc_cells_swept;
- local_scm_gc_cells_marked = scm_gc_cells_marked_acc
- +(double) scm_gc_cells_swept
- -(double) scm_gc_cells_collected;
-
- for (i = table_size; i--;)
- {
- heap_segs = scm_cons (scm_cons (scm_from_ulong (bounds[2*i]),
- scm_from_ulong (bounds[2*i+1])),
- heap_segs);
- }
-
+ /* njrev: can any of these scm_cons's or scm_list_n signal a memory
+ error? If so we need a frame here. */
answer =
- scm_list_n (scm_cons (sym_gc_time_taken,
- scm_from_ulong (local_scm_gc_time_taken)),
+ scm_list_n (scm_cons (sym_gc_time_taken, SCM_INUM0),
+#if 0
scm_cons (sym_cells_allocated,
scm_from_ulong (local_scm_cells_allocated)),
- scm_cons (sym_heap_size,
- scm_from_ulong (local_scm_heap_size)),
scm_cons (sym_mallocated,
scm_from_ulong (local_scm_mallocated)),
scm_cons (sym_mtrigger,
scm_from_ulong (local_scm_mtrigger)),
- scm_cons (sym_times,
- scm_from_ulong (local_scm_gc_times)),
scm_cons (sym_gc_mark_time_taken,
scm_from_ulong (local_scm_gc_mark_time_taken)),
scm_cons (sym_cells_marked,
scm_cons (sym_cells_swept,
scm_from_double (local_scm_gc_cells_swept)),
scm_cons (sym_malloc_yield,
- scm_from_long(local_scm_gc_malloc_yield_percentage)),
+ scm_from_long (local_scm_gc_malloc_yield_percentage)),
scm_cons (sym_cell_yield,
scm_from_long (local_scm_gc_cell_yield_percentage)),
- scm_cons (sym_protected_objects,
- scm_from_ulong (local_protected_obj_count)),
scm_cons (sym_heap_segments, heap_segs),
+#endif
+ scm_cons (sym_heap_size, scm_from_size_t (heap_size)),
+ scm_cons (sym_heap_free_size, scm_from_size_t (free_bytes)),
+ scm_cons (sym_heap_total_allocated,
+ scm_from_size_t (total_bytes)),
+ scm_cons (sym_protected_objects,
+ scm_from_ulong (protected_obj_count)),
+ scm_cons (sym_times, scm_from_size_t (gc_times)),
SCM_UNDEFINED);
- SCM_ALLOW_INTS;
-
- free (bounds);
+
return answer;
}
#undef FUNC_NAME
-static void
-gc_start_stats (const char *what SCM_UNUSED)
-{
- t_before_gc = scm_c_get_internal_run_time ();
-
- scm_gc_cells_marked_acc += (double) scm_gc_cells_swept
- - (double) scm_gc_cells_collected;
- scm_gc_cells_swept_acc += (double) scm_gc_cells_swept;
-
- scm_gc_cell_yield_percentage = ( scm_gc_cells_collected * 100 ) / SCM_HEAP_SIZE;
-
- scm_gc_cells_swept = 0;
- scm_gc_cells_collected_1 = scm_gc_cells_collected;
-
- /*
- CELLS SWEPT is another word for the number of cells that were
- examined during GC. YIELD is the number that we cleaned
- out. MARKED is the number that weren't cleaned.
- */
- scm_gc_cells_collected = 0;
- scm_gc_malloc_collected = 0;
- scm_gc_ports_collected = 0;
-}
-
-static void
-gc_end_stats ()
-{
- unsigned long t = scm_c_get_internal_run_time ();
- scm_gc_time_taken += (t - t_before_gc);
-
- ++scm_gc_times;
-}
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
#undef FUNC_NAME
-SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
- (),
- "Scans all of SCM objects and reclaims for further use those that are\n"
- "no longer accessible.")
-#define FUNC_NAME s_scm_gc
+/* Thin wrapper around libgc's GC_disable.  Calls nest, as the docstring
+   says, and are undone one-for-one by `gc-enable'.  */
+SCM_DEFINE (scm_gc_disable, "gc-disable", 0, 0, 0,
+	    (),
+	    "Disables the garbage collector. Nested calls are permitted. "
+	    "GC is re-enabled once @code{gc-enable} has been called the "
+	    "same number of times @code{gc-disable} was called.")
+#define FUNC_NAME s_scm_gc_disable
{
- scm_igc ("call");
+ GC_disable ();
return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
-
-\f
-
-/* When we get POSIX threads support, the master will be global and
- * common while the freelist will be individual for each thread.
- */
-
-SCM
-scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
+/* Thin wrapper around libgc's GC_enable; undoes one prior `gc-disable'.  */
+SCM_DEFINE (scm_gc_enable, "gc-enable", 0, 0, 0,
+	    (),
+	    "Enables the garbage collector.")
+#define FUNC_NAME s_scm_gc_enable
{
- SCM cell;
-
- scm_rec_mutex_lock (&scm_i_sweep_mutex);
-
- *free_cells = scm_i_sweep_some_segments (freelist);
- if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
- {
- freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
- *free_cells = scm_i_sweep_some_segments (freelist);
- }
-
- if (*free_cells == SCM_EOL && !scm_block_gc)
- {
- /*
- with the advent of lazy sweep, GC yield is only know just
- before doing the GC.
- */
- scm_i_adjust_min_yield (freelist);
-
- /*
- out of fresh cells. Try to get some new ones.
- */
-
- scm_igc ("cells");
-
- *free_cells = scm_i_sweep_some_segments (freelist);
- }
-
- if (*free_cells == SCM_EOL)
- {
- /*
- failed getting new cells. Get new juice or die.
- */
- freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
- *free_cells = scm_i_sweep_some_segments (freelist);
- }
-
- if (*free_cells == SCM_EOL)
- abort ();
-
- cell = *free_cells;
-
- *free_cells = SCM_FREE_CELL_CDR (cell);
-
- scm_rec_mutex_unlock (&scm_i_sweep_mutex);
-
- return cell;
+ GC_enable ();
+ return SCM_UNSPECIFIED;
}
+#undef FUNC_NAME
-scm_t_c_hook scm_before_gc_c_hook;
-scm_t_c_hook scm_before_mark_c_hook;
-scm_t_c_hook scm_before_sweep_c_hook;
-scm_t_c_hook scm_after_sweep_c_hook;
-scm_t_c_hook scm_after_gc_c_hook;
-
-void
-scm_igc (const char *what)
+/* Scheme-level `gc': request an immediate, full collection.  */
+SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
+	    (),
+	    "Scans all of SCM objects and reclaims for further use those that are\n"
+	    "no longer accessible.")
+#define FUNC_NAME s_scm_gc
{
- scm_rec_mutex_lock (&scm_i_sweep_mutex);
- ++scm_gc_running_p;
- scm_c_hook_run (&scm_before_gc_c_hook, 0);
-
-#ifdef DEBUGINFO
- fprintf (stderr,"gc reason %s\n", what);
-
- fprintf (stderr,
- scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist))
- ? "*"
- : (scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist2)) ? "o" : "m"));
-#endif
-
- /* During the critical section, only the current thread may run. */
- scm_i_thread_put_to_sleep ();
-
- if (!scm_root || !scm_stack_base || scm_block_gc)
- {
- --scm_gc_running_p;
- return;
- }
-
- gc_start_stats (what);
-
- if (scm_gc_heap_lock)
- /* We've invoked the collector while a GC is already in progress.
- That should never happen. */
- abort ();
-
- ++scm_gc_heap_lock;
-
- /*
- Let's finish the sweep. The conservative GC might point into the
- garbage, and marking that would create a mess.
- */
- scm_i_sweep_all_segments("GC");
- if (scm_mallocated < scm_i_deprecated_memory_return)
- {
- /* The byte count of allocated objects has underflowed. This is
- probably because you forgot to report the sizes of objects you
- have allocated, by calling scm_done_malloc or some such. When
- the GC freed them, it subtracted their size from
- scm_mallocated, which underflowed. */
- fprintf (stderr,
- "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
- "This is probably because the GC hasn't been correctly informed\n"
- "about object sizes\n");
- abort ();
- }
- scm_mallocated -= scm_i_deprecated_memory_return;
-
-
-
- scm_c_hook_run (&scm_before_mark_c_hook, 0);
-
- scm_mark_all ();
-
- scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);
-
- scm_c_hook_run (&scm_before_sweep_c_hook, 0);
-
- /*
- Moved this lock upwards so that we can alloc new heap at the end of a sweep.
-
- DOCME: why should the heap be locked anyway?
- */
- --scm_gc_heap_lock;
-
- scm_gc_sweep ();
-
-
- /*
- TODO: this hook should probably be moved to just before the mark,
- since that's where the sweep is finished in lazy sweeping.
-
- MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not. The
- original meaning implied at least two things: that it would be
- called when
-
- 1. the freelist is re-initialized (no evaluation possible, though)
-
- and
-
- 2. the heap is "fresh"
- (it is well-defined what data is used and what is not)
-
- Neither of these conditions would hold just before the mark phase.
-
- Of course, the lazy sweeping has muddled the distinction between
- scm_before_sweep_c_hook and scm_after_sweep_c_hook, but even if
- there were no difference, it would still be useful to have two
- distinct classes of hook functions since this can prevent some
- bad interference when several modules adds gc hooks.
- */
- scm_c_hook_run (&scm_after_sweep_c_hook, 0);
- gc_end_stats ();
-
- scm_i_thread_wake_up ();
-
- /*
- See above.
- */
- --scm_gc_running_p;
+  /* Hold the sweep mutex for the duration of the collection so lazy
+     sweeping cannot run concurrently, and set scm_gc_running_p so code
+     that refuses to run during GC (e.g. the unprotect sanity check
+     below) can detect it.  */
+  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
+  scm_gc_running_p = 1;
+  scm_i_gc ("call");
+  /* njrev: It looks as though other places, e.g. scm_realloc,
+     can call scm_i_gc without acquiring the sweep mutex.  Does this
+     matter?  Also scm_i_gc (or its descendants) touch the
+     scm_sys_protects, which are protected in some cases
+     (e.g. scm_permobjs above in scm_gc_stats) by a critical section,
+     not by the sweep mutex.  Shouldn't all the GC-relevant objects be
+     protected in the same way? */
+  scm_gc_running_p = 0;
+  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
scm_c_hook_run (&scm_after_gc_c_hook, 0);
-  scm_rec_mutex_unlock (&scm_i_sweep_mutex);
+  return SCM_UNSPECIFIED;
+}
+#undef FUNC_NAME
- /*
- For debugging purposes, you could do
- scm_i_sweep_all_segments("debug"), but then the remains of the
- cell aren't left to analyse.
- */
+/* Run a full collection via libgc.  WHAT is a short human-readable reason
+   kept from the pre-libgc interface; the new body ignores it, so mark it
+   unused (matching the SCM_UNUSED convention used elsewhere in this file)
+   to keep -Wunused-parameter quiet.  */
+void
+scm_i_gc (const char *what SCM_UNUSED)
+{
+  GC_gcollect ();
+}
+
\f
/* {GC Protection Helper Functions}
*/
SCM
scm_permanent_object (SCM obj)
{
-  SCM_REDEFER_INTS;
-  scm_permobjs = scm_cons (obj, scm_permobjs);
-  SCM_REALLOW_INTS;
-  return obj;
+  /* Permanence is now expressed as a (reference-counted) protection
+     instead of consing onto the old scm_permobjs list.  */
+  return (scm_gc_protect_object (obj));
}
*/
/* Implementation note: For every object X, there is a counter which
- scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
+ scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X) decrements.
*/
SCM handle;
/* This critical section barrier will be replaced by a mutex. */
- SCM_REDEFER_INTS;
+ /* njrev: Indeed; if my comment above is correct, there is the same
+ critsec/mutex inconsistency here. */
+ SCM_CRITICAL_SECTION_START;
handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));
protected_obj_count ++;
- SCM_REALLOW_INTS;
+ SCM_CRITICAL_SECTION_END;
return obj;
}
SCM handle;
/* This critical section barrier will be replaced by a mutex. */
- SCM_REDEFER_INTS;
+ /* njrev: and again. */
+ SCM_CRITICAL_SECTION_START;
if (scm_gc_running_p)
{
fprintf (stderr, "scm_unprotect_object called during GC.\n");
abort ();
}
-
+
handle = scm_hashq_get_handle (scm_protects, obj);
if (scm_is_false (handle))
}
protected_obj_count --;
- SCM_REALLOW_INTS;
+ SCM_CRITICAL_SECTION_END;
return obj;
}
void
scm_gc_register_root (SCM *p)
{
- SCM handle;
- SCM key = scm_from_ulong ((unsigned long) p);
-
- /* This critical section barrier will be replaced by a mutex. */
- SCM_REDEFER_INTS;
-
- handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key,
- scm_from_int (0));
- SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));
-
- SCM_REALLOW_INTS;
+  /* Nothing.  The conservative collector locates roots by scanning the
+     stack, registers and static data itself -- NOTE(review): roots held
+     in plain malloc'd memory are not scanned; confirm no caller relied
+     on registering such a root.  */
}
void
scm_gc_unregister_root (SCM *p)
{
- SCM handle;
- SCM key = scm_from_ulong ((unsigned long) p);
-
- /* This critical section barrier will be replaced by a mutex. */
- SCM_REDEFER_INTS;
-
- handle = scm_hashv_get_handle (scm_gc_registered_roots, key);
-
- if (scm_is_false (handle))
- {
- fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
- abort ();
- }
- else
- {
- SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
- if (scm_is_eq (count, scm_from_int (0)))
- scm_hashv_remove_x (scm_gc_registered_roots, key);
- else
- SCM_SETCDR (handle, count);
- }
-
- SCM_REALLOW_INTS;
+  /* Nothing.  Root registration is now a no-op, so there is nothing to
+     undo -- the conservative collector discovers roots by scanning
+     memory itself.  */
}
void
int scm_i_terminating;
-/* called on process termination. */
-#ifdef HAVE_ATEXIT
-static void
-cleanup (void)
-#else
-#ifdef HAVE_ON_EXIT
-extern int on_exit (void (*procp) (), int arg);
-
-static void
-cleanup (int status, void *arg)
-#else
-#error Dont know how to setup a cleanup handler on your system.
-#endif
-#endif
-{
- scm_i_terminating = 1;
- scm_flush_all_ports ();
-}
-
\f
void
scm_storage_prehistory ()
{
+  GC_all_interior_pointers = 0;
+
+  GC_INIT ();
+
+#ifdef SCM_I_GSC_USE_PTHREAD_THREADS
+  /* When using GC 6.8, this call is required to initialize thread-local
+     freelists (shouldn't be necessary with GC 7.0). */
+  GC_init ();
+#endif
+
+  GC_expand_hp (SCM_DEFAULT_INIT_HEAP_SIZE_2);
+
+  /* We only need to register a displacement for those types for which the
+     higher bits of the type tag are used to store a pointer (that is, a
+     pointer to an 8-octet aligned region). For `scm_tc3_struct', this is
+     handled in `scm_alloc_struct ()'. */
+  GC_REGISTER_DISPLACEMENT (scm_tc3_cons);
+  GC_REGISTER_DISPLACEMENT (scm_tc3_closure);
+
+  /* Sanity check. */
+  if (!GC_is_visible (scm_sys_protects))
+    abort ();
+
scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
+  /* Also initialize scm_after_sweep_c_hook: it is declared and exported
+     alongside the other four hooks but was left uninitialized here.  */
+  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
}
-scm_t_mutex scm_i_gc_admin_mutex;
+scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
int
scm_init_storage ()
{
size_t j;
- /* Fixme: Should use mutexattr from the low-level API. */
- scm_rec_mutex_init (&scm_i_sweep_mutex, &scm_i_plugin_rec_mutex);
-
- scm_i_plugin_mutex_init (&scm_i_gc_admin_mutex, &scm_i_plugin_mutex);
-
j = SCM_NUM_PROTECTS;
while (j)
scm_sys_protects[--j] = SCM_BOOL_F;
- scm_block_gc = 1;
-
- scm_gc_init_freelist();
- scm_gc_init_malloc ();
j = SCM_HEAP_SEG_SIZE;
-
- /* Initialise the list of ports. */
- scm_i_port_table = (scm_t_port **)
- malloc (sizeof (scm_t_port *) * scm_i_port_table_room);
- if (!scm_i_port_table)
- return 1;
+#if 0
+  /* We can't have a cleanup handler since we have no thread to run it
+     in.  This #if 0 disables the whole HAVE_ATEXIT/HAVE_ON_EXIT cleanup
+     registration that follows; NOTE(review): verify the new #endif added
+     after the original nested #endifs keeps the preprocessor
+     conditionals balanced once the patch is applied. */
#ifdef HAVE_ATEXIT
atexit (cleanup);
#endif
#endif
- scm_stand_in_procs = scm_c_make_hash_table (257);
- scm_permobjs = SCM_EOL;
+#endif
+
+ scm_stand_in_procs = scm_make_weak_key_hash_table (scm_from_int (257));
scm_protects = scm_c_make_hash_table (31);
- scm_gc_registered_roots = scm_c_make_hash_table (31);
return 0;
}
*/
static void *
mark_gc_async (void * hook_data SCM_UNUSED,
- void *func_data SCM_UNUSED,
+ void *fn_data SCM_UNUSED,
void *data SCM_UNUSED)
{
/* If cell access debugging is enabled, the user may choose to perform
* collection hooks and the execution count of the scheme level
* after-gc-hook.
*/
+
#if (SCM_DEBUG_CELL_ACCESSES == 1)
if (scm_debug_cells_gc_interval == 0)
scm_system_async_mark (gc_async);
return NULL;
}
+/* Return a static, human-readable name for the type tag TAG, or NULL when
+   the tag is not recognized.  Tags >= 255 denote the free cell or a smob
+   (whose number lives in bits 8..15); smaller tags are decoded from the
+   tc7/tcs bits, with numbers refined via their tc16 sub-tag.  Unreachable
+   `break' statements after `return' have been dropped.  */
+char const *
+scm_i_tag_name (scm_t_bits tag)
+{
+  if (tag >= 255)
+    {
+      if (tag == scm_tc_free_cell)
+        return "free cell";
+
+      {
+        /* The smob number is stored in bits 8..15 of the tag.  */
+        int k = 0xff & (tag >> 8);
+        return (scm_smobs[k].name);
+      }
+    }
+
+  switch (tag) /* 7 bits */
+    {
+    case scm_tcs_struct:
+      return "struct";
+    case scm_tcs_cons_imcar:
+      return "cons (immediate car)";
+    case scm_tcs_cons_nimcar:
+      return "cons (non-immediate car)";
+    case scm_tcs_closures:
+      return "closures";
+    case scm_tc7_pws:
+      return "pws";
+    case scm_tc7_wvect:
+      return "weak vector";
+    case scm_tc7_vector:
+      return "vector";
+#ifdef CCLO
+    case scm_tc7_cclo:
+      return "compiled closure";
+#endif
+    case scm_tc7_number:
+      switch (tag)
+        {
+        case scm_tc16_real:
+          return "real";
+        case scm_tc16_big:
+          return "bignum";
+        case scm_tc16_complex:
+          return "complex number";
+        case scm_tc16_fraction:
+          return "fraction";
+        }
+      /* A number tag with an unrecognized tc16 falls through to NULL.  */
+      break;
+    case scm_tc7_string:
+      return "string";
+    case scm_tc7_stringbuf:
+      return "string buffer";
+    case scm_tc7_symbol:
+      return "symbol";
+    case scm_tc7_variable:
+      return "variable";
+    case scm_tcs_subrs:
+      return "subrs";
+    case scm_tc7_port:
+      return "port";
+    case scm_tc7_smob:
+      return "smob"; /* should not occur. */
+    }
+
+  return NULL;
+}
+
+
+
+\f
void
scm_init_gc ()
{
- scm_gc_init_mark ();
+ /* `GC_INIT ()' was invoked in `scm_storage_prehistory ()'. */
scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
scm_c_define ("after-gc-hook", scm_after_gc_hook);
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
- scm_i_deprecated_memory_return = 0;
-
- scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
- scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);
-
- /*
- NOTHING HERE: LAZY SWEEPING !
- */
- scm_i_reset_segments ();
-
- /* When we move to POSIX threads private freelists should probably
- be GC-protected instead. */
- *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
- *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
-
- /* Invalidate the freelists of other threads. */
- scm_i_thread_invalidate_freelists ();
+  /* FIXME: with libgc, sweeping happens inside the collector, so there
+     is nothing to do here; announce that this stub was reached.  Use the
+     standard C99 __func__ instead of the GCC-only __FUNCTION__ (the
+     emitted string is identical).  */
+  fprintf (stderr, "%s: doing nothing\n", __func__);
}
#undef FUNC_NAME