(scm_block_gc, scm_gc_heap_lock): Removed. Removed all uses.
(scm_gc_running_p): Now a macro that refers to the scm_i_thread
field.
(scm_i_sweep_mutex): Now a non-recursive mutex. GC can not happen
recursively.
(scm_igc, scm_i_gc): Renamed former to latter. Changed all uses.
Do not lock scm_i_sweep_mutex, which is now non-recursive, or set
scm_gc_running_p. Do not run the scm_after_gc_c_hook.
(scm_gc): Lock scm_i_sweep_mutex, set scm_gc_running_p and run the
scm_after_gc_c_hook here.
(scm_gc_for_new_cell): Set scm_gc_running_p here and run the
scm_after_gc_c_hook when a full GC has in fact been performed.
(scm_i_expensive_validation_check): Call scm_gc, not scm_i_gc.
* gc-segment.c (scm_i_get_new_heap_segment): Do not check
scm_gc_heap_lock.
* gc-malloc.c (scm_realloc, increase_mtrigger): Set
scm_gc_running_p while the scm_i_sweep_mutex is locked.
return ptr;
scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
-
+ scm_gc_running_p = 1;
+
scm_i_sweep_all_segments ("realloc");
SCM_SYSCALL (ptr = realloc (mem, size));
if (ptr)
{
+ scm_gc_running_p = 0;
scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
return ptr;
}
- scm_igc ("realloc");
+ scm_i_gc ("realloc");
scm_i_sweep_all_segments ("realloc");
+ scm_gc_running_p = 0;
scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
SCM_SYSCALL (ptr = realloc (mem, size));
float yield;
scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
+ scm_gc_running_p = 1;
prev_alloced = mallocated;
- scm_igc (what);
+ scm_i_gc (what);
scm_i_sweep_all_segments ("mtrigger");
yield = (((float) prev_alloced - (float) scm_mallocated)
scm_mtrigger);
#endif
}
-
+
+ scm_gc_running_p = 0;
scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
}
}
scm_t_cell * memory = 0;
/*
- We use malloc to alloc the heap. On GNU libc this is
+ We use calloc to alloc the heap. On GNU libc this is
equivalent to mmapping /dev/zero
*/
SCM_SYSCALL (memory = (scm_t_cell * ) calloc (1, mem_needed));
scm_i_sweep_some_segments (scm_t_cell_type_statistics * fl)
{
int i = fl->heap_segment_idx;
- SCM collected =SCM_EOL;
+ SCM collected = SCM_EOL;
if (i == -1)
i++;
RETURN: the index of the segment.
*/
int
-scm_i_get_new_heap_segment (scm_t_cell_type_statistics *freelist, policy_on_error error_policy)
+scm_i_get_new_heap_segment (scm_t_cell_type_statistics *freelist,
+ policy_on_error error_policy)
{
size_t len;
- if (scm_gc_heap_lock)
- {
- /* Critical code sections (such as the garbage collector) aren't
- * supposed to add heap segments.
- */
- fprintf (stderr, "scm_i_get_new_heap_segment: Can not extend locked heap.\n");
- abort ();
- }
-
{
/* Assure that the new segment is predicted to be large enough.
*
#include <unistd.h>
#endif
-
-
-unsigned int scm_gc_running_p = 0;
-
/* Lock this mutex before doing lazy sweeping.
*/
-scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
/* Set this to != 0 if every cell that is accessed shall be checked:
*/
else
{
counter = scm_debug_cells_gc_interval;
- scm_igc ("scm_assert_cell_valid");
+ scm_gc ();
}
}
}
*/
unsigned long scm_mtrigger;
-/* scm_gc_heap_lock
- * If set, don't expand the heap. Set only during gc, during which no allocation
- * is supposed to take place anyway.
- */
-int scm_gc_heap_lock = 0;
-
-/* GC Blocking
- * Don't pause for collection if this is set -- just
- * expand the heap.
- */
-int scm_block_gc = 1;
-
/* During collection, this accumulates objects holding
* weak references.
*/
"no longer accessible.")
#define FUNC_NAME s_scm_gc
{
- scm_igc ("call");
+ scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
+ scm_gc_running_p = 1;
+ scm_i_gc ("call");
+ scm_gc_running_p = 0;
+ scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
+ scm_c_hook_run (&scm_after_gc_c_hook, 0);
return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
\f
-/* When we get POSIX threads support, the master will be global and
- * common while the freelist will be individual for each thread.
+/* The master is global and common while the freelist will be
+ * individual for each thread.
*/
SCM
scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
{
SCM cell;
+ int did_gc = 0;
scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
+ scm_gc_running_p = 1;
*free_cells = scm_i_sweep_some_segments (freelist);
if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
*free_cells = scm_i_sweep_some_segments (freelist);
}
- if (*free_cells == SCM_EOL && !scm_block_gc)
+ if (*free_cells == SCM_EOL)
{
/*
- with the advent of lazy sweep, GC yield is only know just
+ with the advent of lazy sweep, GC yield is only known just
before doing the GC.
*/
scm_i_adjust_min_yield (freelist);
out of fresh cells. Try to get some new ones.
*/
- scm_igc ("cells");
+ did_gc = 1;
+ scm_i_gc ("cells");
*free_cells = scm_i_sweep_some_segments (freelist);
}
*free_cells = SCM_FREE_CELL_CDR (cell);
+ scm_gc_running_p = 0;
scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
+ if (did_gc)
+ scm_c_hook_run (&scm_after_gc_c_hook, 0);
+
return cell;
}
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
+/* Must be called while holding scm_i_sweep_mutex.
+ */
+
void
-scm_igc (const char *what)
+scm_i_gc (const char *what)
{
- if (scm_block_gc)
- return;
-
- scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
-
- /* During the critical section, only the current thread may run. */
scm_i_thread_put_to_sleep ();
- ++scm_gc_running_p;
scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
gc_start_stats (what);
-
-
- if (scm_gc_heap_lock)
- /* We've invoked the collector while a GC is already in progress.
- That should never happen. */
- abort ();
-
/*
Set freelists to NULL so scm_cons() always triggers gc, causing
- the above abort() to be triggered.
+ the assertion above to fail.
*/
*SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
*SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
- ++scm_gc_heap_lock;
-
/*
Let's finish the sweep. The conservative GC might point into the
garbage, and marking that would create a mess.
scm_mallocated -= scm_i_deprecated_memory_return;
-
- scm_c_hook_run (&scm_before_mark_c_hook, 0);
+ /* Mark */
+ scm_c_hook_run (&scm_before_mark_c_hook, 0);
scm_mark_all ();
-
scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);
- scm_c_hook_run (&scm_before_sweep_c_hook, 0);
-
- /*
- Moved this lock upwards so that we can alloc new heap at the end of a sweep.
-
- DOCME: why should the heap be locked anyway?
- */
- --scm_gc_heap_lock;
-
- scm_gc_sweep ();
-
+ /* Sweep
- /*
- TODO: this hook should probably be moved to just before the mark,
- since that's where the sweep is finished in lazy sweeping.
+ TODO: the after_sweep hook should probably be moved to just before
+ the mark, since that's where the sweep is finished in lazy
+ sweeping.
MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not. The
original meaning implied at least two things: that it would be
distinct classes of hook functions since this can prevent some
bad interference when several modules adds gc hooks.
*/
+
+ scm_c_hook_run (&scm_before_sweep_c_hook, 0);
+ scm_gc_sweep ();
scm_c_hook_run (&scm_after_sweep_c_hook, 0);
+
gc_end_stats ();
- --scm_gc_running_p;
scm_i_thread_wake_up ();
- /*
- See above.
- */
- scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
- scm_c_hook_run (&scm_after_gc_c_hook, 0);
-
/*
For debugging purposes, you could do
scm_i_sweep_all_segments("debug"), but then the remains of the
fprintf (stderr, "scm_unprotect_object called during GC.\n");
abort ();
}
-
+
handle = scm_hashq_get_handle (scm_protects, obj);
if (scm_is_false (handle))
j = SCM_NUM_PROTECTS;
while (j)
scm_sys_protects[--j] = SCM_BOOL_F;
- scm_block_gc = 1;
scm_gc_init_freelist();
scm_gc_init_malloc ();
*/
scm_i_reset_segments ();
- /* When we move to POSIX threads private freelists should probably
- be GC-protected instead. */
*SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
*SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
\f
+/* Cell allocation and garbage collection work roughly in the
+ following manner:
+
+ Each thread has a 'freelist', which is a list of available cells.
+ (It actually has two freelists, one for single cells and one for
+ double cells. Everything works analogous for double cells.)
+
+ When a thread wants to allocate a cell and the freelist is empty,
+ it refers to a global list of unswept 'cards'. A card is a small
+ block of cells that are contigous in memory, together with the
+ corresponding mark bits. A unswept card is one where the mark bits
+ are set for cells that have been in use during the last global mark
+ phase, but the unmarked cells of the card have not been scanned and
+ freed yet.
+
+ The thread takes one of the unswept cards and sweeps it, thereby
+ building a new freelist that it then uses. Sweeping a card will
+ call the smob free functions of unmarked cells, for example, and
+ thus, these free functions can run at any time, in any thread.
+
+ When there are no more unswept cards available, the thread performs
+ a global garbage collection. For this, all other threads are
+ stopped. A global mark is performed and all cards are put into the
+   global list of unswept cards.  When necessary, new cards are
+ allocated and initialized at this time. The other threads are then
+ started again.
+*/
+
typedef struct scm_t_cell
{
SCM word_0;
#define SCM_GC_CARD_N_HEADER_CELLS 1
#define SCM_GC_CARD_N_CELLS 256
-#define SCM_GC_SIZEOF_CARD SCM_GC_CARD_N_CELLS * sizeof (scm_t_cell)
+#define SCM_GC_SIZEOF_CARD SCM_GC_CARD_N_CELLS * sizeof (scm_t_cell)
#define SCM_GC_CARD_BVEC(card) ((scm_t_c_bvec_long *) ((card)->word_0))
#define SCM_GC_SET_CARD_BVEC(card, bvec) \
#define SCM_SET_CELL_OBJECT_2(x, v) SCM_SET_CELL_OBJECT ((x), 2, (v))
#define SCM_SET_CELL_OBJECT_3(x, v) SCM_SET_CELL_OBJECT ((x), 3, (v))
+#define SCM_CELL_OBJECT_LOC(x, n) (SCM_VALIDATE_CELL((x), &SCM_GC_CELL_OBJECT ((x), (n))))
+#define SCM_CARLOC(x) (SCM_CELL_OBJECT_LOC ((x), 0))
+#define SCM_CDRLOC(x) (SCM_CELL_OBJECT_LOC ((x), 1))
+
#define SCM_CELL_TYPE(x) SCM_CELL_WORD_0 (x)
#define SCM_SET_CELL_TYPE(x, t) SCM_SET_CELL_WORD_0 ((x), (t))
* the freelist. Due to this structure, freelist cells are not cons cells
* and thus may not be accessed using SCM_CAR and SCM_CDR. */
-/*
- SCM_FREECELL_P removed ; the semantics are ambiguous with lazy
- sweeping. Could mean "this cell is no longer in use (will be swept)"
- or "this cell has just been swept, and is not yet in use".
- */
-
-#define SCM_FREECELL_P this_macro_has_been_removed_see_gc_header_file
-
#define SCM_FREE_CELL_CDR(x) \
(SCM_GC_CELL_OBJECT ((x), 1))
#define SCM_SET_FREE_CELL_CDR(x, v) \
(SCM_GC_SET_CELL_OBJECT ((x), 1, (v)))
-
-#define SCM_CELL_OBJECT_LOC(x, n) (SCM_VALIDATE_CELL((x), &SCM_GC_CELL_OBJECT ((x), (n))))
-#define SCM_CARLOC(x) (SCM_CELL_OBJECT_LOC ((x), 0))
-#define SCM_CDRLOC(x) (SCM_CELL_OBJECT_LOC ((x), 1))
-
-
-
-
#if (SCM_DEBUG_CELL_ACCESSES == 1)
/* Set this to != 0 if every cell that is accessed shall be checked:
*/
SCM_API scm_i_pthread_mutex_t scm_i_gc_admin_mutex;
-SCM_API int scm_block_gc;
-SCM_API int scm_gc_heap_lock;
-SCM_API unsigned int scm_gc_running_p;
+#define scm_gc_running_p (SCM_I_CURRENT_THREAD->gc_running_p)
SCM_API scm_i_pthread_mutex_t scm_i_sweep_mutex;
+
\f
#if (SCM_ENABLE_DEPRECATED == 1)
SCM_API SCM scm_gc (void);
SCM_API void scm_gc_for_alloc (struct scm_t_cell_type_statistics *freelist);
SCM_API SCM scm_gc_for_newcell (struct scm_t_cell_type_statistics *master, SCM *freelist);
-SCM_API void scm_igc (const char *what);
+SCM_API void scm_i_gc (const char *what);
SCM_API void scm_gc_mark (SCM p);
SCM_API void scm_gc_mark_dependencies (SCM p);
SCM_API void scm_mark_locations (SCM_STACKITEM x[], unsigned long n);