+2002-12-21 Mikael Djurfeldt <djurfeldt@nada.kth.se>
+
+ This change makes it possible for one thread to do lazy sweeping
+ while other threads are running. Now only the mark phase needs to
+ have all threads asleep. We should look further into this issue.
+ Presently, I've put the locking of scm_i_sweep_mutex at
+ "conservative" places due to my current lack of knowledge about
+ the garbage collector. Please feel free to restrict these regions
+ further to allow for maximal parallelism!
+
+ * gc.c, gc.h (scm_i_sweep_mutex): New mutex.
+
+ * gc.c (scm_gc_for_newcell), gc-malloc.c (scm_realloc,
+ scm_gc_register_collectable_memory): Substitute locking of
+ scm_i_sweep_mutex for calls to scm_i_thread_put_to_sleep.
+ (scm_igc): Lock sweep mutex here instead of in callers; Calls to
+ scm_i_thread_put_to_sleep/scm_i_thread_wake_up used to demarcate
+ the single-thread section (which now only contains the mark
+ phase).
+ (scm_gc): Don't lock sweep mutex here since scm_igc locks it;
+ Removed SCM_DEFER/ALLOW_INTS. Simply call scm_igc directly.
+
+ * threads.c (gc_section_mutex): Removed.
+
2002-12-19 Mikael Djurfeldt <mdj@kvast.blakulla.net>
* threads.c (create_thread): Clear parent field in root state in
if (ptr)
return ptr;
- scm_i_thread_put_to_sleep ();
+ scm_rec_mutex_lock (&scm_i_sweep_mutex);
scm_i_sweep_all_segments ("realloc");
SCM_SYSCALL (ptr = realloc (mem, size));
if (ptr)
{
- scm_i_thread_wake_up ();
+ scm_rec_mutex_unlock (&scm_i_sweep_mutex);
return ptr;
}
scm_igc ("realloc");
scm_i_sweep_all_segments ("realloc");
- scm_i_thread_wake_up ();
+ scm_rec_mutex_unlock (&scm_i_sweep_mutex);
SCM_SYSCALL (ptr = realloc (mem, size));
if (ptr)
By default, try to use calloc, as it is likely more efficient than
calling memset by hand.
*/
- SCM_SYSCALL(ptr= calloc (sz, 1));
+ SCM_SYSCALL (ptr = calloc (sz, 1));
if (ptr)
return ptr;
char *
scm_strndup (const char *str, size_t n)
{
- char *dst = scm_malloc (n+1);
+ char *dst = scm_malloc (n + 1);
memcpy (dst, str, n);
dst[n] = 0;
return dst;
unsigned long prev_alloced;
float yield;
- scm_i_thread_put_to_sleep ();
+ scm_rec_mutex_lock (&scm_i_sweep_mutex);
prev_alloced = scm_mallocated;
scm_igc (what);
scm_i_sweep_all_segments ("mtrigger");
- yield = ((float)prev_alloced - (float) scm_mallocated)
- / (float) prev_alloced;
+ yield = (((float) prev_alloced - (float) scm_mallocated)
+ / (float) prev_alloced);
scm_gc_malloc_yield_percentage = (int) (100 * yield);
#ifdef DEBUGINFO
fprintf (stderr, "prev %lud , now %lud, yield %4.2lf, want %d",
- prev_alloced, scm_mallocated, 100.0*yield, scm_i_minyield_malloc);
+ prev_alloced,
+ scm_mallocated,
+ 100.0 * yield,
+ scm_i_minyield_malloc);
#endif
if (yield < scm_i_minyield_malloc / 100.0)
scm_mtrigger = (unsigned long) no_overflow_trigger;
#ifdef DEBUGINFO
- fprintf (stderr, "Mtrigger sweep: ineffective. New trigger %d\n", scm_mtrigger);
+ fprintf (stderr, "Mtrigger sweep: ineffective. New trigger %d\n",
+ scm_mtrigger);
#endif
}
- scm_i_thread_wake_up ();
+ scm_rec_mutex_unlock (&scm_i_sweep_mutex);
}
#ifdef GUILE_DEBUG_MALLOC
unsigned int scm_gc_running_p = 0;
+/* Lock this mutex before doing lazy sweeping.
+ */
+scm_t_rec_mutex scm_i_sweep_mutex;
+
/* Set this to != 0 if every cell that is accessed shall be checked:
*/
int scm_debug_cell_accesses_p = 0;
else
{
counter = scm_debug_cells_gc_interval;
- scm_i_thread_put_to_sleep ();
scm_igc ("scm_assert_cell_valid");
- scm_i_thread_wake_up ();
}
}
}
"no longer accessible.")
#define FUNC_NAME s_scm_gc
{
- SCM_DEFER_INTS;
- scm_i_thread_put_to_sleep ();
scm_igc ("call");
- scm_i_thread_wake_up ();
- SCM_ALLOW_INTS;
return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
{
SCM cell;
- scm_i_thread_put_to_sleep ();
+ scm_rec_mutex_lock (&scm_i_sweep_mutex);
*free_cells = scm_i_sweep_some_segments (freelist);
if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
*free_cells = SCM_FREE_CELL_CDR (cell);
- scm_i_thread_wake_up ();
+ scm_rec_mutex_unlock (&scm_i_sweep_mutex);
return cell;
}
void
scm_igc (const char *what)
{
+ scm_rec_mutex_lock (&scm_i_sweep_mutex);
++scm_gc_running_p;
scm_c_hook_run (&scm_before_gc_c_hook, 0);
#endif
/* During the critical section, only the current thread may run. */
-#if 0 /* MDJ 021207 <djurfeldt@nada.kth.se>
- Currently, a much larger piece of the GC is single threaded.
- Can we shrink it again? */
- SCM_CRITICAL_SECTION_START;
-#endif
+ scm_i_thread_put_to_sleep ();
if (!scm_root || !scm_stack_base || scm_block_gc)
{
scm_c_hook_run (&scm_after_sweep_c_hook, 0);
gc_end_stats ();
-#if 0 /* MDJ 021207 <djurfeldt@nada.kth.se> */
- SCM_CRITICAL_SECTION_END;
-#endif
+ scm_i_thread_wake_up ();
/*
See above.
*/
scm_c_hook_run (&scm_after_gc_c_hook, 0);
--scm_gc_running_p;
+ scm_rec_mutex_unlock (&scm_i_sweep_mutex);
/*
For debugging purposes, you could do
{
size_t j;
+ /* Fixme: Should use mutexattr from the low-level API. */
+ scm_rec_mutex_init (&scm_i_sweep_mutex, &scm_i_plugin_rec_mutex);
+
j = SCM_NUM_PROTECTS;
while (j)
scm_sys_protects[--j] = SCM_BOOL_F;
SCM_API int scm_block_gc;
SCM_API int scm_gc_heap_lock;
SCM_API unsigned int scm_gc_running_p;
+extern scm_t_rec_mutex scm_i_sweep_mutex;
\f
#if (SCM_ENABLE_DEPRECATED == 1)
static scm_t_cond wake_up_cond;
int scm_i_thread_go_to_sleep;
-static scm_t_rec_mutex gc_section_mutex;
static int gc_section_count = 0;
static int threads_initialized_p = 0;
void
scm_i_thread_put_to_sleep ()
{
- scm_rec_mutex_lock (&gc_section_mutex);
if (threads_initialized_p && !gc_section_count++)
{
SCM threads;
}
scm_i_plugin_mutex_unlock (&thread_admin_mutex);
}
- scm_rec_mutex_unlock (&gc_section_mutex);
}
void
scm_init_pthread_threads ();
#endif
scm_i_plugin_mutex_init (&thread_admin_mutex, &scm_i_plugin_mutex);
- scm_i_plugin_rec_mutex_init (&gc_section_mutex, &scm_i_plugin_rec_mutex);
scm_i_plugin_cond_init (&wake_up_cond, 0);
scm_i_plugin_mutex_init (&scm_i_critical_section_mutex, &scm_i_plugin_mutex);
thread_count = 1;