Merge remote-tracking branch 'origin/stable-2.0'
[bpt/guile.git] / libguile / gc.c
index 700f3a4..71efd03 100644
@@ -1,4 +1,4 @@
-/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006, 2008, 2009, 2010 Free Software Foundation, Inc.
+/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public License
@@ -27,6 +27,8 @@
 #include <stdio.h>
 #include <errno.h>
 #include <string.h>
+#include <stdlib.h>
+#include <math.h>
 
 #ifdef __ia64__
 #include <ucontext.h>
@@ -45,7 +47,6 @@ extern unsigned long * __libc_ia64_register_backing_store_base;
 #include "libguile/root.h"
 #include "libguile/strings.h"
 #include "libguile/vectors.h"
-#include "libguile/weaks.h"
 #include "libguile/hashtab.h"
 #include "libguile/tags.h"
 
@@ -57,22 +58,17 @@ extern unsigned long * __libc_ia64_register_backing_store_base;
 
 #include "libguile/bdw-gc.h"
 
+/* For GC_set_start_callback.  */
+#include <gc/gc_mark.h>
+
 #ifdef GUILE_DEBUG_MALLOC
 #include "libguile/debug-malloc.h"
 #endif
 
-#ifdef HAVE_MALLOC_H
-#include <malloc.h>
-#endif
-
 #ifdef HAVE_UNISTD_H
 #include <unistd.h>
 #endif
 
-/* Lock this mutex before doing lazy sweeping.
- */
-scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
-
 /* Set this to != 0 if every cell that is accessed shall be checked:
  */
 int scm_debug_cell_accesses_p = 0;
@@ -83,13 +79,9 @@ int scm_expensive_debug_cell_accesses_p = 0;
  */
 int scm_debug_cells_gc_interval = 0;
 
-/*
-  Global variable, so you can switch it off at runtime by setting
-  scm_i_cell_validation_already_running.
- */
-int scm_i_cell_validation_already_running ;
-
-static SCM protects;
+/* Hash table that keeps a reference to objects the user wants to protect from
+   garbage collection.  */
+static SCM scm_protects;
 
 
 #if (SCM_DEBUG_CELL_ACCESSES == 1)
@@ -128,6 +120,9 @@ scm_i_expensive_validation_check (SCM cell)
     }
 }
 
+/* Whether cell validation is already running.  */
+static int scm_i_cell_validation_already_running = 0;
+
 void
 scm_assert_cell_valid (SCM cell)
 {
@@ -191,6 +186,36 @@ SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
 #endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */
 
 \f
+
+/* Compatibility.  */
+
+#ifndef HAVE_GC_GET_HEAP_USAGE_SAFE
+static void
+GC_get_heap_usage_safe (GC_word *pheap_size, GC_word *pfree_bytes,
+                        GC_word *punmapped_bytes, GC_word *pbytes_since_gc,
+                        GC_word *ptotal_bytes)
+{
+  *pheap_size = GC_get_heap_size ();
+  *pfree_bytes = GC_get_free_bytes ();
+#ifdef HAVE_GC_GET_UNMAPPED_BYTES
+  *punmapped_bytes = GC_get_unmapped_bytes ();
+#else
+  *punmapped_bytes = 0;
+#endif
+  *pbytes_since_gc = GC_get_bytes_since_gc ();
+  *ptotal_bytes = GC_get_total_bytes ();
+}
+#endif
+
+#ifndef HAVE_GC_GET_FREE_SPACE_DIVISOR
+static GC_word
+GC_get_free_space_divisor (void)
+{
+  return GC_free_space_divisor;
+}
+#endif
+
+\f
 /* Hooks.  */
 scm_t_c_hook scm_before_gc_c_hook;
 scm_t_c_hook scm_before_mark_c_hook;
@@ -199,35 +224,37 @@ scm_t_c_hook scm_after_sweep_c_hook;
 scm_t_c_hook scm_after_gc_c_hook;
 
 
+static void
+run_before_gc_c_hook (void)
+{
+  if (!SCM_I_CURRENT_THREAD)
+    /* GC while a thread is spinning up; punt.  */
+    return;
+
+  scm_c_hook_run (&scm_before_gc_c_hook, NULL);
+}
+
+
 /* GC Statistics Keeping
  */
 unsigned long scm_gc_ports_collected = 0;
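+/* Time spent in GC, in internal run-time units: the accumulated total,
+   and the start time of the collection in progress.  See
+   start_gc_timer and accumulate_gc_timer below.  */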
+static long gc_time_taken = 0;
+static long gc_start_time = 0;
+
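+/* The integer free space divisor currently in force in libgc, the
+   lower bound it may decay to, and the fractional target it tracks.
+   See adjust_gc_frequency below.  */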
+static unsigned long free_space_divisor;
+static unsigned long minimum_free_space_divisor;
+static double target_free_space_divisor;
 
 static unsigned long protected_obj_count = 0;
 
 
-SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
+SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
 SCM_SYMBOL (sym_heap_size, "heap-size");
 SCM_SYMBOL (sym_heap_free_size, "heap-free-size");
 SCM_SYMBOL (sym_heap_total_allocated, "heap-total-allocated");
-SCM_SYMBOL (sym_mallocated, "bytes-malloced");
-SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
-SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
-SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
-SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
-SCM_SYMBOL (sym_times, "gc-times");
-SCM_SYMBOL (sym_cells_marked, "cells-marked");
-SCM_SYMBOL (sym_cells_marked_conservatively, "cells-marked-conservatively");
-SCM_SYMBOL (sym_cells_swept, "cells-swept");
-SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
-SCM_SYMBOL (sym_cell_yield, "cell-yield");
+SCM_SYMBOL (sym_heap_allocated_since_gc, "heap-allocated-since-gc");
 SCM_SYMBOL (sym_protected_objects, "protected-objects");
-SCM_SYMBOL (sym_total_cells_allocated, "total-cells-allocated");
-
-
-/* Number of calls to SCM_NEWCELL since startup.  */
-unsigned scm_newcell_count;
-unsigned scm_newcell2_count;
+SCM_SYMBOL (sym_times, "gc-times");
 
 
 /* {Scheme Interface to GC}
@@ -278,42 +305,21 @@ SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
 #define FUNC_NAME s_scm_gc_stats
 {
   SCM answer;
-  size_t heap_size, free_bytes, bytes_since_gc, total_bytes;
+  GC_word heap_size, free_bytes, unmapped_bytes, bytes_since_gc, total_bytes;
   size_t gc_times;
 
-  heap_size      = GC_get_heap_size ();
-  free_bytes     = GC_get_free_bytes ();
-  bytes_since_gc = GC_get_bytes_since_gc ();
-  total_bytes    = GC_get_total_bytes ();
-  gc_times       = GC_gc_no;
+  GC_get_heap_usage_safe (&heap_size, &free_bytes, &unmapped_bytes,
+                          &bytes_since_gc, &total_bytes);
+  gc_times = GC_gc_no;
 
-  /* njrev: can any of these scm_cons's or scm_list_n signal a memory
-     error?  If so we need a frame here. */
   answer =
-    scm_list_n (scm_cons (sym_gc_time_taken, SCM_INUM0),
-#if 0
-               scm_cons (sym_cells_allocated,
-                         scm_from_ulong (local_scm_cells_allocated)),
-               scm_cons (sym_mallocated,
-                         scm_from_ulong (local_scm_mallocated)),
-               scm_cons (sym_mtrigger,
-                         scm_from_ulong (local_scm_mtrigger)),
-               scm_cons (sym_gc_mark_time_taken,
-                         scm_from_ulong (local_scm_gc_mark_time_taken)),
-               scm_cons (sym_cells_marked,
-                         scm_from_double (local_scm_gc_cells_marked)),
-               scm_cons (sym_cells_swept,
-                         scm_from_double (local_scm_gc_cells_swept)),
-               scm_cons (sym_malloc_yield,
-                         scm_from_long (local_scm_gc_malloc_yield_percentage)),
-               scm_cons (sym_cell_yield,
-                         scm_from_long (local_scm_gc_cell_yield_percentage)),
-               scm_cons (sym_heap_segments, heap_segs),
-#endif
+    scm_list_n (scm_cons (sym_gc_time_taken, scm_from_long (gc_time_taken)),
                scm_cons (sym_heap_size, scm_from_size_t (heap_size)),
                scm_cons (sym_heap_free_size, scm_from_size_t (free_bytes)),
                scm_cons (sym_heap_total_allocated,
                          scm_from_size_t (total_bytes)),
+                scm_cons (sym_heap_allocated_since_gc,
+                         scm_from_size_t (bytes_since_gc)),
                scm_cons (sym_protected_objects,
                          scm_from_ulong (protected_obj_count)),
                scm_cons (sym_times, scm_from_size_t (gc_times)),
@@ -377,17 +383,10 @@ SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            "no longer accessible.")
 #define FUNC_NAME s_scm_gc
 {
-  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
   scm_i_gc ("call");
-  /* njrev: It looks as though other places, e.g. scm_realloc,
-     can call scm_i_gc without acquiring the sweep mutex.  Does this
-     matter?  Also scm_i_gc (or its descendants) touch the
-     scm_sys_protects, which are protected in some cases
-     (e.g. scm_permobjs above in scm_gc_stats) by a critical section,
-     not by the sweep mutex.  Shouldn't all the GC-relevant objects be
-     protected in the same way? */
-  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
-  scm_c_hook_run (&scm_after_gc_c_hook, 0);
+  /* If you're calling scm_gc(), you probably want synchronous
+     finalization.  */
+  GC_invoke_finalizers ();
   return SCM_UNSPECIFIED;
 }
 #undef FUNC_NAME
@@ -395,6 +394,9 @@ SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
 void
 scm_i_gc (const char *what)
 {
+#ifndef HAVE_GC_SET_START_CALLBACK
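+  /* Without start-callback support in libgc, the before-GC hook can
+     only run for collections triggered explicitly through here.  */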
+  run_before_gc_c_hook ();
+#endif
   GC_gcollect ();
 }
 
@@ -507,7 +509,7 @@ scm_gc_protect_object (SCM obj)
      critsec/mutex inconsistency here. */
   SCM_CRITICAL_SECTION_START;
 
-  handle = scm_hashq_create_handle_x (protects, obj, scm_from_int (0));
+  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
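+  /* HANDLE is an (OBJ . COUNT) pair; the next line bumps the count.  */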
   SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));
 
   protected_obj_count ++;
@@ -537,7 +539,7 @@ scm_gc_unprotect_object (SCM obj)
       abort ();
     }
  
-  handle = scm_hashq_get_handle (protects, obj);
+  handle = scm_hashq_get_handle (scm_protects, obj);
 
   if (scm_is_false (handle))
     {
@@ -548,7 +550,7 @@ scm_gc_unprotect_object (SCM obj)
     {
       SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
       if (scm_is_eq (count, scm_from_int (0)))
-       scm_hashq_remove_x (protects, obj);
+       scm_hashq_remove_x (scm_protects, obj);
       else
        SCM_SETCDR (handle, count);
     }
@@ -587,8 +589,6 @@ scm_gc_unregister_roots (SCM *b, unsigned long n)
     scm_gc_unregister_root (p);
 }
 
-int scm_i_terminating;
-
 \f
 
 
@@ -611,11 +611,23 @@ scm_getenv_int (const char *var, int def)
   return res;
 }
 
+#ifndef HAVE_GC_SET_FINALIZE_ON_DEMAND
+static void
+GC_set_finalize_on_demand (int on_demand_p)
+{
+  GC_finalize_on_demand = on_demand_p;
+}
+#endif
+
 void
 scm_storage_prehistory ()
 {
   GC_all_interior_pointers = 0;
-  GC_set_free_space_divisor (scm_getenv_int ("GC_FREE_SPACE_DIVISOR", 3));
+  free_space_divisor = scm_getenv_int ("GC_FREE_SPACE_DIVISOR", 3);
+  minimum_free_space_divisor = free_space_divisor;
+  target_free_space_divisor = free_space_divisor;
+  GC_set_free_space_divisor (free_space_divisor);
+  GC_set_finalize_on_demand (1);
 
   GC_INIT ();
 
@@ -636,7 +648,7 @@ scm_storage_prehistory ()
   /* GC_REGISTER_DISPLACEMENT (scm_tc3_unused); */
 
   /* Sanity check.  */
-  if (!GC_is_visible (&protects))
+  if (!GC_is_visible (&scm_protects))
     abort ();
 
   scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
@@ -651,7 +663,7 @@ scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
 void
 scm_init_gc_protect_object ()
 {
-  protects = scm_c_make_hash_table (31);
+  scm_protects = scm_c_make_hash_table (31);
 
 #if 0
   /* We can't have a cleanup handler since we have no thread to run it
@@ -672,29 +684,31 @@ scm_init_gc_protect_object ()
 
 SCM scm_after_gc_hook;
 
-static SCM gc_async;
+static SCM after_gc_async_cell;
 
-/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
- * is run after the gc, as soon as the asynchronous events are handled by the
- * evaluator.
+/* The function after_gc_async_thunk causes the execution of the
+ * after-gc-hook.  It is run after the gc, as soon as the asynchronous
+ * events are handled by the evaluator.
  */
 static SCM
-gc_async_thunk (void)
+after_gc_async_thunk (void)
 {
+  /* Fun, no? Hook-run *and* run-hook?  */
+  scm_c_hook_run (&scm_after_gc_c_hook, NULL);
   scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
   return SCM_UNSPECIFIED;
 }
 
 
-/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
- * the garbage collection.  The only purpose of this function is to mark the
- * gc_async (which will eventually lead to the execution of the
- * gc_async_thunk).
+/* The function queue_after_gc_hook is run by the scm_before_gc_c_hook
+ * at the start of the garbage collection.  Its only purpose is to
+ * queue the after-GC async (which will eventually lead to the
+ * execution of after_gc_async_thunk).
  */
 static void *
-mark_gc_async (void * hook_data SCM_UNUSED,
-              void *fn_data SCM_UNUSED,
-              void *data SCM_UNUSED)
+queue_after_gc_hook (void * hook_data SCM_UNUSED,
+                     void *fn_data SCM_UNUSED,
+                     void *data SCM_UNUSED)
 {
   /* If cell access debugging is enabled, the user may choose to perform
    * additional garbage collections after an arbitrary number of cell
@@ -723,24 +737,218 @@ mark_gc_async (void * hook_data SCM_UNUSED,
 
 #if (SCM_DEBUG_CELL_ACCESSES == 1)
   if (scm_debug_cells_gc_interval == 0)
-    scm_system_async_mark (gc_async);
-#else
-  scm_system_async_mark (gc_async);
 #endif
+    {
+      scm_i_thread *t = SCM_I_CURRENT_THREAD;
+
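+      /* A false cdr means the async is not currently queued, so link
+         the cell into this thread's list of active asyncs.  */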
+      if (scm_is_false (SCM_CDR (after_gc_async_cell)))
+        {
+          SCM_SETCDR (after_gc_async_cell, t->active_asyncs);
+          t->active_asyncs = after_gc_async_cell;
+          t->pending_asyncs = 1;
+        }
+    }
 
   return NULL;
 }
 
-char const *
-scm_i_tag_name (scm_t_bits tag)
+\f
+
+static void *
+start_gc_timer (void * hook_data SCM_UNUSED,
+                void *fn_data SCM_UNUSED,
+                void *data SCM_UNUSED)
+{
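+  /* Do nothing if a measurement is already in progress.  */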
+  if (!gc_start_time)
+    gc_start_time = scm_c_get_internal_run_time ();
+
+  return NULL;
+}
+
+static void *
+accumulate_gc_timer (void * hook_data SCM_UNUSED,
+                     void *fn_data SCM_UNUSED,
+                     void *data SCM_UNUSED)
+{
+  if (gc_start_time)
+    {
+      long now = scm_c_get_internal_run_time ();
+      gc_time_taken += now - gc_start_time;
+      gc_start_time = 0;
+    }
+
+  return NULL;
+}
+
+/* Return an estimate of the resident memory footprint of the process,
+   in bytes.  Currently this only works on Linux; elsewhere it returns
+   0.  */
+static size_t
+get_image_size (void)
+{
+  unsigned long size, resident, share;
+  size_t ret = 0;
+
+  FILE *fp = fopen ("/proc/self/statm", "r");
+
+  if (fp && fscanf (fp, "%lu %lu %lu", &size, &resident, &share) == 3)
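+    /* statm counts in pages; this assumes the common 4 KiB page size
+       (see the portability note after this function).  */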
+    ret = resident * 4096;
+
+  if (fp)
+    fclose (fp);
+
+  return ret;
+}
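+
+/* A more portable variant of the computation above would query the
+   page size rather than assume 4 KiB -- a sketch, using only standard
+   POSIX:
+
+     long page_size = sysconf (_SC_PAGESIZE);
+     if (page_size > 0)
+       ret = resident * page_size;
+
+   sysconf is declared in <unistd.h>, included above where available.  */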
+
+/* These are used by adjust_gc_frequency and scm_gc_register_allocation
+   below.  */
+static size_t bytes_until_gc;
+static scm_i_pthread_mutex_t bytes_until_gc_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
+
+/* Make GC run more frequently when the process image size is growing,
+   measured against the number of bytes allocated through the GC.
+
+   If Guile is allocating at a GC-managed heap size H, libgc will tend
+   to limit the process image size to H*N, for some small factor N
+   determined by its heap-growth policy.  But if at the same time the
+   user program is mallocating at a rate M bytes per GC-allocated byte,
+   then the process stabilizes at H*N*M -- assuming that collecting data
+   will result in malloc'd data being freed.  It doesn't take a very
+   large M for this to be a bad situation.  To limit the image size,
+   Guile should GC more often -- the bigger the M, the more often.
+
+   Numeric functions that produce bigger and bigger integers are
+   pessimal, because M is an increasing function of time.  Here is an
+   example of such a function:
+
+      (define (factorial n)
+        (define (fac n acc)
+          (if (<= n 1)
+            acc
+            (fac (1- n) (* n acc))))
+        (fac n 1))
+
+   It is possible for a process to grow for reasons that will not be
+   solved by faster GC.  In that case M will be estimated as
+   artificially high for a while, and so GC will happen more often on
+   the Guile side.  But when it stabilizes, Guile can ease back the GC
+   frequency.
+
+   The key is to measure process image growth, not mallocation rate.
+   For maximum effectiveness, Guile reacts quickly to process growth,
+   and exponentially backs down when the process stops growing.
+
+   See http://thread.gmane.org/gmane.lisp.guile.devel/12552/focus=12936
+   for further discussion.
+ */
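+
+/* A worked example of the heuristic (illustrative numbers only): with
+   the default minimum divisor of 3, if the image grew by 10 MB while
+   the collector allocated 5 MB, then growth_rate = 10/5 = 2.0 and the
+   new target divisor is 3 * (1 + 2.0) = 9 -- libgc keeps the heap
+   about three times tighter, collecting roughly three times as often.
+   Once the image stops growing, the target halves its distance to the
+   minimum at each collection.  */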
+static void *
+adjust_gc_frequency (void * hook_data SCM_UNUSED,
+                     void *fn_data SCM_UNUSED,
+                     void *data SCM_UNUSED)
+{
+  static size_t prev_image_size = 0;
+  static size_t prev_bytes_alloced = 0;
+  size_t image_size;
+  size_t bytes_alloced;
+  
+  scm_i_pthread_mutex_lock (&bytes_until_gc_lock);
+  bytes_until_gc = GC_get_heap_size ();
+  scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
+
+  image_size = get_image_size ();
+  bytes_alloced = GC_get_total_bytes ();
+
+#define HEURISTICS_DEBUG 0
+
+#if HEURISTICS_DEBUG
+  fprintf (stderr, "prev image / alloced: %lu / %lu\n", prev_image_size, prev_bytes_alloced);
+  fprintf (stderr, "     image / alloced: %lu / %lu\n", image_size, bytes_alloced);
+  fprintf (stderr, "divisor %lu / %f\n", free_space_divisor, target_free_space_divisor);
+#endif
+
+  if (prev_image_size && bytes_alloced != prev_bytes_alloced)
+    {
+      double growth_rate, new_target_free_space_divisor;
+      double decay_factor = 0.5;
+      double hysteresis = 0.1;
+
+      growth_rate = ((double) image_size - prev_image_size)
+        / ((double)bytes_alloced - prev_bytes_alloced);
+      
+#if HEURISTICS_DEBUG
+      fprintf (stderr, "growth rate %f\n", growth_rate);
+#endif
+
+      new_target_free_space_divisor = minimum_free_space_divisor;
+
+      if (growth_rate > 0)
+        new_target_free_space_divisor *= 1.0 + growth_rate;
+
+#if HEURISTICS_DEBUG
+      fprintf (stderr, "new divisor %f\n", new_target_free_space_divisor);
+#endif
+
+      if (new_target_free_space_divisor < target_free_space_divisor)
+        /* Decay down.  */
+        target_free_space_divisor =
+          (decay_factor * target_free_space_divisor
+           + (1.0 - decay_factor) * new_target_free_space_divisor);
+      else
+        /* Jump up.  */
+        target_free_space_divisor = new_target_free_space_divisor;
+
+#if HEURISTICS_DEBUG
+      fprintf (stderr, "new target divisor %f\n", target_free_space_divisor);
+#endif
+
+      if (free_space_divisor + 0.5 + hysteresis < target_free_space_divisor
+          || free_space_divisor - 0.5 - hysteresis > target_free_space_divisor)
+        {
+          free_space_divisor = lround (target_free_space_divisor);
+#if HEURISTICS_DEBUG
+          fprintf (stderr, "new divisor %lu\n", free_space_divisor);
+#endif
+          GC_set_free_space_divisor (free_space_divisor);
+        }
+    }
+
+  prev_image_size = image_size;
+  prev_bytes_alloced = bytes_alloced;
+
+  return NULL;
+}
+
+/* The adjust_gc_frequency routine handles transients in the process
+   image size.  It can't handle intense non-GC-managed steady-state
+   allocation, though, as it decays the free space divisor at
+   steady-state down to its minimum value.
+
+   The only real way to handle continuous, high non-GC allocation is to
+   let the GC know about it.  This routine can handle non-GC allocation
+   rates that are similar in size to the GC-managed heap size.
+ */
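+
+/* A minimal usage sketch (hypothetical caller, not part of this file):
+   an extension that mallocs large buffers behind the collector's back
+   can report them so that collection frequency adapts:
+
+     void *buf = malloc (len);
+     if (buf != NULL)
+       scm_gc_register_allocation (len);
+
+   There is no matching "unregister" call; a report simply brings the
+   next collection closer.  */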
+
+void
+scm_gc_register_allocation (size_t size)
 {
-  if (tag >= 255)
+  scm_i_pthread_mutex_lock (&bytes_until_gc_lock);
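+  /* The unsigned subtraction below wraps around exactly when SIZE
+     exceeds the remaining allocation budget.  */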
+  if (bytes_until_gc - size > bytes_until_gc)
     {
-      int k = 0xff & (tag >> 8);
-      return (scm_smobs[k].name);
+      bytes_until_gc = GC_get_heap_size ();
+      scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
+      GC_gcollect ();
     }
+  else
+    {
+      bytes_until_gc -= size;
+      scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
+    }
+}
+
 
-  switch (tag) /* 7 bits */
+\f
+
+char const *
+scm_i_tag_name (scm_t_bits tag)
+{
+  switch (tag & 0x7f) /* 7 bits */
     {
     case scm_tcs_struct:
       return "struct";
@@ -748,10 +956,14 @@ scm_i_tag_name (scm_t_bits tag)
       return "cons (immediate car)";
     case scm_tcs_cons_nimcar:
       return "cons (non-immediate car)";
-    case scm_tc7_foreign:
+    case scm_tc7_pointer:
       return "foreign";
     case scm_tc7_hashtable:
       return "hashtable";
+    case scm_tc7_weak_set:
+      return "weak-set";
+    case scm_tc7_weak_table:
+      return "weak-table";
     case scm_tc7_fluid:
       return "fluid";
     case scm_tc7_dynamic_state:
@@ -801,7 +1013,10 @@ scm_i_tag_name (scm_t_bits tag)
       return "port";
       break;
     case scm_tc7_smob:
-      return "smob";           /* should not occur. */
+      {
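+        /* Bits 8-15 of a smob's tag hold its index into scm_smobs.  */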
+        int k = 0xff & (tag >> 8);
+        return (scm_smobs[k].name);
+      }
       break; 
     }
 
@@ -819,9 +1034,29 @@ scm_init_gc ()
   scm_after_gc_hook = scm_make_hook (SCM_INUM0);
   scm_c_define ("after-gc-hook", scm_after_gc_hook);
 
-  gc_async = scm_c_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
+  /* When the async is to run, the cdr of after_gc_async_cell gets set
+     to the asyncs queue of the current thread.  */
+  after_gc_async_cell = scm_cons (scm_c_make_gsubr ("%after-gc-thunk", 0, 0, 0,
+                                                    after_gc_async_thunk),
+                                  SCM_BOOL_F);
 
-  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
+  scm_c_hook_add (&scm_before_gc_c_hook, queue_after_gc_hook, NULL, 0);
+  scm_c_hook_add (&scm_before_gc_c_hook, start_gc_timer, NULL, 0);
+  scm_c_hook_add (&scm_after_gc_c_hook, accumulate_gc_timer, NULL, 0);
+
+#if HAVE_GC_GET_HEAP_USAGE_SAFE
+  /* GC_get_heap_usage does not take a lock, and so can run in the GC
+     start hook.  */
+  scm_c_hook_add (&scm_before_gc_c_hook, adjust_gc_frequency, NULL, 0);
+#else
+  /* GC_get_heap_usage might take a lock (and did from 7.2alpha1 to
+     7.2alpha7), so call it in the after_gc_hook.  */
+  scm_c_hook_add (&scm_after_gc_c_hook, adjust_gc_frequency, NULL, 0);
+#endif
+
+#ifdef HAVE_GC_SET_START_CALLBACK
+  GC_set_start_callback (run_before_gc_c_hook);
+#endif
 
 #include "libguile/gc.x"
 }