/* Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2006,
 *   2008, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
20 /* #define DEBUGINFO */
26 #define SCM_BUILDING_DEPRECATED_CODE
28 #include "libguile/gen-scmconfig.h"
38 extern unsigned long * __libc_ia64_register_backing_store_base
;
41 #include "libguile/_scm.h"
42 #include "libguile/eval.h"
43 #include "libguile/stime.h"
44 #include "libguile/stackchk.h"
45 #include "libguile/struct.h"
46 #include "libguile/smob.h"
47 #include "libguile/arrays.h"
48 #include "libguile/async.h"
49 #include "libguile/ports.h"
50 #include "libguile/root.h"
51 #include "libguile/strings.h"
52 #include "libguile/vectors.h"
53 #include "libguile/weaks.h"
54 #include "libguile/hashtab.h"
55 #include "libguile/tags.h"
57 #include "libguile/private-gc.h"
58 #include "libguile/validate.h"
59 #include "libguile/deprecation.h"
60 #include "libguile/gc.h"
61 #include "libguile/dynwind.h"
63 #include "libguile/bdw-gc.h"
65 /* For GC_set_start_callback. */
66 #include <gc/gc_mark.h>
68 #ifdef GUILE_DEBUG_MALLOC
69 #include "libguile/debug-malloc.h"
76 /* Set this to != 0 if every cell that is accessed shall be checked:
78 int scm_debug_cell_accesses_p
= 0;
79 int scm_expensive_debug_cell_accesses_p
= 0;
81 /* Set this to 0 if no additional gc's shall be performed, otherwise set it to
82 * the number of cell accesses after which a gc shall be called.
84 int scm_debug_cells_gc_interval
= 0;
86 #if SCM_ENABLE_DEPRECATED == 1
87 /* Hash table that keeps a reference to objects the user wants to protect from
88 garbage collection. It could arguably be private but applications have come
89 to rely on it (e.g., Lilypond 2.13.9). */
92 static SCM scm_protects
;
95 #if (SCM_DEBUG_CELL_ACCESSES == 1)
100 Assert that the given object is a valid reference to a valid cell. This
101 test involves to determine whether the object is a cell pointer, whether
102 this pointer actually points into a heap segment and whether the cell
103 pointed to is not a free cell. Further, additional garbage collections may
104 get executed after a user defined number of cell accesses. This helps to
105 find places in the C code where references are dropped for extremely short
110 scm_i_expensive_validation_check (SCM cell
)
112 /* If desired, perform additional garbage collections after a user
113 * defined number of cell accesses.
115 if (scm_debug_cells_gc_interval
)
117 static unsigned int counter
= 0;
125 counter
= scm_debug_cells_gc_interval
;
131 /* Whether cell validation is already running. */
132 static int scm_i_cell_validation_already_running
= 0;
135 scm_assert_cell_valid (SCM cell
)
137 if (!scm_i_cell_validation_already_running
&& scm_debug_cell_accesses_p
)
139 scm_i_cell_validation_already_running
= 1; /* set to avoid recursion */
142 During GC, no user-code should be run, and the guile core
143 should use non-protected accessors.
145 if (scm_gc_running_p
)
149 Only scm_in_heap_p and rescanning the heap is wildly
152 if (scm_expensive_debug_cell_accesses_p
)
153 scm_i_expensive_validation_check (cell
);
155 scm_i_cell_validation_already_running
= 0; /* re-enable */
161 SCM_DEFINE (scm_set_debug_cell_accesses_x
, "set-debug-cell-accesses!", 1, 0, 0,
163 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
164 "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
165 "but no additional calls to garbage collection are issued.\n"
166 "If @var{flag} is a number, strict cell access checking is enabled,\n"
167 "with an additional garbage collection after the given\n"
168 "number of cell accesses.\n"
169 "This procedure only exists when the compile-time flag\n"
170 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
171 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
173 if (scm_is_false (flag
))
175 scm_debug_cell_accesses_p
= 0;
177 else if (scm_is_eq (flag
, SCM_BOOL_T
))
179 scm_debug_cells_gc_interval
= 0;
180 scm_debug_cell_accesses_p
= 1;
181 scm_expensive_debug_cell_accesses_p
= 0;
185 scm_debug_cells_gc_interval
= scm_to_signed_integer (flag
, 0, INT_MAX
);
186 scm_debug_cell_accesses_p
= 1;
187 scm_expensive_debug_cell_accesses_p
= 1;
189 return SCM_UNSPECIFIED
;
194 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
200 #ifndef HAVE_GC_GET_HEAP_USAGE_SAFE
202 GC_get_heap_usage_safe (GC_word
*pheap_size
, GC_word
*pfree_bytes
,
203 GC_word
*punmapped_bytes
, GC_word
*pbytes_since_gc
,
204 GC_word
*ptotal_bytes
)
206 *pheap_size
= GC_get_heap_size ();
207 *pfree_bytes
= GC_get_free_bytes ();
208 #ifdef HAVE_GC_GET_UNMAPPED_BYTES
209 *punmapped_bytes
= GC_get_unmapped_bytes ();
211 *punmapped_bytes
= 0;
213 *pbytes_since_gc
= GC_get_bytes_since_gc ();
214 *ptotal_bytes
= GC_get_total_bytes ();
218 #ifndef HAVE_GC_GET_FREE_SPACE_DIVISOR
220 GC_get_free_space_divisor (void)
222 return GC_free_space_divisor
;
228 scm_t_c_hook scm_before_gc_c_hook
;
229 scm_t_c_hook scm_before_mark_c_hook
;
230 scm_t_c_hook scm_before_sweep_c_hook
;
231 scm_t_c_hook scm_after_sweep_c_hook
;
232 scm_t_c_hook scm_after_gc_c_hook
;
236 run_before_gc_c_hook (void)
238 if (!SCM_I_CURRENT_THREAD
)
239 /* GC while a thread is spinning up; punt. */
242 scm_c_hook_run (&scm_before_gc_c_hook
, NULL
);
246 /* GC Statistics Keeping
248 unsigned long scm_gc_ports_collected
= 0;
249 static long gc_time_taken
= 0;
250 static long gc_start_time
= 0;
252 static unsigned long free_space_divisor
;
253 static unsigned long minimum_free_space_divisor
;
254 static double target_free_space_divisor
;
256 static unsigned long protected_obj_count
= 0;
259 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
260 SCM_SYMBOL (sym_heap_size
, "heap-size");
261 SCM_SYMBOL (sym_heap_free_size
, "heap-free-size");
262 SCM_SYMBOL (sym_heap_total_allocated
, "heap-total-allocated");
263 SCM_SYMBOL (sym_heap_allocated_since_gc
, "heap-allocated-since-gc");
264 SCM_SYMBOL (sym_protected_objects
, "protected-objects");
265 SCM_SYMBOL (sym_times
, "gc-times");
268 /* {Scheme Interface to GC}
271 tag_table_to_type_alist (void *closure
, SCM key
, SCM val
, SCM acc
)
273 if (scm_is_integer (key
))
275 int c_tag
= scm_to_int (key
);
277 char const * name
= scm_i_tag_name (c_tag
);
280 key
= scm_from_locale_string (name
);
285 sprintf (s
, "tag %d", c_tag
);
286 key
= scm_from_locale_string (s
);
290 return scm_cons (scm_cons (key
, val
), acc
);
293 SCM_DEFINE (scm_gc_live_object_stats
, "gc-live-object-stats", 0, 0, 0,
295 "Return an alist of statistics of the current live objects. ")
296 #define FUNC_NAME s_scm_gc_live_object_stats
298 SCM tab
= scm_make_hash_table (scm_from_int (57));
302 = scm_internal_hash_fold (&tag_table_to_type_alist
, NULL
, SCM_EOL
, tab
);
308 extern int scm_gc_malloc_yield_percentage
;
309 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
311 "Return an association list of statistics about Guile's current\n"
313 #define FUNC_NAME s_scm_gc_stats
316 GC_word heap_size
, free_bytes
, unmapped_bytes
, bytes_since_gc
, total_bytes
;
319 GC_get_heap_usage_safe (&heap_size
, &free_bytes
, &unmapped_bytes
,
320 &bytes_since_gc
, &total_bytes
);
321 #ifdef HAVE_GC_GET_GC_NO
322 /* This function was added in 7.2alpha2 (June 2009). */
323 gc_times
= GC_get_gc_no ();
325 /* This symbol is deprecated as of 7.3. */
330 scm_list_n (scm_cons (sym_gc_time_taken
, scm_from_long (gc_time_taken
)),
331 scm_cons (sym_heap_size
, scm_from_size_t (heap_size
)),
332 scm_cons (sym_heap_free_size
, scm_from_size_t (free_bytes
)),
333 scm_cons (sym_heap_total_allocated
,
334 scm_from_size_t (total_bytes
)),
335 scm_cons (sym_heap_allocated_since_gc
,
336 scm_from_size_t (bytes_since_gc
)),
337 scm_cons (sym_protected_objects
,
338 scm_from_ulong (protected_obj_count
)),
339 scm_cons (sym_times
, scm_from_size_t (gc_times
)),
347 SCM_DEFINE (scm_gc_dump
, "gc-dump", 0, 0, 0,
349 "Dump information about the garbage collector's internal data "
350 "structures and memory usage to the standard output.")
351 #define FUNC_NAME s_scm_gc_dump
355 return SCM_UNSPECIFIED
;
360 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
362 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
363 "returned by this function for @var{obj}")
364 #define FUNC_NAME s_scm_object_address
366 return scm_from_ulong (SCM_UNPACK (obj
));
371 SCM_DEFINE (scm_gc_disable
, "gc-disable", 0, 0, 0,
373 "Disables the garbage collector. Nested calls are permitted. "
374 "GC is re-enabled once @code{gc-enable} has been called the "
375 "same number of times @code{gc-disable} was called.")
376 #define FUNC_NAME s_scm_gc_disable
379 return SCM_UNSPECIFIED
;
383 SCM_DEFINE (scm_gc_enable
, "gc-enable", 0, 0, 0,
385 "Enables the garbage collector.")
386 #define FUNC_NAME s_scm_gc_enable
389 return SCM_UNSPECIFIED
;
394 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
396 "Scans all of SCM objects and reclaims for further use those that are\n"
397 "no longer accessible.")
398 #define FUNC_NAME s_scm_gc
401 /* If you're calling scm_gc(), you probably want synchronous
403 GC_invoke_finalizers ();
404 return SCM_UNSPECIFIED
;
/* Trigger a full collection.  WHAT describes the reason, for debugging.
   When libgc lacks GC_set_start_callback, the before-gc hook must be run
   by hand here instead of by the collector.  */
void
scm_i_gc (const char *what)
{
#ifndef HAVE_GC_SET_START_CALLBACK
  run_before_gc_c_hook ();
#endif
  GC_gcollect ();
}
419 /* {GC Protection Helper Functions}
424 * If within a function you need to protect one or more scheme objects from
425 * garbage collection, pass them as parameters to one of the
426 * scm_remember_upto_here* functions below. These functions don't do
427 * anything, but since the compiler does not know that they are actually
428 * no-ops, it will generate code that calls these functions with the given
429 * parameters. Therefore, you can be sure that the compiler will keep those
430 * scheme values alive (on the stack or in a register) up to the point where
431 * scm_remember_upto_here* is called. In other words, place the call to
432 * scm_remember_upto_here* _behind_ the last code in your function, that
433 * depends on the scheme object to exist.
435 * Example: We want to make sure that the string object str does not get
436 * garbage collected during the execution of 'some_function' in the code
437 * below, because otherwise the characters belonging to str would be freed and
438 * 'some_function' might access freed memory. To make sure that the compiler
439 * keeps str alive on the stack or in a register such that it is visible to
440 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
441 * call to 'some_function'. Note that this would not be necessary if str was
442 * used anyway after the call to 'some_function'.
443 * char *chars = scm_i_string_chars (str);
444 * some_function (chars);
445 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
448 /* Remove any macro versions of these while defining the functions.
449 Functions are always included in the library, for upward binary
450 compatibility and in case combinations of GCC and non-GCC are used. */
451 #undef scm_remember_upto_here_1
452 #undef scm_remember_upto_here_2
455 scm_remember_upto_here_1 (SCM obj SCM_UNUSED
)
457 /* Empty. Protects a single object from garbage collection. */
461 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED
, SCM obj2 SCM_UNUSED
)
463 /* Empty. Protects two objects from garbage collection. */
467 scm_remember_upto_here (SCM obj SCM_UNUSED
, ...)
469 /* Empty. Protects any number of objects from garbage collection. */
473 These crazy functions prevent garbage collection
474 of arguments after the first argument by
475 ensuring they remain live throughout the
476 function because they are used in the last
477 line of the code block.
478 It'd be better to have a nice compiler hint to
479 aid the conservative stack-scanning GC. --03/09/00 gjb */
481 scm_return_first (SCM elt
, ...)
/* Like scm_return_first, but for an int first argument.  */
int
scm_return_first_int (int i, ...)
{
  return i;
}
494 scm_permanent_object (SCM obj
)
496 return (scm_gc_protect_object (obj
));
500 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
501 other references are dropped, until the object is unprotected by calling
502 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
503 i. e. it is possible to protect the same object several times, but it is
504 necessary to unprotect the object the same number of times to actually get
505 the object unprotected. It is an error to unprotect an object more often
506 than it has been protected before. The function scm_protect_object returns
510 /* Implementation note: For every object X, there is a counter which
511 scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X) decrements.
517 scm_gc_protect_object (SCM obj
)
521 /* This critical section barrier will be replaced by a mutex. */
522 /* njrev: Indeed; if my comment above is correct, there is the same
523 critsec/mutex inconsistency here. */
524 SCM_CRITICAL_SECTION_START
;
526 handle
= scm_hashq_create_handle_x (scm_protects
, obj
, scm_from_int (0));
527 SCM_SETCDR (handle
, scm_sum (SCM_CDR (handle
), scm_from_int (1)));
529 protected_obj_count
++;
531 SCM_CRITICAL_SECTION_END
;
537 /* Remove any protection for OBJ established by a prior call to
538 scm_protect_object. This function returns OBJ.
540 See scm_protect_object for more information. */
542 scm_gc_unprotect_object (SCM obj
)
546 /* This critical section barrier will be replaced by a mutex. */
547 /* njrev: and again. */
548 SCM_CRITICAL_SECTION_START
;
550 if (scm_gc_running_p
)
552 fprintf (stderr
, "scm_unprotect_object called during GC.\n");
556 handle
= scm_hashq_get_handle (scm_protects
, obj
);
558 if (scm_is_false (handle
))
560 fprintf (stderr
, "scm_unprotect_object called on unprotected object\n");
565 SCM count
= scm_difference (SCM_CDR (handle
), scm_from_int (1));
566 if (scm_is_eq (count
, scm_from_int (0)))
567 scm_hashq_remove_x (scm_protects
, obj
);
569 SCM_SETCDR (handle
, count
);
571 protected_obj_count
--;
573 SCM_CRITICAL_SECTION_END
;
579 scm_gc_register_root (SCM
*p
)
585 scm_gc_unregister_root (SCM
*p
)
591 scm_gc_register_roots (SCM
*b
, unsigned long n
)
594 for (; p
< b
+ n
; ++p
)
595 scm_gc_register_root (p
);
599 scm_gc_unregister_roots (SCM
*b
, unsigned long n
)
602 for (; p
< b
+ n
; ++p
)
603 scm_gc_unregister_root (p
);
610 MOVE THIS FUNCTION. IT DOES NOT HAVE ANYTHING TODO WITH GC.
613 /* Get an integer from an environment variable. */
/* Get an integer from environment variable VAR; return DEF when VAR is
   unset or does not begin with a decimal integer.  (Note: this helper
   has nothing to do with GC; it lives here for historical reasons.)  */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res = def;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    /* No digits at all: fall back to the default.  */
    return def;
  return res;
}
628 #ifndef HAVE_GC_SET_FINALIZE_ON_DEMAND
630 GC_set_finalize_on_demand (int foo
)
632 GC_finalize_on_demand
= foo
;
637 scm_storage_prehistory ()
639 #ifdef HAVE_GC_SET_ALL_INTERIOR_POINTERS
640 /* This function was added in 7.2alpha2 (June 2009). */
641 GC_set_all_interior_pointers (0);
643 /* This symbol is deprecated in 7.3. */
644 GC_all_interior_pointers
= 0;
647 free_space_divisor
= scm_getenv_int ("GC_FREE_SPACE_DIVISOR", 3);
648 minimum_free_space_divisor
= free_space_divisor
;
649 target_free_space_divisor
= free_space_divisor
;
650 GC_set_free_space_divisor (free_space_divisor
);
651 GC_set_finalize_on_demand (1);
655 #if (! ((defined GC_VERSION_MAJOR) && (GC_VERSION_MAJOR >= 7))) \
656 && (defined SCM_I_GSC_USE_PTHREAD_THREADS)
657 /* When using GC 6.8, this call is required to initialize thread-local
658 freelists (shouldn't be necessary with GC 7.0). */
662 GC_expand_hp (SCM_DEFAULT_INIT_HEAP_SIZE_2
);
664 /* We only need to register a displacement for those types for which the
665 higher bits of the type tag are used to store a pointer (that is, a
666 pointer to an 8-octet aligned region). For `scm_tc3_struct', this is
667 handled in `scm_alloc_struct ()'. */
668 GC_REGISTER_DISPLACEMENT (scm_tc3_cons
);
669 /* GC_REGISTER_DISPLACEMENT (scm_tc3_unused); */
672 if (!GC_is_visible (&scm_protects
))
675 scm_c_hook_init (&scm_before_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
676 scm_c_hook_init (&scm_before_mark_c_hook
, 0, SCM_C_HOOK_NORMAL
);
677 scm_c_hook_init (&scm_before_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
678 scm_c_hook_init (&scm_after_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
679 scm_c_hook_init (&scm_after_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
682 scm_i_pthread_mutex_t scm_i_gc_admin_mutex
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
685 scm_init_gc_protect_object ()
687 scm_protects
= scm_c_make_hash_table (31);
690 /* We can't have a cleanup handler since we have no thread to run it
697 on_exit (cleanup
, 0);
706 SCM scm_after_gc_hook
;
708 static SCM after_gc_async_cell
;
710 /* The function after_gc_async_thunk causes the execution of the
711 * after-gc-hook. It is run after the gc, as soon as the asynchronous
712 * events are handled by the evaluator.
715 after_gc_async_thunk (void)
717 /* Fun, no? Hook-run *and* run-hook? */
718 scm_c_hook_run (&scm_after_gc_c_hook
, NULL
);
719 scm_c_run_hook (scm_after_gc_hook
, SCM_EOL
);
720 return SCM_UNSPECIFIED
;
724 /* The function queue_after_gc_hook is run by the scm_before_gc_c_hook
725 * at the end of the garbage collection. The only purpose of this
726 * function is to mark the after_gc_async (which will eventually lead to
727 * the execution of the after_gc_async_thunk).
730 queue_after_gc_hook (void * hook_data SCM_UNUSED
,
731 void *fn_data SCM_UNUSED
,
732 void *data SCM_UNUSED
)
734 /* If cell access debugging is enabled, the user may choose to perform
735 * additional garbage collections after an arbitrary number of cell
736 * accesses. We don't want the scheme level after-gc-hook to be performed
737 * for each of these garbage collections for the following reason: The
738 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
739 * after-gc-hook was performed with every gc, and if the gc was performed
740 * after a very small number of cell accesses, then the number of cell
741 * accesses during the execution of the after-gc-hook will suffice to cause
742 * the execution of the next gc. Then, guile would keep executing the
743 * after-gc-hook over and over again, and would never come to do other
746 * To overcome this problem, if cell access debugging with additional
747 * garbage collections is enabled, the after-gc-hook is never run by the
748 * garbage collecter. When running guile with cell access debugging and the
749 * execution of the after-gc-hook is desired, then it is necessary to run
750 * the hook explicitly from the user code. This has the effect, that from
751 * the scheme level point of view it seems that garbage collection is
752 * performed with a much lower frequency than it actually is. Obviously,
753 * this will not work for code that depends on a fixed one to one
754 * relationship between the execution counts of the C level garbage
755 * collection hooks and the execution count of the scheme level
759 #if (SCM_DEBUG_CELL_ACCESSES == 1)
760 if (scm_debug_cells_gc_interval
== 0)
763 scm_i_thread
*t
= SCM_I_CURRENT_THREAD
;
765 if (scm_is_false (SCM_CDR (after_gc_async_cell
)))
767 SCM_SETCDR (after_gc_async_cell
, t
->active_asyncs
);
768 t
->active_asyncs
= after_gc_async_cell
;
769 t
->pending_asyncs
= 1;
779 start_gc_timer (void * hook_data SCM_UNUSED
,
780 void *fn_data SCM_UNUSED
,
781 void *data SCM_UNUSED
)
784 gc_start_time
= scm_c_get_internal_run_time ();
790 accumulate_gc_timer (void * hook_data SCM_UNUSED
,
791 void *fn_data SCM_UNUSED
,
792 void *data SCM_UNUSED
)
796 long now
= scm_c_get_internal_run_time ();
797 gc_time_taken
+= now
- gc_start_time
;
804 /* Return some idea of the memory footprint of a process, in bytes.
805 Currently only works on Linux systems. */
/* Return some idea of the memory footprint of the process, in bytes.
   Reads /proc/self/statm, so it only yields a meaningful value on
   Linux; elsewhere (or on any error) it returns 0.  */
static size_t
get_image_size (void)
{
  unsigned long size, resident, share;
  size_t ret = 0;

  FILE *fp = fopen ("/proc/self/statm", "r");

  if (fp && fscanf (fp, "%lu %lu %lu", &size, &resident, &share) == 3)
    /* statm reports pages; assumes a 4 KiB page size — TODO confirm
       whether upstream hard-codes 4096 here or queries the page size.  */
    ret = resident * 4096;

  if (fp)
    fclose (fp);

  return ret;
}
823 /* These are discussed later. */
824 static size_t bytes_until_gc
;
825 static scm_i_pthread_mutex_t bytes_until_gc_lock
= SCM_I_PTHREAD_MUTEX_INITIALIZER
;
827 /* Make GC run more frequently when the process image size is growing,
828 measured against the number of bytes allocated through the GC.
830 If Guile is allocating at a GC-managed heap size H, libgc will tend
831 to limit the process image size to H*N. But if at the same time the
832 user program is mallocating at a rate M bytes per GC-allocated byte,
833 then the process stabilizes at H*N*M -- assuming that collecting data
834 will result in malloc'd data being freed. It doesn't take a very
835 large M for this to be a bad situation. To limit the image size,
836 Guile should GC more often -- the bigger the M, the more often.
838 Numeric functions that produce bigger and bigger integers are
839 pessimal, because M is an increasing function of time. Here is an
840 example of such a function:
842 (define (factorial n)
846 (fac (1- n) (* n acc))))
849 It is possible for a process to grow for reasons that will not be
850 solved by faster GC. In that case M will be estimated as
851 artificially high for a while, and so GC will happen more often on
852 the Guile side. But when it stabilizes, Guile can ease back the GC
855 The key is to measure process image growth, not mallocation rate.
856 For maximum effectiveness, Guile reacts quickly to process growth,
857 and exponentially backs down when the process stops growing.
859 See http://thread.gmane.org/gmane.lisp.guile.devel/12552/focus=12936
860 for further discussion.
863 adjust_gc_frequency (void * hook_data SCM_UNUSED
,
864 void *fn_data SCM_UNUSED
,
865 void *data SCM_UNUSED
)
867 static size_t prev_image_size
= 0;
868 static size_t prev_bytes_alloced
= 0;
870 size_t bytes_alloced
;
872 scm_i_pthread_mutex_lock (&bytes_until_gc_lock
);
873 bytes_until_gc
= GC_get_heap_size ();
874 scm_i_pthread_mutex_unlock (&bytes_until_gc_lock
);
876 image_size
= get_image_size ();
877 bytes_alloced
= GC_get_total_bytes ();
879 #define HEURISTICS_DEBUG 0
882 fprintf (stderr
, "prev image / alloced: %lu / %lu\n", prev_image_size
, prev_bytes_alloced
);
883 fprintf (stderr
, " image / alloced: %lu / %lu\n", image_size
, bytes_alloced
);
884 fprintf (stderr
, "divisor %lu / %f\n", free_space_divisor
, target_free_space_divisor
);
887 if (prev_image_size
&& bytes_alloced
!= prev_bytes_alloced
)
889 double growth_rate
, new_target_free_space_divisor
;
890 double decay_factor
= 0.5;
891 double hysteresis
= 0.1;
893 growth_rate
= ((double) image_size
- prev_image_size
)
894 / ((double)bytes_alloced
- prev_bytes_alloced
);
897 fprintf (stderr
, "growth rate %f\n", growth_rate
);
900 new_target_free_space_divisor
= minimum_free_space_divisor
;
903 new_target_free_space_divisor
*= 1.0 + growth_rate
;
906 fprintf (stderr
, "new divisor %f\n", new_target_free_space_divisor
);
909 if (new_target_free_space_divisor
< target_free_space_divisor
)
911 target_free_space_divisor
=
912 (decay_factor
* target_free_space_divisor
913 + (1.0 - decay_factor
) * new_target_free_space_divisor
);
916 target_free_space_divisor
= new_target_free_space_divisor
;
919 fprintf (stderr
, "new target divisor %f\n", target_free_space_divisor
);
922 if (free_space_divisor
+ 0.5 + hysteresis
< target_free_space_divisor
923 || free_space_divisor
- 0.5 - hysteresis
> target_free_space_divisor
)
925 free_space_divisor
= lround (target_free_space_divisor
);
927 fprintf (stderr
, "new divisor %lu\n", free_space_divisor
);
929 GC_set_free_space_divisor (free_space_divisor
);
933 prev_image_size
= image_size
;
934 prev_bytes_alloced
= bytes_alloced
;
939 /* The adjust_gc_frequency routine handles transients in the process
940 image size. It can't handle instense non-GC-managed steady-state
941 allocation though, as it decays the FSD at steady-state down to its
944 The only real way to handle continuous, high non-GC allocation is to
945 let the GC know about it. This routine can handle non-GC allocation
946 rates that are similar in size to the GC-managed heap size.
950 scm_gc_register_allocation (size_t size
)
952 scm_i_pthread_mutex_lock (&bytes_until_gc_lock
);
953 if (bytes_until_gc
- size
> bytes_until_gc
)
955 bytes_until_gc
= GC_get_heap_size ();
956 scm_i_pthread_mutex_unlock (&bytes_until_gc_lock
);
961 bytes_until_gc
-= size
;
962 scm_i_pthread_mutex_unlock (&bytes_until_gc_lock
);
970 scm_i_tag_name (scm_t_bits tag
)
972 switch (tag
& 0x7f) /* 7 bits */
976 case scm_tcs_cons_imcar
:
977 return "cons (immediate car)";
978 case scm_tcs_cons_nimcar
:
979 return "cons (non-immediate car)";
980 case scm_tc7_pointer
:
982 case scm_tc7_hashtable
:
986 case scm_tc7_dynamic_state
:
987 return "dynamic state";
990 case scm_tc7_objcode
:
994 case scm_tc7_vm_cont
:
995 return "vm continuation";
997 return "weak vector";
1000 case scm_tc7_number
:
1009 case scm_tc16_complex
:
1010 return "complex number";
1012 case scm_tc16_fraction
:
1017 case scm_tc7_string
:
1020 case scm_tc7_stringbuf
:
1021 return "string buffer";
1023 case scm_tc7_symbol
:
1026 case scm_tc7_variable
:
1034 int k
= 0xff & (tag
>> 8);
1035 return (scm_smobs
[k
].name
);
1049 /* `GC_INIT ()' was invoked in `scm_storage_prehistory ()'. */
1051 scm_after_gc_hook
= scm_make_hook (SCM_INUM0
);
1052 scm_c_define ("after-gc-hook", scm_after_gc_hook
);
1054 /* When the async is to run, the cdr of the gc_async pair gets set to
1055 the asyncs queue of the current thread. */
1056 after_gc_async_cell
= scm_cons (scm_c_make_gsubr ("%after-gc-thunk", 0, 0, 0,
1057 after_gc_async_thunk
),
1060 scm_c_hook_add (&scm_before_gc_c_hook
, queue_after_gc_hook
, NULL
, 0);
1061 scm_c_hook_add (&scm_before_gc_c_hook
, start_gc_timer
, NULL
, 0);
1062 scm_c_hook_add (&scm_after_gc_c_hook
, accumulate_gc_timer
, NULL
, 0);
1064 #if HAVE_GC_GET_HEAP_USAGE_SAFE
1065 /* GC_get_heap_usage does not take a lock, and so can run in the GC
1067 scm_c_hook_add (&scm_before_gc_c_hook
, adjust_gc_frequency
, NULL
, 0);
1069 /* GC_get_heap_usage might take a lock (and did from 7.2alpha1 to
1070 7.2alpha7), so call it in the after_gc_hook. */
1071 scm_c_hook_add (&scm_after_gc_c_hook
, adjust_gc_frequency
, NULL
, 0);
1074 #ifdef HAVE_GC_SET_START_CALLBACK
1075 GC_set_start_callback (run_before_gc_c_hook
);
1078 #include "libguile/gc.x"
/* Deprecated stub: the BDW collector sweeps internally, so this public
   entry point does nothing but announce itself.  NOTE(review): the
   signature lines were missing from the extraction; reconstructed as a
   no-argument void function per upstream — confirm.  */
void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  fprintf (stderr, "%s: doing nothing\n", FUNC_NAME);
}
#undef FUNC_NAME