Use accessors instead of symbols deprecated in libgc 7.3.
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006,
2 * 2008, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
3 *
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public License
6 * as published by the Free Software Foundation; either version 3 of
7 * the License, or (at your option) any later version.
8 *
9 * This library is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301 USA
18 */
19
20 /* #define DEBUGINFO */
21
22 #ifdef HAVE_CONFIG_H
23 # include <config.h>
24 #endif
25
26 #define SCM_BUILDING_DEPRECATED_CODE
27
28 #include "libguile/gen-scmconfig.h"
29
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
35
36 #ifdef __ia64__
37 #include <ucontext.h>
38 extern unsigned long * __libc_ia64_register_backing_store_base;
39 #endif
40
41 #include "libguile/_scm.h"
42 #include "libguile/eval.h"
43 #include "libguile/stime.h"
44 #include "libguile/stackchk.h"
45 #include "libguile/struct.h"
46 #include "libguile/smob.h"
47 #include "libguile/arrays.h"
48 #include "libguile/async.h"
49 #include "libguile/ports.h"
50 #include "libguile/root.h"
51 #include "libguile/strings.h"
52 #include "libguile/vectors.h"
53 #include "libguile/weaks.h"
54 #include "libguile/hashtab.h"
55 #include "libguile/tags.h"
56
57 #include "libguile/private-gc.h"
58 #include "libguile/validate.h"
59 #include "libguile/deprecation.h"
60 #include "libguile/gc.h"
61 #include "libguile/dynwind.h"
62
63 #include "libguile/bdw-gc.h"
64
65 /* For GC_set_start_callback. */
66 #include <gc/gc_mark.h>
67
68 #ifdef GUILE_DEBUG_MALLOC
69 #include "libguile/debug-malloc.h"
70 #endif
71
72 #ifdef HAVE_UNISTD_H
73 #include <unistd.h>
74 #endif
75
/* Set this to != 0 if every cell that is accessed shall be checked:
 */
int scm_debug_cell_accesses_p = 0;
/* When also != 0, scm_assert_cell_valid performs the expensive check
   (and may force extra collections, see scm_debug_cells_gc_interval).  */
int scm_expensive_debug_cell_accesses_p = 0;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
int scm_debug_cells_gc_interval = 0;

#if SCM_ENABLE_DEPRECATED == 1
/* Hash table that keeps a reference to objects the user wants to protect from
   garbage collection. It could arguably be private but applications have come
   to rely on it (e.g., Lilypond 2.13.9). */
SCM scm_protects;
#else
static SCM scm_protects;
#endif
94
95 #if (SCM_DEBUG_CELL_ACCESSES == 1)
96
97
98 /*
99
100 Assert that the given object is a valid reference to a valid cell. This
101 test involves to determine whether the object is a cell pointer, whether
102 this pointer actually points into a heap segment and whether the cell
103 pointed to is not a free cell. Further, additional garbage collections may
104 get executed after a user defined number of cell accesses. This helps to
105 find places in the C code where references are dropped for extremely short
106 periods.
107
108 */
109 void
110 scm_i_expensive_validation_check (SCM cell)
111 {
112 /* If desired, perform additional garbage collections after a user
113 * defined number of cell accesses.
114 */
115 if (scm_debug_cells_gc_interval)
116 {
117 static unsigned int counter = 0;
118
119 if (counter != 0)
120 {
121 --counter;
122 }
123 else
124 {
125 counter = scm_debug_cells_gc_interval;
126 scm_gc ();
127 }
128 }
129 }
130
131 /* Whether cell validation is already running. */
132 static int scm_i_cell_validation_already_running = 0;
133
134 void
135 scm_assert_cell_valid (SCM cell)
136 {
137 if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
138 {
139 scm_i_cell_validation_already_running = 1; /* set to avoid recursion */
140
141 /*
142 During GC, no user-code should be run, and the guile core
143 should use non-protected accessors.
144 */
145 if (scm_gc_running_p)
146 return;
147
148 /*
149 Only scm_in_heap_p and rescanning the heap is wildly
150 expensive.
151 */
152 if (scm_expensive_debug_cell_accesses_p)
153 scm_i_expensive_validation_check (cell);
154
155 scm_i_cell_validation_already_running = 0; /* re-enable */
156 }
157 }
158
159
160
SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, strict cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (scm_is_false (flag))
    {
      /* #f: disable checking entirely.  */
      scm_debug_cell_accesses_p = 0;
    }
  else if (scm_is_eq (flag, SCM_BOOL_T))
    {
      /* #t: cheap checking only; no forced collections.  */
      scm_debug_cells_gc_interval = 0;
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 0;
    }
  else
    {
      /* A number: expensive checking, forcing a GC after that many
         cell accesses (range-checked to a non-negative int).  */
      scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 1;
    }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
192
193
194 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
195
196 \f
197
/* Compatibility. */

#ifndef HAVE_GC_GET_HEAP_USAGE_SAFE
/* Fallback for libgc versions that lack GC_get_heap_usage_safe (added
   in 7.2): gather the figures with individual accessor calls.  Unlike
   the real function, the values are not read atomically with respect to
   the collector, but it is the best older versions can do.  */
static void
GC_get_heap_usage_safe (GC_word *pheap_size, GC_word *pfree_bytes,
                        GC_word *punmapped_bytes, GC_word *pbytes_since_gc,
                        GC_word *ptotal_bytes)
{
  *pheap_size = GC_get_heap_size ();
  *pfree_bytes = GC_get_free_bytes ();
#ifdef HAVE_GC_GET_UNMAPPED_BYTES
  *punmapped_bytes = GC_get_unmapped_bytes ();
#else
  /* No accessor available; report zero unmapped bytes.  */
  *punmapped_bytes = 0;
#endif
  *pbytes_since_gc = GC_get_bytes_since_gc ();
  *ptotal_bytes = GC_get_total_bytes ();
}
#endif
217
#ifndef HAVE_GC_GET_FREE_SPACE_DIVISOR
/* Fallback for libgc versions without the accessor: read the global
   directly (the global is deprecated as of libgc 7.3).  */
static GC_word
GC_get_free_space_divisor (void)
{
  return GC_free_space_divisor;
}
#endif
225
226 \f
/* Hooks. */
/* C-level hooks run around collection.  All five are initialized in
   scm_storage_prehistory; under BDW-GC only the before-gc and after-gc
   hooks are visibly wired up in this file (see scm_init_gc) — the
   mark/sweep hooks appear to be kept for compatibility.  */
scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
233
234
235 static void
236 run_before_gc_c_hook (void)
237 {
238 if (!SCM_I_CURRENT_THREAD)
239 /* GC while a thread is spinning up; punt. */
240 return;
241
242 scm_c_hook_run (&scm_before_gc_c_hook, NULL);
243 }
244
245
/* GC Statistics Keeping
 */
unsigned long scm_gc_ports_collected = 0;
/* Total run time (internal time units) spent in GC, and the start time
   of the collection currently in progress (0 when none is running).  */
static long gc_time_taken = 0;
static long gc_start_time = 0;

/* Current, minimum, and fractional-target values of libgc's free-space
   divisor (a larger divisor makes GC run more often).  */
static unsigned long free_space_divisor;
static unsigned long minimum_free_space_divisor;
static double target_free_space_divisor;

/* Number of entries in the scm_protects table, reported by gc-stats.  */
static unsigned long protected_obj_count = 0;


/* Keys of the alist returned by gc-stats.  */
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_heap_size, "heap-size");
SCM_SYMBOL (sym_heap_free_size, "heap-free-size");
SCM_SYMBOL (sym_heap_total_allocated, "heap-total-allocated");
SCM_SYMBOL (sym_heap_allocated_since_gc, "heap-allocated-since-gc");
SCM_SYMBOL (sym_protected_objects, "protected-objects");
SCM_SYMBOL (sym_times, "gc-times");
266
267
268 /* {Scheme Interface to GC}
269 */
270 static SCM
271 tag_table_to_type_alist (void *closure, SCM key, SCM val, SCM acc)
272 {
273 if (scm_is_integer (key))
274 {
275 int c_tag = scm_to_int (key);
276
277 char const * name = scm_i_tag_name (c_tag);
278 if (name != NULL)
279 {
280 key = scm_from_locale_string (name);
281 }
282 else
283 {
284 char s[100];
285 sprintf (s, "tag %d", c_tag);
286 key = scm_from_locale_string (s);
287 }
288 }
289
290 return scm_cons (scm_cons (key, val), acc);
291 }
292
SCM_DEFINE (scm_gc_live_object_stats, "gc-live-object-stats", 0, 0, 0,
            (),
            "Return an alist of statistics of the current live objects. ")
#define FUNC_NAME s_scm_gc_live_object_stats
{
  SCM tab = scm_make_hash_table (scm_from_int (57));
  SCM alist;

  /* NOTE(review): TAB is freshly created and never populated before the
     fold below, so this currently always returns the empty list — the
     heap-census code that used to fill the table is gone.  */
  alist
    = scm_internal_hash_fold (&tag_table_to_type_alist, NULL, SCM_EOL, tab);

  return alist;
}
#undef FUNC_NAME
307
extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  SCM answer;
  GC_word heap_size, free_bytes, unmapped_bytes, bytes_since_gc, total_bytes;
  size_t gc_times;

  /* Fetch all heap figures in one call so they are mutually
     consistent.  */
  GC_get_heap_usage_safe (&heap_size, &free_bytes, &unmapped_bytes,
                          &bytes_since_gc, &total_bytes);
#ifdef HAVE_GC_GET_GC_NO
  /* This function was added in 7.2alpha2 (June 2009). */
  gc_times = GC_get_gc_no ();
#else
  /* This symbol is deprecated as of 7.3. */
  gc_times = GC_gc_no;
#endif

  answer =
    scm_list_n (scm_cons (sym_gc_time_taken, scm_from_long (gc_time_taken)),
                scm_cons (sym_heap_size, scm_from_size_t (heap_size)),
                scm_cons (sym_heap_free_size, scm_from_size_t (free_bytes)),
                scm_cons (sym_heap_total_allocated,
                          scm_from_size_t (total_bytes)),
                scm_cons (sym_heap_allocated_since_gc,
                          scm_from_size_t (bytes_since_gc)),
                scm_cons (sym_protected_objects,
                          scm_from_ulong (protected_obj_count)),
                scm_cons (sym_times, scm_from_size_t (gc_times)),
                SCM_UNDEFINED);

  return answer;
}
#undef FUNC_NAME
345
346
SCM_DEFINE (scm_gc_dump, "gc-dump", 0, 0, 0,
            (void),
            "Dump information about the garbage collector's internal data "
            "structures and memory usage to the standard output.")
#define FUNC_NAME s_scm_gc_dump
{
  /* Delegates entirely to libgc's diagnostic dump.  */
  GC_dump ();

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
358
359
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  /* SCM_UNPACK yields the raw bits of the SCM word; for heap objects
     that is the object's address.  */
  return scm_from_ulong (SCM_UNPACK (obj));
}
#undef FUNC_NAME
369
370
SCM_DEFINE (scm_gc_disable, "gc-disable", 0, 0, 0,
            (),
            "Disables the garbage collector.  Nested calls are permitted.  "
            "GC is re-enabled once @code{gc-enable} has been called the "
            "same number of times @code{gc-disable} was called.")
#define FUNC_NAME s_scm_gc_disable
{
  /* libgc keeps the nesting count internally.  */
  GC_disable ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
382
SCM_DEFINE (scm_gc_enable, "gc-enable", 0, 0, 0,
            (),
            "Enables the garbage collector.")
#define FUNC_NAME s_scm_gc_enable
{
  /* Undoes one level of GC_disable nesting.  */
  GC_enable ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
392
393
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all of SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  scm_i_gc ("call");
  /* If you're calling scm_gc(), you probably want synchronous
     finalization. */
  GC_invoke_finalizers ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
407
/* Trigger a full collection.  WHAT labels the reason for the collection
   (for debugging); it is currently unused here.  */
void
scm_i_gc (const char *what)
{
#ifndef HAVE_GC_SET_START_CALLBACK
  /* Without libgc's start-callback API (see scm_init_gc), the before-gc
     hook must be run by hand.  */
  run_before_gc_c_hook ();
#endif
  GC_gcollect ();
}
416
417
418 \f
419 /* {GC Protection Helper Functions}
420 */
421
422
423 /*
424 * If within a function you need to protect one or more scheme objects from
425 * garbage collection, pass them as parameters to one of the
426 * scm_remember_upto_here* functions below. These functions don't do
427 * anything, but since the compiler does not know that they are actually
428 * no-ops, it will generate code that calls these functions with the given
429 * parameters. Therefore, you can be sure that the compiler will keep those
430 * scheme values alive (on the stack or in a register) up to the point where
431 * scm_remember_upto_here* is called. In other words, place the call to
432 * scm_remember_upto_here* _behind_ the last code in your function, that
433 * depends on the scheme object to exist.
434 *
435 * Example: We want to make sure that the string object str does not get
436 * garbage collected during the execution of 'some_function' in the code
437 * below, because otherwise the characters belonging to str would be freed and
438 * 'some_function' might access freed memory. To make sure that the compiler
439 * keeps str alive on the stack or in a register such that it is visible to
440 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
441 * call to 'some_function'. Note that this would not be necessary if str was
442 * used anyway after the call to 'some_function'.
443 * char *chars = scm_i_string_chars (str);
444 * some_function (chars);
445 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
446 */
447
/* Remove any macro versions of these while defining the functions.
   Functions are always included in the library, for upward binary
   compatibility and in case combinations of GCC and non-GCC are used. */
#undef scm_remember_upto_here_1
#undef scm_remember_upto_here_2

void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty. Protects a single object from garbage collection. */
}
459
void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty. Protects two objects from garbage collection. */
}
465
void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty. Protects any number of objects from garbage collection. */
}
471
/*
  These crazy functions prevent garbage collection
  of arguments after the first argument by
  ensuring they remain live throughout the
  function because they are used in the last
  line of the code block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  /* The variadic arguments are intentionally never read; their only
     purpose is to be live on the stack at the point of this call.  */
  return elt;
}
485
/* Same idea as scm_return_first, for an int first argument.  */
int
scm_return_first_int (int i, ...)
{
  return i;
}
491
492
493 SCM
494 scm_permanent_object (SCM obj)
495 {
496 return (scm_gc_protect_object (obj));
497 }
498
499
/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
   i. e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected. It is an error to unprotect an object more often
   than it has been protected before. The function scm_protect_object returns
   OBJ.
*/

/* Implementation note: For every object X, there is a counter which
   scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X) decrements.
*/



SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: Indeed; if my comment above is correct, there is the same
     critsec/mutex inconsistency here. */
  SCM_CRITICAL_SECTION_START;

  /* Find (or create with a count of 0) the entry for OBJ, then bump its
     protection count.  */
  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  protected_obj_count ++;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
535
536
/* Remove any protection for OBJ established by a prior call to
   scm_protect_object. This function returns OBJ.

   See scm_protect_object for more information. */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  /* Unprotecting during a collection would race with the collector;
     fail loudly instead of corrupting the table.  */
  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* Decrement the nesting count; drop the table entry entirely once
         it reaches zero.  */
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, count);
    }
  protected_obj_count --;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
577
/* With the conservative BDW-GC collector the stack and static data are
   scanned automatically, so explicit root registration is unnecessary.
   Kept as a no-op for API compatibility.  */
void
scm_gc_register_root (SCM *p)
{
  /* Nothing. */
}
583
/* Counterpart of scm_gc_register_root; also a no-op under BDW-GC.  */
void
scm_gc_unregister_root (SCM *p)
{
  /* Nothing. */
}
589
590 void
591 scm_gc_register_roots (SCM *b, unsigned long n)
592 {
593 SCM *p = b;
594 for (; p < b + n; ++p)
595 scm_gc_register_root (p);
596 }
597
598 void
599 scm_gc_unregister_roots (SCM *b, unsigned long n)
600 {
601 SCM *p = b;
602 for (; p < b + n; ++p)
603 scm_gc_unregister_root (p);
604 }
605
606 \f
607
608
609 /*
MOVE THIS FUNCTION. IT DOES NOT HAVE ANYTHING TO DO WITH GC.
611 */
612
613 /* Get an integer from an environment variable. */
/* Get an integer from environment variable VAR, or DEF when VAR is
   unset, empty, does not start with a decimal number, or is out of
   `int' range.  (The old code silently truncated out-of-range values
   when narrowing the strtol result from long to int.)  */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res;

  if (!val)
    return def;
  errno = 0;
  res = strtol (val, &end, 10);
  /* Reject strings with no leading digits, values that overflow `long'
     (ERANGE), and values that do not fit in an `int'.  */
  if (end == val || errno == ERANGE || res < INT_MIN || res > INT_MAX)
    return def;
  return (int) res;
}
627
#ifndef HAVE_GC_SET_FINALIZE_ON_DEMAND
/* Fallback for libgc versions without the setter: poke the global
   directly (the global is deprecated as of libgc 7.3).  */
static void
GC_set_finalize_on_demand (int foo)
{
  GC_finalize_on_demand = foo;
}
#endif
635
/* Earliest GC setup: configure and initialize libgc before any Guile
   allocation happens.  Must run before any other libguile
   initialization.  */
void
scm_storage_prehistory ()
{
#ifdef HAVE_GC_SET_ALL_INTERIOR_POINTERS
  /* This function was added in 7.2alpha2 (June 2009). */
  GC_set_all_interior_pointers (0);
#else
  /* This symbol is deprecated in 7.3. */
  GC_all_interior_pointers = 0;
#endif

  /* Free-space-divisor heuristics; see adjust_gc_frequency.  */
  free_space_divisor = scm_getenv_int ("GC_FREE_SPACE_DIVISOR", 3);
  minimum_free_space_divisor = free_space_divisor;
  target_free_space_divisor = free_space_divisor;
  GC_set_free_space_divisor (free_space_divisor);
  GC_set_finalize_on_demand (1);

  GC_INIT ();

#if (! ((defined GC_VERSION_MAJOR) && (GC_VERSION_MAJOR >= 7))) \
    && (defined SCM_I_GSC_USE_PTHREAD_THREADS)
  /* When using GC 6.8, this call is required to initialize thread-local
     freelists (shouldn't be necessary with GC 7.0). */
  GC_init ();
#endif

  GC_expand_hp (SCM_DEFAULT_INIT_HEAP_SIZE_2);

  /* We only need to register a displacement for those types for which the
     higher bits of the type tag are used to store a pointer (that is, a
     pointer to an 8-octet aligned region). For `scm_tc3_struct', this is
     handled in `scm_alloc_struct ()'. */
  GC_REGISTER_DISPLACEMENT (scm_tc3_cons);
  /* GC_REGISTER_DISPLACEMENT (scm_tc3_unused); */

  /* Sanity check. */
  if (!GC_is_visible (&scm_protects))
    abort ();

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
}
681
scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

/* Create the table backing scm_gc_protect_object /
   scm_gc_unprotect_object.  Must run before any protection calls.  */
void
scm_init_gc_protect_object ()
{
  /* 31 buckets is just a starting size; the table grows as needed.  */
  scm_protects = scm_c_make_hash_table (31);

#if 0
  /* We can't have a cleanup handler since we have no thread to run it
     in. */

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#endif
}
703
704 \f
705
SCM scm_after_gc_hook;

/* Pair whose car is the %after-gc-thunk gsubr and whose cdr doubles as
   the async-queue link / "already queued" marker (see
   queue_after_gc_hook and scm_init_gc).  */
static SCM after_gc_async_cell;

/* The function after_gc_async_thunk causes the execution of the
 * after-gc-hook. It is run after the gc, as soon as the asynchronous
 * events are handled by the evaluator.
 */
static SCM
after_gc_async_thunk (void)
{
  /* Fun, no? Hook-run *and* run-hook? */
  scm_c_hook_run (&scm_after_gc_c_hook, NULL);
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}
722
723
/* The function queue_after_gc_hook is run by the scm_before_gc_c_hook
 * at the end of the garbage collection. The only purpose of this
 * function is to mark the after_gc_async (which will eventually lead to
 * the execution of the after_gc_async_thunk).
 */
static void *
queue_after_gc_hook (void * hook_data SCM_UNUSED,
                     void *fn_data SCM_UNUSED,
                     void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector.  When running guile with cell access debugging and the
   * execution of the after-gc-hook is desired, then it is necessary to run
   * the hook explicitly from the user code.  This has the effect, that from
   * the scheme level point of view it seems that garbage collection is
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one to one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (scm_debug_cells_gc_interval == 0)
#endif
    {
      scm_i_thread *t = SCM_I_CURRENT_THREAD;

      /* Queue the async only if it is not already queued (a false cdr
         marks "not queued").  */
      if (scm_is_false (SCM_CDR (after_gc_async_cell)))
        {
          SCM_SETCDR (after_gc_async_cell, t->active_asyncs);
          t->active_asyncs = after_gc_async_cell;
          t->pending_asyncs = 1;
        }
    }

  return NULL;
}
775
776 \f
777
778 static void *
779 start_gc_timer (void * hook_data SCM_UNUSED,
780 void *fn_data SCM_UNUSED,
781 void *data SCM_UNUSED)
782 {
783 if (!gc_start_time)
784 gc_start_time = scm_c_get_internal_run_time ();
785
786 return NULL;
787 }
788
789 static void *
790 accumulate_gc_timer (void * hook_data SCM_UNUSED,
791 void *fn_data SCM_UNUSED,
792 void *data SCM_UNUSED)
793 {
794 if (gc_start_time)
795 {
796 long now = scm_c_get_internal_run_time ();
797 gc_time_taken += now - gc_start_time;
798 gc_start_time = 0;
799 }
800
801 return NULL;
802 }
803
/* Return some idea of the memory footprint of a process, in bytes.
   Currently only works on Linux systems (returns 0 elsewhere).  */
static size_t
get_image_size (void)
{
  unsigned long size, resident, share;
  size_t ret = 0;

  FILE *fp = fopen ("/proc/self/statm", "r");

  if (fp)
    {
      if (fscanf (fp, "%lu %lu %lu", &size, &resident, &share) == 3)
        {
          /* statm reports counts in pages; query the real page size
             rather than hard-coding 4096 (wrong on e.g. 16K/64K-page
             ARM and POWER kernels).  */
#ifdef _SC_PAGESIZE
          long page_size = sysconf (_SC_PAGESIZE);
          if (page_size <= 0)
            page_size = 4096;
          ret = resident * (size_t) page_size;
#else
          ret = resident * 4096;
#endif
        }
      fclose (fp);
    }

  return ret;
}
822
/* These are discussed later. */
/* Countdown of bytes of registered non-GC allocation before a forced
   collection; see scm_gc_register_allocation and adjust_gc_frequency.  */
static size_t bytes_until_gc;
static scm_i_pthread_mutex_t bytes_until_gc_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
826
827 /* Make GC run more frequently when the process image size is growing,
828 measured against the number of bytes allocated through the GC.
829
830 If Guile is allocating at a GC-managed heap size H, libgc will tend
831 to limit the process image size to H*N. But if at the same time the
832 user program is mallocating at a rate M bytes per GC-allocated byte,
833 then the process stabilizes at H*N*M -- assuming that collecting data
834 will result in malloc'd data being freed. It doesn't take a very
835 large M for this to be a bad situation. To limit the image size,
836 Guile should GC more often -- the bigger the M, the more often.
837
838 Numeric functions that produce bigger and bigger integers are
839 pessimal, because M is an increasing function of time. Here is an
840 example of such a function:
841
842 (define (factorial n)
843 (define (fac n acc)
844 (if (<= n 1)
845 acc
846 (fac (1- n) (* n acc))))
847 (fac n 1))
848
849 It is possible for a process to grow for reasons that will not be
850 solved by faster GC. In that case M will be estimated as
851 artificially high for a while, and so GC will happen more often on
852 the Guile side. But when it stabilizes, Guile can ease back the GC
853 frequency.
854
855 The key is to measure process image growth, not mallocation rate.
856 For maximum effectiveness, Guile reacts quickly to process growth,
857 and exponentially backs down when the process stops growing.
858
859 See http://thread.gmane.org/gmane.lisp.guile.devel/12552/focus=12936
860 for further discussion.
861 */
862 static void *
863 adjust_gc_frequency (void * hook_data SCM_UNUSED,
864 void *fn_data SCM_UNUSED,
865 void *data SCM_UNUSED)
866 {
867 static size_t prev_image_size = 0;
868 static size_t prev_bytes_alloced = 0;
869 size_t image_size;
870 size_t bytes_alloced;
871
872 scm_i_pthread_mutex_lock (&bytes_until_gc_lock);
873 bytes_until_gc = GC_get_heap_size ();
874 scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
875
876 image_size = get_image_size ();
877 bytes_alloced = GC_get_total_bytes ();
878
879 #define HEURISTICS_DEBUG 0
880
881 #if HEURISTICS_DEBUG
882 fprintf (stderr, "prev image / alloced: %lu / %lu\n", prev_image_size, prev_bytes_alloced);
883 fprintf (stderr, " image / alloced: %lu / %lu\n", image_size, bytes_alloced);
884 fprintf (stderr, "divisor %lu / %f\n", free_space_divisor, target_free_space_divisor);
885 #endif
886
887 if (prev_image_size && bytes_alloced != prev_bytes_alloced)
888 {
889 double growth_rate, new_target_free_space_divisor;
890 double decay_factor = 0.5;
891 double hysteresis = 0.1;
892
893 growth_rate = ((double) image_size - prev_image_size)
894 / ((double)bytes_alloced - prev_bytes_alloced);
895
896 #if HEURISTICS_DEBUG
897 fprintf (stderr, "growth rate %f\n", growth_rate);
898 #endif
899
900 new_target_free_space_divisor = minimum_free_space_divisor;
901
902 if (growth_rate > 0)
903 new_target_free_space_divisor *= 1.0 + growth_rate;
904
905 #if HEURISTICS_DEBUG
906 fprintf (stderr, "new divisor %f\n", new_target_free_space_divisor);
907 #endif
908
909 if (new_target_free_space_divisor < target_free_space_divisor)
910 /* Decay down. */
911 target_free_space_divisor =
912 (decay_factor * target_free_space_divisor
913 + (1.0 - decay_factor) * new_target_free_space_divisor);
914 else
915 /* Jump up. */
916 target_free_space_divisor = new_target_free_space_divisor;
917
918 #if HEURISTICS_DEBUG
919 fprintf (stderr, "new target divisor %f\n", target_free_space_divisor);
920 #endif
921
922 if (free_space_divisor + 0.5 + hysteresis < target_free_space_divisor
923 || free_space_divisor - 0.5 - hysteresis > target_free_space_divisor)
924 {
925 free_space_divisor = lround (target_free_space_divisor);
926 #if HEURISTICS_DEBUG
927 fprintf (stderr, "new divisor %lu\n", free_space_divisor);
928 #endif
929 GC_set_free_space_divisor (free_space_divisor);
930 }
931 }
932
933 prev_image_size = image_size;
934 prev_bytes_alloced = bytes_alloced;
935
936 return NULL;
937 }
938
939 /* The adjust_gc_frequency routine handles transients in the process
image size. It can't handle intense non-GC-managed steady-state
941 allocation though, as it decays the FSD at steady-state down to its
942 minimum value.
943
944 The only real way to handle continuous, high non-GC allocation is to
945 let the GC know about it. This routine can handle non-GC allocation
946 rates that are similar in size to the GC-managed heap size.
947 */
948
949 void
950 scm_gc_register_allocation (size_t size)
951 {
952 scm_i_pthread_mutex_lock (&bytes_until_gc_lock);
953 if (bytes_until_gc - size > bytes_until_gc)
954 {
955 bytes_until_gc = GC_get_heap_size ();
956 scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
957 GC_gcollect ();
958 }
959 else
960 {
961 bytes_until_gc -= size;
962 scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
963 }
964 }
965
966
967 \f
968
/* Map a type tag to a human-readable name, or NULL for unknown tags.
   The low 7 bits select the major case; numbers and smobs dispatch
   further on the full tag.  Note the scm_tcs_* labels expand to
   multiple `case' labels.  */
char const *
scm_i_tag_name (scm_t_bits tag)
{
  switch (tag & 0x7f) /* 7 bits */
    {
    case scm_tcs_struct:
      return "struct";
    case scm_tcs_cons_imcar:
      return "cons (immediate car)";
    case scm_tcs_cons_nimcar:
      return "cons (non-immediate car)";
    case scm_tc7_pointer:
      return "foreign";
    case scm_tc7_hashtable:
      return "hashtable";
    case scm_tc7_fluid:
      return "fluid";
    case scm_tc7_dynamic_state:
      return "dynamic state";
    case scm_tc7_frame:
      return "frame";
    case scm_tc7_objcode:
      return "objcode";
    case scm_tc7_vm:
      return "vm";
    case scm_tc7_vm_cont:
      return "vm continuation";
    case scm_tc7_wvect:
      return "weak vector";
    case scm_tc7_vector:
      return "vector";
    case scm_tc7_number:
      /* Numbers are distinguished by the full tc16 tag; unknown numeric
         tags fall through to the NULL return below.  */
      switch (tag)
        {
        case scm_tc16_real:
          return "real";
          break;
        case scm_tc16_big:
          return "bignum";
          break;
        case scm_tc16_complex:
          return "complex number";
          break;
        case scm_tc16_fraction:
          return "fraction";
          break;
        }
      break;
    case scm_tc7_string:
      return "string";
      break;
    case scm_tc7_stringbuf:
      return "string buffer";
      break;
    case scm_tc7_symbol:
      return "symbol";
      break;
    case scm_tc7_variable:
      return "variable";
      break;
    case scm_tc7_port:
      return "port";
      break;
    case scm_tc7_smob:
      {
        /* The smob number lives in bits 8-15 of the tag; look up the
           registered name.  */
        int k = 0xff & (tag >> 8);
        return (scm_smobs[k].name);
      }
      break;
    }

  /* Unknown tag.  */
  return NULL;
}
1042
1043
1044
1045 \f
/* Late GC initialization: Scheme-visible hooks, the after-gc async, the
   GC timing hooks, and the heuristic frequency adjustment.  */
void
scm_init_gc ()
{
  /* `GC_INIT ()' was invoked in `scm_storage_prehistory ()'. */

  scm_after_gc_hook = scm_make_hook (SCM_INUM0);
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  /* When the async is to run, the cdr of the gc_async pair gets set to
     the asyncs queue of the current thread. */
  after_gc_async_cell = scm_cons (scm_c_make_gsubr ("%after-gc-thunk", 0, 0, 0,
                                                    after_gc_async_thunk),
                                  SCM_BOOL_F);

  scm_c_hook_add (&scm_before_gc_c_hook, queue_after_gc_hook, NULL, 0);
  scm_c_hook_add (&scm_before_gc_c_hook, start_gc_timer, NULL, 0);
  scm_c_hook_add (&scm_after_gc_c_hook, accumulate_gc_timer, NULL, 0);

#if HAVE_GC_GET_HEAP_USAGE_SAFE
  /* GC_get_heap_usage does not take a lock, and so can run in the GC
     start hook. */
  scm_c_hook_add (&scm_before_gc_c_hook, adjust_gc_frequency, NULL, 0);
#else
  /* GC_get_heap_usage might take a lock (and did from 7.2alpha1 to
     7.2alpha7), so call it in the after_gc_hook. */
  scm_c_hook_add (&scm_after_gc_c_hook, adjust_gc_frequency, NULL, 0);
#endif

#ifdef HAVE_GC_SET_START_CALLBACK
  GC_set_start_callback (run_before_gc_c_hook);
#endif

  /* Generated registrations for the SCM_DEFINEs in this file.  */
#include "libguile/gc.x"
}
1080
1081
/* Legacy entry point: with the BDW-GC collector there is no separate
   sweep phase for Guile to drive, so this is effectively a stub.  */
void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  /* FIXME */
  fprintf (stderr, "%s: doing nothing\n", FUNC_NAME);
}
#undef FUNC_NAME
1090
1091 /*
1092 Local Variables:
1093 c-file-style: "gnu"
1094 End:
1095 */