Merge commit 'd364a8971828e38e8f9112b711066f4962bb400e'
[bpt/guile.git] / libguile / gc.c
CommitLineData
0f595d7d
LC
1/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006,
2 * 2008, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
a00c95d9 3 *
73be1d9e 4 * This library is free software; you can redistribute it and/or
53befeb7
NJ
5 * modify it under the terms of the GNU Lesser General Public License
6 * as published by the Free Software Foundation; either version 3 of
7 * the License, or (at your option) any later version.
a00c95d9 8 *
53befeb7
NJ
9 * This library is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
73be1d9e
MV
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
a00c95d9 13 *
73be1d9e
MV
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with this library; if not, write to the Free Software
53befeb7
NJ
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301 USA
73be1d9e 18 */
1bbd0b84 19
37ddcaf6
MD
20/* #define DEBUGINFO */
21
dbb605f5 22#ifdef HAVE_CONFIG_H
aa54a9b0
RB
23# include <config.h>
24#endif
56495472 25
e7bca227
LC
26#include "libguile/gen-scmconfig.h"
27
0f2d19dd 28#include <stdio.h>
e6e2e95a 29#include <errno.h>
783e7774 30#include <string.h>
34cf38c3 31#include <stdlib.h>
6360beb2 32#include <math.h>
e6e2e95a 33
3ec17f28
LC
34#ifdef __ia64__
35#include <ucontext.h>
36extern unsigned long * __libc_ia64_register_backing_store_base;
37#endif
38
a0599745 39#include "libguile/_scm.h"
0a7a7445 40#include "libguile/eval.h"
a0599745
MD
41#include "libguile/stime.h"
42#include "libguile/stackchk.h"
43#include "libguile/struct.h"
a0599745 44#include "libguile/smob.h"
2fa901a5 45#include "libguile/arrays.h"
a0599745
MD
46#include "libguile/async.h"
47#include "libguile/ports.h"
48#include "libguile/root.h"
87fc4596 49#include "libguile/simpos.h"
a0599745
MD
50#include "libguile/strings.h"
51#include "libguile/vectors.h"
686765af 52#include "libguile/hashtab.h"
ecf470a2 53#include "libguile/tags.h"
a0599745
MD
54
55#include "libguile/validate.h"
1be6b49c 56#include "libguile/deprecation.h"
a0599745 57#include "libguile/gc.h"
9de87eea 58#include "libguile/dynwind.h"
fce59c93 59
1c44468d 60#include "libguile/bdw-gc.h"
a82e7953 61
cc3546b0
AW
62/* For GC_set_start_callback. */
63#include <gc/gc_mark.h>
64
bc9d9bb2 65#ifdef GUILE_DEBUG_MALLOC
a0599745 66#include "libguile/debug-malloc.h"
bc9d9bb2
MD
67#endif
68
0f2d19dd 69#ifdef HAVE_UNISTD_H
95b88819 70#include <unistd.h>
0f2d19dd
JB
71#endif
72
064d2409
AW
/* Size in bytes of the initial heap.  This should be about the size of
   result of 'guile -c "(display (assq-ref (gc-stats)
   'heap-total-allocated))"'. */

#define DEFAULT_INITIAL_HEAP_SIZE (128 * 1024 * SIZEOF_SCM_T_BITS)

/* Set this to != 0 if every cell that is accessed shall be checked:
 */
int scm_debug_cell_accesses_p = 0;
/* Set this to != 0 to additionally run the expensive heap-membership
   check on each access (see scm_i_expensive_validation_check).  */
int scm_expensive_debug_cell_accesses_p = 0;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
int scm_debug_cells_gc_interval = 0;

/* Hash table that keeps a reference to objects the user wants to protect from
   garbage collection.  Maps object -> protection count (see
   scm_gc_protect_object / scm_gc_unprotect_object below). */
static SCM scm_protects;
e7efe8e7
AW
92
93
eab1b259
HWN
94#if (SCM_DEBUG_CELL_ACCESSES == 1)
95
96
/*

  Assert that the given object is a valid reference to a valid cell.  This
  test involves to determine whether the object is a cell pointer, whether
  this pointer actually points into a heap segment and whether the cell
  pointed to is not a free cell.  Further, additional garbage collections may
  get executed after a user defined number of cell accesses.  This helps to
  find places in the C code where references are dropped for extremely short
  periods.

*/
void
scm_i_expensive_validation_check (SCM cell)
{
  /* If desired, perform additional garbage collections after a user
   * defined number of cell accesses.
   */
  if (scm_debug_cells_gc_interval)
    {
      /* Countdown persists across calls; a GC is forced every
         scm_debug_cells_gc_interval invocations. */
      static unsigned int counter = 0;

      if (counter != 0)
        {
          --counter;
        }
      else
        {
          counter = scm_debug_cells_gc_interval;
          scm_gc ();
        }
    }
}
129
8c93b597
LC
130/* Whether cell validation is already running. */
131static int scm_i_cell_validation_already_running = 0;
132
eab1b259
HWN
133void
134scm_assert_cell_valid (SCM cell)
135{
136 if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
406c7d90 137 {
eab1b259 138 scm_i_cell_validation_already_running = 1; /* set to avoid recursion */
406c7d90 139
c8a1bdc4 140 /*
eab1b259
HWN
141 During GC, no user-code should be run, and the guile core
142 should use non-protected accessors.
143 */
c8a1bdc4 144 if (scm_gc_running_p)
eab1b259 145 return;
c8a1bdc4
HWN
146
147 /*
eab1b259
HWN
148 Only scm_in_heap_p and rescanning the heap is wildly
149 expensive.
150 */
151 if (scm_expensive_debug_cell_accesses_p)
152 scm_i_expensive_validation_check (cell);
b4246e5b 153
eab1b259 154 scm_i_cell_validation_already_running = 0; /* re-enable */
406c7d90
DH
155 }
156}
157
158
eab1b259 159
406c7d90
DH
SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
	    (SCM flag),
	    "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
	    "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
	    "but no additional calls to garbage collection are issued.\n"
	    "If @var{flag} is a number, strict cell access checking is enabled,\n"
	    "with an additional garbage collection after the given\n"
	    "number of cell accesses.\n"
	    "This procedure only exists when the compile-time flag\n"
	    "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (scm_is_false (flag))
    {
      /* Disable all checking; the other flags are left as-is but are
         gated by scm_debug_cell_accesses_p. */
      scm_debug_cell_accesses_p = 0;
    }
  else if (scm_is_eq (flag, SCM_BOOL_T))
    {
      /* Cheap checking only: no periodic forced GC, no heap scan. */
      scm_debug_cells_gc_interval = 0;
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 0;
    }
  else
    {
      /* A number enables strict checking with a forced GC every
         FLAG cell accesses; FLAG must fit in [0, INT_MAX]. */
      scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 1;
    }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
0f2d19dd 191
ecf470a2 192
c8a1bdc4 193#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
0f2d19dd
JB
194
195\f
14294ce0 196
26224b3f
LC
/* Hooks.  C-level hooks run at the corresponding phases of collection;
   they are initialized in scm_storage_prehistory and populated in
   scm_init_gc. */
scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
945fec60 203
0f2d19dd 204
0fbdbe6c
AW
/* Trampoline installed via GC_set_start_callback: runs the
   before-gc C hook at the start of each collection. */
static void
run_before_gc_c_hook (void)
{
  if (!SCM_I_CURRENT_THREAD)
    /* GC while a thread is spinning up; punt. */
    return;

  scm_c_hook_run (&scm_before_gc_c_hook, NULL);
}
214
215
0f2d19dd
JB
/* GC Statistics Keeping
 */
unsigned long scm_gc_ports_collected = 0;
/* Cumulative GC time and the start time of the collection in progress,
   in internal run-time units (see start_gc_timer / accumulate_gc_timer). */
static long gc_time_taken = 0;
static long gc_start_time = 0;

/* Free-space-divisor state used by the adaptive GC frequency heuristic
   (see adjust_gc_frequency below). */
static unsigned long free_space_divisor;
static unsigned long minimum_free_space_divisor;
static double target_free_space_divisor;

/* Number of currently protected objects (see scm_gc_protect_object). */
static unsigned long protected_obj_count = 0;


/* Keys of the association list returned by gc-stats. */
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_heap_size, "heap-size");
SCM_SYMBOL (sym_heap_free_size, "heap-free-size");
SCM_SYMBOL (sym_heap_total_allocated, "heap-total-allocated");
SCM_SYMBOL (sym_heap_allocated_since_gc, "heap-allocated-since-gc");
SCM_SYMBOL (sym_protected_objects, "protected-objects");
SCM_SYMBOL (sym_times, "gc-times");
cf2d30f6 236
d3dd80ab 237
0f2d19dd
JB
/* {Scheme Interface to GC}
 */
extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
	    (),
	    "Return an association list of statistics about Guile's current\n"
	    "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  SCM answer;
  GC_word heap_size, free_bytes, unmapped_bytes, bytes_since_gc, total_bytes;
  size_t gc_times;

  /* Snapshot the collector's counters in one call. */
  GC_get_heap_usage_safe (&heap_size, &free_bytes, &unmapped_bytes,
			  &bytes_since_gc, &total_bytes);
  gc_times = GC_get_gc_no ();

  answer =
    scm_list_n (scm_cons (sym_gc_time_taken, scm_from_long (gc_time_taken)),
		scm_cons (sym_heap_size, scm_from_size_t (heap_size)),
		scm_cons (sym_heap_free_size, scm_from_size_t (free_bytes)),
		scm_cons (sym_heap_total_allocated,
			  scm_from_size_t (total_bytes)),
		scm_cons (sym_heap_allocated_since_gc,
			  scm_from_size_t (bytes_since_gc)),
		scm_cons (sym_protected_objects,
			  scm_from_ulong (protected_obj_count)),
		scm_cons (sym_times, scm_from_size_t (gc_times)),
		SCM_UNDEFINED);

  return answer;
}
#undef FUNC_NAME
0f2d19dd 271
539b08a4 272
7f9ec18a
LC
SCM_DEFINE (scm_gc_dump, "gc-dump", 0, 0, 0,
	    (void),
	    "Dump information about the garbage collector's internal data "
	    "structures and memory usage to the standard output.")
#define FUNC_NAME s_scm_gc_dump
{
  /* Delegates entirely to libgc's built-in dump. */
  GC_dump ();

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
284
acf4331f 285
c8a1bdc4
HWN
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
	    (SCM obj),
	    "Return an integer that for the lifetime of @var{obj} is uniquely\n"
	    "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  /* The SCM bits themselves (for heap objects, the address) serve as
     the unique integer. */
  return scm_from_ulong (SCM_UNPACK (obj));
}
#undef FUNC_NAME
c68296f8 295
1be6b49c 296
915b3f9f
LC
SCM_DEFINE (scm_gc_disable, "gc-disable", 0, 0, 0,
	    (),
	    "Disables the garbage collector.  Nested calls are permitted.  "
	    "GC is re-enabled once @code{gc-enable} has been called the "
	    "same number of times @code{gc-disable} was called.")
#define FUNC_NAME s_scm_gc_disable
{
  /* libgc keeps the nesting count internally. */
  GC_disable ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_gc_enable, "gc-enable", 0, 0, 0,
	    (),
	    "Enables the garbage collector.")
#define FUNC_NAME s_scm_gc_enable
{
  GC_enable ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
318
319
c8a1bdc4
HWN
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
	    (),
	    "Scans all of SCM objects and reclaims for further use those that are\n"
	    "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  scm_i_gc ("call");
  /* If you're calling scm_gc(), you probably want synchronous
     finalization. */
  GC_invoke_finalizers ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
9d47a1e6 333
c8a1bdc4 334void
b17e0ac3 335scm_i_gc (const char *what)
c8a1bdc4 336{
26224b3f 337 GC_gcollect ();
eab1b259 338}
0f2d19dd 339
4c7016dc 340
0f2d19dd
JB
341\f
342/* {GC Protection Helper Functions}
343 */
344
345
5d2b97cd
DH
/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _behind_ the last code in your function, that
 * depends on the scheme object to exist.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function' in the code
 * below, because otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory.  To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
 * call to 'some_function'.  Note that this would not be necessary if str was
 * used anyway after the call to 'some_function'.
 *   char *chars = scm_i_string_chars (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

/* Remove any macro versions of these while defining the functions.
   Functions are always included in the library, for upward binary
   compatibility and in case combinations of GCC and non-GCC are used.  */
#undef scm_remember_upto_here_1
#undef scm_remember_upto_here_2

void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty.  Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty.  Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty.  Protects any number of objects from garbage collection. */
}
394
c209c88e 395/*
41b0806d
GB
396 These crazy functions prevent garbage collection
397 of arguments after the first argument by
398 ensuring they remain live throughout the
399 function because they are used in the last
400 line of the code block.
401 It'd be better to have a nice compiler hint to
402 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
403SCM
404scm_return_first (SCM elt, ...)
0f2d19dd
JB
405{
406 return elt;
407}
408
41b0806d
GB
409int
410scm_return_first_int (int i, ...)
411{
412 return i;
413}
414
0f2d19dd 415
0f2d19dd 416SCM
6e8d25a6 417scm_permanent_object (SCM obj)
0f2d19dd 418{
8e7b3e98 419 return (scm_gc_protect_object (obj));
0f2d19dd
JB
420}
421
422
7bd4fbe2
MD
/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_gc_unprotect_object (OBJ).  Calls to scm_gc_protect/unprotect_object nest,
   i. e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more often
   than it has been protected before.  The function scm_protect_object returns
   OBJ.
*/

/* Implementation note: For every object X, there is a counter which
   scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X) decrements.
*/



SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: Indeed; if my comment above is correct, there is the same
     critsec/mutex inconsistency here. */
  SCM_CRITICAL_SECTION_START;

  /* Look up (or create with count 0) the entry for OBJ, then bump the
     protection count. */
  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  protected_obj_count ++;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
458
459
/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  /* Unprotecting during GC would mutate the protects table while the
     collector may be using it; treat as a fatal programming error. */
  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      /* More unprotects than protects: programming error. */
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* Decrement the nesting count; drop the table entry when it
         reaches zero so OBJ becomes collectable again. */
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, count);
    }
  protected_obj_count --;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
500
6b1b030e
ML
501void
502scm_gc_register_root (SCM *p)
503{
8e7b3e98 504 /* Nothing. */
6b1b030e
ML
505}
506
507void
508scm_gc_unregister_root (SCM *p)
509{
8e7b3e98 510 /* Nothing. */
6b1b030e
ML
511}
512
513void
514scm_gc_register_roots (SCM *b, unsigned long n)
515{
516 SCM *p = b;
517 for (; p < b + n; ++p)
518 scm_gc_register_root (p);
519}
520
521void
522scm_gc_unregister_roots (SCM *b, unsigned long n)
523{
524 SCM *p = b;
525 for (; p < b + n; ++p)
526 scm_gc_unregister_root (p);
527}
528
0f2d19dd 529\f
a00c95d9 530
4c48ba06 531
c35738c1
MD
/* Earliest GC setup: configure and initialize libgc before any
   allocation happens.  Must run before scm_init_gc_protect_object and
   scm_init_gc. */
void
scm_storage_prehistory ()
{
  /* Configuration must precede GC_INIT. */
  GC_set_all_interior_pointers (0);

  free_space_divisor = scm_getenv_int ("GC_FREE_SPACE_DIVISOR", 3);
  minimum_free_space_divisor = free_space_divisor;
  target_free_space_divisor = free_space_divisor;
  GC_set_free_space_divisor (free_space_divisor);
  GC_set_finalize_on_demand (1);

  GC_INIT ();

  GC_expand_hp (DEFAULT_INITIAL_HEAP_SIZE);

  /* We only need to register a displacement for those types for which the
     higher bits of the type tag are used to store a pointer (that is, a
     pointer to an 8-octet aligned region).  For `scm_tc3_struct', this is
     handled in `scm_alloc_struct ()'. */
  GC_REGISTER_DISPLACEMENT (scm_tc3_cons);
  /* GC_REGISTER_DISPLACEMENT (scm_tc3_unused); */

  /* Sanity check: the static cell holding the protects table must be
     visible to the collector's root scan. */
  if (!GC_is_visible (&scm_protects))
    abort ();

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
}
85db4a2c 564
9de87eea 565scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
eb01cb64 566
562cd1b8
AW
/* Create the protects table used by scm_gc_protect_object.  Runs after
   scm_storage_prehistory, before any protection calls. */
void
scm_init_gc_protect_object ()
{
  scm_protects = scm_c_make_hash_table (31);

#if 0
  /* We can't have a cleanup handler since we have no thread to run it
     in. */

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#endif
}
939794ce 586
0f2d19dd
JB
587\f
588
939794ce
DH
/* Scheme-level after-gc hook, defined as "after-gc-hook" in scm_init_gc. */
SCM scm_after_gc_hook;

/* Async cell queued by queue_after_gc_hook to run after_gc_async_thunk. */
static SCM after_gc_async_cell;
939794ce 592
cc3546b0
AW
/* The function after_gc_async_thunk causes the execution of the
 * after-gc-hook.  It is run after the gc, as soon as the asynchronous
 * events are handled by the evaluator.
 */
static SCM
after_gc_async_thunk (void)
{
  /* Fun, no?  Hook-run *and* run-hook?  Run both the C-level and the
     Scheme-level after-gc hooks. */
  scm_c_hook_run (&scm_after_gc_c_hook, NULL);
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}
605
606
cc3546b0
AW
/* The function queue_after_gc_hook is run by the scm_before_gc_c_hook
 * at the end of the garbage collection.  The only purpose of this
 * function is to mark the after_gc_async (which will eventually lead to
 * the execution of the after_gc_async_thunk).
 */
static void *
queue_after_gc_hook (void * hook_data SCM_UNUSED,
                     void *fn_data SCM_UNUSED,
                     void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector.  When running guile with cell access debugging and the
   * execution of the after-gc-hook is desired, then it is necessary to run
   * the hook explicitly from the user code.  This has the effect, that from
   * the scheme level point of view it seems that garbage collection is
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one to one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (scm_debug_cells_gc_interval == 0)
#endif
    {
      scm_i_thread *t = SCM_I_CURRENT_THREAD;

      /* Only queue the async if it is not already pending (its cdr is
         #f exactly when it is off any async queue). */
      if (scm_is_false (SCM_CDR (after_gc_async_cell)))
        {
          SCM_SETCDR (after_gc_async_cell, t->active_asyncs);
          t->active_asyncs = after_gc_async_cell;
          t->pending_asyncs = 1;
        }
    }

  return NULL;
}
658
00b6ef23
AW
659\f
660
/* before-gc hook: record the start time of the collection, unless a
   previous start was never accumulated (gc_start_time non-zero). */
static void *
start_gc_timer (void * hook_data SCM_UNUSED,
                void *fn_data SCM_UNUSED,
                void *data SCM_UNUSED)
{
  if (!gc_start_time)
    gc_start_time = scm_c_get_internal_run_time ();

  return NULL;
}

/* after-gc hook: fold the elapsed collection time into gc_time_taken
   (reported by gc-stats as gc-time-taken). */
static void *
accumulate_gc_timer (void * hook_data SCM_UNUSED,
                     void *fn_data SCM_UNUSED,
                     void *data SCM_UNUSED)
{
  if (gc_start_time)
    {
      long now = scm_c_get_internal_run_time ();
      gc_time_taken += now - gc_start_time;
      gc_start_time = 0;
    }

  return NULL;
}
686
6360beb2
AW
/* Return some idea of the memory footprint of a process, in bytes.
   Currently only works on Linux systems, by reading /proc/self/statm;
   returns 0 everywhere else or on any read failure. */
static size_t
get_image_size (void)
{
  unsigned long size, resident, share;
  size_t ret = 0;

  FILE *fp = fopen ("/proc/self/statm", "r");

  if (fp && fscanf (fp, "%lu %lu %lu", &size, &resident, &share) == 3)
    {
      /* statm reports counts in pages.  Scale by the actual page size
         rather than assuming 4 KiB, which under-reports on kernels
         configured with larger pages (e.g. 16K/64K ARM, POWER). */
#ifdef _SC_PAGESIZE
      long page_size = sysconf (_SC_PAGESIZE);
      if (page_size <= 0)
        page_size = 4096;  /* conservative fallback */
      ret = resident * (size_t) page_size;
#else
      ret = resident * 4096;
#endif
    }

  if (fp)
    fclose (fp);

  return ret;
}
705
fd51e661 706/* These are discussed later. */
553294d9 707static size_t bytes_until_gc = DEFAULT_INITIAL_HEAP_SIZE;
fd51e661
AW
708static scm_i_pthread_mutex_t bytes_until_gc_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;
709
6360beb2
AW
/* Make GC run more frequently when the process image size is growing,
   measured against the number of bytes allocated through the GC.

   If Guile is allocating at a GC-managed heap size H, libgc will tend
   to limit the process image size to H*N.  But if at the same time the
   user program is mallocating at a rate M bytes per GC-allocated byte,
   then the process stabilizes at H*N*M -- assuming that collecting data
   will result in malloc'd data being freed.  It doesn't take a very
   large M for this to be a bad situation.  To limit the image size,
   Guile should GC more often -- the bigger the M, the more often.

   Numeric functions that produce bigger and bigger integers are
   pessimal, because M is an increasing function of time.  Here is an
   example of such a function:

      (define (factorial n)
        (define (fac n acc)
          (if (<= n 1)
            acc
            (fac (1- n) (* n acc))))
        (fac n 1))

   It is possible for a process to grow for reasons that will not be
   solved by faster GC.  In that case M will be estimated as
   artificially high for a while, and so GC will happen more often on
   the Guile side.  But when it stabilizes, Guile can ease back the GC
   frequency.

   The key is to measure process image growth, not mallocation rate.
   For maximum effectiveness, Guile reacts quickly to process growth,
   and exponentially backs down when the process stops growing.

   See http://thread.gmane.org/gmane.lisp.guile.devel/12552/focus=12936
   for further discussion.
 */
static void *
adjust_gc_frequency (void * hook_data SCM_UNUSED,
                     void *fn_data SCM_UNUSED,
                     void *data SCM_UNUSED)
{
  /* Previous-sample state for estimating the growth rate. */
  static size_t prev_image_size = 0;
  static size_t prev_bytes_alloced = 0;
  size_t image_size;
  size_t bytes_alloced;

  /* Reset the non-GC allocation countdown at each collection. */
  scm_i_pthread_mutex_lock (&bytes_until_gc_lock);
  bytes_until_gc = GC_get_heap_size ();
  scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);

  image_size = get_image_size ();
  bytes_alloced = GC_get_total_bytes ();

#define HEURISTICS_DEBUG 0

#if HEURISTICS_DEBUG
  fprintf (stderr, "prev image / alloced: %lu / %lu\n", prev_image_size, prev_bytes_alloced);
  fprintf (stderr, "  image / alloced: %lu / %lu\n", image_size, bytes_alloced);
  fprintf (stderr, "divisor %lu / %f\n", free_space_divisor, target_free_space_divisor);
#endif

  /* Skip the first sample and any interval without GC allocation. */
  if (prev_image_size && bytes_alloced != prev_bytes_alloced)
    {
      double growth_rate, new_target_free_space_divisor;
      double decay_factor = 0.5;
      double hysteresis = 0.1;

      /* Bytes of image growth per GC-allocated byte since last GC. */
      growth_rate = ((double) image_size - prev_image_size)
        / ((double)bytes_alloced - prev_bytes_alloced);

#if HEURISTICS_DEBUG
      fprintf (stderr, "growth rate %f\n", growth_rate);
#endif

      new_target_free_space_divisor = minimum_free_space_divisor;

      if (growth_rate > 0)
        new_target_free_space_divisor *= 1.0 + growth_rate;

#if HEURISTICS_DEBUG
      fprintf (stderr, "new divisor %f\n", new_target_free_space_divisor);
#endif

      if (new_target_free_space_divisor < target_free_space_divisor)
        /* Decay down. */
        target_free_space_divisor =
          (decay_factor * target_free_space_divisor
           + (1.0 - decay_factor) * new_target_free_space_divisor);
      else
        /* Jump up. */
        target_free_space_divisor = new_target_free_space_divisor;

#if HEURISTICS_DEBUG
      fprintf (stderr, "new target divisor %f\n", target_free_space_divisor);
#endif

      /* Only tell libgc when the rounded divisor actually moves, with
         hysteresis to avoid flapping between adjacent values. */
      if (free_space_divisor + 0.5 + hysteresis < target_free_space_divisor
          || free_space_divisor - 0.5 - hysteresis > target_free_space_divisor)
        {
          free_space_divisor = lround (target_free_space_divisor);
#if HEURISTICS_DEBUG
          fprintf (stderr, "new divisor %lu\n", free_space_divisor);
#endif
          GC_set_free_space_divisor (free_space_divisor);
        }
    }

  prev_image_size = image_size;
  prev_bytes_alloced = bytes_alloced;

  return NULL;
}
821
fd51e661
AW
/* The adjust_gc_frequency routine handles transients in the process
   image size.  It can't handle intense non-GC-managed steady-state
   allocation though, as it decays the FSD at steady-state down to its
   minimum value.

   The only real way to handle continuous, high non-GC allocation is to
   let the GC know about it.  This routine can handle non-GC allocation
   rates that are similar in size to the GC-managed heap size.
 */

void
scm_gc_register_allocation (size_t size)
{
  scm_i_pthread_mutex_lock (&bytes_until_gc_lock);
  /* Unsigned wrap-around test: bytes_until_gc - size underflows (and so
     compares greater) exactly when size > bytes_until_gc. */
  if (bytes_until_gc - size > bytes_until_gc)
    {
      /* Budget exhausted: reset it to the current heap size and force a
         collection (outside the lock). */
      bytes_until_gc = GC_get_heap_size ();
      scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
      GC_gcollect ();
    }
  else
    {
      bytes_until_gc -= size;
      scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
    }
}
848
00b6ef23
AW
849
850\f
851
35164d84 852static char const *
26224b3f
LC
853scm_i_tag_name (scm_t_bits tag)
854{
74ec8d78 855 switch (tag & 0x7f) /* 7 bits */
26224b3f
LC
856 {
857 case scm_tcs_struct:
858 return "struct";
859 case scm_tcs_cons_imcar:
860 return "cons (immediate car)";
861 case scm_tcs_cons_nimcar:
862 return "cons (non-immediate car)";
5b46a8c2 863 case scm_tc7_pointer:
e2c2a699 864 return "foreign";
c99de5aa
AW
865 case scm_tc7_hashtable:
866 return "hashtable";
26b26354
AW
867 case scm_tc7_weak_set:
868 return "weak-set";
7005c60f
AW
869 case scm_tc7_weak_table:
870 return "weak-table";
9ea31741
AW
871 case scm_tc7_fluid:
872 return "fluid";
873 case scm_tc7_dynamic_state:
874 return "dynamic state";
6f3b0cc2
AW
875 case scm_tc7_frame:
876 return "frame";
6f3b0cc2
AW
877 case scm_tc7_vm_cont:
878 return "vm continuation";
26224b3f
LC
879 case scm_tc7_wvect:
880 return "weak vector";
881 case scm_tc7_vector:
882 return "vector";
26224b3f
LC
883 case scm_tc7_number:
884 switch (tag)
885 {
886 case scm_tc16_real:
887 return "real";
888 break;
889 case scm_tc16_big:
890 return "bignum";
891 break;
892 case scm_tc16_complex:
893 return "complex number";
894 break;
895 case scm_tc16_fraction:
896 return "fraction";
897 break;
898 }
899 break;
900 case scm_tc7_string:
901 return "string";
902 break;
903 case scm_tc7_stringbuf:
904 return "string buffer";
905 break;
906 case scm_tc7_symbol:
907 return "symbol";
908 break;
909 case scm_tc7_variable:
910 return "variable";
911 break;
26224b3f
LC
912 case scm_tc7_port:
913 return "port";
914 break;
915 case scm_tc7_smob:
74ec8d78
AW
916 {
917 int k = 0xff & (tag >> 8);
918 return (scm_smobs[k].name);
919 }
26224b3f
LC
920 break;
921 }
922
923 return NULL;
924}
925
926
26224b3f
LC
927
928\f
0f2d19dd
JB
/* Late GC initialization: Scheme-visible hooks, the after-gc async, and
   the C-level hook wiring.  Runs after scm_storage_prehistory and
   scm_init_gc_protect_object. */
void
scm_init_gc ()
{
  /* `GC_INIT ()' was invoked in `scm_storage_prehistory ()'. */

  scm_after_gc_hook = scm_make_hook (SCM_INUM0);
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  /* When the async is to run, the cdr of the gc_async pair gets set to
     the asyncs queue of the current thread.  */
  after_gc_async_cell = scm_cons (scm_c_make_gsubr ("%after-gc-thunk", 0, 0, 0,
                                                    after_gc_async_thunk),
                                  SCM_BOOL_F);

  scm_c_hook_add (&scm_before_gc_c_hook, queue_after_gc_hook, NULL, 0);
  scm_c_hook_add (&scm_before_gc_c_hook, start_gc_timer, NULL, 0);
  scm_c_hook_add (&scm_after_gc_c_hook, accumulate_gc_timer, NULL, 0);

  /* GC_get_heap_usage does not take a lock, and so can run in the GC
     start hook.  */
  scm_c_hook_add (&scm_before_gc_c_hook, adjust_gc_frequency, NULL, 0);

  GC_set_start_callback (run_before_gc_c_hook);

#include "libguile/gc.x"
}
89e00824 955
c8a1bdc4
HWN
956
/* Stub kept for API compatibility: sweeping is handled by libgc, so
   this only emits a diagnostic. */
void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  /* FIXME */
  fprintf (stderr, "%s: doing nothing\n", FUNC_NAME);
}
#undef FUNC_NAME
965
89e00824
ML
966/*
967 Local Variables:
968 c-file-style: "gnu"
969 End:
970*/