/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003, 2006,
 *   2008, 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

/* #define DEBUGINFO */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "libguile/gen-scmconfig.h"

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>

#ifdef __ia64__
#include <ucontext.h>
extern unsigned long * __libc_ia64_register_backing_store_base;
#endif

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/arrays.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/private-gc.h"
#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"
#include "libguile/dynwind.h"

#include "libguile/bdw-gc.h"

/* For GC_set_start_callback.  */
#include <gc/gc_mark.h>

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

/* Size in bytes of the initial heap.  This should be about the size of
   the result of 'guile -c "(display (assq-ref (gc-stats)
   'heap-total-allocated))"'.  */

#define DEFAULT_INITIAL_HEAP_SIZE (128 * 1024 * SIZEOF_SCM_T_BITS)

/* Set this to != 0 if every cell that is accessed shall be checked:
 */
int scm_debug_cell_accesses_p = 0;
int scm_expensive_debug_cell_accesses_p = 0;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
int scm_debug_cells_gc_interval = 0;

/* Hash table that keeps a reference to objects the user wants to protect from
   garbage collection.  */
static SCM scm_protects;


#if (SCM_DEBUG_CELL_ACCESSES == 1)


/*

  Assert that the given object is a valid reference to a valid cell.  This
  test involves determining whether the object is a cell pointer, whether
  this pointer actually points into a heap segment and whether the cell
  pointed to is not a free cell.  Further, additional garbage collections may
  get executed after a user defined number of cell accesses.  This helps to
  find places in the C code where references are dropped for extremely short
  periods.

*/
void
scm_i_expensive_validation_check (SCM cell)
{
  /* If desired, perform additional garbage collections after a user
   * defined number of cell accesses.
   */
  if (scm_debug_cells_gc_interval)
    {
      static unsigned int counter = 0;

      if (counter != 0)
        {
          --counter;
        }
      else
        {
          counter = scm_debug_cells_gc_interval;
          scm_gc ();
        }
    }
}

/* Whether cell validation is already running.  */
static int scm_i_cell_validation_already_running = 0;

void
scm_assert_cell_valid (SCM cell)
{
  if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
    {
      scm_i_cell_validation_already_running = 1;  /* set to avoid recursion */

      /*
        During GC, no user-code should be run, and the guile core
        should use non-protected accessors.
      */
      if (scm_gc_running_p)
        return;

      /*
        Checking with scm_in_heap_p and rescanning the heap is wildly
        expensive, so it is only done on request.
      */
      if (scm_expensive_debug_cell_accesses_p)
        scm_i_expensive_validation_check (cell);

      scm_i_cell_validation_already_running = 0;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, strict cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (scm_is_false (flag))
    {
      scm_debug_cell_accesses_p = 0;
    }
  else if (scm_is_eq (flag, SCM_BOOL_T))
    {
      scm_debug_cells_gc_interval = 0;
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 0;
    }
  else
    {
      scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 1;
    }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */


/* Hooks. */
scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
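
/* Illustrative sketch (not part of the original file): code that can see
   these hooks may attach a callback with scm_c_hook_add, just as
   scm_init_gc does further down in this file.  The callback name and body
   below are hypothetical.  */
#if 0
static void *
my_after_gc_callback (void *hook_data, void *fn_data, void *data)
{
  /* Runs at the end of each collection; keep it cheap and GC-safe.  */
  return NULL;
}

/* ... during initialization: */
scm_c_hook_add (&scm_after_gc_c_hook, my_after_gc_callback, NULL, 0);
#endif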

static void
run_before_gc_c_hook (void)
{
  if (!SCM_I_CURRENT_THREAD)
    /* GC while a thread is spinning up; punt.  */
    return;

  scm_c_hook_run (&scm_before_gc_c_hook, NULL);
}


/* GC Statistics Keeping
 */
unsigned long scm_gc_ports_collected = 0;
static long gc_time_taken = 0;
static long gc_start_time = 0;

static unsigned long free_space_divisor;
static unsigned long minimum_free_space_divisor;
static double target_free_space_divisor;

static unsigned long protected_obj_count = 0;


SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_heap_size, "heap-size");
SCM_SYMBOL (sym_heap_free_size, "heap-free-size");
SCM_SYMBOL (sym_heap_total_allocated, "heap-total-allocated");
SCM_SYMBOL (sym_heap_allocated_since_gc, "heap-allocated-since-gc");
SCM_SYMBOL (sym_protected_objects, "protected-objects");
SCM_SYMBOL (sym_times, "gc-times");


/* {Scheme Interface to GC}
 */
static char const * scm_i_tag_name (scm_t_bits tag);
static SCM
tag_table_to_type_alist (void *closure, SCM key, SCM val, SCM acc)
{
  if (scm_is_integer (key))
    {
      int c_tag = scm_to_int (key);

      char const * name = scm_i_tag_name (c_tag);
      if (name != NULL)
        {
          key = scm_from_locale_string (name);
        }
      else
        {
          char s[100];
          sprintf (s, "tag %d", c_tag);
          key = scm_from_locale_string (s);
        }
    }

  return scm_cons (scm_cons (key, val), acc);
}

SCM_DEFINE (scm_gc_live_object_stats, "gc-live-object-stats", 0, 0, 0,
            (),
            "Return an alist of statistics of the current live objects. ")
#define FUNC_NAME s_scm_gc_live_object_stats
{
  SCM tab = scm_make_hash_table (scm_from_int (57));
  SCM alist;

  alist
    = scm_internal_hash_fold (&tag_table_to_type_alist, NULL, SCM_EOL, tab);

  return alist;
}
#undef FUNC_NAME

extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  SCM answer;
  GC_word heap_size, free_bytes, unmapped_bytes, bytes_since_gc, total_bytes;
  size_t gc_times;

  GC_get_heap_usage_safe (&heap_size, &free_bytes, &unmapped_bytes,
                          &bytes_since_gc, &total_bytes);
  gc_times = GC_get_gc_no ();

  answer =
    scm_list_n (scm_cons (sym_gc_time_taken, scm_from_long (gc_time_taken)),
                scm_cons (sym_heap_size, scm_from_size_t (heap_size)),
                scm_cons (sym_heap_free_size, scm_from_size_t (free_bytes)),
                scm_cons (sym_heap_total_allocated,
                          scm_from_size_t (total_bytes)),
                scm_cons (sym_heap_allocated_since_gc,
                          scm_from_size_t (bytes_since_gc)),
                scm_cons (sym_protected_objects,
                          scm_from_ulong (protected_obj_count)),
                scm_cons (sym_times, scm_from_size_t (gc_times)),
                SCM_UNDEFINED);

  return answer;
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc_dump, "gc-dump", 0, 0, 0,
            (void),
            "Dump information about the garbage collector's internal data "
            "structures and memory usage to the standard output.")
#define FUNC_NAME s_scm_gc_dump
{
  GC_dump ();

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_from_ulong (SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc_disable, "gc-disable", 0, 0, 0,
            (),
            "Disables the garbage collector. Nested calls are permitted. "
            "GC is re-enabled once @code{gc-enable} has been called the "
            "same number of times @code{gc-disable} was called.")
#define FUNC_NAME s_scm_gc_disable
{
  GC_disable ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

SCM_DEFINE (scm_gc_enable, "gc-enable", 0, 0, 0,
            (),
            "Enables the garbage collector.")
#define FUNC_NAME s_scm_gc_enable
{
  GC_enable ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  scm_i_gc ("call");
  /* If you're calling scm_gc(), you probably want synchronous
     finalization.  */
  GC_invoke_finalizers ();
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

void
scm_i_gc (const char *what)
{
  GC_gcollect ();
}


/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _behind_ the last code in your function that
 * depends on the scheme object's existence.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function' in the code
 * below, because otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory.  To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
 * call to 'some_function'.  Note that this would not be necessary if str was
 * used anyway after the call to 'some_function'.
 *   char *chars = scm_i_string_chars (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

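/* Illustrative sketch (not part of the original file): the example from the
   comment above, fleshed out as a complete function.  'some_function' and
   'my_use_of_string' are hypothetical names.  */
#if 0
static void
my_use_of_string (SCM str)
{
  const char *chars = scm_i_string_chars (str);
  some_function (chars);            /* may allocate and trigger GC */
  scm_remember_upto_here_1 (str);   /* keeps STR (and thus CHARS) alive */
}
#endif
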
/* Remove any macro versions of these while defining the functions.
   Functions are always included in the library, for upward binary
   compatibility and in case combinations of GCC and non-GCC are used.  */
#undef scm_remember_upto_here_1
#undef scm_remember_upto_here_2

void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty.  Protects a single object from garbage collection.  */
}

void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty.  Protects two objects from garbage collection.  */
}

void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty.  Protects any number of objects from garbage collection.  */
}

/*
  These crazy functions prevent garbage collection
  of arguments after the first argument by
  ensuring they remain live throughout the
  function because they are used in the last
  line of the code block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}


SCM
scm_permanent_object (SCM obj)
{
  return (scm_gc_protect_object (obj));
}


/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_gc_unprotect_object (OBJ).  Calls to scm_gc_protect/unprotect_object nest,
   i.e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more often
   than it has been protected before.  The function scm_gc_protect_object
   returns OBJ.
*/

/* Implementation note: For every object X, there is a counter which
   scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X)
   decrements.
*/


SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex.  */
  /* njrev: Indeed; if my comment above is correct, there is the same
     critsec/mutex inconsistency here.  */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  protected_obj_count ++;

  SCM_CRITICAL_SECTION_END;

  return obj;
}


/* Remove any protection for OBJ established by a prior call to
   scm_gc_protect_object.  This function returns OBJ.

   See scm_gc_protect_object for more information.  */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex.  */
  /* njrev: and again.  */
  SCM_CRITICAL_SECTION_START;

  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, count);
    }
  protected_obj_count --;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
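
/* Illustrative sketch (not part of the original file): typical use of the
   protect/unprotect pair from C code that stores an SCM in memory the
   collector cannot see.  The struct and function names are hypothetical,
   and CACHE->thunk is assumed to start out as SCM_BOOL_F.  */
#if 0
struct my_c_side_cache
{
  SCM thunk;   /* lives in malloc'd memory, invisible to the collector */
};

static void
my_cache_set (struct my_c_side_cache *cache, SCM thunk)
{
  if (!scm_is_false (cache->thunk))
    scm_gc_unprotect_object (cache->thunk);       /* balance the earlier protect */
  cache->thunk = scm_gc_protect_object (thunk);   /* returns THUNK */
}
#endif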

void
scm_gc_register_root (SCM *p)
{
  /* Nothing.  */
}

void
scm_gc_unregister_root (SCM *p)
{
  /* Nothing.  */
}

void
scm_gc_register_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_register_root (p);
}

void
scm_gc_unregister_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_unregister_root (p);
}



/*
  MOVE THIS FUNCTION.  IT DOES NOT HAVE ANYTHING TO DO WITH GC.
 */

/* Get an integer from an environment variable.  */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res = def;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}

void
scm_storage_prehistory ()
{
  GC_set_all_interior_pointers (0);

  free_space_divisor = scm_getenv_int ("GC_FREE_SPACE_DIVISOR", 3);
  minimum_free_space_divisor = free_space_divisor;
  target_free_space_divisor = free_space_divisor;
  GC_set_free_space_divisor (free_space_divisor);
  GC_set_finalize_on_demand (1);

  GC_INIT ();

  GC_expand_hp (DEFAULT_INITIAL_HEAP_SIZE);

  /* We only need to register a displacement for those types for which the
     higher bits of the type tag are used to store a pointer (that is, a
     pointer to an 8-octet aligned region).  For `scm_tc3_struct', this is
     handled in `scm_alloc_struct ()'.  */
  GC_REGISTER_DISPLACEMENT (scm_tc3_cons);
  /* GC_REGISTER_DISPLACEMENT (scm_tc3_unused); */

  /* Sanity check.  */
  if (!GC_is_visible (&scm_protects))
    abort ();

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
}

scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

void
scm_init_gc_protect_object ()
{
  scm_protects = scm_c_make_hash_table (31);

#if 0
  /* We can't have a cleanup handler since we have no thread to run it
     in.  */

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#endif
}


SCM scm_after_gc_hook;

static SCM after_gc_async_cell;

/* The function after_gc_async_thunk causes the execution of the
 * after-gc-hook.  It is run after the gc, as soon as the asynchronous
 * events are handled by the evaluator.
 */
static SCM
after_gc_async_thunk (void)
{
  /* Fun, no?  Hook-run *and* run-hook?  */
  scm_c_hook_run (&scm_after_gc_c_hook, NULL);
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}

/* The function queue_after_gc_hook is run by the scm_before_gc_c_hook
 * at the end of the garbage collection.  The only purpose of this
 * function is to mark the after_gc_async (which will eventually lead to
 * the execution of the after_gc_async_thunk).
 */
static void *
queue_after_gc_hook (void * hook_data SCM_UNUSED,
                     void *fn_data SCM_UNUSED,
                     void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector.  When running guile with cell access debugging and
   * the execution of the after-gc-hook is desired, it is necessary to run
   * the hook explicitly from the user code.  This has the effect that, from
   * the scheme level point of view, garbage collection seems to be performed
   * with a much lower frequency than it actually is.  Obviously, this will
   * not work for code that depends on a fixed one to one relationship
   * between the execution counts of the C level garbage collection hooks and
   * the execution count of the scheme level after-gc-hook.
   */

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (scm_debug_cells_gc_interval == 0)
#endif
    {
      scm_i_thread *t = SCM_I_CURRENT_THREAD;

      if (scm_is_false (SCM_CDR (after_gc_async_cell)))
        {
          SCM_SETCDR (after_gc_async_cell, t->active_asyncs);
          t->active_asyncs = after_gc_async_cell;
          t->pending_asyncs = 1;
        }
    }

  return NULL;
}


static void *
start_gc_timer (void * hook_data SCM_UNUSED,
                void *fn_data SCM_UNUSED,
                void *data SCM_UNUSED)
{
  if (!gc_start_time)
    gc_start_time = scm_c_get_internal_run_time ();

  return NULL;
}

static void *
accumulate_gc_timer (void * hook_data SCM_UNUSED,
                     void *fn_data SCM_UNUSED,
                     void *data SCM_UNUSED)
{
  if (gc_start_time)
    {
      long now = scm_c_get_internal_run_time ();
      gc_time_taken += now - gc_start_time;
      gc_start_time = 0;
    }

  return NULL;
}

/* Return some idea of the memory footprint of a process, in bytes.
   Currently only works on Linux systems.  */
static size_t
get_image_size (void)
{
  unsigned long size, resident, share;
  size_t ret = 0;

  FILE *fp = fopen ("/proc/self/statm", "r");

  if (fp && fscanf (fp, "%lu %lu %lu", &size, &resident, &share) == 3)
    /* statm reports sizes in pages; this assumes 4 KiB pages.  */
    ret = resident * 4096;

  if (fp)
    fclose (fp);

  return ret;
}

/* These are discussed later.  */
static size_t bytes_until_gc;
static scm_i_pthread_mutex_t bytes_until_gc_lock = SCM_I_PTHREAD_MUTEX_INITIALIZER;

/* Make GC run more frequently when the process image size is growing,
   measured against the number of bytes allocated through the GC.

   If Guile is allocating at a GC-managed heap size H, libgc will tend
   to limit the process image size to H*N.  But if at the same time the
   user program is mallocating at a rate M bytes per GC-allocated byte,
   then the process stabilizes at H*N*M -- assuming that collecting data
   will result in malloc'd data being freed.  It doesn't take a very
   large M for this to be a bad situation.  To limit the image size,
   Guile should GC more often -- the bigger the M, the more often.

   Numeric functions that produce bigger and bigger integers are
   pessimal, because M is an increasing function of time.  Here is an
   example of such a function:

   (define (factorial n)
     (define (fac n acc)
       (if (<= n 1)
           acc
           (fac (1- n) (* n acc))))
     (fac n 1))

   It is possible for a process to grow for reasons that will not be
   solved by faster GC.  In that case M will be estimated as
   artificially high for a while, and so GC will happen more often on
   the Guile side.  But when it stabilizes, Guile can ease back the GC
   frequency.

   The key is to measure process image growth, not mallocation rate.
   For maximum effectiveness, Guile reacts quickly to process growth,
   and exponentially backs down when the process stops growing.

   See http://thread.gmane.org/gmane.lisp.guile.devel/12552/focus=12936
   for further discussion.
 */
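
/* Illustrative numbers (not from the original comment): with a GC-managed
   heap of H = 16 MiB, libgc's N around 3, and user code mallocating
   M = 4 bytes per GC-allocated byte, the model above predicts the image
   stabilizing near 16 MiB * 3 * 4 = 192 MiB unless the collector is made
   to run more often.  */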
static void *
adjust_gc_frequency (void * hook_data SCM_UNUSED,
                     void *fn_data SCM_UNUSED,
                     void *data SCM_UNUSED)
{
  static size_t prev_image_size = 0;
  static size_t prev_bytes_alloced = 0;
  size_t image_size;
  size_t bytes_alloced;

  scm_i_pthread_mutex_lock (&bytes_until_gc_lock);
  bytes_until_gc = GC_get_heap_size ();
  scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);

  image_size = get_image_size ();
  bytes_alloced = GC_get_total_bytes ();

#define HEURISTICS_DEBUG 0

#if HEURISTICS_DEBUG
  fprintf (stderr, "prev image / alloced: %lu / %lu\n", prev_image_size, prev_bytes_alloced);
  fprintf (stderr, "     image / alloced: %lu / %lu\n", image_size, bytes_alloced);
  fprintf (stderr, "divisor %lu / %f\n", free_space_divisor, target_free_space_divisor);
#endif

  if (prev_image_size && bytes_alloced != prev_bytes_alloced)
    {
      double growth_rate, new_target_free_space_divisor;
      double decay_factor = 0.5;
      double hysteresis = 0.1;

      growth_rate = ((double) image_size - prev_image_size)
        / ((double)bytes_alloced - prev_bytes_alloced);

#if HEURISTICS_DEBUG
      fprintf (stderr, "growth rate %f\n", growth_rate);
#endif

      new_target_free_space_divisor = minimum_free_space_divisor;

      if (growth_rate > 0)
        new_target_free_space_divisor *= 1.0 + growth_rate;

#if HEURISTICS_DEBUG
      fprintf (stderr, "new divisor %f\n", new_target_free_space_divisor);
#endif

      if (new_target_free_space_divisor < target_free_space_divisor)
        /* Decay down.  */
        target_free_space_divisor =
          (decay_factor * target_free_space_divisor
           + (1.0 - decay_factor) * new_target_free_space_divisor);
      else
        /* Jump up.  */
        target_free_space_divisor = new_target_free_space_divisor;

#if HEURISTICS_DEBUG
      fprintf (stderr, "new target divisor %f\n", target_free_space_divisor);
#endif

      if (free_space_divisor + 0.5 + hysteresis < target_free_space_divisor
          || free_space_divisor - 0.5 - hysteresis > target_free_space_divisor)
        {
          free_space_divisor = lround (target_free_space_divisor);
#if HEURISTICS_DEBUG
          fprintf (stderr, "new divisor %lu\n", free_space_divisor);
#endif
          GC_set_free_space_divisor (free_space_divisor);
        }
    }

  prev_image_size = image_size;
  prev_bytes_alloced = bytes_alloced;

  return NULL;
}

/* The adjust_gc_frequency routine handles transients in the process
   image size.  It can't handle intense non-GC-managed steady-state
   allocation though, as it decays the FSD at steady-state down to its
   minimum value.

   The only real way to handle continuous, high non-GC allocation is to
   let the GC know about it.  This routine can handle non-GC allocation
   rates that are similar in size to the GC-managed heap size.
 */

void
scm_gc_register_allocation (size_t size)
{
  scm_i_pthread_mutex_lock (&bytes_until_gc_lock);
  /* BYTES_UNTIL_GC is unsigned, so this tests for wrap-around, i.e. for
     SIZE exceeding the remaining allowance.  */
  if (bytes_until_gc - size > bytes_until_gc)
    {
      bytes_until_gc = GC_get_heap_size ();
      scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
      GC_gcollect ();
    }
  else
    {
      bytes_until_gc -= size;
      scm_i_pthread_mutex_unlock (&bytes_until_gc_lock);
    }
}
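
/* Illustrative sketch (not part of the original file): how a C extension
   that does heavy malloc-side allocation might report it to the collector.
   The wrapper name is hypothetical; scm_gc_register_allocation is the
   entry point defined above.  */
#if 0
static void *
my_counted_malloc (size_t len)
{
  void *p = malloc (len);
  if (p != NULL)
    scm_gc_register_allocation (len);   /* let the GC account for LEN bytes */
  return p;
}
#endif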


static char const *
scm_i_tag_name (scm_t_bits tag)
{
  switch (tag & 0x7f) /* 7 bits */
    {
    case scm_tcs_struct:
      return "struct";
    case scm_tcs_cons_imcar:
      return "cons (immediate car)";
    case scm_tcs_cons_nimcar:
      return "cons (non-immediate car)";
    case scm_tc7_pointer:
      return "foreign";
    case scm_tc7_hashtable:
      return "hashtable";
    case scm_tc7_weak_set:
      return "weak-set";
    case scm_tc7_weak_table:
      return "weak-table";
    case scm_tc7_fluid:
      return "fluid";
    case scm_tc7_dynamic_state:
      return "dynamic state";
    case scm_tc7_frame:
      return "frame";
    case scm_tc7_vm_cont:
      return "vm continuation";
    case scm_tc7_wvect:
      return "weak vector";
    case scm_tc7_vector:
      return "vector";
    case scm_tc7_number:
      switch (tag)
        {
        case scm_tc16_real:
          return "real";
          break;
        case scm_tc16_big:
          return "bignum";
          break;
        case scm_tc16_complex:
          return "complex number";
          break;
        case scm_tc16_fraction:
          return "fraction";
          break;
        }
      break;
    case scm_tc7_string:
      return "string";
      break;
    case scm_tc7_stringbuf:
      return "string buffer";
      break;
    case scm_tc7_symbol:
      return "symbol";
      break;
    case scm_tc7_variable:
      return "variable";
      break;
    case scm_tc7_port:
      return "port";
      break;
    case scm_tc7_smob:
      {
        int k = 0xff & (tag >> 8);
        return (scm_smobs[k].name);
      }
      break;
    }

  return NULL;
}



void
scm_init_gc ()
{
  /* `GC_INIT ()' was invoked in `scm_storage_prehistory ()'.  */

  scm_after_gc_hook = scm_make_hook (SCM_INUM0);
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  /* When the async is to run, the cdr of the gc_async pair gets set to
     the asyncs queue of the current thread.  */
  after_gc_async_cell = scm_cons (scm_c_make_gsubr ("%after-gc-thunk", 0, 0, 0,
                                                    after_gc_async_thunk),
                                  SCM_BOOL_F);

  scm_c_hook_add (&scm_before_gc_c_hook, queue_after_gc_hook, NULL, 0);
  scm_c_hook_add (&scm_before_gc_c_hook, start_gc_timer, NULL, 0);
  scm_c_hook_add (&scm_after_gc_c_hook, accumulate_gc_timer, NULL, 0);

  /* GC_get_heap_usage does not take a lock, and so can run in the GC
     start hook.  */
  scm_c_hook_add (&scm_before_gc_c_hook, adjust_gc_frequency, NULL, 0);

  GC_set_start_callback (run_before_gc_c_hook);

#include "libguile/gc.x"
}


void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  /* FIXME */
  fprintf (stderr, "%s: doing nothing\n", FUNC_NAME);
}
#undef FUNC_NAME

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/