(Object Properties): Removed confusing
[bpt/guile.git] / libguile / gc.c
CommitLineData
c35738c1 1/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003 Free Software Foundation, Inc.
a00c95d9 2 *
73be1d9e
MV
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
a00c95d9 7 *
73be1d9e 8 * This library is distributed in the hope that it will be useful,
0f2d19dd 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
73be1d9e
MV
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
a00c95d9 12 *
73be1d9e
MV
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
92205699 15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
73be1d9e 16 */
1bbd0b84 17
9de87eea 18#define _GNU_SOURCE
1bbd0b84 19
37ddcaf6
MD
20/* #define DEBUGINFO */
21
aa54a9b0
RB
22#if HAVE_CONFIG_H
23# include <config.h>
24#endif
56495472 25
0f2d19dd 26#include <stdio.h>
e6e2e95a 27#include <errno.h>
783e7774 28#include <string.h>
c8a1bdc4 29#include <assert.h>
e6e2e95a 30
d9189652
RB
31#ifdef __ia64__
32#include <ucontext.h>
bb1180ef 33extern unsigned long * __libc_ia64_register_backing_store_base;
d9189652
RB
34#endif
35
a0599745 36#include "libguile/_scm.h"
0a7a7445 37#include "libguile/eval.h"
a0599745
MD
38#include "libguile/stime.h"
39#include "libguile/stackchk.h"
40#include "libguile/struct.h"
a0599745
MD
41#include "libguile/smob.h"
42#include "libguile/unif.h"
43#include "libguile/async.h"
44#include "libguile/ports.h"
45#include "libguile/root.h"
46#include "libguile/strings.h"
47#include "libguile/vectors.h"
801cb5e7 48#include "libguile/weaks.h"
686765af 49#include "libguile/hashtab.h"
ecf470a2 50#include "libguile/tags.h"
a0599745 51
c8a1bdc4 52#include "libguile/private-gc.h"
a0599745 53#include "libguile/validate.h"
1be6b49c 54#include "libguile/deprecation.h"
a0599745 55#include "libguile/gc.h"
9de87eea 56#include "libguile/dynwind.h"
fce59c93 57
bc9d9bb2 58#ifdef GUILE_DEBUG_MALLOC
a0599745 59#include "libguile/debug-malloc.h"
bc9d9bb2
MD
60#endif
61
0f2d19dd 62#ifdef HAVE_MALLOC_H
95b88819 63#include <malloc.h>
0f2d19dd
JB
64#endif
65
66#ifdef HAVE_UNISTD_H
95b88819 67#include <unistd.h>
0f2d19dd
JB
68#endif
69
fb50ef08
MD
70/* Lock this mutex before doing lazy sweeping.
71 */
b17e0ac3 72scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
fb50ef08 73
/* Set this to != 0 if every cell that is accessed shall be checked:
 */
int scm_debug_cell_accesses_p = 0;
int scm_expensive_debug_cell_accesses_p = 0;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
int scm_debug_cells_gc_interval = 0;

/*
  Global variable, so you can switch it off at runtime by setting
  scm_i_cell_validation_already_running.

  Acts as a recursion guard for scm_assert_cell_valid: while it is
  non-zero, cell validation is skipped entirely.
 */
int scm_i_cell_validation_already_running ;
89
90#if (SCM_DEBUG_CELL_ACCESSES == 1)
91
92
93/*
94
95 Assert that the given object is a valid reference to a valid cell. This
96 test involves to determine whether the object is a cell pointer, whether
97 this pointer actually points into a heap segment and whether the cell
98 pointed to is not a free cell. Further, additional garbage collections may
99 get executed after a user defined number of cell accesses. This helps to
100 find places in the C code where references are dropped for extremely short
101 periods.
102
103*/
/* Assert that CELL is a valid reference to a cell that lives in the
   heap.  Aborts the process when CELL does not point into the heap.
   Additionally, when scm_debug_cells_gc_interval is non-zero, a full
   GC is triggered after that many invocations, which helps to find
   places in the C code where references are dropped for very short
   periods.  */
void
scm_i_expensive_validation_check (SCM cell)
{
  if (!scm_in_heap_p (cell))
    {
      /* NOTE(review): "%lux" prints the address in decimal followed by a
         literal 'x'; presumably "%lx" (hex) was intended — confirm before
         changing, the message text may be matched by tools.  */
      fprintf (stderr, "scm_assert_cell_valid: this object does not live in the heap: %lux\n",
               (unsigned long) SCM_UNPACK (cell));
      abort ();
    }

  /* If desired, perform additional garbage collections after a user
   * defined number of cell accesses.
   */
  if (scm_debug_cells_gc_interval)
    {
      /* Counts down accesses; a GC fires each time it reaches zero.  */
      static unsigned int counter = 0;

      if (counter != 0)
        {
          --counter;
        }
      else
        {
          counter = scm_debug_cells_gc_interval;
          scm_gc ();
        }
    }
}
132
133void
134scm_assert_cell_valid (SCM cell)
135{
136 if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
406c7d90 137 {
eab1b259 138 scm_i_cell_validation_already_running = 1; /* set to avoid recursion */
406c7d90 139
c8a1bdc4 140 /*
eab1b259
HWN
141 During GC, no user-code should be run, and the guile core
142 should use non-protected accessors.
143 */
c8a1bdc4 144 if (scm_gc_running_p)
eab1b259 145 return;
c8a1bdc4
HWN
146
147 /*
eab1b259
HWN
148 Only scm_in_heap_p and rescanning the heap is wildly
149 expensive.
150 */
151 if (scm_expensive_debug_cell_accesses_p)
152 scm_i_expensive_validation_check (cell);
c8a1bdc4
HWN
153
154 if (!SCM_GC_MARK_P (cell))
406c7d90 155 {
c8a1bdc4
HWN
156 fprintf (stderr,
157 "scm_assert_cell_valid: this object is unmarked. \n"
158 "It has been garbage-collected in the last GC run: "
159 "%lux\n",
1be6b49c 160 (unsigned long) SCM_UNPACK (cell));
406c7d90
DH
161 abort ();
162 }
c8a1bdc4 163
eab1b259 164 scm_i_cell_validation_already_running = 0; /* re-enable */
406c7d90
DH
165 }
166}
167
168
eab1b259 169
406c7d90
DH
170SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
171 (SCM flag),
1e6808ea 172 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
eab1b259 173 "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
e81d98ec 174 "but no additional calls to garbage collection are issued.\n"
eab1b259 175 "If @var{flag} is a number, strict cell access checking is enabled,\n"
e81d98ec
DH
176 "with an additional garbage collection after the given\n"
177 "number of cell accesses.\n"
1e6808ea
MG
178 "This procedure only exists when the compile-time flag\n"
179 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
406c7d90
DH
180#define FUNC_NAME s_scm_set_debug_cell_accesses_x
181{
7888309b 182 if (scm_is_false (flag))
eab1b259
HWN
183 {
184 scm_debug_cell_accesses_p = 0;
185 }
bc36d050 186 else if (scm_is_eq (flag, SCM_BOOL_T))
eab1b259
HWN
187 {
188 scm_debug_cells_gc_interval = 0;
189 scm_debug_cell_accesses_p = 1;
190 scm_expensive_debug_cell_accesses_p = 0;
191 }
e11e83f3 192 else
eab1b259 193 {
e11e83f3 194 scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
eab1b259
HWN
195 scm_debug_cell_accesses_p = 1;
196 scm_expensive_debug_cell_accesses_p = 1;
197 }
406c7d90
DH
198 return SCM_UNSPECIFIED;
199}
200#undef FUNC_NAME
0f2d19dd 201
ecf470a2 202
c8a1bdc4 203#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
0f2d19dd
JB
204
205\f
945fec60 206
0f2d19dd
JB
207
/* scm_mtrigger
 * is the number of bytes of malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;          /* bytes currently known to be malloc'd */
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_cells_collected_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;       /* cumulative, in internal run-time units */
static unsigned long t_before_gc;          /* timestamp set by gc_start_stats */
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_times = 0;            /* number of completed GC runs */
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;       /* doubles: these accumulate across runs */
double scm_gc_cells_swept_acc = 0.;        /* and may exceed unsigned long range */
int scm_gc_cell_yield_percentage =0;
int scm_gc_malloc_yield_percentage = 0;
unsigned long protected_obj_count = 0;     /* entries tracked via scm_gc_protect_object */


/* Symbols used as keys in the alist returned by gc-stats.  */
SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
SCM_SYMBOL (sym_cell_yield, "cell-yield");
SCM_SYMBOL (sym_protected_objects, "protected-objects");




/* Number of calls to SCM_NEWCELL since startup.  */
unsigned scm_newcell_count;
unsigned scm_newcell2_count;
b37fe1c5 253
b37fe1c5 254
0f2d19dd
JB
255/* {Scheme Interface to GC}
256 */
1367aa5e
HWN
257static SCM
258tag_table_to_type_alist (void *closure, SCM key, SCM val, SCM acc)
259{
8fecbb19 260 if (scm_is_integer (key))
8a00ba71 261 {
3e2073bd 262 int c_tag = scm_to_int (key);
8fecbb19
HWN
263
264 char const * name = scm_i_tag_name (c_tag);
265 if (name != NULL)
266 {
267 key = scm_from_locale_string (name);
268 }
269 else
270 {
271 char s[100];
272 sprintf (s, "tag %d", c_tag);
273 key = scm_from_locale_string (s);
274 }
8a00ba71 275 }
8fecbb19 276
1367aa5e
HWN
277 return scm_cons (scm_cons (key, val), acc);
278}
279
280SCM_DEFINE (scm_gc_live_object_stats, "gc-live-object-stats", 0, 0, 0,
281 (),
282 "Return an alist of statistics of the current live objects. ")
283#define FUNC_NAME s_scm_gc_live_object_stats
284{
285 SCM tab = scm_make_hash_table (scm_from_int (57));
b01532af
NJ
286 SCM alist;
287
1367aa5e
HWN
288 scm_i_all_segments_statistics (tab);
289
b01532af 290 alist
1367aa5e
HWN
291 = scm_internal_hash_fold (&tag_table_to_type_alist, NULL, SCM_EOL, tab);
292
293 return alist;
294}
295#undef FUNC_NAME
296
extern int scm_gc_malloc_yield_percentage;
/* Snapshot all GC counters inside a critical section, then cons the
   result list.  The snapshot-first order matters: consing can itself
   allocate and perturb the very counters being reported.  */
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
	    (),
	    "Return an association list of statistics about Guile's current\n"
	    "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  long i = 0;
  SCM heap_segs = SCM_EOL ;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  int local_scm_gc_cell_yield_percentage;
  int local_scm_gc_malloc_yield_percentage;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_protected_obj_count;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;
  unsigned long *bounds = 0;
  int table_size = scm_i_heap_segment_table_size;
  SCM_CRITICAL_SECTION_START;

  /*
    temporarily store the numbers, so as not to cause GC.
   */
  bounds = malloc (sizeof (unsigned long) * table_size * 2);
  if (!bounds)
    abort();
  /* Copy each segment's [low, high) address pair into the flat array.  */
  for (i = table_size; i--; )
    {
      bounds[2*i] = (unsigned long)scm_i_heap_segment_table[i]->bounds[0];
      bounds[2*i+1] = (unsigned long)scm_i_heap_segment_table[i]->bounds[1];
    }


  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;

  local_scm_cells_allocated = scm_cells_allocated;

  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
  local_scm_gc_cell_yield_percentage= scm_gc_cell_yield_percentage;
  local_protected_obj_count = protected_obj_count;
  local_scm_gc_cells_swept =
    (double) scm_gc_cells_swept_acc
    + (double) scm_gc_cells_swept;
  /* marked = swept - collected, accumulated across runs.  */
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc
    +(double) scm_gc_cells_swept
    -(double) scm_gc_cells_collected;

  for (i = table_size; i--;)
    {
      heap_segs = scm_cons (scm_cons (scm_from_ulong (bounds[2*i]),
				      scm_from_ulong (bounds[2*i+1])),
			    heap_segs);
    }
  /* njrev: can any of these scm_cons's or scm_list_n signal a memory
     error?  If so we need a frame here. */
  answer =
    scm_list_n (scm_cons (sym_gc_time_taken,
			  scm_from_ulong (local_scm_gc_time_taken)),
		scm_cons (sym_cells_allocated,
			  scm_from_ulong (local_scm_cells_allocated)),
		scm_cons (sym_heap_size,
			  scm_from_ulong (local_scm_heap_size)),
		scm_cons (sym_mallocated,
			  scm_from_ulong (local_scm_mallocated)),
		scm_cons (sym_mtrigger,
			  scm_from_ulong (local_scm_mtrigger)),
		scm_cons (sym_times,
			  scm_from_ulong (local_scm_gc_times)),
		scm_cons (sym_gc_mark_time_taken,
			  scm_from_ulong (local_scm_gc_mark_time_taken)),
		scm_cons (sym_cells_marked,
			  scm_from_double (local_scm_gc_cells_marked)),
		scm_cons (sym_cells_swept,
			  scm_from_double (local_scm_gc_cells_swept)),
		scm_cons (sym_malloc_yield,
			  scm_from_long(local_scm_gc_malloc_yield_percentage)),
		scm_cons (sym_cell_yield,
			  scm_from_long (local_scm_gc_cell_yield_percentage)),
		scm_cons (sym_protected_objects,
			  scm_from_ulong (local_protected_obj_count)),
		scm_cons (sym_heap_segments, heap_segs),
		SCM_UNDEFINED);
  SCM_CRITICAL_SECTION_END;

  free (bounds);
  return answer;
}
#undef FUNC_NAME
0f2d19dd 400
c8a1bdc4
HWN
401static void
402gc_start_stats (const char *what SCM_UNUSED)
e4a7824f 403{
c8a1bdc4 404 t_before_gc = scm_c_get_internal_run_time ();
539b08a4 405
c8a1bdc4
HWN
406 scm_gc_cells_marked_acc += (double) scm_gc_cells_swept
407 - (double) scm_gc_cells_collected;
c2cbcc57 408 scm_gc_cells_swept_acc += (double) scm_gc_cells_swept;
e4a7824f 409
c2cbcc57
HWN
410 scm_gc_cell_yield_percentage = ( scm_gc_cells_collected * 100 ) / SCM_HEAP_SIZE;
411
c8a1bdc4
HWN
412 scm_gc_cells_swept = 0;
413 scm_gc_cells_collected_1 = scm_gc_cells_collected;
539b08a4 414
c8a1bdc4
HWN
415 /*
416 CELLS SWEPT is another word for the number of cells that were
417 examined during GC. YIELD is the number that we cleaned
418 out. MARKED is the number that weren't cleaned.
419 */
420 scm_gc_cells_collected = 0;
421 scm_gc_malloc_collected = 0;
422 scm_gc_ports_collected = 0;
e4a7824f 423}
acf4331f 424
c8a1bdc4
HWN
425static void
426gc_end_stats ()
0f2d19dd 427{
c8a1bdc4
HWN
428 unsigned long t = scm_c_get_internal_run_time ();
429 scm_gc_time_taken += (t - t_before_gc);
539b08a4 430
c8a1bdc4 431 ++scm_gc_times;
0f2d19dd 432}
acf4331f 433
0f2d19dd 434
c8a1bdc4
HWN
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
	    (SCM obj),
	    "Return an integer that for the lifetime of @var{obj} is uniquely\n"
	    "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  /* The raw SCM bits double as the object's address/identity.  */
  return scm_from_ulong (SCM_UNPACK (obj));
}
#undef FUNC_NAME
c68296f8 444
1be6b49c 445
c8a1bdc4
HWN
/* Explicitly trigger a full garbage collection.  Takes the sweep mutex
   and sets scm_gc_running_p around the collection; the after-gc C hook
   runs only after the lock is released.  */
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
	    (),
	    "Scans all of SCM objects and reclaims for further use those that are\n"
	    "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
  scm_gc_running_p = 1;
  scm_i_gc ("call");
  /* njrev: It looks as though other places, e.g. scm_realloc,
     can call scm_i_gc without acquiring the sweep mutex.  Does this
     matter?  Also scm_i_gc (or its descendants) touch the
     scm_sys_protects, which are protected in some cases
     (e.g. scm_permobjs above in scm_gc_stats) by a critical section,
     not by the sweep mutex.  Shouldn't all the GC-relevant objects be
     protected in the same way? */
  scm_gc_running_p = 0;
  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
9d47a1e6 468
c68296f8
MV
469
470\f
0f2d19dd 471
b17e0ac3
MV
472/* The master is global and common while the freelist will be
473 * individual for each thread.
0f2d19dd
JB
474 */
475
c8a1bdc4
HWN
476SCM
477scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
0f2d19dd 478{
c8a1bdc4 479 SCM cell;
b17e0ac3 480 int did_gc = 0;
c8a1bdc4 481
9de87eea 482 scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);
b17e0ac3 483 scm_gc_running_p = 1;
9bc4701c 484
c8a1bdc4
HWN
485 *free_cells = scm_i_sweep_some_segments (freelist);
486 if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
487 {
488 freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
489 *free_cells = scm_i_sweep_some_segments (freelist);
490 }
acb0a19c 491
b17e0ac3 492 if (*free_cells == SCM_EOL)
c8a1bdc4
HWN
493 {
494 /*
b17e0ac3 495 with the advent of lazy sweep, GC yield is only known just
c8a1bdc4
HWN
496 before doing the GC.
497 */
498 scm_i_adjust_min_yield (freelist);
499
500 /*
501 out of fresh cells. Try to get some new ones.
502 */
0f2d19dd 503
b17e0ac3
MV
504 did_gc = 1;
505 scm_i_gc ("cells");
a00c95d9 506
c8a1bdc4
HWN
507 *free_cells = scm_i_sweep_some_segments (freelist);
508 }
509
510 if (*free_cells == SCM_EOL)
511 {
512 /*
513 failed getting new cells. Get new juice or die.
514 */
515 freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
516 *free_cells = scm_i_sweep_some_segments (freelist);
517 }
518
519 if (*free_cells == SCM_EOL)
520 abort ();
0f2d19dd 521
c8a1bdc4 522 cell = *free_cells;
0f2d19dd 523
c8a1bdc4 524 *free_cells = SCM_FREE_CELL_CDR (cell);
eab1b259 525
b17e0ac3 526 scm_gc_running_p = 0;
9de87eea 527 scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
eab1b259 528
b17e0ac3
MV
529 if (did_gc)
530 scm_c_hook_run (&scm_after_gc_c_hook, 0);
531
c8a1bdc4
HWN
532 return cell;
533}
4a4c9785 534
4a4c9785 535
c8a1bdc4
HWN
536scm_t_c_hook scm_before_gc_c_hook;
537scm_t_c_hook scm_before_mark_c_hook;
538scm_t_c_hook scm_before_sweep_c_hook;
539scm_t_c_hook scm_after_sweep_c_hook;
540scm_t_c_hook scm_after_gc_c_hook;
4a4c9785 541
b17e0ac3
MV
/* Must be called while holding scm_i_sweep_mutex.
 */
/* The core collector: stop all threads, finish any pending lazy sweep,
   mark, sweep, update statistics, and wake the threads back up.  The
   before/after hooks fire at each phase boundary.  WHAT is a short
   reason string used only for DEBUGINFO output and stats.  */
void
scm_i_gc (const char *what)
{
  scm_i_thread_put_to_sleep ();

  scm_c_hook_run (&scm_before_gc_c_hook, 0);

#ifdef DEBUGINFO
  fprintf (stderr,"gc reason %s\n", what);

  fprintf (stderr,
	   scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist))
	   ? "*"
	   : (scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist2)) ? "o" : "m"));
#endif

  gc_start_stats (what);

  /*
    Set freelists to NULL so scm_cons() always triggers gc, causing
    the assertion above to fail.
  */
  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /*
    Let's finish the sweep. The conservative GC might point into the
    garbage, and marking that would create a mess.
   */
  scm_i_sweep_all_segments("GC");
  if (scm_mallocated < scm_i_deprecated_memory_return)
    {
      /* The byte count of allocated objects has underflowed.  This is
	 probably because you forgot to report the sizes of objects you
	 have allocated, by calling scm_done_malloc or some such.  When
	 the GC freed them, it subtracted their size from
	 scm_mallocated, which underflowed.  */
      fprintf (stderr,
	       "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
	       "This is probably because the GC hasn't been correctly informed\n"
	       "about object sizes\n");
      abort ();
    }
  scm_mallocated -= scm_i_deprecated_memory_return;


  /* Mark */

  scm_c_hook_run (&scm_before_mark_c_hook, 0);
  scm_mark_all ();
  scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);

  /* Sweep

    TODO: the after_sweep hook should probably be moved to just before
    the mark, since that's where the sweep is finished in lazy
    sweeping.

    MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not.  The
    original meaning implied at least two things: that it would be
    called when

      1. the freelist is re-initialized (no evaluation possible, though)

    and

      2. the heap is "fresh"
         (it is well-defined what data is used and what is not)

    Neither of these conditions would hold just before the mark phase.

    Of course, the lazy sweeping has muddled the distinction between
    scm_before_sweep_c_hook and scm_after_sweep_c_hook, but even if
    there were no difference, it would still be useful to have two
    distinct classes of hook functions since this can prevent some
    bad interference when several modules adds gc hooks.
   */

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);
  scm_gc_sweep ();
  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  gc_end_stats ();

  scm_i_thread_wake_up ();

  /*
    For debugging purposes, you could do
    scm_i_sweep_all_segments("debug"), but then the remains of the
    cell aren't left to analyse.
   */
}
0f2d19dd 637
0f2d19dd
JB
638\f
639/* {GC Protection Helper Functions}
640 */
641
642
5d2b97cd
DH
643/*
644 * If within a function you need to protect one or more scheme objects from
645 * garbage collection, pass them as parameters to one of the
646 * scm_remember_upto_here* functions below. These functions don't do
647 * anything, but since the compiler does not know that they are actually
648 * no-ops, it will generate code that calls these functions with the given
649 * parameters. Therefore, you can be sure that the compiler will keep those
650 * scheme values alive (on the stack or in a register) up to the point where
651 * scm_remember_upto_here* is called. In other words, place the call to
592996c9 652 * scm_remember_upto_here* _behind_ the last code in your function, that
5d2b97cd
DH
653 * depends on the scheme object to exist.
654 *
8c494e99
DH
655 * Example: We want to make sure that the string object str does not get
656 * garbage collected during the execution of 'some_function' in the code
657 * below, because otherwise the characters belonging to str would be freed and
5d2b97cd
DH
658 * 'some_function' might access freed memory. To make sure that the compiler
659 * keeps str alive on the stack or in a register such that it is visible to
660 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
661 * call to 'some_function'. Note that this would not be necessary if str was
662 * used anyway after the call to 'some_function'.
eb01cb64 663 * char *chars = scm_i_string_chars (str);
5d2b97cd
DH
664 * some_function (chars);
665 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
666 */
667
9e1569bd
KR
668/* Remove any macro versions of these while defining the functions.
669 Functions are always included in the library, for upward binary
670 compatibility and in case combinations of GCC and non-GCC are used. */
671#undef scm_remember_upto_here_1
672#undef scm_remember_upto_here_2
673
/* Deliberately empty: merely being called with OBJ forces the compiler
   to keep OBJ live (in a register or on the stack) up to this point,
   making it visible to the conservative GC.  See the commentary above.  */
void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty. Protects a single object from garbage collection. */
}
679
/* Deliberately empty: keeps OBJ1 and OBJ2 live up to this call for the
   benefit of the conservative GC.  See the commentary above.  */
void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty. Protects two objects from garbage collection. */
}
685
/* Deliberately empty variadic version: keeps all arguments live up to
   this call for the benefit of the conservative GC.  */
void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty. Protects any number of objects from garbage collection. */
}
691
c209c88e 692/*
41b0806d
GB
693 These crazy functions prevent garbage collection
694 of arguments after the first argument by
695 ensuring they remain live throughout the
696 function because they are used in the last
697 line of the code block.
698 It'd be better to have a nice compiler hint to
699 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
/* Return ELT unchanged.  The trailing arguments exist only so the
   compiler keeps them live through the call, protecting them from the
   conservative GC (see the comment above).  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
705
41b0806d
GB
706int
707scm_return_first_int (int i, ...)
708{
709 return i;
710}
711
0f2d19dd 712
/* Permanently protect OBJ from collection by consing it onto the
   global scm_permobjs list (which is itself a GC root).  There is no
   way to undo this.  Returns OBJ.  */
SCM
scm_permanent_object (SCM obj)
{
  SCM cell = scm_cons (obj, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  SCM_SETCDR (cell, scm_permobjs);
  scm_permobjs = cell;
  SCM_CRITICAL_SECTION_END;
  return obj;
}
723
724
7bd4fbe2
MD
725/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
726 other references are dropped, until the object is unprotected by calling
6b1b030e 727 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
7bd4fbe2
MD
728 i. e. it is possible to protect the same object several times, but it is
729 necessary to unprotect the object the same number of times to actually get
730 the object unprotected. It is an error to unprotect an object more often
731 than it has been protected before. The function scm_protect_object returns
732 OBJ.
733*/
734
735/* Implementation note: For every object X, there is a counter which
6b1b030e 736 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
7bd4fbe2 737*/
686765af 738
7eec4c37
HWN
739
740
/* Protect OBJ from collection until a matching number of
   scm_gc_unprotect_object calls is made.  Protection counts nest: the
   count is kept as the value of OBJ's entry in the scm_protects hash
   table.  Returns OBJ.  */
SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: Indeed; if my comment above is correct, there is the same
     critsec/mutex inconsistency here. */
  SCM_CRITICAL_SECTION_START;

  /* Create the entry with count 0 if absent, then increment it.  */
  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  protected_obj_count ++;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
760
761
762/* Remove any protection for OBJ established by a prior call to
dab7f566 763 scm_protect_object. This function returns OBJ.
ef290276 764
dab7f566 765 See scm_protect_object for more information. */
/* Undo one level of protection established by scm_gc_protect_object.
   Aborts if OBJ is not currently protected, or if called while GC is
   running.  Returns OBJ.  */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* Decrement the nesting count; drop the entry when it hits 0.  */
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, count);
    }
  protected_obj_count --;

  SCM_CRITICAL_SECTION_END;

  return obj;
}
802
6b1b030e
ML
803void
804scm_gc_register_root (SCM *p)
805{
806 SCM handle;
b9bd8526 807 SCM key = scm_from_ulong ((unsigned long) p);
eae33935 808
6b1b030e 809 /* This critical section barrier will be replaced by a mutex. */
33b320ae 810 /* njrev: and again. */
9de87eea 811 SCM_CRITICAL_SECTION_START;
6b1b030e 812
e11e83f3
MV
813 handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key,
814 scm_from_int (0));
33b320ae 815 /* njrev: note also that the above can probably signal an error */
e11e83f3 816 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));
6b1b030e 817
9de87eea 818 SCM_CRITICAL_SECTION_END;
6b1b030e
ML
819}
820
/* Undo one level of root registration for location P (see
   scm_gc_register_root).  Aborts if P was never registered.  */
void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  /* njrev: and again. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      /* Decrement the nesting count; drop the entry when it hits 0.  */
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
	scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
	SCM_SETCDR (handle, count);
    }

  SCM_CRITICAL_SECTION_END;
}
849
850void
851scm_gc_register_roots (SCM *b, unsigned long n)
852{
853 SCM *p = b;
854 for (; p < b + n; ++p)
855 scm_gc_register_root (p);
856}
857
858void
859scm_gc_unregister_roots (SCM *b, unsigned long n)
860{
861 SCM *p = b;
862 for (; p < b + n; ++p)
863 scm_gc_unregister_root (p);
864}
865
04a98cff 866int scm_i_terminating;
c45acc34 867
0f2d19dd 868\f
a00c95d9 869
4c48ba06 870
c8a1bdc4
HWN
871/*
872 MOVE THIS FUNCTION. IT DOES NOT HAVE ANYTHING TODO WITH GC.
873 */
85db4a2c
DH
874
/* Get an integer from an environment variable.  Returns DEF when VAR
   is unset or does not begin with a decimal number; otherwise the
   parsed value (truncated to int, as before).  */
int
scm_getenv_int (const char *var, int def)
{
  const char *text = getenv (var);
  char *tail = 0;
  long parsed;

  if (text == NULL)
    return def;
  parsed = strtol (text, &tail, 10);
  if (tail == text)
    return def;
  return parsed;
}
889
c35738c1
MD
890void
891scm_storage_prehistory ()
892{
893 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
894 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
895 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
896 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
897 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
898}
85db4a2c 899
9de87eea 900scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;
eb01cb64 901
/* One-time storage-subsystem initialisation: reset the scm_sys_protects
   root vector, set up the cell freelists and malloc bookkeeping,
   allocate the port table, and create the protection hash tables.
   Returns 0 on success, 1 if the port table could not be allocated.  */
int
scm_init_storage ()
{
  size_t j;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;

  scm_gc_init_freelist();
  scm_gc_init_malloc ();

  /* NOTE(review): this assignment is never read afterwards — it looks
     like a leftover (dead store); confirm before removing.  */
  j = SCM_HEAP_SEG_SIZE;


  /* Initialise the list of ports. */
  scm_i_port_table = (scm_t_port **)
    malloc (sizeof (scm_t_port *) * scm_i_port_table_room);
  if (!scm_i_port_table)
    return 1;

#if 0
  /* We can't have a cleanup handler since we have no thread to run it
     in. */

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#endif

  scm_stand_in_procs = scm_make_weak_key_hash_table (scm_from_int (257));
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}
939794ce 944
0f2d19dd
JB
945\f
946
939794ce
DH
/* Scheme-level hook run after each garbage collection; bound to the
   name "after-gc-hook" in scm_init_gc.  */
SCM scm_after_gc_hook;

/* Async scheduled by mark_gc_async; executing it calls
   gc_async_thunk, which in turn runs scm_after_gc_hook.  */
static SCM gc_async;
950
939794ce
DH
/* The function gc_async_thunk causes the execution of the
 * after-gc-hook.  It is run after the gc, as soon as the asynchronous
 * events are handled by the evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}
961
962
963/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
964 * the garbage collection. The only purpose of this function is to mark the
965 * gc_async (which will eventually lead to the execution of the
966 * gc_async_thunk).
967 */
968static void *
e81d98ec
DH
969mark_gc_async (void * hook_data SCM_UNUSED,
970 void *func_data SCM_UNUSED,
971 void *data SCM_UNUSED)
972{
973 /* If cell access debugging is enabled, the user may choose to perform
974 * additional garbage collections after an arbitrary number of cell
975 * accesses. We don't want the scheme level after-gc-hook to be performed
976 * for each of these garbage collections for the following reason: The
977 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
978 * after-gc-hook was performed with every gc, and if the gc was performed
979 * after a very small number of cell accesses, then the number of cell
980 * accesses during the execution of the after-gc-hook will suffice to cause
981 * the execution of the next gc. Then, guile would keep executing the
982 * after-gc-hook over and over again, and would never come to do other
983 * things.
eae33935 984 *
e81d98ec
DH
985 * To overcome this problem, if cell access debugging with additional
986 * garbage collections is enabled, the after-gc-hook is never run by the
987 * garbage collecter. When running guile with cell access debugging and the
988 * execution of the after-gc-hook is desired, then it is necessary to run
989 * the hook explicitly from the user code. This has the effect, that from
990 * the scheme level point of view it seems that garbage collection is
991 * performed with a much lower frequency than it actually is. Obviously,
992 * this will not work for code that depends on a fixed one to one
993 * relationship between the execution counts of the C level garbage
994 * collection hooks and the execution count of the scheme level
995 * after-gc-hook.
996 */
9de87eea 997
e81d98ec 998#if (SCM_DEBUG_CELL_ACCESSES == 1)
eab1b259 999 if (scm_debug_cells_gc_interval == 0)
e81d98ec
DH
1000 scm_system_async_mark (gc_async);
1001#else
939794ce 1002 scm_system_async_mark (gc_async);
e81d98ec
DH
1003#endif
1004
939794ce
DH
1005 return NULL;
1006}
1007
0f2d19dd
JB
1008void
1009scm_init_gc ()
0f2d19dd 1010{
c8a1bdc4 1011 scm_gc_init_mark ();
d678e25c 1012
fde50407
ML
1013 scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
1014 scm_c_define ("after-gc-hook", scm_after_gc_hook);
939794ce 1015
2592c4c7
MV
1016 gc_async = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
1017 gc_async_thunk);
939794ce
DH
1018
1019 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
1020
a0599745 1021#include "libguile/gc.x"
0f2d19dd 1022}
89e00824 1023
c8a1bdc4
HWN
1024
/* Reset sweep bookkeeping after a collection.  The cells themselves
   are reclaimed lazily (see "LAZY SWEEPING" below); here we only
   reset the master freelists and segment state, empty this thread's
   freelists, and invalidate those of all other threads.  */
void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  scm_i_deprecated_memory_return = 0;

  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);

  /*
    NOTHING HERE: LAZY SWEEPING !
  */
  scm_i_reset_segments ();

  /* Empty the current thread's freelists. */
  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /* Invalidate the freelists of other threads. */
  scm_i_thread_invalidate_freelists ();
}

#undef FUNC_NAME
1047
1048
56495472 1049
89e00824
ML
1050/*
1051 Local Variables:
1052 c-file-style: "gnu"
1053 End:
1054*/