[bpt/guile.git] / libguile / gc.c
c35738c1 1/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003 Free Software Foundation, Inc.
a00c95d9 2 *
0f2d19dd
JB
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
a00c95d9 7 *
0f2d19dd
JB
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
a00c95d9 12 *
0f2d19dd
JB
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
82892bed
JB
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
0f2d19dd
JB
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
82892bed 40 * If you do not wish that, delete this exception notice. */
1bbd0b84 41
1bbd0b84 42
37ddcaf6
MD
43/* #define DEBUGINFO */
44
aa54a9b0
RB
45#if HAVE_CONFIG_H
46# include <config.h>
47#endif
56495472 48
0f2d19dd 49#include <stdio.h>
e6e2e95a 50#include <errno.h>
783e7774 51#include <string.h>
c8a1bdc4 52#include <assert.h>
e6e2e95a 53
d9189652
RB
54#ifdef __ia64__
55#include <ucontext.h>
bb1180ef 56extern unsigned long * __libc_ia64_register_backing_store_base;
d9189652
RB
57#endif
58
a0599745 59#include "libguile/_scm.h"
0a7a7445 60#include "libguile/eval.h"
a0599745
MD
61#include "libguile/stime.h"
62#include "libguile/stackchk.h"
63#include "libguile/struct.h"
a0599745
MD
64#include "libguile/smob.h"
65#include "libguile/unif.h"
66#include "libguile/async.h"
67#include "libguile/ports.h"
68#include "libguile/root.h"
69#include "libguile/strings.h"
70#include "libguile/vectors.h"
801cb5e7 71#include "libguile/weaks.h"
686765af 72#include "libguile/hashtab.h"
ecf470a2 73#include "libguile/tags.h"
a0599745 74
c8a1bdc4 75#include "libguile/private-gc.h"
a0599745 76#include "libguile/validate.h"
1be6b49c 77#include "libguile/deprecation.h"
a0599745 78#include "libguile/gc.h"
fce59c93 79
bc9d9bb2 80#ifdef GUILE_DEBUG_MALLOC
a0599745 81#include "libguile/debug-malloc.h"
bc9d9bb2
MD
82#endif
83
0f2d19dd 84#ifdef HAVE_MALLOC_H
95b88819 85#include <malloc.h>
0f2d19dd
JB
86#endif
87
88#ifdef HAVE_UNISTD_H
95b88819 89#include <unistd.h>
0f2d19dd
JB
90#endif
91
406c7d90 92
8c494e99 93
406c7d90
DH
94unsigned int scm_gc_running_p = 0;
95
fb50ef08
MD
96/* Lock this mutex before doing lazy sweeping.
97 */
98scm_t_rec_mutex scm_i_sweep_mutex;
99
eae33935 100/* Set this to a non-zero value if every cell that is accessed should be checked:
61045190 101 */
eab1b259
HWN
102int scm_debug_cell_accesses_p = 0;
103int scm_expensive_debug_cell_accesses_p = 0;
406c7d90 104
e81d98ec
DH
 105/* Set this to 0 if no additional GCs should be performed; otherwise set it to
 106 * the number of cell accesses after which a GC should be triggered.
107 */
eab1b259 108int scm_debug_cells_gc_interval = 0;
e81d98ec 109
eab1b259
HWN
110/*
111 Global variable, so you can switch it off at runtime by setting
112 scm_i_cell_validation_already_running.
406c7d90 113 */
eab1b259
HWN
 114int scm_i_cell_validation_already_running;
115
116#if (SCM_DEBUG_CELL_ACCESSES == 1)
117
118
119/*
120
121 Assert that the given object is a valid reference to a valid cell. This
 122 test involves determining whether the object is a cell pointer, whether
 123 this pointer actually points into a heap segment, and whether the cell
 124 pointed to is not a free cell. Further, additional garbage collections may
 125 get executed after a user-defined number of cell accesses. This helps to
126 find places in the C code where references are dropped for extremely short
127 periods.
128
129*/
406c7d90 130void
eab1b259 131scm_i_expensive_validation_check (SCM cell)
406c7d90 132{
eab1b259
HWN
133 if (!scm_in_heap_p (cell))
134 {
 135 fprintf (stderr, "scm_assert_cell_valid: this object does not live in the heap: %lx\n",
136 (unsigned long) SCM_UNPACK (cell));
137 abort ();
138 }
139
140 /* If desired, perform additional garbage collections after a user
141 * defined number of cell accesses.
142 */
143 if (scm_debug_cells_gc_interval)
144 {
145 static unsigned int counter = 0;
61045190 146
eab1b259
HWN
147 if (counter != 0)
148 {
149 --counter;
150 }
151 else
152 {
153 counter = scm_debug_cells_gc_interval;
154 scm_igc ("scm_assert_cell_valid");
155 }
156 }
157}
158
159void
160scm_assert_cell_valid (SCM cell)
161{
162 if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
406c7d90 163 {
eab1b259 164 scm_i_cell_validation_already_running = 1; /* set to avoid recursion */
406c7d90 165
c8a1bdc4 166 /*
eab1b259
HWN
167 During GC, no user-code should be run, and the guile core
168 should use non-protected accessors.
169 */
c8a1bdc4 170 if (scm_gc_running_p)
eab1b259 171 return;
c8a1bdc4
HWN
172
173 /*
eab1b259
HWN
 174 scm_in_heap_p and rescanning the heap are wildly expensive, so this
 175 is only done when expensive debugging is requested.
176 */
177 if (scm_expensive_debug_cell_accesses_p)
178 scm_i_expensive_validation_check (cell);
c8a1bdc4
HWN
179
180 if (!SCM_GC_MARK_P (cell))
406c7d90 181 {
c8a1bdc4
HWN
182 fprintf (stderr,
183 "scm_assert_cell_valid: this object is unmarked. \n"
184 "It has been garbage-collected in the last GC run: "
185 "%lux\n",
1be6b49c 186 (unsigned long) SCM_UNPACK (cell));
406c7d90
DH
187 abort ();
188 }
c8a1bdc4 189
eab1b259 190 scm_i_cell_validation_already_running = 0; /* re-enable */
406c7d90
DH
191 }
192}
193
194
eab1b259 195
406c7d90
DH
196SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
197 (SCM flag),
1e6808ea 198 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
eab1b259 199 "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
e81d98ec 200 "but no additional calls to garbage collection are issued.\n"
eab1b259 201 "If @var{flag} is a number, strict cell access checking is enabled,\n"
e81d98ec
DH
202 "with an additional garbage collection after the given\n"
203 "number of cell accesses.\n"
1e6808ea
MG
204 "This procedure only exists when the compile-time flag\n"
205 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
406c7d90
DH
206#define FUNC_NAME s_scm_set_debug_cell_accesses_x
207{
eab1b259
HWN
208 if (SCM_FALSEP (flag))
209 {
210 scm_debug_cell_accesses_p = 0;
211 }
212 else if (SCM_EQ_P (flag, SCM_BOOL_T))
213 {
214 scm_debug_cells_gc_interval = 0;
215 scm_debug_cell_accesses_p = 1;
216 scm_expensive_debug_cell_accesses_p = 0;
217 }
218 else if (SCM_INUMP (flag))
219 {
220 long int f = SCM_INUM (flag);
221 if (f <= 0)
222 SCM_OUT_OF_RANGE (1, flag);
223 scm_debug_cells_gc_interval = f;
224 scm_debug_cell_accesses_p = 1;
225 scm_expensive_debug_cell_accesses_p = 1;
226 }
227 else
228 {
229 SCM_WRONG_TYPE_ARG (1, flag);
230 }
406c7d90
DH
231 return SCM_UNSPECIFIED;
232}
233#undef FUNC_NAME
c8a1bdc4 234#else
0f2d19dd 235
8fef55a8 236/*
c8a1bdc4
HWN
237 Provide a stub, so people can use their Scheme code on non-debug
238 versions of GUILE as well.
4c48ba06 239 */
c8a1bdc4
HWN
240SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
241 (SCM flag),
242 "This function is used to turn on checking for a debug version of GUILE. This version does not support this functionality\n")
243#define FUNC_NAME s_scm_set_debug_cell_accesses_x
244{
245
246 /*
247 do nothing
248 */
1e71eafb 249 fprintf (stderr, "\nWARNING: GUILE was not compiled with SCM_DEBUG_CELL_ACCESSES");
c8a1bdc4
HWN
250 scm_remember_upto_here (flag);
251 return SCM_UNSPECIFIED;
252}
253#undef FUNC_NAME
ecf470a2 254
c8a1bdc4 255#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
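/* A minimal usage sketch, not part of the original source: with a debug
   build (SCM_DEBUG_CELL_ACCESSES == 1), the checking modes documented in
   the docstring above can also be selected from C by evaluating the Scheme
   procedure.  The helper name example_enable_cell_checking and the interval
   1000 are illustrative assumptions only.

   #include <libguile.h>

   static void
   example_enable_cell_checking (void)
   {
     scm_c_eval_string ("(set-debug-cell-accesses! #t)");    // cheap checks, no extra GCs
     scm_c_eval_string ("(set-debug-cell-accesses! 1000)");  // strict checks, GC every 1000 accesses
     scm_c_eval_string ("(set-debug-cell-accesses! #f)");    // disable checking
   }
*/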
0f2d19dd
JB
256
257\f
945fec60 258
9bc4701c
MD
259scm_t_key scm_i_freelist;
260scm_t_key scm_i_freelist2;
c8a1bdc4 261
0f2d19dd
JB
262
263/* scm_mtrigger
539b08a4 264 * is the number of bytes of malloc allocation needed to trigger gc.
0f2d19dd 265 */
c014a02e 266unsigned long scm_mtrigger;
0f2d19dd 267
0f2d19dd
JB
268/* scm_gc_heap_lock
269 * If set, don't expand the heap. Set only during gc, during which no allocation
270 * is supposed to take place anyway.
271 */
272int scm_gc_heap_lock = 0;
273
274/* GC Blocking
275 * Don't pause for collection if this is set -- just
276 * expand the heap.
277 */
0f2d19dd
JB
278int scm_block_gc = 1;
279
0f2d19dd
JB
280/* During collection, this accumulates objects holding
281 * weak references.
282 */
ab4bef85 283SCM scm_weak_vectors;
0f2d19dd
JB
284
285/* GC Statistics Keeping
286 */
f2893a25 287unsigned long scm_cells_allocated = 0;
c014a02e
ML
288unsigned long scm_mallocated = 0;
289unsigned long scm_gc_cells_collected;
c8a1bdc4 290unsigned long scm_gc_cells_collected_1 = 0; /* previous GC yield */
c014a02e
ML
291unsigned long scm_gc_malloc_collected;
292unsigned long scm_gc_ports_collected;
0f2d19dd 293unsigned long scm_gc_time_taken = 0;
c014a02e 294static unsigned long t_before_gc;
c9b0d4b0 295unsigned long scm_gc_mark_time_taken = 0;
c014a02e
ML
296unsigned long scm_gc_times = 0;
297unsigned long scm_gc_cells_swept = 0;
c9b0d4b0
ML
298double scm_gc_cells_marked_acc = 0.;
299double scm_gc_cells_swept_acc = 0.;
c2cbcc57
HWN
 300int scm_gc_cell_yield_percentage = 0;
301int scm_gc_malloc_yield_percentage = 0;
302
0f2d19dd
JB
303
304SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
305SCM_SYMBOL (sym_heap_size, "cell-heap-size");
306SCM_SYMBOL (sym_mallocated, "bytes-malloced");
307SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
308SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
309SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
c9b0d4b0 310SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
c9b0d4b0
ML
311SCM_SYMBOL (sym_times, "gc-times");
312SCM_SYMBOL (sym_cells_marked, "cells-marked");
313SCM_SYMBOL (sym_cells_swept, "cells-swept");
c2cbcc57
HWN
314SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
315SCM_SYMBOL (sym_cell_yield, "cell-yield");
0f2d19dd 316
bb2c57fa 317
cf2d30f6 318
d3dd80ab 319
cf2d30f6 320/* Number of calls to SCM_NEWCELL since startup. */
c8a1bdc4
HWN
321unsigned scm_newcell_count;
322unsigned scm_newcell2_count;
b37fe1c5 323
b37fe1c5 324
0f2d19dd
JB
325/* {Scheme Interface to GC}
326 */
c2cbcc57 327extern int scm_gc_malloc_yield_percentage;
a00c95d9 328SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 329 (),
1e6808ea 330 "Return an association list of statistics about Guile's current\n"
c8a1bdc4 331 "use of storage.\n")
1bbd0b84 332#define FUNC_NAME s_scm_gc_stats
0f2d19dd 333{
c8a1bdc4
HWN
334 long i = 0;
335 SCM heap_segs = SCM_EOL ;
c014a02e
ML
336 unsigned long int local_scm_mtrigger;
337 unsigned long int local_scm_mallocated;
338 unsigned long int local_scm_heap_size;
c2cbcc57
HWN
339 int local_scm_gc_cell_yield_percentage;
340 int local_scm_gc_malloc_yield_percentage;
f2893a25 341 unsigned long int local_scm_cells_allocated;
c014a02e
ML
342 unsigned long int local_scm_gc_time_taken;
343 unsigned long int local_scm_gc_times;
344 unsigned long int local_scm_gc_mark_time_taken;
c9b0d4b0
ML
345 double local_scm_gc_cells_swept;
346 double local_scm_gc_cells_marked;
0f2d19dd 347 SCM answer;
c8a1bdc4
HWN
348 unsigned long *bounds = 0;
349 int table_size = scm_i_heap_segment_table_size;
0f2d19dd 350 SCM_DEFER_INTS;
939794ce 351
c8a1bdc4
HWN
352 /*
353 temporarily store the numbers, so as not to cause GC.
7febb4a2 354 */
c8a1bdc4
HWN
355
 356 bounds = malloc (sizeof (unsigned long) * table_size * 2);
357 if (!bounds)
358 abort();
359 for (i = table_size; i--; )
360 {
361 bounds[2*i] = (unsigned long)scm_i_heap_segment_table[i]->bounds[0];
362 bounds[2*i+1] = (unsigned long)scm_i_heap_segment_table[i]->bounds[1];
363 }
0f2d19dd 364
4c9419ac 365
c8a1bdc4
HWN
366 /* Below, we cons to produce the resulting list. We want a snapshot of
367 * the heap situation before consing.
368 */
369 local_scm_mtrigger = scm_mtrigger;
370 local_scm_mallocated = scm_mallocated;
371 local_scm_heap_size = SCM_HEAP_SIZE;
539b08a4 372
c8a1bdc4
HWN
373 local_scm_cells_allocated = scm_cells_allocated;
374
375 local_scm_gc_time_taken = scm_gc_time_taken;
376 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
377 local_scm_gc_times = scm_gc_times;
c2cbcc57
HWN
378 local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
 379 local_scm_gc_cell_yield_percentage = scm_gc_cell_yield_percentage;
380
381 local_scm_gc_cells_swept =
382 (double) scm_gc_cells_swept_acc
383 + (double) scm_gc_cells_swept;
c8a1bdc4
HWN
384 local_scm_gc_cells_marked = scm_gc_cells_marked_acc
385 +(double) scm_gc_cells_swept
386 -(double) scm_gc_cells_collected;
0f2d19dd 387
c8a1bdc4
HWN
388 for (i = table_size; i--;)
389 {
390 heap_segs = scm_cons (scm_cons (scm_ulong2num (bounds[2*i]),
391 scm_ulong2num (bounds[2*i+1])),
392 heap_segs);
393 }
394
395 answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
f2893a25 396 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
c8a1bdc4
HWN
397 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
398 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
399 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
400 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
401 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
402 scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
403 scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
c2cbcc57
HWN
404 scm_cons (sym_malloc_yield, scm_long2num (local_scm_gc_malloc_yield_percentage)),
405 scm_cons (sym_cell_yield, scm_long2num (local_scm_gc_cell_yield_percentage)),
c8a1bdc4
HWN
406 scm_cons (sym_heap_segments, heap_segs),
407 SCM_UNDEFINED);
408 SCM_ALLOW_INTS;
409
410 free (bounds);
411 return answer;
0f2d19dd 412}
c8a1bdc4 413#undef FUNC_NAME
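/* A minimal sketch, not part of the original source, of reading a single
   entry of the association list returned by gc-stats from C.  It assumes the
   public Guile API scm_assq_ref, scm_str2symbol and scm_num2ulong is
   available; the helper name example_heap_size is hypothetical.

   static unsigned long
   example_heap_size (void)
   {
     SCM stats = scm_gc_stats ();
     SCM size = scm_assq_ref (stats, scm_str2symbol ("cell-heap-size"));
     return scm_num2ulong (size, SCM_ARG1, "example_heap_size");
   }
*/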
0f2d19dd 414
c8a1bdc4
HWN
415static void
416gc_start_stats (const char *what SCM_UNUSED)
e4a7824f 417{
c8a1bdc4 418 t_before_gc = scm_c_get_internal_run_time ();
539b08a4 419
c8a1bdc4
HWN
420 scm_gc_cells_marked_acc += (double) scm_gc_cells_swept
421 - (double) scm_gc_cells_collected;
c2cbcc57 422 scm_gc_cells_swept_acc += (double) scm_gc_cells_swept;
e4a7824f 423
c2cbcc57
HWN
424 scm_gc_cell_yield_percentage = ( scm_gc_cells_collected * 100 ) / SCM_HEAP_SIZE;
425
c8a1bdc4
HWN
426 scm_gc_cells_swept = 0;
427 scm_gc_cells_collected_1 = scm_gc_cells_collected;
539b08a4 428
c8a1bdc4
HWN
429 /*
430 CELLS SWEPT is another word for the number of cells that were
431 examined during GC. YIELD is the number that we cleaned
432 out. MARKED is the number that weren't cleaned.
433 */
434 scm_gc_cells_collected = 0;
435 scm_gc_malloc_collected = 0;
436 scm_gc_ports_collected = 0;
e4a7824f 437}
acf4331f 438
c8a1bdc4
HWN
439static void
440gc_end_stats ()
0f2d19dd 441{
c8a1bdc4
HWN
442 unsigned long t = scm_c_get_internal_run_time ();
443 scm_gc_time_taken += (t - t_before_gc);
539b08a4 444
c8a1bdc4 445 ++scm_gc_times;
0f2d19dd 446}
acf4331f 447
0f2d19dd 448
c8a1bdc4
HWN
449SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
450 (SCM obj),
451 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
452 "returned by this function for @var{obj}")
453#define FUNC_NAME s_scm_object_address
c68296f8 454{
c8a1bdc4 455 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
c68296f8 456}
c8a1bdc4 457#undef FUNC_NAME
c68296f8 458
1be6b49c 459
c8a1bdc4
HWN
460SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
461 (),
462 "Scans all of SCM objects and reclaims for further use those that are\n"
463 "no longer accessible.")
464#define FUNC_NAME s_scm_gc
465{
c8a1bdc4 466 scm_igc ("call");
c8a1bdc4 467 return SCM_UNSPECIFIED;
9d47a1e6 468}
c8a1bdc4 469#undef FUNC_NAME
9d47a1e6 470
c68296f8
MV
471
472\f
0f2d19dd 473
c8a1bdc4
HWN
474/* When we get POSIX threads support, the master will be global and
475 * common while the freelist will be individual for each thread.
0f2d19dd
JB
476 */
477
c8a1bdc4
HWN
478SCM
479scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
0f2d19dd 480{
c8a1bdc4
HWN
481 SCM cell;
482
fb50ef08 483 scm_rec_mutex_lock (&scm_i_sweep_mutex);
9bc4701c 484
c8a1bdc4
HWN
485 *free_cells = scm_i_sweep_some_segments (freelist);
486 if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
487 {
488 freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
489 *free_cells = scm_i_sweep_some_segments (freelist);
490 }
acb0a19c 491
c8a1bdc4
HWN
492 if (*free_cells == SCM_EOL && !scm_block_gc)
493 {
494 /*
 495 with the advent of lazy sweeping, GC yield is only known just
496 before doing the GC.
497 */
498 scm_i_adjust_min_yield (freelist);
499
500 /*
501 out of fresh cells. Try to get some new ones.
502 */
0f2d19dd 503
c8a1bdc4 504 scm_igc ("cells");
a00c95d9 505
c8a1bdc4
HWN
506 *free_cells = scm_i_sweep_some_segments (freelist);
507 }
508
509 if (*free_cells == SCM_EOL)
510 {
511 /*
512 failed getting new cells. Get new juice or die.
513 */
514 freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
515 *free_cells = scm_i_sweep_some_segments (freelist);
516 }
517
518 if (*free_cells == SCM_EOL)
519 abort ();
0f2d19dd 520
c8a1bdc4 521 cell = *free_cells;
0f2d19dd 522
c8a1bdc4 523 *free_cells = SCM_FREE_CELL_CDR (cell);
eab1b259 524
fb50ef08 525 scm_rec_mutex_unlock (&scm_i_sweep_mutex);
eab1b259 526
c8a1bdc4
HWN
527 return cell;
528}
4a4c9785 529
4a4c9785 530
c8a1bdc4
HWN
531scm_t_c_hook scm_before_gc_c_hook;
532scm_t_c_hook scm_before_mark_c_hook;
533scm_t_c_hook scm_before_sweep_c_hook;
534scm_t_c_hook scm_after_sweep_c_hook;
535scm_t_c_hook scm_after_gc_c_hook;
4a4c9785 536
c8a1bdc4
HWN
537void
538scm_igc (const char *what)
539{
fb50ef08 540 scm_rec_mutex_lock (&scm_i_sweep_mutex);
c8a1bdc4
HWN
541 ++scm_gc_running_p;
542 scm_c_hook_run (&scm_before_gc_c_hook, 0);
a00c95d9 543
c8a1bdc4
HWN
544#ifdef DEBUGINFO
545 fprintf (stderr,"gc reason %s\n", what);
546
547 fprintf (stderr,
9bc4701c 548 SCM_NULLP (*SCM_FREELIST_LOC (scm_i_freelist))
c8a1bdc4 549 ? "*"
9bc4701c 550 : (SCM_NULLP (*SCM_FREELIST_LOC (scm_i_freelist2)) ? "o" : "m"));
c8a1bdc4 551#endif
4c48ba06 552
c8a1bdc4 553 /* During the critical section, only the current thread may run. */
fb50ef08 554 scm_i_thread_put_to_sleep ();
a00c95d9 555
eab1b259 556 if (!scm_root || !scm_stack_base || scm_block_gc)
d6884e63 557 {
c8a1bdc4
HWN
558 --scm_gc_running_p;
559 return;
d6884e63
ML
560 }
561
c8a1bdc4 562 gc_start_stats (what);
a00c95d9 563
c8a1bdc4
HWN
564 if (scm_gc_heap_lock)
565 /* We've invoked the collector while a GC is already in progress.
566 That should never happen. */
567 abort ();
a00c95d9 568
c8a1bdc4 569 ++scm_gc_heap_lock;
a00c95d9 570
c8a1bdc4
HWN
571 /*
572 Let's finish the sweep. The conservative GC might point into the
573 garbage, and marking that would create a mess.
574 */
 575 scm_i_sweep_all_segments ("GC");
576 if (scm_mallocated < scm_i_deprecated_memory_return)
b6efc951 577 {
c8a1bdc4
HWN
578 /* The byte count of allocated objects has underflowed. This is
579 probably because you forgot to report the sizes of objects you
580 have allocated, by calling scm_done_malloc or some such. When
581 the GC freed them, it subtracted their size from
582 scm_mallocated, which underflowed. */
583 fprintf (stderr,
584 "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
585 "This is probably because the GC hasn't been correctly informed\n"
586 "about object sizes\n");
b6efc951
DH
587 abort ();
588 }
c8a1bdc4 589 scm_mallocated -= scm_i_deprecated_memory_return;
0f2d19dd 590
c8a1bdc4
HWN
591
592
593 scm_c_hook_run (&scm_before_mark_c_hook, 0);
b6efc951 594
c8a1bdc4
HWN
595 scm_mark_all ();
596
c2cbcc57 597 scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);
c8a1bdc4
HWN
598
599 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
600
601 /*
602 Moved this lock upwards so that we can alloc new heap at the end of a sweep.
0f2d19dd 603
c8a1bdc4 604 DOCME: why should the heap be locked anyway?
0f2d19dd 605 */
c8a1bdc4 606 --scm_gc_heap_lock;
a00c95d9 607
c8a1bdc4 608 scm_gc_sweep ();
0f2d19dd 609
ffd72400
HWN
610
611 /*
612 TODO: this hook should probably be moved to just before the mark,
613 since that's where the sweep is finished in lazy sweeping.
c35738c1
MD
614
615 MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not. The
616 original meaning implied at least two things: that it would be
617 called when
618
619 1. the freelist is re-initialized (no evaluation possible, though)
620
621 and
622
623 2. the heap is "fresh"
624 (it is well-defined what data is used and what is not)
625
626 Neither of these conditions would hold just before the mark phase.
627
628 Of course, the lazy sweeping has muddled the distinction between
629 scm_before_sweep_c_hook and scm_after_sweep_c_hook, but even if
630 there were no difference, it would still be useful to have two
631 distinct classes of hook functions since this can prevent some
 632 bad interference when several modules add gc hooks.
ffd72400 633 */
c8a1bdc4
HWN
634 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
635 gc_end_stats ();
636
fb50ef08 637 scm_i_thread_wake_up ();
ffd72400
HWN
638
639 /*
640 See above.
641 */
c8a1bdc4
HWN
642 scm_c_hook_run (&scm_after_gc_c_hook, 0);
643 --scm_gc_running_p;
fb50ef08 644 scm_rec_mutex_unlock (&scm_i_sweep_mutex);
a00c95d9 645
eab1b259
HWN
646 /*
647 For debugging purposes, you could do
 648 scm_i_sweep_all_segments ("debug"), but then the remains of the
 649 cells aren't left to analyse.
650 */
651}
0f2d19dd 652
0f2d19dd
JB
653\f
654/* {GC Protection Helper Functions}
655 */
656
657
5d2b97cd
DH
658/*
659 * If within a function you need to protect one or more scheme objects from
660 * garbage collection, pass them as parameters to one of the
661 * scm_remember_upto_here* functions below. These functions don't do
662 * anything, but since the compiler does not know that they are actually
663 * no-ops, it will generate code that calls these functions with the given
664 * parameters. Therefore, you can be sure that the compiler will keep those
665 * scheme values alive (on the stack or in a register) up to the point where
666 * scm_remember_upto_here* is called. In other words, place the call to
592996c9 667 * scm_remember_upto_here* _after_ the last code in your function that
5d2b97cd
DH
 668 * depends on the scheme object existing.
669 *
8c494e99
DH
670 * Example: We want to make sure that the string object str does not get
671 * garbage collected during the execution of 'some_function' in the code
672 * below, because otherwise the characters belonging to str would be freed and
5d2b97cd
DH
673 * 'some_function' might access freed memory. To make sure that the compiler
674 * keeps str alive on the stack or in a register such that it is visible to
675 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
676 * call to 'some_function'. Note that this would not be necessary if str was
677 * used anyway after the call to 'some_function'.
678 * char *chars = SCM_STRING_CHARS (str);
679 * some_function (chars);
680 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
681 */
682
683void
e81d98ec 684scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
5d2b97cd
DH
685{
686 /* Empty. Protects a single object from garbage collection. */
687}
688
689void
e81d98ec 690scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
5d2b97cd
DH
691{
692 /* Empty. Protects two objects from garbage collection. */
693}
694
695void
e81d98ec 696scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
5d2b97cd
DH
697{
698 /* Empty. Protects any number of objects from garbage collection. */
699}
700
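/* A self-contained sketch of the idiom described in the comment above; it is
   not part of the original source.  process_bytes stands for any hypothetical
   consumer of the raw character data that may allocate (and hence trigger GC).

   static void
   example_use_string (SCM str)
   {
     char *chars = SCM_STRING_CHARS (str);
     process_bytes (chars);           // may allocate and thus run the GC
     scm_remember_upto_here_1 (str);  // keeps str (and hence chars) alive up to here
   }
*/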
c209c88e 701/*
41b0806d
GB
702 These crazy functions prevent garbage collection
703 of arguments after the first argument by
704 ensuring they remain live throughout the
705 function because they are used in the last
706 line of the code block.
707 It'd be better to have a nice compiler hint to
708 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
709SCM
710scm_return_first (SCM elt, ...)
0f2d19dd
JB
711{
712 return elt;
713}
714
41b0806d
GB
715int
716scm_return_first_int (int i, ...)
717{
718 return i;
719}
720
0f2d19dd 721
0f2d19dd 722SCM
6e8d25a6 723scm_permanent_object (SCM obj)
0f2d19dd
JB
724{
725 SCM_REDEFER_INTS;
726 scm_permobjs = scm_cons (obj, scm_permobjs);
727 SCM_REALLOW_INTS;
728 return obj;
729}
730
731
7bd4fbe2
MD
732/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
733 other references are dropped, until the object is unprotected by calling
6b1b030e 734 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
7bd4fbe2
MD
 735 i.e. it is possible to protect the same object several times, but it is
736 necessary to unprotect the object the same number of times to actually get
737 the object unprotected. It is an error to unprotect an object more often
 738 than it has been protected before. The function scm_gc_protect_object returns
739 OBJ.
740*/
741
742/* Implementation note: For every object X, there is a counter which
6b1b030e 743 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
7bd4fbe2 744*/
686765af 745
ef290276 746SCM
6b1b030e 747scm_gc_protect_object (SCM obj)
ef290276 748{
686765af 749 SCM handle;
9d47a1e6 750
686765af 751 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 752 SCM_REDEFER_INTS;
9d47a1e6 753
0f0f0899 754 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
1be6b49c 755 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
9d47a1e6 756
2dd6a83a 757 SCM_REALLOW_INTS;
9d47a1e6 758
ef290276
JB
759 return obj;
760}
761
762
763/* Remove any protection for OBJ established by a prior call to
dab7f566 764 scm_gc_protect_object. This function returns OBJ.
ef290276 765
dab7f566 766 See scm_gc_protect_object for more information. */
ef290276 767SCM
6b1b030e 768scm_gc_unprotect_object (SCM obj)
ef290276 769{
686765af 770 SCM handle;
9d47a1e6 771
686765af 772 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 773 SCM_REDEFER_INTS;
9d47a1e6 774
686765af 775 handle = scm_hashq_get_handle (scm_protects, obj);
9d47a1e6 776
22a52da1 777 if (SCM_FALSEP (handle))
686765af 778 {
0f0f0899
MD
779 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
780 abort ();
686765af 781 }
6a199940
DH
782 else
783 {
1be6b49c
ML
784 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
785 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
6a199940
DH
786 scm_hashq_remove_x (scm_protects, obj);
787 else
1be6b49c 788 SCM_SETCDR (handle, count);
6a199940 789 }
686765af 790
2dd6a83a 791 SCM_REALLOW_INTS;
ef290276
JB
792
793 return obj;
794}
795
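/* A brief usage sketch, not part of the original source, of the nesting
   behaviour documented above scm_gc_protect_object: every protect call must
   be balanced by an unprotect call before the object becomes collectable
   again.  The helper name example_keep_across_gc is hypothetical.

   static SCM
   example_keep_across_gc (SCM obj)
   {
     scm_gc_protect_object (obj);     // reference count for obj becomes 1
     scm_gc_protect_object (obj);     // count becomes 2
     scm_gc ();                       // obj survives; it is reachable via scm_protects
     scm_gc_unprotect_object (obj);   // count back to 1, still protected
     scm_gc_unprotect_object (obj);   // count 0, entry removed from scm_protects
     return obj;
   }
*/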
6b1b030e
ML
796void
797scm_gc_register_root (SCM *p)
798{
799 SCM handle;
800 SCM key = scm_long2num ((long) p);
eae33935 801
6b1b030e
ML
802 /* This critical section barrier will be replaced by a mutex. */
803 SCM_REDEFER_INTS;
804
805 handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
806 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
807
808 SCM_REALLOW_INTS;
809}
810
811void
812scm_gc_unregister_root (SCM *p)
813{
814 SCM handle;
815 SCM key = scm_long2num ((long) p);
816
817 /* This critical section barrier will be replaced by a mutex. */
818 SCM_REDEFER_INTS;
819
820 handle = scm_hashv_get_handle (scm_gc_registered_roots, key);
821
822 if (SCM_FALSEP (handle))
823 {
824 fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
825 abort ();
826 }
827 else
828 {
829 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
830 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
831 scm_hashv_remove_x (scm_gc_registered_roots, key);
832 else
833 SCM_SETCDR (handle, count);
834 }
835
836 SCM_REALLOW_INTS;
837}
838
839void
840scm_gc_register_roots (SCM *b, unsigned long n)
841{
842 SCM *p = b;
843 for (; p < b + n; ++p)
844 scm_gc_register_root (p);
845}
846
847void
848scm_gc_unregister_roots (SCM *b, unsigned long n)
849{
850 SCM *p = b;
851 for (; p < b + n; ++p)
852 scm_gc_unregister_root (p);
853}
854
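/* A minimal sketch, not part of the original source: registering a global SCM
   location as a GC root so that the object stored there is traced even though
   the location lives outside the heap and outside any scanned stack.  The
   variable and helper names are hypothetical.

   static SCM my_global_obj;

   static void
   example_install_root (SCM value)
   {
     my_global_obj = value;
     scm_gc_register_root (&my_global_obj);
     // ...later, when the location is no longer needed:
     // scm_gc_unregister_root (&my_global_obj);
   }
*/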
04a98cff 855int scm_i_terminating;
c45acc34
JB
856
857/* called on process termination. */
e52ceaac
MD
858#ifdef HAVE_ATEXIT
859static void
860cleanup (void)
861#else
862#ifdef HAVE_ON_EXIT
51157deb
MD
863extern int on_exit (void (*procp) (), int arg);
864
e52ceaac
MD
865static void
866cleanup (int status, void *arg)
867#else
868#error Dont know how to setup a cleanup handler on your system.
869#endif
870#endif
c45acc34 871{
04a98cff 872 scm_i_terminating = 1;
c45acc34
JB
873 scm_flush_all_ports ();
874}
ef290276 875
0f2d19dd 876\f
a00c95d9 877
4c48ba06 878
c8a1bdc4
HWN
879/*
 880 MOVE THIS FUNCTION. IT DOES NOT HAVE ANYTHING TO DO WITH GC.
881 */
85db4a2c
DH
882
883/* Get an integer from an environment variable. */
c8a1bdc4
HWN
884int
885scm_getenv_int (const char *var, int def)
85db4a2c 886{
c8a1bdc4
HWN
887 char *end = 0;
888 char *val = getenv (var);
889 long res = def;
85db4a2c
DH
890 if (!val)
891 return def;
892 res = strtol (val, &end, 10);
893 if (end == val)
894 return def;
895 return res;
896}
897
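/* Usage sketch, not part of the original source: read a tuning value from the
   environment, falling back to a default when the variable is unset or not a
   number.  The variable name GUILE_EXAMPLE_HEAP_KB and the default 1024 are
   purely illustrative.

   static int
   example_read_tuning (void)
   {
     return scm_getenv_int ("GUILE_EXAMPLE_HEAP_KB", 1024);
   }
*/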
c35738c1
MD
898void
899scm_storage_prehistory ()
900{
901 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
902 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
903 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
904 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
905 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
906}
85db4a2c 907
4a4c9785 908int
85db4a2c 909scm_init_storage ()
0f2d19dd 910{
1be6b49c 911 size_t j;
0f2d19dd 912
fb50ef08
MD
913 /* Fixme: Should use mutexattr from the low-level API. */
914 scm_rec_mutex_init (&scm_i_sweep_mutex, &scm_i_plugin_rec_mutex);
915
0f2d19dd
JB
916 j = SCM_NUM_PROTECTS;
917 while (j)
918 scm_sys_protects[--j] = SCM_BOOL_F;
919 scm_block_gc = 1;
4a4c9785 920
c8a1bdc4
HWN
921 scm_gc_init_freelist();
922 scm_gc_init_malloc ();
0f2d19dd
JB
923
924 j = SCM_HEAP_SEG_SIZE;
d6884e63 925
c8a1bdc4 926
0f2d19dd 927 /* Initialise the list of ports. */
67329a9e
HWN
928 scm_i_port_table = (scm_t_port **)
929 malloc (sizeof (scm_t_port *) * scm_i_port_table_room);
930 if (!scm_i_port_table)
0f2d19dd
JB
931 return 1;
932
a18bcd0e 933#ifdef HAVE_ATEXIT
c45acc34 934 atexit (cleanup);
e52ceaac
MD
935#else
936#ifdef HAVE_ON_EXIT
937 on_exit (cleanup, 0);
938#endif
a18bcd0e 939#endif
0f2d19dd 940
8960e0a0 941 scm_stand_in_procs = SCM_EOL;
0f2d19dd 942 scm_permobjs = SCM_EOL;
00ffa0e7 943 scm_protects = scm_c_make_hash_table (31);
6b1b030e 944 scm_gc_registered_roots = scm_c_make_hash_table (31);
d6884e63 945
0f2d19dd
JB
946 return 0;
947}
939794ce 948
0f2d19dd
JB
949\f
950
939794ce
DH
951SCM scm_after_gc_hook;
952
939794ce
DH
953static SCM gc_async;
954
939794ce
DH
955/* The function gc_async_thunk causes the execution of the after-gc-hook. It
956 * is run after the gc, as soon as the asynchronous events are handled by the
957 * evaluator.
958 */
959static SCM
960gc_async_thunk (void)
961{
962 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
939794ce
DH
963 return SCM_UNSPECIFIED;
964}
965
966
967/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
968 * the garbage collection. The only purpose of this function is to mark the
969 * gc_async (which will eventually lead to the execution of the
970 * gc_async_thunk).
971 */
972static void *
e81d98ec
DH
973mark_gc_async (void * hook_data SCM_UNUSED,
974 void *func_data SCM_UNUSED,
975 void *data SCM_UNUSED)
976{
977 /* If cell access debugging is enabled, the user may choose to perform
978 * additional garbage collections after an arbitrary number of cell
979 * accesses. We don't want the scheme level after-gc-hook to be performed
980 * for each of these garbage collections for the following reason: The
981 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
982 * after-gc-hook was performed with every gc, and if the gc was performed
983 * after a very small number of cell accesses, then the number of cell
984 * accesses during the execution of the after-gc-hook will suffice to cause
985 * the execution of the next gc. Then, guile would keep executing the
986 * after-gc-hook over and over again, and would never come to do other
987 * things.
eae33935 988 *
e81d98ec
DH
989 * To overcome this problem, if cell access debugging with additional
990 * garbage collections is enabled, the after-gc-hook is never run by the
 991 * garbage collector. When running guile with cell access debugging, and
 992 * execution of the after-gc-hook is desired, it is necessary to run
 993 * the hook explicitly from the user code. This has the effect that, from
 994 * the scheme level point of view, it seems that garbage collection is
995 * performed with a much lower frequency than it actually is. Obviously,
996 * this will not work for code that depends on a fixed one to one
997 * relationship between the execution counts of the C level garbage
998 * collection hooks and the execution count of the scheme level
999 * after-gc-hook.
1000 */
1001#if (SCM_DEBUG_CELL_ACCESSES == 1)
eab1b259 1002 if (scm_debug_cells_gc_interval == 0)
e81d98ec
DH
1003 scm_system_async_mark (gc_async);
1004#else
939794ce 1005 scm_system_async_mark (gc_async);
e81d98ec
DH
1006#endif
1007
939794ce
DH
1008 return NULL;
1009}
1010
0f2d19dd
JB
1011void
1012scm_init_gc ()
0f2d19dd 1013{
c8a1bdc4 1014 scm_gc_init_mark ();
d678e25c 1015
fde50407
ML
1016 scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
1017 scm_c_define ("after-gc-hook", scm_after_gc_hook);
939794ce 1018
2592c4c7
MV
1019 gc_async = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
1020 gc_async_thunk);
939794ce
DH
1021
1022 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
1023
a0599745 1024#include "libguile/gc.x"
0f2d19dd 1025}
89e00824 1026
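/* A short sketch, not part of the original source, of attaching a C-level
   callback to one of the GC hooks initialised in scm_storage_prehistory and
   run from scm_igc above.  The callback and helper names are hypothetical.

   static void *
   example_after_gc_cb (void *hook_data, void *func_data, void *data)
   {
     fprintf (stderr, "gc finished; %lu cells swept\n", scm_gc_cells_swept);
     return NULL;  // return value is ignored for SCM_C_HOOK_NORMAL hooks
   }

   static void
   example_install_gc_hook (void)
   {
     scm_c_hook_add (&scm_after_gc_c_hook, example_after_gc_cb, NULL, 0);
   }
*/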
c8a1bdc4
HWN
1027
1028void
1029scm_gc_sweep (void)
1030#define FUNC_NAME "scm_gc_sweep"
1031{
1032 scm_i_deprecated_memory_return = 0;
1033
1034 scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
1035 scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);
1036
1037 /*
1038 NOTHING HERE: LAZY SWEEPING !
1039 */
1040 scm_i_reset_segments ();
1041
1042 /* When we move to POSIX threads private freelists should probably
1043 be GC-protected instead. */
9bc4701c
MD
1044 *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
1045 *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;
392d2833
MD
1046
1047 /* Invalidate the freelists of other threads. */
1048 scm_i_thread_invalidate_freelists ();
c8a1bdc4
HWN
1049}
1050
1051#undef FUNC_NAME
1052
1053
56495472 1054
89e00824
ML
1055/*
1056 Local Variables:
1057 c-file-style: "gnu"
1058 End:
1059*/