/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001, 2002, 2003 Free Software Foundation, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#define _GNU_SOURCE

/* #define DEBUGINFO */

#if HAVE_CONFIG_H
# include <config.h>
#endif

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <assert.h>

#ifdef __ia64__
#include <ucontext.h>
extern unsigned long * __libc_ia64_register_backing_store_base;
#endif

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/private-gc.h"
#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"
#include "libguile/dynwind.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif


unsigned int scm_gc_running_p = 0;

/* Lock this mutex before doing lazy sweeping.
 */
scm_i_pthread_mutex_t scm_i_sweep_mutex = SCM_I_PTHREAD_RECURSIVE_MUTEX_INITIALIZER;

/* Set this to != 0 if every cell that is accessed shall be checked:
 */
int scm_debug_cell_accesses_p = 0;
int scm_expensive_debug_cell_accesses_p = 0;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
int scm_debug_cells_gc_interval = 0;

/*
  Global variable, so you can switch it off at runtime by setting
  scm_i_cell_validation_already_running.
 */
int scm_i_cell_validation_already_running;

#if (SCM_DEBUG_CELL_ACCESSES == 1)


/*

  Assert that the given object is a valid reference to a valid cell.  This
  test involves determining whether the object is a cell pointer, whether
  this pointer actually points into a heap segment, and whether the cell
  pointed to is not a free cell.  Further, additional garbage collections may
  get executed after a user-defined number of cell accesses.  This helps to
  find places in the C code where references are dropped for extremely short
  periods.

*/
void
scm_i_expensive_validation_check (SCM cell)
{
  if (!scm_in_heap_p (cell))
    {
      fprintf (stderr, "scm_assert_cell_valid: this object does not live in the heap: %lux\n",
               (unsigned long) SCM_UNPACK (cell));
      abort ();
    }

  /* If desired, perform additional garbage collections after a user
   * defined number of cell accesses.
   */
  if (scm_debug_cells_gc_interval)
    {
      static unsigned int counter = 0;

      if (counter != 0)
        {
          --counter;
        }
      else
        {
          counter = scm_debug_cells_gc_interval;
          scm_igc ("scm_assert_cell_valid");
        }
    }
}

void
scm_assert_cell_valid (SCM cell)
{
  if (!scm_i_cell_validation_already_running && scm_debug_cell_accesses_p)
    {
      scm_i_cell_validation_already_running = 1;  /* set to avoid recursion */

      /*
        During GC, no user code should be run, and the guile core
        should use non-protected accessors.
      */
      if (scm_gc_running_p)
        return;

      /*
        Only scm_in_heap_p and rescanning the heap is wildly
        expensive.
      */
      if (scm_expensive_debug_cell_accesses_p)
        scm_i_expensive_validation_check (cell);

      if (!SCM_GC_MARK_P (cell))
        {
          fprintf (stderr,
                   "scm_assert_cell_valid: this object is unmarked.\n"
                   "It has been garbage-collected in the last GC run: "
                   "%lux\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }

      scm_i_cell_validation_already_running = 0;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cheap cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, strict cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (scm_is_false (flag))
    {
      scm_debug_cell_accesses_p = 0;
    }
  else if (scm_is_eq (flag, SCM_BOOL_T))
    {
      scm_debug_cells_gc_interval = 0;
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 0;
    }
  else
    {
      scm_debug_cells_gc_interval = scm_to_signed_integer (flag, 0, INT_MAX);
      scm_debug_cell_accesses_p = 1;
      scm_expensive_debug_cell_accesses_p = 1;
    }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
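
/* Illustrative sketch (not built): enabling cell access checking from C
   by calling the primitive defined above.  The argument values are
   examples, not recommendations. */
#if 0
static void
example_enable_cell_checking (void)
{
  /* Cheap checking only: */
  scm_set_debug_cell_accesses_x (SCM_BOOL_T);
  /* Strict checking, with a GC every 1000 cell accesses: */
  scm_set_debug_cell_accesses_x (scm_from_int (1000));
}
#endif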


#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f


/* scm_mtrigger
 * is the number of bytes of malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no
 * allocation is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_cells_collected_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;
int scm_gc_cell_yield_percentage = 0;
int scm_gc_malloc_yield_percentage = 0;
unsigned long protected_obj_count = 0;


SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
SCM_SYMBOL (sym_malloc_yield, "malloc-yield");
SCM_SYMBOL (sym_cell_yield, "cell-yield");
SCM_SYMBOL (sym_protected_objects, "protected-objects");



/* Number of calls to SCM_NEWCELL since startup.  */
unsigned scm_newcell_count;
unsigned scm_newcell2_count;

/* {Scheme Interface to GC}
 */
static SCM
tag_table_to_type_alist (void *closure, SCM key, SCM val, SCM acc)
{
  scm_t_bits c_tag = scm_to_int (key);

  char const * name = scm_i_tag_name (c_tag);
  if (name != NULL)
    key = scm_from_locale_string (name);

  return scm_cons (scm_cons (key, val), acc);
}

SCM_DEFINE (scm_gc_live_object_stats, "gc-live-object-stats", 0, 0, 0,
            (),
            "Return an alist of statistics of the current live objects.")
#define FUNC_NAME s_scm_gc_live_object_stats
{
  SCM tab = scm_make_hash_table (scm_from_int (57));
  scm_i_all_segments_statistics (tab);

  SCM alist
    = scm_internal_hash_fold (&tag_table_to_type_alist, NULL, SCM_EOL, tab);

  return alist;
}
#undef FUNC_NAME

extern int scm_gc_malloc_yield_percentage;
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.\n")
#define FUNC_NAME s_scm_gc_stats
{
  long i = 0;
  SCM heap_segs = SCM_EOL;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  int local_scm_gc_cell_yield_percentage;
  int local_scm_gc_malloc_yield_percentage;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_protected_obj_count;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;
  unsigned long *bounds = 0;
  int table_size = scm_i_heap_segment_table_size;
  SCM_CRITICAL_SECTION_START;

  /*
    temporarily store the numbers, so as not to cause GC.
   */
  bounds = malloc (sizeof (unsigned long) * table_size * 2);
  if (!bounds)
    abort ();
  for (i = table_size; i--; )
    {
      bounds[2*i] = (unsigned long)scm_i_heap_segment_table[i]->bounds[0];
      bounds[2*i+1] = (unsigned long)scm_i_heap_segment_table[i]->bounds[1];
    }

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;

  local_scm_cells_allocated = scm_cells_allocated;

  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_malloc_yield_percentage = scm_gc_malloc_yield_percentage;
  local_scm_gc_cell_yield_percentage = scm_gc_cell_yield_percentage;
  local_protected_obj_count = protected_obj_count;
  local_scm_gc_cells_swept =
    (double) scm_gc_cells_swept_acc
    + (double) scm_gc_cells_swept;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc
    + (double) scm_gc_cells_swept
    - (double) scm_gc_cells_collected;

  for (i = table_size; i--;)
    {
      heap_segs = scm_cons (scm_cons (scm_from_ulong (bounds[2*i]),
                                      scm_from_ulong (bounds[2*i+1])),
                            heap_segs);
    }

  answer =
    scm_list_n (scm_cons (sym_gc_time_taken,
                          scm_from_ulong (local_scm_gc_time_taken)),
                scm_cons (sym_cells_allocated,
                          scm_from_ulong (local_scm_cells_allocated)),
                scm_cons (sym_heap_size,
                          scm_from_ulong (local_scm_heap_size)),
                scm_cons (sym_mallocated,
                          scm_from_ulong (local_scm_mallocated)),
                scm_cons (sym_mtrigger,
                          scm_from_ulong (local_scm_mtrigger)),
                scm_cons (sym_times,
                          scm_from_ulong (local_scm_gc_times)),
                scm_cons (sym_gc_mark_time_taken,
                          scm_from_ulong (local_scm_gc_mark_time_taken)),
                scm_cons (sym_cells_marked,
                          scm_from_double (local_scm_gc_cells_marked)),
                scm_cons (sym_cells_swept,
                          scm_from_double (local_scm_gc_cells_swept)),
                scm_cons (sym_malloc_yield,
                          scm_from_long (local_scm_gc_malloc_yield_percentage)),
                scm_cons (sym_cell_yield,
                          scm_from_long (local_scm_gc_cell_yield_percentage)),
                scm_cons (sym_protected_objects,
                          scm_from_ulong (local_protected_obj_count)),
                scm_cons (sym_heap_segments, heap_segs),
                SCM_UNDEFINED);
  SCM_CRITICAL_SECTION_END;

  free (bounds);
  return answer;
}
#undef FUNC_NAME

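/* Illustrative sketch (not built): reading one entry out of the alist
   returned by scm_gc_stats from C, using the ordinary libguile helpers
   scm_assq and scm_from_locale_symbol. */
#if 0
static unsigned long
example_gc_time_taken (void)
{
  SCM stats = scm_gc_stats ();
  SCM pair = scm_assq (scm_from_locale_symbol ("gc-time-taken"), stats);
  return scm_to_ulong (SCM_CDR (pair));
}
#endif
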
static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();

  scm_gc_cells_marked_acc += (double) scm_gc_cells_swept
    - (double) scm_gc_cells_collected;
  scm_gc_cells_swept_acc += (double) scm_gc_cells_swept;

  scm_gc_cell_yield_percentage = (scm_gc_cells_collected * 100) / SCM_HEAP_SIZE;

  scm_gc_cells_swept = 0;
  scm_gc_cells_collected_1 = scm_gc_cells_collected;

  /*
    CELLS SWEPT is another word for the number of cells that were
    examined during GC.  YIELD is the number that we cleaned
    out.  MARKED is the number that weren't cleaned.
  */
  scm_gc_cells_collected = 0;
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}

static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);

  ++scm_gc_times;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_from_ulong (SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all of SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  scm_igc ("call");
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f

/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_t_cell_type_statistics *freelist, SCM *free_cells)
{
  SCM cell;

  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);

  *free_cells = scm_i_sweep_some_segments (freelist);
  if (*free_cells == SCM_EOL && scm_i_gc_grow_heap_p (freelist))
    {
      freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL && !scm_block_gc)
    {
      /*
        with the advent of lazy sweep, GC yield is only known just
        before doing the GC.
      */
      scm_i_adjust_min_yield (freelist);

      /*
        out of fresh cells.  Try to get some new ones.
      */

      scm_igc ("cells");

      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL)
    {
      /*
        failed getting new cells.  Get new juice or die.
      */
      freelist->heap_segment_idx = scm_i_get_new_heap_segment (freelist, abort_on_error);
      *free_cells = scm_i_sweep_some_segments (freelist);
    }

  if (*free_cells == SCM_EOL)
    abort ();

  cell = *free_cells;

  *free_cells = SCM_FREE_CELL_CDR (cell);

  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);

  return cell;
}


scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;

void
scm_igc (const char *what)
{
  if (scm_block_gc)
    return;

  scm_i_scm_pthread_mutex_lock (&scm_i_sweep_mutex);

  /* During the critical section, only the current thread may run. */
  scm_i_thread_put_to_sleep ();

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);

#ifdef DEBUGINFO
  fprintf (stderr, "gc reason %s\n", what);

  fprintf (stderr,
           scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist))
           ? "*"
           : (scm_is_null (*SCM_FREELIST_LOC (scm_i_freelist2)) ? "o" : "m"));
#endif

  gc_start_stats (what);

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  /*
    Set freelists to NULL so scm_cons () always triggers gc, causing
    the above abort () to be triggered.
  */
  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  ++scm_gc_heap_lock;

  /*
    Let's finish the sweep.  The conservative GC might point into the
    garbage, and marking that would create a mess.
  */
  scm_i_sweep_all_segments ("GC");
  if (scm_mallocated < scm_i_deprecated_memory_return)
    {
      /* The byte count of allocated objects has underflowed.  This is
         probably because you forgot to report the sizes of objects you
         have allocated, by calling scm_done_malloc or some such.  When
         the GC freed them, it subtracted their size from
         scm_mallocated, which underflowed.  */
      fprintf (stderr,
               "scm_gc_sweep: Byte count of allocated objects has underflowed.\n"
               "This is probably because the GC hasn't been correctly informed\n"
               "about object sizes\n");
      abort ();
    }
  scm_mallocated -= scm_i_deprecated_memory_return;



  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  scm_mark_all ();

  scm_gc_mark_time_taken += (scm_c_get_internal_run_time () - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  /*
    Moved this lock upwards so that we can alloc new heap at the end of a sweep.

    DOCME: why should the heap be locked anyway?
  */
  --scm_gc_heap_lock;

  scm_gc_sweep ();

  /*
    TODO: this hook should probably be moved to just before the mark,
    since that's where the sweep is finished in lazy sweeping.

    MDJ 030219 <djurfeldt@nada.kth.se>: No, probably not.  The
    original meaning implied at least two things: that it would be
    called when

    1. the freelist is re-initialized (no evaluation possible, though)

    and

    2. the heap is "fresh"
       (it is well-defined what data is used and what is not)

    Neither of these conditions would hold just before the mark phase.

    Of course, the lazy sweeping has muddled the distinction between
    scm_before_sweep_c_hook and scm_after_sweep_c_hook, but even if
    there were no difference, it would still be useful to have two
    distinct classes of hook functions since this can prevent some
    bad interference when several modules add gc hooks.
  */
  scm_c_hook_run (&scm_after_sweep_c_hook, 0);
  gc_end_stats ();

  --scm_gc_running_p;
  scm_i_thread_wake_up ();

  /*
    See above.
  */
  scm_i_pthread_mutex_unlock (&scm_i_sweep_mutex);
  scm_c_hook_run (&scm_after_gc_c_hook, 0);

  /*
    For debugging purposes, you could do
    scm_i_sweep_all_segments ("debug"), but then the remains of the
    cell aren't left to analyse.
  */
}

\f
/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _behind_ the last code in your function that
 * depends on the scheme object to exist.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function' in the code
 * below, because otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory.  To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
 * call to 'some_function'.  Note that this would not be necessary if str was
 * used anyway after the call to 'some_function'.
 *   char *chars = scm_i_string_chars (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

/* Remove any macro versions of these while defining the functions.
   Functions are always included in the library, for upward binary
   compatibility and in case combinations of GCC and non-GCC are used.  */
#undef scm_remember_upto_here_1
#undef scm_remember_upto_here_2

void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty.  Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty.  Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty.  Protects any number of objects from garbage collection. */
}

/*
  These crazy functions prevent garbage collection
  of arguments after the first argument by
  ensuring they remain live throughout the
  function because they are used in the last
  line of the code block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}

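/* Illustrative sketch (not built): using scm_return_first to keep a
   string alive until after its characters have been consumed.  The
   helper some_function is hypothetical, made up for the example. */
#if 0
static SCM
example_use_and_return (SCM result, SCM str)
{
  some_function (scm_i_string_chars (str));  /* hypothetical consumer */
  /* str stays live up to this call, since it is passed as an argument.  */
  return scm_return_first (result, str);
}
#endif
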

SCM
scm_permanent_object (SCM obj)
{
  SCM cell = scm_cons (obj, SCM_EOL);
  SCM_CRITICAL_SECTION_START;
  SCM_SETCDR (cell, scm_permobjs);
  scm_permobjs = cell;
  SCM_CRITICAL_SECTION_END;
  return obj;
}

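/* Illustrative sketch (not built): a C global that must never be
   collected, even when no Scheme object references it.  The variable
   name is made up for the example. */
#if 0
static SCM my_table;  /* hypothetical global */

static void
example_init (void)
{
  my_table = scm_permanent_object (scm_c_make_hash_table (31));
}
#endif
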

/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_gc_unprotect_object (OBJ).  Calls to scm_gc_protect/unprotect_object nest,
   i.e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more often
   than it has been protected before.  The function scm_protect_object returns
   OBJ.
*/

/* Implementation note: For every object X, there is a counter which
   scm_gc_protect_object (X) increments and scm_gc_unprotect_object (X) decrements.
*/


SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashq_create_handle_x (scm_protects, obj, scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  protected_obj_count++;

  SCM_CRITICAL_SECTION_END;

  return obj;
}


/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_CRITICAL_SECTION_START;

  if (scm_gc_running_p)
    {
      fprintf (stderr, "scm_unprotect_object called during GC.\n");
      abort ();
    }

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, count);
    }
  protected_obj_count--;

  SCM_CRITICAL_SECTION_END;

  return obj;
}

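/* Illustrative sketch (not built): the nesting protect/unprotect
   protocol around a callback that may trigger GC.  The callback name is
   hypothetical. */
#if 0
static void
example_with_protection (SCM obj)
{
  scm_gc_protect_object (obj);       /* count for obj becomes 1 */
  scm_gc_protect_object (obj);       /* count becomes 2; calls nest */
  run_hypothetical_callback (obj);   /* may cons, may collect */
  scm_gc_unprotect_object (obj);     /* count back to 1 */
  scm_gc_unprotect_object (obj);     /* count 0: protection removed */
}
#endif
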
void
scm_gc_register_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key,
                                      scm_from_int (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), scm_from_int (1)));

  SCM_CRITICAL_SECTION_END;
}

void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_from_ulong ((unsigned long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_CRITICAL_SECTION_START;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (scm_is_false (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), scm_from_int (1));
      if (scm_is_eq (count, scm_from_int (0)))
        scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
        SCM_SETCDR (handle, count);
    }

  SCM_CRITICAL_SECTION_END;
}

void
scm_gc_register_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_register_root (p);
}

void
scm_gc_unregister_roots (SCM *b, unsigned long n)
{
  SCM *p = b;
  for (; p < b + n; ++p)
    scm_gc_unregister_root (p);
}

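/* Illustrative sketch (not built): registering a heap-allocated array of
   SCM values as GC roots for the lifetime of some C structure.  The
   struct and its helpers are made up for the example. */
#if 0
struct example_ctx
{
  SCM slots[4];
};

static struct example_ctx *
example_ctx_new (void)
{
  struct example_ctx *ctx = scm_malloc (sizeof (*ctx));
  unsigned long i;
  for (i = 0; i < 4; i++)
    ctx->slots[i] = SCM_BOOL_F;
  scm_gc_register_roots (ctx->slots, 4);   /* slots now scanned during mark */
  return ctx;
}

static void
example_ctx_free (struct example_ctx *ctx)
{
  scm_gc_unregister_roots (ctx->slots, 4);
  free (ctx);
}
#endif
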
int scm_i_terminating;

\f


/*
  MOVE THIS FUNCTION.  IT DOES NOT HAVE ANYTHING TO DO WITH GC.
 */

/* Get an integer from an environment variable.  */
int
scm_getenv_int (const char *var, int def)
{
  char *end = 0;
  char *val = getenv (var);
  long res = def;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}

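/* Illustrative sketch (not built): how a tuning knob might be read with
   scm_getenv_int.  The variable name is an example, not a documented
   libguile setting. */
#if 0
static void
example_read_tuning (void)
{
  /* Falls back to 256 when EXAMPLE_GC_KNOB is unset or unparsable.  */
  int knob = scm_getenv_int ("EXAMPLE_GC_KNOB", 256);
  (void) knob;
}
#endif
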
void
scm_storage_prehistory ()
{
  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
}

scm_i_pthread_mutex_t scm_i_gc_admin_mutex = SCM_I_PTHREAD_MUTEX_INITIALIZER;

int
scm_init_storage ()
{
  size_t j;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_gc_init_freelist ();
  scm_gc_init_malloc ();

  j = SCM_HEAP_SEG_SIZE;


  /* Initialise the list of ports.  */
  scm_i_port_table = (scm_t_port **)
    malloc (sizeof (scm_t_port *) * scm_i_port_table_room);
  if (!scm_i_port_table)
    return 1;

#if 0
  /* We can't have a cleanup handler since we have no thread to run it
     in. */

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#endif

  scm_stand_in_procs = scm_c_make_hash_table (257);
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}

\f

SCM scm_after_gc_hook;

static SCM gc_async;

/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void * hook_data SCM_UNUSED,
               void *func_data SCM_UNUSED,
               void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector.  When running guile with cell access debugging and the
   * execution of the after-gc-hook is desired, then it is necessary to run
   * the hook explicitly from the user code.  This has the effect that from
   * the scheme level point of view it seems that garbage collection is
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one to one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (scm_debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  return NULL;
}

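/* Illustrative sketch (not built): registering an additional C-level
   hook on scm_after_gc_c_hook, the same way mark_gc_async is registered
   in scm_init_gc below.  The function body is an example only. */
#if 0
static void *
example_after_gc (void *hook_data SCM_UNUSED,
                  void *func_data SCM_UNUSED,
                  void *data SCM_UNUSED)
{
  fprintf (stderr, "gc completed (%lu so far)\n", scm_gc_times);
  return NULL;
}

static void
example_install_hook (void)
{
  scm_c_hook_add (&scm_after_gc_c_hook, example_after_gc, NULL, 0);
}
#endif
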
void
scm_init_gc ()
{
  scm_gc_init_mark ();

  scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  gc_async = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
                              gc_async_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}


void
scm_gc_sweep (void)
#define FUNC_NAME "scm_gc_sweep"
{
  scm_i_deprecated_memory_return = 0;

  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist);
  scm_i_gc_sweep_freelist_reset (&scm_i_master_freelist2);

  /*
    NOTHING HERE: LAZY SWEEPING!
  */
  scm_i_reset_segments ();

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  *SCM_FREELIST_LOC (scm_i_freelist) = SCM_EOL;
  *SCM_FREELIST_LOC (scm_i_freelist2) = SCM_EOL;

  /* Invalidate the freelists of other threads. */
  scm_i_thread_invalidate_freelists ();
}

#undef FUNC_NAME


/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/