*** empty log message ***
[bpt/guile.git] / libguile / gc.c
CommitLineData
22a52da1 1/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
a00c95d9 2 *
0f2d19dd
JB
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
a00c95d9 7 *
0f2d19dd
JB
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
a00c95d9 12 *
0f2d19dd
JB
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
82892bed
JB
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
0f2d19dd
JB
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
82892bed 40 * If you do not wish that, delete this exception notice. */
1bbd0b84 41
1bbd0b84 42
37ddcaf6
MD
43/* #define DEBUGINFO */
44
56495472
ML
45/* SECTION: This code is compiled once.
46 */
47
48#ifndef MARK_DEPENDENCIES
49
0f2d19dd
JB
50\f
51#include <stdio.h>
e6e2e95a 52#include <errno.h>
783e7774 53#include <string.h>
e6e2e95a 54
a0599745 55#include "libguile/_scm.h"
0a7a7445 56#include "libguile/eval.h"
a0599745
MD
57#include "libguile/stime.h"
58#include "libguile/stackchk.h"
59#include "libguile/struct.h"
a0599745
MD
60#include "libguile/smob.h"
61#include "libguile/unif.h"
62#include "libguile/async.h"
63#include "libguile/ports.h"
64#include "libguile/root.h"
65#include "libguile/strings.h"
66#include "libguile/vectors.h"
801cb5e7 67#include "libguile/weaks.h"
686765af 68#include "libguile/hashtab.h"
ecf470a2 69#include "libguile/tags.h"
a0599745
MD
70
71#include "libguile/validate.h"
1be6b49c 72#include "libguile/deprecation.h"
a0599745 73#include "libguile/gc.h"
fce59c93 74
bc9d9bb2 75#ifdef GUILE_DEBUG_MALLOC
a0599745 76#include "libguile/debug-malloc.h"
bc9d9bb2
MD
77#endif
78
0f2d19dd 79#ifdef HAVE_MALLOC_H
95b88819 80#include <malloc.h>
0f2d19dd
JB
81#endif
82
83#ifdef HAVE_UNISTD_H
95b88819 84#include <unistd.h>
0f2d19dd
JB
85#endif
86
1cc91f1b
JB
87#ifdef __STDC__
88#include <stdarg.h>
89#define var_start(x, y) va_start(x, y)
90#else
91#include <varargs.h>
92#define var_start(x, y) va_start(x)
93#endif
94
0f2d19dd 95\f
406c7d90 96
8c494e99
DH
97#define CELL_P(x) (SCM_ITAG3 (x) == scm_tc3_cons)
98
406c7d90
DH
99unsigned int scm_gc_running_p = 0;
100
101\f
102
103#if (SCM_DEBUG_CELL_ACCESSES == 1)
104
/* Smob tag used to mark cells that have been allocated but not yet
 * initialized (see allocated_mark below).  */
scm_t_bits scm_tc16_allocated;

/* Set this to != 0 if every cell that is accessed shall be checked:
 */
unsigned int scm_debug_cell_accesses_p = 1;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
static unsigned int debug_cells_gc_interval = 0;


/* If an allocated cell is detected during garbage collection, this means that
 * some code has just obtained the object but was preempted before the
 * initialization of the object was completed.  This means that some entries
 * of the allocated cell may already contain SCM objects.  Therefore,
 * allocated cells are scanned conservatively.  */
static SCM
allocated_mark (SCM allocated)
{
  scm_gc_mark_cell_conservatively (allocated);
  return SCM_BOOL_F;
}
128
129
406c7d90
DH
130/* Assert that the given object is a valid reference to a valid cell. This
131 * test involves to determine whether the object is a cell pointer, whether
132 * this pointer actually points into a heap segment and whether the cell
e81d98ec
DH
133 * pointed to is not a free cell. Further, additional garbage collections may
134 * get executed after a user defined number of cell accesses. This helps to
135 * find places in the C code where references are dropped for extremely short
136 * periods.
406c7d90
DH
137 */
138void
139scm_assert_cell_valid (SCM cell)
140{
61045190
DH
141 static unsigned int already_running = 0;
142
143 if (scm_debug_cell_accesses_p && !already_running)
406c7d90 144 {
61045190 145 already_running = 1; /* set to avoid recursion */
406c7d90 146
9d47a1e6 147 if (!scm_cellp (cell))
406c7d90 148 {
1be6b49c
ML
149 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lux\n",
150 (unsigned long) SCM_UNPACK (cell));
406c7d90
DH
151 abort ();
152 }
153 else if (!scm_gc_running_p)
154 {
155 /* Dirk::FIXME:: During garbage collection there occur references to
156 free cells. This is allright during conservative marking, but
157 should not happen otherwise (I think). The case of free cells
158 accessed during conservative marking is handled in function
159 scm_mark_locations. However, there still occur accesses to free
160 cells during gc. I don't understand why this happens. If it is
161 a bug and gets fixed, the following test should also work while
162 gc is running.
163 */
164 if (SCM_FREE_CELL_P (cell))
165 {
1be6b49c
ML
166 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lux\n",
167 (unsigned long) SCM_UNPACK (cell));
406c7d90
DH
168 abort ();
169 }
e81d98ec
DH
170
171 /* If desired, perform additional garbage collections after a user
172 * defined number of cell accesses.
173 */
174 if (debug_cells_gc_interval)
175 {
176 static unsigned int counter = 0;
eae33935 177
e81d98ec
DH
178 if (counter != 0)
179 {
180 --counter;
181 }
182 else
183 {
184 counter = debug_cells_gc_interval;
185 scm_igc ("scm_assert_cell_valid");
186 }
187 }
406c7d90 188 }
61045190 189 already_running = 0; /* re-enable */
406c7d90
DH
190 }
191}
192
193
194SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
195 (SCM flag),
1e6808ea 196 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
e81d98ec
DH
197 "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
198 "but no additional calls to garbage collection are issued.\n"
199 "If @var{flag} is a number, cell access checking is enabled,\n"
200 "with an additional garbage collection after the given\n"
201 "number of cell accesses.\n"
1e6808ea
MG
202 "This procedure only exists when the compile-time flag\n"
203 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
406c7d90
DH
204#define FUNC_NAME s_scm_set_debug_cell_accesses_x
205{
206 if (SCM_FALSEP (flag)) {
207 scm_debug_cell_accesses_p = 0;
208 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
e81d98ec
DH
209 debug_cells_gc_interval = 0;
210 scm_debug_cell_accesses_p = 1;
211 } else if (SCM_INUMP (flag)) {
212 long int f = SCM_INUM (flag);
213 if (f <= 0) SCM_OUT_OF_RANGE (1, flag);
214 debug_cells_gc_interval = f;
406c7d90
DH
215 scm_debug_cell_accesses_p = 1;
216 } else {
217 SCM_WRONG_TYPE_ARG (1, flag);
218 }
219 return SCM_UNSPECIFIED;
220}
221#undef FUNC_NAME
222
223#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
224
225\f
226
0f2d19dd 227/* {heap tuning parameters}
a00c95d9 228 *
0f2d19dd
JB
229 * These are parameters for controlling memory allocation. The heap
230 * is the area out of which scm_cons, and object headers are allocated.
231 *
232 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
233 * 64 bit machine. The units of the _SIZE parameters are bytes.
234 * Cons pairs and object headers occupy one heap cell.
235 *
236 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
237 * allocated initially the heap will grow by half its current size
238 * each subsequent time more heap is needed.
239 *
240 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
241 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
1be6b49c 242 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type size_t. This code
0f2d19dd 243 * is in scm_init_storage() and alloc_some_heap() in sys.c
a00c95d9 244 *
0f2d19dd
JB
245 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
246 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
247 *
248 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
249 * is needed.
250 *
251 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
a00c95d9 252 * trigger a GC.
6064dcc6
MV
253 *
254 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
255 * reclaimed by a GC triggered by must_malloc. If less than this is
256 * reclaimed, the trigger threshold is raised. [I don't know what a
257 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
a00c95d9 258 * work around a oscillation that caused almost constant GC.]
0f2d19dd
JB
259 */
260
8fef55a8
MD
261/*
262 * Heap size 45000 and 40% min yield gives quick startup and no extra
263 * heap allocation. Having higher values on min yield may lead to
264 * large heaps, especially if code behaviour is varying its
265 * maximum consumption between different freelists.
266 */
d6884e63
ML
267
268#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
269#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
270#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
1be6b49c 271size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
d6884e63 272 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
aeacfc8f 273int scm_default_min_yield_1 = 40;
4c48ba06 274
d6884e63 275#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
1be6b49c 276size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
d6884e63 277 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
4c48ba06
MD
278/* The following value may seem large, but note that if we get to GC at
279 * all, this means that we have a numerically intensive application
280 */
aeacfc8f 281int scm_default_min_yield_2 = 40;
4c48ba06 282
1be6b49c 283size_t scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
4c48ba06 284
d6884e63 285#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
0f2d19dd
JB
286#ifdef _QC
287# define SCM_HEAP_SEG_SIZE 32768L
288#else
289# ifdef sequent
4c48ba06 290# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
0f2d19dd 291# else
4c48ba06 292# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
0f2d19dd
JB
293# endif
294#endif
4c48ba06 295/* Make heap grow with factor 1.5 */
4a4c9785 296#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
0f2d19dd 297#define SCM_INIT_MALLOC_LIMIT 100000
6064dcc6 298#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
0f2d19dd 299
d6884e63
ML
300/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
301 aligned inner bounds for allocated storage */
0f2d19dd
JB
302
303#ifdef PROT386
304/*in 386 protected mode we must only adjust the offset */
a00c95d9
ML
305# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
306# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
0f2d19dd
JB
307#else
308# ifdef _UNICOS
c014a02e
ML
309# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
310# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
0f2d19dd 311# else
c014a02e
ML
312# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
313# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
0f2d19dd
JB
314# endif /* UNICOS */
315#endif /* PROT386 */
316
ecf470a2
ML
317#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
318
d6884e63
ML
319#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
320#define CLUSTER_SIZE_IN_BYTES(freelist) \
321 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
0f2d19dd
JB
322
323\f
/* scm_freelists
 *
 * Per-span bookkeeping for the cell freelists.  Two instances exist:
 * one for single cells and one for double cells.
 */

typedef struct scm_t_freelist {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  unsigned int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  unsigned long collected;
  /* number of collected cells during penultimate GC */
  unsigned long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  unsigned long heap_size;
} scm_t_freelist;

/* single-cell freelist and its master bookkeeping record */
SCM scm_freelist = SCM_EOL;
scm_t_freelist scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
};
/* double-cell freelist and its master bookkeeping record */
SCM scm_freelist2 = SCM_EOL;
scm_t_freelist scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
};
0f2d19dd
JB
370
/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
/* run-time stamps taken at the start of gc and of the sweep phase */
static unsigned long t_before_gc;
static unsigned long t_before_sweep;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_sweep_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
/* lifetime accumulators, kept as doubles to avoid overflow */
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;

/* symbols used as keys in the alist returned by gc-stats */
SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
0f2d19dd 427
/* Descriptor for one contiguous heap segment.  */
typedef struct scm_t_heap_seg_data
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_t_freelist *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_t_heap_seg_data;
0f2d19dd
JB
440
441
442
static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);

/* whether a failed heap allocation should abort or just return */
typedef enum { return_on_error, abort_on_error } policy_on_error;
static void alloc_some_heap (scm_t_freelist *, policy_on_error);


/* total cell count over both master freelists' segments */
#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

/* mark-bit-vector space is grown in chunks of this many cards */
#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))

/* mark space allocation */

/* One chunk of mark-bit-vector storage; chunks form a singly linked list
   rooted at mark_space_head.  */
typedef struct scm_t_mark_space
{
  scm_t_c_bvec_limb *bvec_space;
  struct scm_t_mark_space *next;
} scm_t_mark_space;

static scm_t_mark_space *current_mark_space;
static scm_t_mark_space **mark_space_ptr;
static ptrdiff_t current_mark_space_offset;
static scm_t_mark_space *mark_space_head;
d6884e63 469
92c2555f 470static scm_t_c_bvec_limb *
d6884e63 471get_bvec ()
db4b4ca6 472#define FUNC_NAME "get_bvec"
d6884e63 473{
92c2555f 474 scm_t_c_bvec_limb *res;
d6884e63
ML
475
476 if (!current_mark_space)
477 {
92c2555f 478 SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
d6884e63 479 if (!current_mark_space)
db4b4ca6 480 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
481
482 current_mark_space->bvec_space = NULL;
483 current_mark_space->next = NULL;
484
485 *mark_space_ptr = current_mark_space;
486 mark_space_ptr = &(current_mark_space->next);
487
488 return get_bvec ();
489 }
490
491 if (!(current_mark_space->bvec_space))
492 {
493 SCM_SYSCALL (current_mark_space->bvec_space =
92c2555f 494 (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
d6884e63 495 if (!(current_mark_space->bvec_space))
db4b4ca6 496 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
497
498 current_mark_space_offset = 0;
499
500 return get_bvec ();
501 }
502
503 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
504 {
505 current_mark_space = NULL;
506
507 return get_bvec ();
508 }
509
510 res = current_mark_space->bvec_space + current_mark_space_offset;
511 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
512
513 return res;
514}
db4b4ca6
DH
515#undef FUNC_NAME
516
d6884e63
ML
517
518static void
519clear_mark_space ()
520{
92c2555f 521 scm_t_mark_space *ms;
d6884e63
ML
522
523 for (ms = mark_space_head; ms; ms = ms->next)
524 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
525}
526
527
0f2d19dd 528\f
cf2d30f6
JB
529/* Debugging functions. */
530
bb2c57fa 531#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6 532
eae33935
TTN
533static long int heap_segment (SCM obj); /* forw decl: non-debugging func */
534
8ded62a3 535static void
92c2555f 536map_free_list (scm_t_freelist *master, SCM freelist)
8ded62a3 537{
c014a02e 538 long last_seg = -1, count = 0;
8ded62a3 539 SCM f;
a00c95d9 540
3f5d82cd 541 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
8ded62a3 542 {
592996c9 543 long int this_seg = heap_segment (f);
8ded62a3 544
592996c9
DH
545 if (this_seg == -1)
546 {
eae33935 547 fprintf (stderr,
592996c9 548 "map_free_list: can't find segment containing cell %lux\n",
eae33935 549 (unsigned long int) SCM_UNPACK (f));
592996c9
DH
550 abort ();
551 }
552 else if (this_seg != last_seg)
8ded62a3
MD
553 {
554 if (last_seg != -1)
1be6b49c
ML
555 fprintf (stderr, " %5ld %d-cells in segment %ld\n",
556 (long) count, master->span, (long) last_seg);
8ded62a3
MD
557 last_seg = this_seg;
558 count = 0;
559 }
560 count++;
561 }
562 if (last_seg != -1)
1be6b49c
ML
563 fprintf (stderr, " %5ld %d-cells in segment %ld\n",
564 (long) count, master->span, (long) last_seg);
8ded62a3 565}
cf2d30f6 566
a00c95d9 567SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c 568 (),
5352393c
MG
569 "Print debugging information about the free-list.\n"
570 "@code{map-free-list} is only included in\n"
571 "@code{--enable-guile-debug} builds of Guile.")
acb0a19c
MD
572#define FUNC_NAME s_scm_map_free_list
573{
592996c9
DH
574 size_t i;
575
1be6b49c
ML
576 fprintf (stderr, "%ld segments total (%d:%ld",
577 (long) scm_n_heap_segs,
4c48ba06 578 scm_heap_table[0].span,
1be6b49c 579 (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));
592996c9
DH
580
581 for (i = 1; i != scm_n_heap_segs; i++)
1be6b49c 582 fprintf (stderr, ", %d:%ld",
4c48ba06 583 scm_heap_table[i].span,
1be6b49c 584 (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
4c48ba06 585 fprintf (stderr, ")\n");
8ded62a3
MD
586 map_free_list (&scm_master_freelist, scm_freelist);
587 map_free_list (&scm_master_freelist2, scm_freelist2);
cf2d30f6
JB
588 fflush (stderr);
589
590 return SCM_UNSPECIFIED;
591}
1bbd0b84 592#undef FUNC_NAME
cf2d30f6 593
c014a02e
ML
594static long last_cluster;
595static long last_size;
4c48ba06 596
c014a02e
ML
597static long
598free_list_length (char *title, long i, SCM freelist)
5384bc5b
MD
599{
600 SCM ls;
c014a02e 601 long n = 0;
3f5d82cd
DH
602 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
603 if (SCM_FREE_CELL_P (ls))
5384bc5b
MD
604 ++n;
605 else
606 {
1be6b49c 607 fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n);
5384bc5b
MD
608 abort ();
609 }
4c48ba06
MD
610 if (n != last_size)
611 {
612 if (i > 0)
613 {
614 if (last_cluster == i - 1)
1be6b49c 615 fprintf (stderr, "\t%ld\n", (long) last_size);
4c48ba06 616 else
1be6b49c 617 fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
4c48ba06
MD
618 }
619 if (i >= 0)
1be6b49c 620 fprintf (stderr, "%s %ld", title, (long) i);
4c48ba06 621 else
1be6b49c 622 fprintf (stderr, "%s\t%ld\n", title, (long) n);
4c48ba06
MD
623 last_cluster = i;
624 last_size = n;
625 }
5384bc5b
MD
626 return n;
627}
628
629static void
92c2555f 630free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
5384bc5b
MD
631{
632 SCM clusters;
c014a02e 633 long i = 0, len, n = 0;
5384bc5b
MD
634 fprintf (stderr, "%s\n\n", title);
635 n += free_list_length ("free list", -1, freelist);
636 for (clusters = master->clusters;
637 SCM_NNULLP (clusters);
638 clusters = SCM_CDR (clusters))
4c48ba06
MD
639 {
640 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
641 n += len;
642 }
643 if (last_cluster == i - 1)
1be6b49c 644 fprintf (stderr, "\t%ld\n", (long) last_size);
4c48ba06 645 else
1be6b49c
ML
646 fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
647 fprintf (stderr, "\ntotal %ld objects\n\n", (long) n);
5384bc5b
MD
648}
649
a00c95d9 650SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b 651 (),
5352393c
MG
652 "Print debugging information about the free-list.\n"
653 "@code{free-list-length} is only included in\n"
654 "@code{--enable-guile-debug} builds of Guile.")
5384bc5b
MD
655#define FUNC_NAME s_scm_free_list_length
656{
b37fe1c5
MD
657 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
658 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 659 return SCM_UNSPECIFIED;
5384bc5b
MD
660}
661#undef FUNC_NAME
662
eae33935 663#endif /* defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) */
bb2c57fa
MD
664
665#ifdef GUILE_DEBUG_FREELIST
cf2d30f6 666
d3dd80ab
MG
667/* Non-zero if freelist debugging is in effect. Set this via
668 `gc-set-debug-check-freelist!'. */
669static int scm_debug_check_freelist = 0;
670
cf2d30f6 671/* Number of calls to SCM_NEWCELL since startup. */
c014a02e
ML
672static unsigned long scm_newcell_count;
673static unsigned long scm_newcell2_count;
cf2d30f6
JB
674
675/* Search freelist for anything that isn't marked as a free cell.
676 Abort if we find something. */
8ded62a3
MD
677static void
678scm_check_freelist (SCM freelist)
679{
680 SCM f;
c014a02e 681 long i = 0;
8ded62a3 682
3f5d82cd
DH
683 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
684 if (!SCM_FREE_CELL_P (f))
8ded62a3 685 {
1be6b49c
ML
686 fprintf (stderr, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
687 (long) scm_newcell_count, (long) i);
8ded62a3
MD
688 abort ();
689 }
690}
cf2d30f6 691
a00c95d9 692SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 693 (SCM flag),
1e6808ea
MG
694 "If @var{flag} is @code{#t}, check the freelist for consistency\n"
695 "on each cell allocation. This procedure only exists when the\n"
696 "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
1bbd0b84 697#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 698{
d6884e63
ML
699 /* [cmm] I did a double-take when I read this code the first time.
700 well, FWIW. */
945fec60 701 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
702 return SCM_UNSPECIFIED;
703}
1bbd0b84 704#undef FUNC_NAME
25748c78
GB
705
706
4a4c9785
MD
707SCM
708scm_debug_newcell (void)
709{
710 SCM new;
711
712 scm_newcell_count++;
713 if (scm_debug_check_freelist)
714 {
8ded62a3 715 scm_check_freelist (scm_freelist);
4a4c9785
MD
716 scm_gc();
717 }
718
719 /* The rest of this is supposed to be identical to the SCM_NEWCELL
720 macro. */
3f5d82cd 721 if (SCM_NULLP (scm_freelist))
7c33806a
DH
722 {
723 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
724 SCM_GC_SET_ALLOCATED (new);
725 }
4a4c9785
MD
726 else
727 {
728 new = scm_freelist;
3f5d82cd 729 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
7c33806a 730 SCM_GC_SET_ALLOCATED (new);
4a4c9785
MD
731 }
732
733 return new;
734}
735
736SCM
737scm_debug_newcell2 (void)
738{
739 SCM new;
740
741 scm_newcell2_count++;
742 if (scm_debug_check_freelist)
743 {
8ded62a3 744 scm_check_freelist (scm_freelist2);
4a4c9785
MD
745 scm_gc ();
746 }
747
748 /* The rest of this is supposed to be identical to the SCM_NEWCELL
749 macro. */
3f5d82cd 750 if (SCM_NULLP (scm_freelist2))
7c33806a
DH
751 {
752 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
753 SCM_GC_SET_ALLOCATED (new);
754 }
4a4c9785
MD
755 else
756 {
757 new = scm_freelist2;
3f5d82cd 758 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
7c33806a 759 SCM_GC_SET_ALLOCATED (new);
4a4c9785
MD
760 }
761
762 return new;
763}
764
fca7547b 765#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
766
767\f
0f2d19dd 768
c014a02e 769static unsigned long
92c2555f 770master_cells_allocated (scm_t_freelist *master)
b37fe1c5 771{
d6884e63 772 /* the '- 1' below is to ignore the cluster spine cells. */
c014a02e 773 long objects = master->clusters_allocated * (master->cluster_size - 1);
b37fe1c5
MD
774 if (SCM_NULLP (master->clusters))
775 objects -= master->left_to_collect;
776 return master->span * objects;
777}
778
c014a02e 779static unsigned long
b37fe1c5
MD
780freelist_length (SCM freelist)
781{
c014a02e 782 long n;
3f5d82cd 783 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
b37fe1c5
MD
784 ++n;
785 return n;
786}
787
c014a02e 788static unsigned long
b37fe1c5
MD
789compute_cells_allocated ()
790{
791 return (scm_cells_allocated
792 + master_cells_allocated (&scm_master_freelist)
793 + master_cells_allocated (&scm_master_freelist2)
794 - scm_master_freelist.span * freelist_length (scm_freelist)
795 - scm_master_freelist2.span * freelist_length (scm_freelist2));
796}
b37fe1c5 797
0f2d19dd
JB
798/* {Scheme Interface to GC}
799 */
800
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
	    (),
	    "Return an association list of statistics about Guile's current\n"
	    "use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  long i;
  long n;
  SCM heap_segs;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  /* Inhibit gc while building the segment list below; consing may still
   * grow the heap, which is why the retry loop re-checks scm_n_heap_segs.  */
  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
				    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
			  heap_segs);
  /* If consing added a heap segment, the list is stale -- rebuild it.  */
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
		       scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
		       scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
		       scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
		       scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
		       scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
		       scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
		       scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
		       scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
		       scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
		       scm_cons (sym_heap_segments, heap_segs),
		       SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
0f2d19dd
JB
868
869
c9b0d4b0 870static void
e81d98ec 871gc_start_stats (const char *what SCM_UNUSED)
0f2d19dd 872{
c9b0d4b0
ML
873 t_before_gc = scm_c_get_internal_run_time ();
874 scm_gc_cells_swept = 0;
b37fe1c5 875 scm_gc_cells_collected = 0;
37ddcaf6 876 scm_gc_yield_1 = scm_gc_yield;
8b0d194f
MD
877 scm_gc_yield = (scm_cells_allocated
878 + master_cells_allocated (&scm_master_freelist)
879 + master_cells_allocated (&scm_master_freelist2));
0f2d19dd
JB
880 scm_gc_malloc_collected = 0;
881 scm_gc_ports_collected = 0;
882}
883
939794ce 884
c9b0d4b0
ML
885static void
886gc_end_stats ()
0f2d19dd 887{
c9b0d4b0
ML
888 unsigned long t = scm_c_get_internal_run_time ();
889 scm_gc_time_taken += (t - t_before_gc);
890 scm_gc_sweep_time_taken += (t - t_before_sweep);
891 ++scm_gc_times;
892
893 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
894 scm_gc_cells_swept_acc += scm_gc_cells_swept;
0f2d19dd
JB
895}
896
897
a00c95d9 898SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 899 (SCM obj),
b380b885
MD
900 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
901 "returned by this function for @var{obj}")
1bbd0b84 902#define FUNC_NAME s_scm_object_address
0f2d19dd 903{
c014a02e 904 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 905}
1bbd0b84 906#undef FUNC_NAME
0f2d19dd
JB
907
908
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all of SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  /* Run a full collection with interrupts deferred, so no async handler
   * can allocate or run Scheme code while the collector works. */
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
0f2d19dd
JB
921
922
923\f
924/* {C Interface For When GC is Triggered}
925 */
926
b37fe1c5 927static void
92c2555f 928adjust_min_yield (scm_t_freelist *freelist)
b37fe1c5 929{
8fef55a8 930 /* min yield is adjusted upwards so that next predicted total yield
bda1446c 931 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
932 * `min_yield_fraction' of total heap size. Note, however, that
933 * the absolute value of min_yield will correspond to `collected'
bda1446c 934 * on one master (the one which currently is triggering GC).
b37fe1c5 935 *
bda1446c
MD
936 * The reason why we look at total yield instead of cells collected
937 * on one list is that we want to take other freelists into account.
938 * On this freelist, we know that (local) yield = collected cells,
939 * but that's probably not the case on the other lists.
b37fe1c5
MD
940 *
941 * (We might consider computing a better prediction, for example
942 * by computing an average over multiple GC:s.)
943 */
8fef55a8 944 if (freelist->min_yield_fraction)
b37fe1c5 945 {
37ddcaf6 946 /* Pick largest of last two yields. */
1be6b49c 947 long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
8fef55a8 948 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5 949#ifdef DEBUGINFO
1be6b49c
ML
950 fprintf (stderr, " after GC = %lu, delta = %ld\n",
951 (long) scm_cells_allocated,
952 (long) delta);
b37fe1c5
MD
953#endif
954 if (delta > 0)
8fef55a8 955 freelist->min_yield += delta;
b37fe1c5
MD
956 }
957}
958
b6efc951 959
4a4c9785 960/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
961 * common while the freelist will be individual for each thread.
962 */
4a4c9785
MD
963
964SCM
92c2555f 965scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
4a4c9785
MD
966{
967 SCM cell;
968 ++scm_ints_disabled;
4c48ba06
MD
969 do
970 {
c7387918 971 if (SCM_NULLP (master->clusters))
4c48ba06 972 {
150c200b 973 if (master->grow_heap_p || scm_block_gc)
4c48ba06 974 {
b6efc951
DH
975 /* In order to reduce gc frequency, try to allocate a new heap
976 * segment first, even if gc might find some free cells. If we
977 * can't obtain a new heap segment, we will try gc later.
978 */
4c48ba06 979 master->grow_heap_p = 0;
b6efc951 980 alloc_some_heap (master, return_on_error);
4c48ba06 981 }
b6efc951 982 if (SCM_NULLP (master->clusters))
b37fe1c5 983 {
b6efc951
DH
984 /* The heap was not grown, either because it wasn't scheduled to
985 * grow, or because there was not enough memory available. In
986 * both cases we have to try gc to get some free cells.
987 */
37ddcaf6 988#ifdef DEBUGINFO
1be6b49c
ML
989 fprintf (stderr, "allocated = %lu, ",
990 (long) (scm_cells_allocated
37ddcaf6 991 + master_cells_allocated (&scm_master_freelist)
c014a02e 992 + master_cells_allocated (&scm_master_freelist2)));
37ddcaf6 993#endif
b37fe1c5 994 scm_igc ("cells");
8fef55a8 995 adjust_min_yield (master);
c7387918
DH
996 if (SCM_NULLP (master->clusters))
997 {
b6efc951
DH
998 /* gc could not free any cells. Now, we _must_ allocate a
999 * new heap segment, because there is no other possibility
1000 * to provide a new cell for the caller.
1001 */
1002 alloc_some_heap (master, abort_on_error);
c7387918 1003 }
b37fe1c5 1004 }
4c48ba06
MD
1005 }
1006 cell = SCM_CAR (master->clusters);
1007 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 1008 ++master->clusters_allocated;
4c48ba06
MD
1009 }
1010 while (SCM_NULLP (cell));
d6884e63
ML
1011
1012#ifdef GUILE_DEBUG_FREELIST
1013 scm_check_freelist (cell);
1014#endif
1015
4a4c9785 1016 --scm_ints_disabled;
3f5d82cd 1017 *freelist = SCM_FREE_CELL_CDR (cell);
4a4c9785
MD
1018 return cell;
1019}
1020
b6efc951 1021
4c48ba06
MD
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

/* Reserve a whole cluster from MASTER and return its head cell with
 * the remainder of the cluster chained on its cdr.
 * (Fixed: the function was declared `void' yet ended with
 * `return cell;' — the return type must be SCM.) */
SCM
scm_alloc_cluster (scm_t_freelist *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
1037
801cb5e7 1038
92c2555f
MV
/* C-level hook chains run by scm_igc at the corresponding points of the
 * collection cycle (before GC, before mark, before/after sweep, after GC). */
scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
801cb5e7 1044
b6efc951 1045
/* Run one full garbage collection.  WHAT is a short tag naming the
 * trigger (e.g. "call", "cells"); it is passed to gc_start_stats.
 * Sequence: run the before-gc hook, enter the critical section, mark
 * all roots (C stacks, protects, registered roots, subr table), then
 * sweep, updating statistics and running the phase hooks throughout. */
void
scm_igc (const char *what)
{
  long j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
  /* During the critical section, only the current thread may run. */
  SCM_CRITICAL_SECTION_START;

  /* fprintf (stderr, "gc: %s\n", what); */

  /* Bail out if the stack base is not yet known or GC is blocked. */
  if (!scm_stack_base || scm_block_gc)
    {
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    long x;
    long bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Mark objects on the C stack. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      ( (size_t) (sizeof (SCM_STACKITEM) - 1 +
                                  sizeof scm_save_regs_gc_mark)
                        / sizeof (SCM_STACKITEM)));

  {
    unsigned long stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* Mark the statically protected objects. */
  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* mark the registered roots */
  {
    size_t i;
    for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) {
      SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
      for (; !SCM_NULLP (l); l = SCM_CDR (l)) {
        /* Each entry's car holds the address of a root SCM location. */
        SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
        scm_gc_mark (*p);
      }
    }
  }

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  /* Mark phase complete; record its duration before sweeping. */
  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

  SCM_CRITICAL_SECTION_END;
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}
1166
1167\f
939794ce 1168
a00c95d9 1169/* {Mark/Sweep}
0f2d19dd
JB
1170 */
1171
56495472
ML
1172#define MARK scm_gc_mark
1173#define FNAME "scm_gc_mark"
0f2d19dd 1174
56495472 1175#endif /*!MARK_DEPENDENCIES*/
0f2d19dd
JB
1176
1177/* Mark an object precisely.
1178 */
/* NOTE: this body is compiled twice via self-#include of gc.c — once as
 * scm_gc_mark (MARK/FNAME defined above) and once, with
 * MARK_DEPENDENCIES defined, as scm_gc_mark_dependencies, where RECURSE
 * skips self-cycles.  Any change here must be valid in both modes. */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;
  scm_t_bits cell_type;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

/* A simple hack for debugging.  Chose the second branch to get a
   meaningful backtrace for crashes inside the GC.
*/
#if 1
#define goto_gc_mark_loop goto gc_mark_loop
#define goto_gc_mark_nimp goto gc_mark_nimp
#else
#define goto_gc_mark_loop RECURSE(ptr); return
#define goto_gc_mark_nimp RECURSE(ptr); return
#endif

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
  /* We are in debug mode.  Check the ptr exhaustively. */
  if (!scm_cellp (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
  /* In non-debug mode, do at least some cheap testing. */
  if (!CELL_P (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif

#ifndef MARK_DEPENDENCIES

  /* Already-marked cells terminate the traversal of shared structure. */
  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  /* Dispatch on the 7-bit type tag; each case either finishes or
   * tail-iterates by reassigning ptr and jumping back to the loop. */
  cell_type = SCM_GC_CELL_TYPE (ptr);
  switch (SCM_ITAG7 (cell_type))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto_gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;
    case scm_tc7_pws:
      RECURSE (SCM_SETTER (ptr));
      ptr = SCM_PROCEDURE (ptr);
      goto_gc_mark_loop;
    case scm_tcs_struct:
      {
        /* XXX - use less explicit code. */
        scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_struct;
        scm_t_bits * vtable_data = (scm_t_bits *) word0;
        SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
        long len = SCM_SYMBOL_LENGTH (layout);
        char * fields_desc = SCM_SYMBOL_CHARS (layout);
        scm_t_bits * struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);

        if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
          {
            RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
            RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
          }
        if (len)
          {
            long x;

            /* Mark each 'p' (protected/pointer) field per the layout. */
            for (x = 0; x < len - 2; x += 2, ++struct_data)
              if (fields_desc[x] == 'p')
                RECURSE (SCM_PACK (*struct_data));
            if (fields_desc[x] == 'p')
              {
                if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                  for (x = *struct_data++; x; --x, ++struct_data)
                    RECURSE (SCM_PACK (*struct_data));
                else
                  RECURSE (SCM_PACK (*struct_data));
              }
          }
        /* mark vtable */
        ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
        goto_gc_mark_loop;
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_ENV (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_ENV (ptr);
      goto_gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto_gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
        size_t i = SCM_CCLO_LENGTH (ptr);
        size_t j;
        for (j = 1; j != i; ++j)
          {
            SCM obj = SCM_CCLO_REF (ptr, j);
            if (!SCM_IMP (obj))
              RECURSE (obj);
          }
        ptr = SCM_CCLO_REF (ptr, 0);
        goto_gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    /* Uniform vectors contain no SCM objects — nothing to trace. */
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_wvect:
      /* Chain weak vectors for post-mark processing. */
      SCM_SET_WVECT_GC_CHAIN (ptr, scm_weak_vectors);
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          long x;
          long len;
          int weak_keys;
          int weak_values;

          len = SCM_VECTOR_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    RECURSE (SCM_CAR (kvpair));
                  if (!weak_values)
                    RECURSE (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                RECURSE (alist);
            }
        }
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto_gc_mark_loop;
    case scm_tc7_variable:
      ptr = SCM_CELL_OBJECT_1 (ptr);
      goto_gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
      if (!(i < scm_numptob))
        SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
      if (SCM_PTAB_ENTRY(ptr))
        RECURSE (SCM_FILENAME (ptr));
      /* Delegate to the port type's own mark function, if any. */
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto_gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* We have detected a free cell.  This can happen if non-object data
           * on the C stack points into guile's heap and is scanned during
           * conservative marking.  */
#if (SCM_DEBUG_CELL_ACCESSES == 0)
          /* If cell debugging is disabled, there is a second situation in
           * which a free cell can be encountered, namely if with preemptive
           * threading one thread has just obtained a fresh cell and was
           * preempted before the cell initialization was completed.  In this
           * case, some entries of the cell may already contain objects.
           * Thus, if cell debugging is disabled, free cells are scanned
           * conservatively.  */
          scm_gc_mark_cell_conservatively (ptr);
#else /* SCM_DEBUG_CELL_ACCESSES == 1 */
          /* With cell debugging enabled, a freshly obtained but not fully
           * initialized cell is guaranteed to be of type scm_tc16_allocated.
           * Thus, no conservative scanning for free cells is necessary, but
           * instead cells of type scm_tc16_allocated have to be scanned
           * conservatively.  This is done in the mark function of the
           * scm_tc16_allocated smob type.  */
#endif
          break;
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          /* Numeric smobs hold no SCM references. */
          break;
        default:
          i = SCM_SMOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
          if (!(i < scm_numsmob))
            SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto_gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME
0f2d19dd 1482
56495472
ML
1483#ifndef MARK_DEPENDENCIES
1484
1485#undef MARK
56495472
ML
1486#undef FNAME
1487
1488/* And here we define `scm_gc_mark_dependencies', by including this
1489 * same file in itself.
1490 */
1491#define MARK scm_gc_mark_dependencies
1492#define FNAME "scm_gc_mark_dependencies"
1493#define MARK_DEPENDENCIES
1494#include "gc.c"
1495#undef MARK_DEPENDENCIES
1496#undef MARK
56495472
ML
1497#undef FNAME
1498
0f2d19dd 1499
592996c9
DH
1500/* Determine whether the given value does actually represent a cell in some
1501 * heap segment. If this is the case, the number of the heap segment is
1502 * returned. Otherwise, -1 is returned. Binary search is used in order to
1503 * determine the heap segment that contains the cell.*/
1504/* FIXME: To be used within scm_gc_mark_cell_conservatively,
1505 * scm_mark_locations and scm_cellp this function should be an inline
1506 * function. */
static long int
heap_segment (SCM obj)
{
  /* Non-cell-aligned bit patterns can never be heap cells. */
  if (!CELL_P (obj))
    return -1;
  else
    {
      SCM_CELLPTR ptr = SCM2PTR (obj);
      unsigned long int i = 0;
      unsigned long int j = scm_n_heap_segs - 1;

      /* Outside the overall range covered by all segments? */
      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
        return -1;
      else if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
        return -1;
      else
        {
          /* Binary search; invariant: ptr >= bounds[0] of segment i and
           * ptr < bounds[1] of segment j.  A pointer falling in the gap
           * between two segments yields -1. */
          while (i < j)
            {
              if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[1]))
                {
                  break;
                }
              else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                {
                  i = j;
                  break;
                }
              else
                {
                  unsigned long int k = (i + j) / 2;

                  if (k == i)
                    return -1;
                  else if (SCM_PTR_LT (ptr, scm_heap_table[k].bounds[1]))
                    {
                      j = k;
                      ++i;
                      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
                        return -1;
                    }
                  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                    {
                      i = k;
                      --j;
                      if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
                        return -1;
                    }
                }
            }

          /* Reject pointers that cannot be cells of this segment: a
           * non-double-aligned address in a double-cell segment, or an
           * address inside a card header. */
          if (!DOUBLECELL_ALIGNED_P (obj) && scm_heap_table[i].span == 2)
            return -1;
          else if (SCM_GC_IN_CARD_HEADERP (ptr))
            return -1;
          else
            return i;
        }
    }
}
1567
1568
592996c9
DH
1569/* Mark the entries of a cell conservatively. The given cell is known to be
1570 * on the heap. Still we have to determine its heap segment in order to
1571 * figure out whether it is a single or a double cell. Then, each of the cell
1572 * elements itself is checked and potentially marked. */
1573void
1574scm_gc_mark_cell_conservatively (SCM cell)
2e11a577 1575{
592996c9
DH
1576 unsigned long int cell_segment = heap_segment (cell);
1577 unsigned int span = scm_heap_table[cell_segment].span;
1578 unsigned int i;
1a548472 1579
592996c9
DH
1580 for (i = 1; i != span * 2; ++i)
1581 {
1582 SCM obj = SCM_CELL_OBJECT (cell, i);
1583 long int obj_segment = heap_segment (obj);
1584 if (obj_segment >= 0)
1585 scm_gc_mark (obj);
1586 }
1587}
61045190 1588
592996c9
DH
1589
1590/* Mark a region conservatively */
1591void
1592scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
1593{
1594 unsigned long m;
1595
1596 for (m = 0; m < n; ++m)
1597 {
1598 SCM obj = * (SCM *) &x[m];
1599 long int segment = heap_segment (obj);
1600 if (segment >= 0)
1601 scm_gc_mark (obj);
1a548472 1602 }
592996c9 1603}
2e11a577 1604
592996c9
DH
1605
1606/* The function scm_cellp determines whether an SCM value can be regarded as a
1607 * pointer to a cell on the heap.
1608 */
1609int
1610scm_cellp (SCM value)
1611{
1612 long int segment = heap_segment (value);
1613 return (segment >= 0);
2e11a577
MD
1614}
1615
1616
4c48ba06 1617static void
92c2555f 1618gc_sweep_freelist_start (scm_t_freelist *freelist)
4c48ba06
MD
1619{
1620 freelist->cells = SCM_EOL;
1621 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1622 freelist->clusters_allocated = 0;
4c48ba06
MD
1623 freelist->clusters = SCM_EOL;
1624 freelist->clustertail = &freelist->clusters;
1811ebce 1625 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1626 freelist->collected = 0;
1627}
1628
1629static void
92c2555f 1630gc_sweep_freelist_finish (scm_t_freelist *freelist)
4c48ba06 1631{
c014a02e 1632 long collected;
4c48ba06 1633 *freelist->clustertail = freelist->cells;
3f5d82cd 1634 if (!SCM_NULLP (freelist->cells))
4c48ba06
MD
1635 {
1636 SCM c = freelist->cells;
22a52da1
DH
1637 SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
1638 SCM_SET_CELL_WORD_1 (c, SCM_EOL);
4c48ba06
MD
1639 freelist->collected +=
1640 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1641 }
b37fe1c5 1642 scm_gc_cells_collected += freelist->collected;
a00c95d9 1643
8fef55a8 1644 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1645 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1646 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1647 * size. This means that a too low yield is compensated by more
1648 * heap on the list which is currently doing most work, which is
1649 * just what we want.
1650 */
1811ebce 1651 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1652 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06 1653}
0f2d19dd 1654
d6884e63
ML
/* Advance PTR to the next data cell of the given SPAN:  bump it by one
 * cell, and if that lands inside a card header, skip past the header to
 * the card's first data cell. */
#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
             CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
             : nxt__); \
  } while (0)
1662
a00c95d9 1663void
0f2d19dd 1664scm_gc_sweep ()
acf4331f 1665#define FUNC_NAME "scm_gc_sweep"
0f2d19dd
JB
1666{
1667 register SCM_CELLPTR ptr;
0f2d19dd 1668 register SCM nfreelist;
92c2555f 1669 register scm_t_freelist *freelist;
c014a02e 1670 register unsigned long m;
0f2d19dd 1671 register int span;
592996c9 1672 size_t i;
1be6b49c 1673 size_t seg_size;
0f2d19dd 1674
0f2d19dd 1675 m = 0;
0f2d19dd 1676
4c48ba06
MD
1677 gc_sweep_freelist_start (&scm_master_freelist);
1678 gc_sweep_freelist_start (&scm_master_freelist2);
a00c95d9 1679
cf2d30f6 1680 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1681 {
c014a02e 1682 register long left_to_collect;
1be6b49c 1683 register size_t j;
15e9d186 1684
cf2d30f6
JB
1685 /* Unmarked cells go onto the front of the freelist this heap
1686 segment points to. Rather than updating the real freelist
1687 pointer as we go along, we accumulate the new head in
1688 nfreelist. Then, if it turns out that the entire segment is
1689 free, we free (i.e., malloc's free) the whole segment, and
1690 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1691 freelist = scm_heap_table[i].freelist;
1692 nfreelist = freelist->cells;
4c48ba06 1693 left_to_collect = freelist->left_to_collect;
945fec60 1694 span = scm_heap_table[i].span;
cf2d30f6 1695
a00c95d9
ML
1696 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1697 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
c9b0d4b0 1698
d6884e63
ML
1699 /* use only data cells in seg_size */
1700 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1701
c9b0d4b0
ML
1702 scm_gc_cells_swept += seg_size;
1703
0f2d19dd
JB
1704 for (j = seg_size + span; j -= span; ptr += span)
1705 {
d6884e63 1706 SCM scmptr;
96f6f4ae 1707
d6884e63 1708 if (SCM_GC_IN_CARD_HEADERP (ptr))
0f2d19dd 1709 {
d6884e63
ML
1710 SCM_CELLPTR nxt;
1711
1712 /* cheat here */
1713 nxt = ptr;
1714 NEXT_DATA_CELL (nxt, span);
1715 j += span;
1716
1717 ptr = nxt - span;
1718 continue;
1719 }
1720
1721 scmptr = PTR2SCM (ptr);
1722
1723 if (SCM_GCMARKP (scmptr))
1724 continue;
7bb8eac7 1725
d6884e63
ML
1726 switch SCM_TYP7 (scmptr)
1727 {
904a077d 1728 case scm_tcs_struct:
0f2d19dd 1729 {
904a077d
MV
1730 /* Structs need to be freed in a special order.
1731 * This is handled by GC C hooks in struct.c.
c8045e8d 1732 */
904a077d
MV
1733 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1734 scm_structs_to_free = scmptr;
0f2d19dd 1735 }
904a077d 1736 continue;
0f2d19dd
JB
1737 case scm_tcs_cons_imcar:
1738 case scm_tcs_cons_nimcar:
1739 case scm_tcs_closures:
e641afaf 1740 case scm_tc7_pws:
0f2d19dd
JB
1741 break;
1742 case scm_tc7_wvect:
0f2d19dd 1743 case scm_tc7_vector:
1b9be268 1744 {
c014a02e 1745 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1b9be268
DH
1746 if (length > 0)
1747 {
92c2555f 1748 m += length * sizeof (scm_t_bits);
1b9be268
DH
1749 scm_must_free (SCM_VECTOR_BASE (scmptr));
1750 }
1751 break;
1752 }
0f2d19dd
JB
1753#ifdef CCLO
1754 case scm_tc7_cclo:
b5c2579a 1755 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
06ee04b2 1756 scm_must_free (SCM_CCLO_BASE (scmptr));
0f2d19dd 1757 break;
06ee04b2 1758#endif
afe5177e 1759#ifdef HAVE_ARRAYS
0f2d19dd 1760 case scm_tc7_bvect:
93778877 1761 {
c014a02e 1762 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
93778877
DH
1763 if (length > 0)
1764 {
c014a02e 1765 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
93778877
DH
1766 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1767 }
1768 }
06ee04b2 1769 break;
0f2d19dd 1770 case scm_tc7_byvect:
0f2d19dd
JB
1771 case scm_tc7_ivect:
1772 case scm_tc7_uvect:
0f2d19dd 1773 case scm_tc7_svect:
5c11cc9d 1774#ifdef HAVE_LONG_LONGS
0f2d19dd 1775 case scm_tc7_llvect:
0f2d19dd
JB
1776#endif
1777 case scm_tc7_fvect:
0f2d19dd 1778 case scm_tc7_dvect:
0f2d19dd 1779 case scm_tc7_cvect:
d1ca2c64 1780 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
06ee04b2
DH
1781 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1782 break;
afe5177e 1783#endif
0f2d19dd 1784 case scm_tc7_string:
b5c2579a 1785 m += SCM_STRING_LENGTH (scmptr) + 1;
f151f912
DH
1786 scm_must_free (SCM_STRING_CHARS (scmptr));
1787 break;
28b06554 1788 case scm_tc7_symbol:
b5c2579a 1789 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
f151f912 1790 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
0f2d19dd 1791 break;
8a3e715b
ML
1792 case scm_tc7_variable:
1793 break;
0f2d19dd 1794 case scm_tcs_subrs:
d6884e63 1795 /* the various "subrs" (primitives) are never freed */
0f2d19dd
JB
1796 continue;
1797 case scm_tc7_port:
0f2d19dd
JB
1798 if SCM_OPENP (scmptr)
1799 {
1800 int k = SCM_PTOBNUM (scmptr);
7a7f7c53 1801#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1802 if (!(k < scm_numptob))
7a7f7c53
DH
1803 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1804#endif
0f2d19dd 1805 /* Keep "revealed" ports alive. */
945fec60 1806 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1807 continue;
1808 /* Yes, I really do mean scm_ptobs[k].free */
1809 /* rather than ftobs[k].close. .close */
1810 /* is for explicit CLOSE-PORT by user */
84af0382 1811 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1812 SCM_SETSTREAM (scmptr, 0);
1813 scm_remove_from_port_table (scmptr);
1814 scm_gc_ports_collected++;
22a52da1 1815 SCM_CLR_PORT_OPEN_FLAG (scmptr);
0f2d19dd
JB
1816 }
1817 break;
1818 case scm_tc7_smob:
d6884e63 1819 switch SCM_TYP16 (scmptr)
0f2d19dd
JB
1820 {
1821 case scm_tc_free_cell:
acb0a19c 1822 case scm_tc16_real:
0f2d19dd
JB
1823 break;
1824#ifdef SCM_BIGDIG
acb0a19c 1825 case scm_tc16_big:
0f2d19dd 1826 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
06ee04b2
DH
1827 scm_must_free (SCM_BDIGITS (scmptr));
1828 break;
0f2d19dd 1829#endif /* def SCM_BIGDIG */
acb0a19c 1830 case scm_tc16_complex:
92c2555f 1831 m += sizeof (scm_t_complex);
405aaef9 1832 scm_must_free (SCM_COMPLEX_MEM (scmptr));
06ee04b2 1833 break;
0f2d19dd 1834 default:
0f2d19dd
JB
1835 {
1836 int k;
1837 k = SCM_SMOBNUM (scmptr);
7a7f7c53 1838#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1839 if (!(k < scm_numsmob))
7a7f7c53
DH
1840 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1841#endif
1842 if (scm_smobs[k].free)
1843 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1844 break;
1845 }
1846 }
1847 break;
1848 default:
acf4331f 1849 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1850 }
7bb8eac7 1851
4c48ba06 1852 if (!--left_to_collect)
4a4c9785 1853 {
22a52da1 1854 SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
4c48ba06
MD
1855 *freelist->clustertail = scmptr;
1856 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1857
4a4c9785 1858 nfreelist = SCM_EOL;
4c48ba06
MD
1859 freelist->collected += span * freelist->cluster_size;
1860 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1861 }
1862 else
4a4c9785
MD
1863 {
1864 /* Stick the new cell on the front of nfreelist. It's
1865 critical that we mark this cell as freed; otherwise, the
1866 conservative collector might trace it as some other type
1867 of object. */
54778cd3 1868 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
3f5d82cd 1869 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
4a4c9785
MD
1870 nfreelist = scmptr;
1871 }
0f2d19dd 1872 }
d6884e63 1873
0f2d19dd
JB
1874#ifdef GC_FREE_SEGMENTS
1875 if (n == seg_size)
1876 {
c014a02e 1877 register long j;
15e9d186 1878
4c48ba06 1879 freelist->heap_size -= seg_size;
cf2d30f6
JB
1880 free ((char *) scm_heap_table[i].bounds[0]);
1881 scm_heap_table[i].bounds[0] = 0;
1882 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1883 scm_heap_table[j - 1] = scm_heap_table[j];
1884 scm_n_heap_segs -= 1;
cf2d30f6 1885 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1886 }
1887 else
1888#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1889 {
1890 /* Update the real freelist pointer to point to the head of
1891 the list of free cells we've built for this segment. */
4c48ba06 1892 freelist->cells = nfreelist;
4c48ba06 1893 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1894 }
1895
fca7547b 1896#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
1897 scm_map_free_list ();
1898#endif
4a4c9785 1899 }
a00c95d9 1900
4c48ba06
MD
1901 gc_sweep_freelist_finish (&scm_master_freelist);
1902 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1903
8ded62a3
MD
1904 /* When we move to POSIX threads private freelists should probably
1905 be GC-protected instead. */
1906 scm_freelist = SCM_EOL;
1907 scm_freelist2 = SCM_EOL;
a00c95d9 1908
b37fe1c5 1909 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f 1910 scm_gc_yield -= scm_cells_allocated;
eae33935 1911
1be6b49c
ML
1912 if (scm_mallocated < m)
1913 /* The byte count of allocated objects has underflowed. This is
1914 probably because you forgot to report the sizes of objects you
1915 have allocated, by calling scm_done_malloc or some such. When
1916 the GC freed them, it subtracted their size from
1917 scm_mallocated, which underflowed. */
1918 abort ();
1919
0f2d19dd
JB
1920 scm_mallocated -= m;
1921 scm_gc_malloc_collected = m;
1922}
acf4331f 1923#undef FUNC_NAME
0f2d19dd
JB
1924
1925
1926\f
0f2d19dd
JB
1927/* {Front end to malloc}
1928 *
9d47a1e6
ML
1929 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1930 * scm_done_free
0f2d19dd 1931 *
c6c79933
GH
1932 * These functions provide services comparable to malloc, realloc, and
1933 * free. They should be used when allocating memory that will be under
1934 * control of the garbage collector, i.e., if the memory may be freed
1935 * during garbage collection.
1936 */
bc9d9bb2 1937
0f2d19dd
JB
1938/* scm_must_malloc
1939 * Return newly malloced storage or throw an error.
1940 *
1941 * The parameter WHAT is a string for error reporting.
a00c95d9 1942 * If the threshold scm_mtrigger will be passed by this
0f2d19dd
JB
1943 * allocation, or if the first call to malloc fails,
1944 * garbage collect -- on the presumption that some objects
1945 * using malloced storage may be collected.
1946 *
1947 * The limit scm_mtrigger may be raised by this allocation.
1948 */
void *
scm_must_malloc (size_t size, const char *what)
{
  void *ptr;
  /* Projected total of malloc-accounted bytes after this allocation.  */
  unsigned long nm = scm_mallocated + size;

  if (nm < size)
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  /* First attempt: allocate directly as long as we stay under the
     malloc trigger threshold.  */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
	{
	  scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
	  scm_malloc_register (ptr, what);
#endif
	  return ptr;
	}
    }

  /* Either we passed the trigger or malloc failed: collect garbage in
     the hope that collectable objects release malloced storage.  */
  scm_igc (what);

  /* scm_igc may have reduced scm_mallocated; recompute the projection
     and re-check for accounting overflow.  */
  nm = scm_mallocated + size;

  if (nm < size)
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  /* Second and final attempt.  */
  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger so the next allocations do not immediately
	 force another collection (hysteresis).  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
	if (nm > scm_mtrigger)
	  scm_mtrigger = nm + nm / 2;
	else
	  scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  /* Out of memory even after GC: signal an error (presumably
     non-returning -- control falls off the end of this void* function
     otherwise; TODO confirm scm_memory_error never returns).  */
  scm_memory_error (what);
}
2003
2004
/* scm_must_realloc
 * is similar to scm_must_malloc: grow WHERE from OLD_SIZE to SIZE
 * bytes, garbage collecting and retrying once if the first attempt
 * fails or would cross the malloc trigger.  WHAT is used for error
 * reporting.  Returns the (possibly moved) block.
 */
void *
scm_must_realloc (void *where,
		  size_t old_size,
		  size_t size,
		  const char *what)
{
  void *ptr;
  unsigned long nm;

  /* Shrinking (or equal-size) requests are no-ops; the caller keeps the
     original block and accounting is unchanged.  */
  if (size <= old_size)
    return where;

  /* Projected byte count after growing by (size - old_size).  */
  nm = scm_mallocated + size - old_size;

  if (nm < (size - old_size))
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  /* First attempt, only while under the malloc trigger.  */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
	{
	  scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
	  scm_malloc_reregister (where, ptr, what);
#endif
	  return ptr;
	}
    }

  /* Collect and retry, exactly as scm_must_malloc does.  */
  scm_igc (what);

  nm = scm_mallocated + size - old_size;

  if (nm < (size - old_size))
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger with hysteresis, as in scm_must_malloc.  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
	if (nm > scm_mtrigger)
	  scm_mtrigger = nm + nm / 2;
	else
	  scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  /* Out of memory even after GC (see note in scm_must_malloc about the
     non-returning error call).  */
  scm_memory_error (what);
}
2069
/* Return a freshly allocated, NUL-terminated copy of the first LENGTH
   bytes of STR.  Allocation goes through scm_must_malloc, which throws
   rather than returning NULL.  Caller frees with scm_must_free.  */
char *
scm_must_strndup (const char *str, size_t length)
{
  char *copy = scm_must_malloc (length + 1, "scm_must_strndup");
  memcpy (copy, str, length);
  copy[length] = '\0';
  return copy;
}
2078
/* Return a freshly allocated copy of the whole NUL-terminated string
   STR.  Thin wrapper around scm_must_strndup.  */
char *
scm_must_strdup (const char *str)
{
  size_t len = strlen (str);
  return scm_must_strndup (str, len);
}
acf4331f 2084
a00c95d9 2085void
07806695 2086scm_must_free (void *obj)
acf4331f 2087#define FUNC_NAME "scm_must_free"
0f2d19dd 2088{
bc9d9bb2
MD
2089#ifdef GUILE_DEBUG_MALLOC
2090 scm_malloc_unregister (obj);
2091#endif
0f2d19dd
JB
2092 if (obj)
2093 free (obj);
2094 else
acf4331f 2095 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
0f2d19dd 2096}
acf4331f
DH
2097#undef FUNC_NAME
2098
0f2d19dd 2099
c68296f8
MV
2100/* Announce that there has been some malloc done that will be freed
2101 * during gc. A typical use is for a smob that uses some malloced
2102 * memory but can not get it from scm_must_malloc (for whatever
2103 * reason). When a new object of this smob is created you call
2104 * scm_done_malloc with the size of the object. When your smob free
2105 * function is called, be sure to include this size in the return
9d47a1e6
ML
2106 * value.
2107 *
2108 * If you can't actually free the memory in the smob free function,
2109 * for whatever reason (like reference counting), you still can (and
2110 * should) report the amount of memory freed when you actually free it.
2111 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2112 * eh? Or even better, call scm_done_free. */
0f2d19dd 2113
c68296f8 2114void
c014a02e 2115scm_done_malloc (long size)
c68296f8 2116{
1be6b49c
ML
2117 if (size < 0) {
2118 if (scm_mallocated < size)
2119 /* The byte count of allocated objects has underflowed. This is
2120 probably because you forgot to report the sizes of objects you
2121 have allocated, by calling scm_done_malloc or some such. When
2122 the GC freed them, it subtracted their size from
2123 scm_mallocated, which underflowed. */
2124 abort ();
2125 } else {
c014a02e 2126 unsigned long nm = scm_mallocated + size;
1be6b49c
ML
2127 if (nm < size)
2128 /* The byte count of allocated objects has overflowed. This is
2129 probably because you forgot to report the correct size of freed
2130 memory in some of your smob free methods. */
2131 abort ();
2132 }
eae33935 2133
c68296f8
MV
2134 scm_mallocated += size;
2135
2136 if (scm_mallocated > scm_mtrigger)
2137 {
2138 scm_igc ("foreign mallocs");
2139 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2140 {
2141 if (scm_mallocated > scm_mtrigger)
2142 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2143 else
2144 scm_mtrigger += scm_mtrigger / 2;
2145 }
2146 }
2147}
2148
9d47a1e6 2149void
c014a02e 2150scm_done_free (long size)
9d47a1e6 2151{
1be6b49c
ML
2152 if (size >= 0) {
2153 if (scm_mallocated < size)
2154 /* The byte count of allocated objects has underflowed. This is
2155 probably because you forgot to report the sizes of objects you
2156 have allocated, by calling scm_done_malloc or some such. When
2157 the GC freed them, it subtracted their size from
2158 scm_mallocated, which underflowed. */
2159 abort ();
2160 } else {
c014a02e 2161 unsigned long nm = scm_mallocated + size;
1be6b49c
ML
2162 if (nm < size)
2163 /* The byte count of allocated objects has overflowed. This is
2164 probably because you forgot to report the correct size of freed
2165 memory in some of your smob free methods. */
2166 abort ();
2167 }
2168
9d47a1e6
ML
2169 scm_mallocated -= size;
2170}
2171
c68296f8
MV
2172
2173\f
0f2d19dd
JB
2174/* {Heap Segments}
2175 *
2176 * Each heap segment is an array of objects of a particular size.
2177 * Every segment has an associated (possibly shared) freelist.
2178 * A table of segment records is kept that records the upper and
2179 * lower extents of the segment; this is used during the conservative
2180 * phase of gc to identify probably gc roots (because they point
c68296f8 2181 * into valid segments at reasonable offsets). */
0f2d19dd
JB
2182
/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* Upper bound in bytes for a single heap segment allocation;
   configured from GUILE_MAX_SEGMENT_SIZE in scm_init_storage.  */
size_t scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Table of heap segment records, kept sorted by segment base address
   (see the insertion loop in init_heap_seg).  */
scm_t_heap_seg_data * scm_heap_table = 0;
/* Number of table slots currently allocated for scm_heap_table.  */
static size_t heap_segment_table_size = 0;
/* Number of table slots currently in use.  */
size_t scm_n_heap_segs = 0;
0f2d19dd 2200
0f2d19dd 2201/* init_heap_seg
d6884e63 2202 * initializes a new heap segment and returns the number of objects it contains.
0f2d19dd 2203 *
d6884e63
ML
2204 * The segment origin and segment size in bytes are input parameters.
2205 * The freelist is both input and output.
0f2d19dd 2206 *
d6884e63
ML
2207 * This function presumes that the scm_heap_table has already been expanded
2208 * to accomodate a new segment record and that the markbit space was reserved
2209 * for all the cards in this segment.
0f2d19dd
JB
2210 */
2211
d6884e63
ML
/* Initialise a freshly carved card: attach a bit vector obtained from
   get_bvec (), and flag the card as holding double cells when the
   owning freelist has a span of 2.  */
#define INIT_CARD(card, span) \
  do { \
    SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
    if ((span) == 2) \
      SCM_GC_SET_CARD_DOUBLECELL (card); \
  } while (0)
0f2d19dd 2218
static size_t
init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  size_t new_seg_index;
  ptrdiff_t n_new_cells;
  int span = freelist->span;   /* cells per object: 1 or 2 */

  if (seg_org == NULL)
    return 0;

  /* Align the begin ptr up.
   */
  ptr = SCM_GC_CARD_UP (seg_org);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);

  /* Find the right place and insert the segment record.
     The table is kept sorted by segment base address; shift the tail
     of the table up by one slot to make room.  */
  new_seg_index = 0;
  while (new_seg_index < scm_n_heap_segs
	 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org))
    new_seg_index++;

  {
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;

    NEXT_DATA_CELL (ptr, span);
    while (ptr < seg_end)
      {
	scm_cell *nxt = ptr;
	scm_cell *prv = NULL;
	scm_cell *last_card = NULL;
	/* Number of data cells that make up one cluster, minus the
	   spine cell allocated just below.  */
	int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
	NEXT_DATA_CELL(nxt, span);

	/* Allocate cluster spine
	 */
	*clusterp = PTR2SCM (ptr);
	SCM_SETCAR (*clusterp, PTR2SCM (nxt));
	clusterp = SCM_CDRLOC (*clusterp);
	ptr = nxt;

	/* Thread the cluster's data cells into a free list, initialising
	   each card the first time a cell on it is visited.  */
	while (n_data_cells--)
	  {
	    scm_cell *card = SCM_GC_CELL_CARD (ptr);
	    SCM scmptr = PTR2SCM (ptr);
	    nxt = ptr;
	    NEXT_DATA_CELL (nxt, span);
	    prv = ptr;

	    if (card != last_card)
	      {
		INIT_CARD (card, span);
		last_card = card;
	      }

	    /* Mark the cell free so the conservative collector cannot
	       mistake it for a live object.  */
	    SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	    SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));

	    ptr = nxt;
	  }

	/* Terminate this cluster's free list.  */
	SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
      }

    /* sanity check */
    {
      scm_cell *ref = seg_end;
      NEXT_DATA_CELL (ref, span);
      if (ref != ptr)
	/* [cmm] looks like the segment size doesn't divide cleanly by
	   cluster size. bad cmm! */
	abort();
    }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
2330
1be6b49c 2331static size_t
92c2555f 2332round_to_cluster_size (scm_t_freelist *freelist, size_t len)
a00c95d9 2333{
1be6b49c 2334 size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9
ML
2335
2336 return
2337 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2338 + ALIGNMENT_SLACK (freelist);
2339}
2340
/* Grow the heap by one new segment attached to FREELIST.  On failure,
   either abort (abort_on_error) or return silently (other policies).  */
static void
alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
#define FUNC_NAME "alloc_some_heap"
{
  SCM_CELLPTR ptr;
  size_t len;

  if (scm_gc_heap_lock)
    {
      /* Critical code sections (such as the garbage collector) aren't
       * supposed to add heap segments.
       */
      fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
      abort ();
    }

  if (scm_n_heap_segs == heap_segment_table_size)
    {
      /* We have to expand the heap segment table to have room for the new
       * segment.  Do not yet increment scm_n_heap_segs -- that is done by
       * init_heap_seg only if the allocation of the segment itself succeeds.
       */
      size_t new_table_size = scm_n_heap_segs + 1;
      size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
      scm_t_heap_seg_data *new_heap_table;

      SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
				     realloc ((char *)scm_heap_table, size)));
      if (!new_heap_table)
	{
	  if (error_policy == abort_on_error)
	    {
	      fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
	      abort ();
	    }
	  else
	    {
	      return;
	    }
	}
      else
	{
	  scm_heap_table = new_heap_table;
	  heap_segment_table_size = new_table_size;
	}
    }

  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  {
    /* Assure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f)
     */
    int f = freelist->min_yield_fraction;
    unsigned long h = SCM_HEAP_SIZE;
    size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);   /* convert cells to bytes */
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    size_t smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition: halve the request after each
       failed malloc until it drops below the minimum useful size.  */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
	   && (len >= smallest))
      {
	size_t rounded_len = round_to_cluster_size (freelist, len);
	SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
	if (ptr)
	  {
	    init_heap_seg (ptr, rounded_len, freelist);
	    return;
	  }
	len /= 2;
      }
  }

  if (error_policy == abort_on_error)
    {
      fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
      abort ();
    }
}
#undef FUNC_NAME
0f2d19dd 2453
0f2d19dd
JB
2454\f
2455/* {GC Protection Helper Functions}
2456 */
2457
2458
5d2b97cd
DH
2459/*
2460 * If within a function you need to protect one or more scheme objects from
2461 * garbage collection, pass them as parameters to one of the
2462 * scm_remember_upto_here* functions below. These functions don't do
2463 * anything, but since the compiler does not know that they are actually
2464 * no-ops, it will generate code that calls these functions with the given
2465 * parameters. Therefore, you can be sure that the compiler will keep those
2466 * scheme values alive (on the stack or in a register) up to the point where
2467 * scm_remember_upto_here* is called. In other words, place the call to
592996c9 2468 * scm_remember_upto_here* _behind_ the last code in your function, that
5d2b97cd
DH
2469 * depends on the scheme object to exist.
2470 *
8c494e99
DH
2471 * Example: We want to make sure that the string object str does not get
2472 * garbage collected during the execution of 'some_function' in the code
2473 * below, because otherwise the characters belonging to str would be freed and
5d2b97cd
DH
2474 * 'some_function' might access freed memory. To make sure that the compiler
2475 * keeps str alive on the stack or in a register such that it is visible to
2476 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2477 * call to 'some_function'. Note that this would not be necessary if str was
2478 * used anyway after the call to 'some_function'.
2479 * char *chars = SCM_STRING_CHARS (str);
2480 * some_function (chars);
2481 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2482 */
2483
/* The three functions below are deliberately empty; see the long
   explanatory comment above.  Their only effect is that the compiler
   must keep the argument values live up to the call site.  */

void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty.  Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty.  Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty.  Protects any number of objects from garbage collection. */
}
2501
c209c88e 2502/*
41b0806d
GB
2503 These crazy functions prevent garbage collection
2504 of arguments after the first argument by
2505 ensuring they remain live throughout the
2506 function because they are used in the last
2507 line of the code block.
2508 It'd be better to have a nice compiler hint to
2509 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2510SCM
2511scm_return_first (SCM elt, ...)
0f2d19dd
JB
2512{
2513 return elt;
2514}
2515
41b0806d
GB
/* Integer variant of scm_return_first: return I, ignoring the trailing
   arguments that exist only to stay live for the conservative GC.  */
int
scm_return_first_int (int i, ...)
{
  return i;
}
2521
0f2d19dd 2522
0f2d19dd 2523SCM
6e8d25a6 2524scm_permanent_object (SCM obj)
0f2d19dd
JB
2525{
2526 SCM_REDEFER_INTS;
2527 scm_permobjs = scm_cons (obj, scm_permobjs);
2528 SCM_REALLOW_INTS;
2529 return obj;
2530}
2531
2532
7bd4fbe2
MD
2533/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2534 other references are dropped, until the object is unprotected by calling
6b1b030e 2535 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
7bd4fbe2
MD
2536 i. e. it is possible to protect the same object several times, but it is
2537 necessary to unprotect the object the same number of times to actually get
2538 the object unprotected. It is an error to unprotect an object more often
2539 than it has been protected before. The function scm_protect_object returns
2540 OBJ.
2541*/
2542
2543/* Implementation note: For every object X, there is a counter which
6b1b030e 2544 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
7bd4fbe2 2545*/
686765af 2546
/* Increment OBJ's protection count in the scm_protects hash table,
   creating the entry (starting from 0) on first protection.  Calls
   nest; each must be balanced by one scm_gc_unprotect_object (see the
   contract comment above).  Returns OBJ.  */
SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  /* Look up (or create with count 0) the entry, then bump the count.  */
  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));

  SCM_REALLOW_INTS;

  return obj;
}
2562
2563
/* Remove one level of protection for OBJ established by a prior call to
   scm_gc_protect_object.  The entry is deleted from scm_protects once
   its count drops to zero; unprotecting an object that is not protected
   is a fatal error.  This function returns OBJ.

   See scm_gc_protect_object for more information. */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_FALSEP (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* Decrement the count; drop the table entry when it hits zero.  */
      SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
      if (SCM_EQ_P (count, SCM_MAKINUM (0)))
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, count);
    }

  SCM_REALLOW_INTS;

  return obj;
}
2596
6b1b030e
ML
/* Register the SCM location P as a GC root, reference-counted in the
   scm_gc_registered_roots hash table.  The address itself (boxed as a
   number) is the hash key.  Calls nest like scm_gc_protect_object.  */
void
scm_gc_register_root (SCM *p)
{
  SCM handle;
  SCM key = scm_long2num ((long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  /* Look up (or create with count 0) the entry, then bump the count.  */
  handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));

  SCM_REALLOW_INTS;
}
2611
/* Undo one scm_gc_register_root for location P.  The table entry is
   removed when its count reaches zero; unregistering an address that
   was never registered is a fatal error.  */
void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_long2num ((long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (SCM_FALSEP (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      /* Decrement the count; drop the table entry when it hits zero.  */
      SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
      if (SCM_EQ_P (count, SCM_MAKINUM (0)))
	scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
	SCM_SETCDR (handle, count);
    }

  SCM_REALLOW_INTS;
}
2639
2640void
2641scm_gc_register_roots (SCM *b, unsigned long n)
2642{
2643 SCM *p = b;
2644 for (; p < b + n; ++p)
2645 scm_gc_register_root (p);
2646}
2647
2648void
2649scm_gc_unregister_roots (SCM *b, unsigned long n)
2650{
2651 SCM *p = b;
2652 for (; p < b + n; ++p)
2653 scm_gc_unregister_root (p);
2654}
2655
c45acc34
JB
/* Set once process shutdown has begun (read elsewhere; not visible in
   this file chunk -- NOTE(review): confirm readers).  */
int terminating;

/* called on process termination. */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

/* on_exit handlers receive the exit status and a user argument; both
   are unused here.  */
static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}
ef290276 2676
0f2d19dd 2677\f
/* Allocate the initial heap segment for FREELIST, trying
   INIT_HEAP_SIZE bytes first and falling back to SCM_HEAP_SEG_SIZE.
   Also derives the freelist's absolute min_yield from its configured
   fraction.  Returns 0 on success, 1 if both allocations failed.  */
static int
make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
{
  size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);

  /* init_heap_seg returns 0 when malloc produced NULL; retry with the
     default segment size before giving up.  */
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    scm_expmem = 1;   /* first segment got the requested size */

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
			   / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
2703
2704\f
4c48ba06 2705static void
92c2555f 2706init_freelist (scm_t_freelist *freelist,
4c48ba06 2707 int span,
c014a02e 2708 long cluster_size,
8fef55a8 2709 int min_yield)
4c48ba06
MD
2710{
2711 freelist->clusters = SCM_EOL;
2712 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2713 freelist->left_to_collect = 0;
2714 freelist->clusters_allocated = 0;
8fef55a8
MD
2715 freelist->min_yield = 0;
2716 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2717 freelist->span = span;
2718 freelist->collected = 0;
1811ebce 2719 freelist->collected_1 = 0;
4c48ba06
MD
2720 freelist->heap_size = 0;
2721}
2722
85db4a2c
DH
2723
/* Get an integer from an environment variable.  Returns DEF when VAR
   is unset or does not begin with a decimal number; otherwise the
   parsed value (truncated to int, as in the original).  */
static int
scm_i_getenv_int (const char *var, int def)
{
  const char *raw = getenv (var);
  char *tail;
  long parsed;

  if (raw == NULL)
    return def;
  parsed = strtol (raw, &tail, 10);
  return (tail == raw) ? def : (int) parsed;
}
2737
2738
/* One-time initialisation of the storage subsystem: protects table,
   master freelists, initial heap segments, GC hooks, port table and the
   process-exit handler.  Returns 0 on success, 1 on failure.  */
int
scm_init_storage ()
{
  unsigned long gc_trigger_1;
  unsigned long gc_trigger_2;
  size_t init_heap_size_1;
  size_t init_heap_size_2;
  size_t j;

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
  scm_set_smob_mark (scm_tc16_allocated, allocated_mark);
#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */

  /* Clear the system protects table and block GC until the heap is
     ready.  */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  /* Configure the two master freelists (single and double cells); GC
     tuning parameters may be overridden from the environment.  */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
  init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
  gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
  init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
  scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);

  scm_expmem = 0;

  /* NOTE(review): this assignment to j appears unused below -- j is
     not read again before the function returns.  */
  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Segment table starts with room for the two initial segments.  */
  scm_heap_table = ((scm_t_heap_seg_data *)
		    scm_must_malloc (sizeof (scm_t_heap_seg_data) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  /* Allocate one initial segment per master freelist.  */
  init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
  init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports. */
  scm_port_table = (scm_t_port **)
    malloc (sizeof (scm_t_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

  /* Install the termination handler (see cleanup above).  */
#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}
939794ce 2812
0f2d19dd
JB
2813\f
2814
939794ce
DH
/* Scheme-level hook object run after each garbage collection; created and
 * bound to the name "after-gc-hook" in scm_init_gc below.  */
SCM scm_after_gc_hook;

/* The async whose marking eventually causes gc_async_thunk to run; created
 * in scm_init_gc (protected from GC via scm_asyncs — see note there).  */
static SCM gc_async;

939794ce
DH
2819/* The function gc_async_thunk causes the execution of the after-gc-hook. It
2820 * is run after the gc, as soon as the asynchronous events are handled by the
2821 * evaluator.
2822 */
2823static SCM
2824gc_async_thunk (void)
2825{
2826 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
939794ce
DH
2827 return SCM_UNSPECIFIED;
2828}
2829
2830
/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 *
 * All three hook arguments are unused.  NOTE(review): the returned pointer
 * appears to be ignored by the C-hook machinery for SCM_C_HOOK_NORMAL hooks
 * — confirm against scm_c_hook_run before relying on it.
 */
static void *
mark_gc_async (void * hook_data SCM_UNUSED,
               void *func_data SCM_UNUSED,
               void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collector.  When running guile with cell access debugging and the
   * execution of the after-gc-hook is desired, then it is necessary to run
   * the hook explicitly from the user code.  This has the effect, that from
   * the scheme level point of view it seems that garbage collection is
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one to one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */
#if (SCM_DEBUG_CELL_ACCESSES == 1)
  /* Only mark the async when the user has not requested extra debugging
   * collections (interval 0 means "no additional GCs").  */
  if (debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  return NULL;
}
2874
2875
0f2d19dd
JB
2876void
2877scm_init_gc ()
0f2d19dd 2878{
939794ce
DH
2879 SCM after_gc_thunk;
2880
fde50407
ML
2881 scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
2882 scm_c_define ("after-gc-hook", scm_after_gc_hook);
939794ce 2883
9a441ddb
MV
2884 after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
2885 gc_async_thunk);
23670993 2886 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
939794ce
DH
2887
2888 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2889
8dc9439f 2890#ifndef SCM_MAGIC_SNARFER
a0599745 2891#include "libguile/gc.x"
8dc9439f 2892#endif
0f2d19dd 2893}
89e00824 2894
56495472
ML
2895#endif /*MARK_DEPENDENCIES*/
2896
89e00824
ML
2897/*
2898 Local Variables:
2899 c-file-style: "gnu"
2900 End:
2901*/