*** empty log message ***
[bpt/guile.git] / libguile / gc.c
CommitLineData
22a52da1 1/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
a00c95d9 2 *
0f2d19dd
JB
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
a00c95d9 7 *
0f2d19dd
JB
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
a00c95d9 12 *
0f2d19dd
JB
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
82892bed
JB
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
0f2d19dd
JB
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
82892bed 40 * If you do not wish that, delete this exception notice. */
1bbd0b84 41
1bbd0b84 42
37ddcaf6
MD
43/* #define DEBUGINFO */
44
56495472
ML
45/* SECTION: This code is compiled once.
46 */
47
48#ifndef MARK_DEPENDENCIES
49
0f2d19dd
JB
50\f
51#include <stdio.h>
e6e2e95a 52#include <errno.h>
783e7774 53#include <string.h>
e6e2e95a 54
a0599745 55#include "libguile/_scm.h"
0a7a7445 56#include "libguile/eval.h"
a0599745
MD
57#include "libguile/stime.h"
58#include "libguile/stackchk.h"
59#include "libguile/struct.h"
a0599745
MD
60#include "libguile/smob.h"
61#include "libguile/unif.h"
62#include "libguile/async.h"
63#include "libguile/ports.h"
64#include "libguile/root.h"
65#include "libguile/strings.h"
66#include "libguile/vectors.h"
801cb5e7 67#include "libguile/weaks.h"
686765af 68#include "libguile/hashtab.h"
ecf470a2 69#include "libguile/tags.h"
a0599745
MD
70
71#include "libguile/validate.h"
1be6b49c 72#include "libguile/deprecation.h"
a0599745 73#include "libguile/gc.h"
fce59c93 74
bc9d9bb2 75#ifdef GUILE_DEBUG_MALLOC
a0599745 76#include "libguile/debug-malloc.h"
bc9d9bb2
MD
77#endif
78
0f2d19dd 79#ifdef HAVE_MALLOC_H
95b88819 80#include <malloc.h>
0f2d19dd
JB
81#endif
82
83#ifdef HAVE_UNISTD_H
95b88819 84#include <unistd.h>
0f2d19dd
JB
85#endif
86
1cc91f1b
JB
87#ifdef __STDC__
88#include <stdarg.h>
89#define var_start(x, y) va_start(x, y)
90#else
91#include <varargs.h>
92#define var_start(x, y) va_start(x)
93#endif
94
0f2d19dd 95\f
406c7d90
DH
96
97unsigned int scm_gc_running_p = 0;
98
99\f
100
101#if (SCM_DEBUG_CELL_ACCESSES == 1)
102
92c2555f 103scm_t_bits scm_tc16_allocated;
61045190
DH
104
105/* Set this to != 0 if every cell that is accessed shall be checked:
106 */
107unsigned int scm_debug_cell_accesses_p = 1;
406c7d90 108
e81d98ec
DH
109/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
110 * the number of cell accesses after which a gc shall be called.
111 */
112static unsigned int debug_cells_gc_interval = 0;
113
406c7d90 114
592996c9
DH
115/* If an allocated cell is detected during garbage collection, this means that
116 * some code has just obtained the object but was preempted before the
117 * initialization of the object was completed. This meanst that some entries
118 * of the allocated cell may already contain SCM objects. Therefore,
119 * allocated cells are scanned conservatively. */
120static SCM
121allocated_mark (SCM allocated)
122{
123 scm_gc_mark_cell_conservatively (allocated);
124 return SCM_BOOL_F;
125}
126
127
406c7d90
DH
128/* Assert that the given object is a valid reference to a valid cell. This
129 * test involves to determine whether the object is a cell pointer, whether
130 * this pointer actually points into a heap segment and whether the cell
e81d98ec
DH
131 * pointed to is not a free cell. Further, additional garbage collections may
132 * get executed after a user defined number of cell accesses. This helps to
133 * find places in the C code where references are dropped for extremely short
134 * periods.
406c7d90
DH
135 */
136void
137scm_assert_cell_valid (SCM cell)
138{
61045190
DH
139 static unsigned int already_running = 0;
140
141 if (scm_debug_cell_accesses_p && !already_running)
406c7d90 142 {
61045190 143 already_running = 1; /* set to avoid recursion */
406c7d90 144
9d47a1e6 145 if (!scm_cellp (cell))
406c7d90 146 {
1be6b49c
ML
147 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lux\n",
148 (unsigned long) SCM_UNPACK (cell));
406c7d90
DH
149 abort ();
150 }
151 else if (!scm_gc_running_p)
152 {
153 /* Dirk::FIXME:: During garbage collection there occur references to
154 free cells. This is allright during conservative marking, but
155 should not happen otherwise (I think). The case of free cells
156 accessed during conservative marking is handled in function
157 scm_mark_locations. However, there still occur accesses to free
158 cells during gc. I don't understand why this happens. If it is
159 a bug and gets fixed, the following test should also work while
160 gc is running.
161 */
162 if (SCM_FREE_CELL_P (cell))
163 {
1be6b49c
ML
164 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lux\n",
165 (unsigned long) SCM_UNPACK (cell));
406c7d90
DH
166 abort ();
167 }
e81d98ec
DH
168
169 /* If desired, perform additional garbage collections after a user
170 * defined number of cell accesses.
171 */
172 if (debug_cells_gc_interval)
173 {
174 static unsigned int counter = 0;
175
176 if (counter != 0)
177 {
178 --counter;
179 }
180 else
181 {
182 counter = debug_cells_gc_interval;
183 scm_igc ("scm_assert_cell_valid");
184 }
185 }
406c7d90 186 }
61045190 187 already_running = 0; /* re-enable */
406c7d90
DH
188 }
189}
190
191
192SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
193 (SCM flag),
1e6808ea 194 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
e81d98ec
DH
195 "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
196 "but no additional calls to garbage collection are issued.\n"
197 "If @var{flag} is a number, cell access checking is enabled,\n"
198 "with an additional garbage collection after the given\n"
199 "number of cell accesses.\n"
1e6808ea
MG
200 "This procedure only exists when the compile-time flag\n"
201 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
406c7d90
DH
202#define FUNC_NAME s_scm_set_debug_cell_accesses_x
203{
204 if (SCM_FALSEP (flag)) {
205 scm_debug_cell_accesses_p = 0;
206 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
e81d98ec
DH
207 debug_cells_gc_interval = 0;
208 scm_debug_cell_accesses_p = 1;
209 } else if (SCM_INUMP (flag)) {
210 long int f = SCM_INUM (flag);
211 if (f <= 0) SCM_OUT_OF_RANGE (1, flag);
212 debug_cells_gc_interval = f;
406c7d90
DH
213 scm_debug_cell_accesses_p = 1;
214 } else {
215 SCM_WRONG_TYPE_ARG (1, flag);
216 }
217 return SCM_UNSPECIFIED;
218}
219#undef FUNC_NAME
220
221#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
222
223\f
224
0f2d19dd 225/* {heap tuning parameters}
a00c95d9 226 *
0f2d19dd
JB
227 * These are parameters for controlling memory allocation. The heap
228 * is the area out of which scm_cons, and object headers are allocated.
229 *
230 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
231 * 64 bit machine. The units of the _SIZE parameters are bytes.
232 * Cons pairs and object headers occupy one heap cell.
233 *
234 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
235 * allocated initially the heap will grow by half its current size
236 * each subsequent time more heap is needed.
237 *
238 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
239 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
1be6b49c 240 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type size_t. This code
0f2d19dd 241 * is in scm_init_storage() and alloc_some_heap() in sys.c
a00c95d9 242 *
0f2d19dd
JB
243 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
244 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
245 *
246 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
247 * is needed.
248 *
249 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
a00c95d9 250 * trigger a GC.
6064dcc6
MV
251 *
252 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
253 * reclaimed by a GC triggered by must_malloc. If less than this is
254 * reclaimed, the trigger threshold is raised. [I don't know what a
255 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
a00c95d9 256 * work around a oscillation that caused almost constant GC.]
0f2d19dd
JB
257 */
258
8fef55a8
MD
259/*
260 * Heap size 45000 and 40% min yield gives quick startup and no extra
261 * heap allocation. Having higher values on min yield may lead to
262 * large heaps, especially if code behaviour is varying its
263 * maximum consumption between different freelists.
264 */
d6884e63
ML
265
266#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
267#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
268#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
1be6b49c 269size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
d6884e63 270 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
aeacfc8f 271int scm_default_min_yield_1 = 40;
4c48ba06 272
d6884e63 273#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
1be6b49c 274size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
d6884e63 275 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
4c48ba06
MD
276/* The following value may seem large, but note that if we get to GC at
277 * all, this means that we have a numerically intensive application
278 */
aeacfc8f 279int scm_default_min_yield_2 = 40;
4c48ba06 280
1be6b49c 281size_t scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
4c48ba06 282
d6884e63 283#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
0f2d19dd
JB
284#ifdef _QC
285# define SCM_HEAP_SEG_SIZE 32768L
286#else
287# ifdef sequent
4c48ba06 288# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
0f2d19dd 289# else
4c48ba06 290# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
0f2d19dd
JB
291# endif
292#endif
4c48ba06 293/* Make heap grow with factor 1.5 */
4a4c9785 294#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
0f2d19dd 295#define SCM_INIT_MALLOC_LIMIT 100000
6064dcc6 296#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
0f2d19dd 297
d6884e63
ML
298/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
299 aligned inner bounds for allocated storage */
0f2d19dd
JB
300
301#ifdef PROT386
302/*in 386 protected mode we must only adjust the offset */
a00c95d9
ML
303# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
304# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
0f2d19dd
JB
305#else
306# ifdef _UNICOS
c014a02e
ML
307# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
308# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
0f2d19dd 309# else
c014a02e
ML
310# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
311# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
0f2d19dd
JB
312# endif /* UNICOS */
313#endif /* PROT386 */
314
ecf470a2
ML
315#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
316
d6884e63
ML
317#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
318#define CLUSTER_SIZE_IN_BYTES(freelist) \
319 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
0f2d19dd
JB
320
321\f
945fec60 322/* scm_freelists
0f2d19dd 323 */
945fec60 324
92c2555f 325typedef struct scm_t_freelist {
a00c95d9
ML
326 /* collected cells */
327 SCM cells;
a00c95d9
ML
328 /* number of cells left to collect before cluster is full */
329 unsigned int left_to_collect;
b37fe1c5
MD
330 /* number of clusters which have been allocated */
331 unsigned int clusters_allocated;
8fef55a8
MD
332 /* a list of freelists, each of size cluster_size,
333 * except the last one which may be shorter
334 */
a00c95d9
ML
335 SCM clusters;
336 SCM *clustertail;
b37fe1c5 337 /* this is the number of objects in each cluster, including the spine cell */
1be6b49c 338 unsigned int cluster_size;
8fef55a8 339 /* indicates that we should grow heap instead of GC:ing
a00c95d9
ML
340 */
341 int grow_heap_p;
8fef55a8 342 /* minimum yield on this list in order not to grow the heap
a00c95d9 343 */
8fef55a8
MD
344 long min_yield;
345 /* defines min_yield as percent of total heap size
a00c95d9 346 */
8fef55a8 347 int min_yield_fraction;
a00c95d9
ML
348 /* number of cells per object on this list */
349 int span;
350 /* number of collected cells during last GC */
c014a02e 351 unsigned long collected;
1811ebce 352 /* number of collected cells during penultimate GC */
c014a02e 353 unsigned long collected_1;
a00c95d9
ML
354 /* total number of cells in heap segments
355 * belonging to this list.
356 */
c014a02e 357 unsigned long heap_size;
92c2555f 358} scm_t_freelist;
a00c95d9 359
4a4c9785 360SCM scm_freelist = SCM_EOL;
92c2555f 361scm_t_freelist scm_master_freelist = {
729dbac3 362 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
4a4c9785
MD
363};
364SCM scm_freelist2 = SCM_EOL;
92c2555f 365scm_t_freelist scm_master_freelist2 = {
729dbac3 366 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
4a4c9785 367};
0f2d19dd
JB
368
369/* scm_mtrigger
370 * is the number of bytes of must_malloc allocation needed to trigger gc.
371 */
c014a02e 372unsigned long scm_mtrigger;
0f2d19dd 373
0f2d19dd
JB
374/* scm_gc_heap_lock
375 * If set, don't expand the heap. Set only during gc, during which no allocation
376 * is supposed to take place anyway.
377 */
378int scm_gc_heap_lock = 0;
379
380/* GC Blocking
381 * Don't pause for collection if this is set -- just
382 * expand the heap.
383 */
0f2d19dd
JB
384int scm_block_gc = 1;
385
0f2d19dd
JB
386/* During collection, this accumulates objects holding
387 * weak references.
388 */
ab4bef85 389SCM scm_weak_vectors;
0f2d19dd 390
7445e0e8
MD
391/* During collection, this accumulates structures which are to be freed.
392 */
393SCM scm_structs_to_free;
394
0f2d19dd
JB
395/* GC Statistics Keeping
396 */
c014a02e
ML
397unsigned long scm_cells_allocated = 0;
398unsigned long scm_mallocated = 0;
399unsigned long scm_gc_cells_collected;
400unsigned long scm_gc_yield;
401static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
402unsigned long scm_gc_malloc_collected;
403unsigned long scm_gc_ports_collected;
0f2d19dd 404unsigned long scm_gc_time_taken = 0;
c014a02e
ML
405static unsigned long t_before_gc;
406static unsigned long t_before_sweep;
c9b0d4b0
ML
407unsigned long scm_gc_mark_time_taken = 0;
408unsigned long scm_gc_sweep_time_taken = 0;
c014a02e
ML
409unsigned long scm_gc_times = 0;
410unsigned long scm_gc_cells_swept = 0;
c9b0d4b0
ML
411double scm_gc_cells_marked_acc = 0.;
412double scm_gc_cells_swept_acc = 0.;
0f2d19dd
JB
413
414SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
415SCM_SYMBOL (sym_heap_size, "cell-heap-size");
416SCM_SYMBOL (sym_mallocated, "bytes-malloced");
417SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
418SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
419SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
c9b0d4b0
ML
420SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
421SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
422SCM_SYMBOL (sym_times, "gc-times");
423SCM_SYMBOL (sym_cells_marked, "cells-marked");
424SCM_SYMBOL (sym_cells_swept, "cells-swept");
0f2d19dd 425
92c2555f 426typedef struct scm_t_heap_seg_data
0f2d19dd 427{
cf2d30f6
JB
428 /* lower and upper bounds of the segment */
429 SCM_CELLPTR bounds[2];
430
431 /* address of the head-of-freelist pointer for this segment's cells.
432 All segments usually point to the same one, scm_freelist. */
92c2555f 433 scm_t_freelist *freelist;
cf2d30f6 434
fe517a7d 435 /* number of cells per object in this segment */
945fec60 436 int span;
92c2555f 437} scm_t_heap_seg_data;
0f2d19dd
JB
438
439
440
92c2555f 441static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);
b6efc951
DH
442
443typedef enum { return_on_error, abort_on_error } policy_on_error;
92c2555f 444static void alloc_some_heap (scm_t_freelist *, policy_on_error);
0f2d19dd
JB
445
446
d6884e63
ML
447#define SCM_HEAP_SIZE \
448 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
449#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
450
451#define BVEC_GROW_SIZE 256
452#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
92c2555f 453#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))
d6884e63
ML
454
455/* mark space allocation */
456
92c2555f 457typedef struct scm_t_mark_space
d6884e63 458{
92c2555f
MV
459 scm_t_c_bvec_limb *bvec_space;
460 struct scm_t_mark_space *next;
461} scm_t_mark_space;
d6884e63 462
92c2555f
MV
463static scm_t_mark_space *current_mark_space;
464static scm_t_mark_space **mark_space_ptr;
1be6b49c 465static ptrdiff_t current_mark_space_offset;
92c2555f 466static scm_t_mark_space *mark_space_head;
d6884e63 467
92c2555f 468static scm_t_c_bvec_limb *
d6884e63 469get_bvec ()
db4b4ca6 470#define FUNC_NAME "get_bvec"
d6884e63 471{
92c2555f 472 scm_t_c_bvec_limb *res;
d6884e63
ML
473
474 if (!current_mark_space)
475 {
92c2555f 476 SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
d6884e63 477 if (!current_mark_space)
db4b4ca6 478 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
479
480 current_mark_space->bvec_space = NULL;
481 current_mark_space->next = NULL;
482
483 *mark_space_ptr = current_mark_space;
484 mark_space_ptr = &(current_mark_space->next);
485
486 return get_bvec ();
487 }
488
489 if (!(current_mark_space->bvec_space))
490 {
491 SCM_SYSCALL (current_mark_space->bvec_space =
92c2555f 492 (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
d6884e63 493 if (!(current_mark_space->bvec_space))
db4b4ca6 494 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
495
496 current_mark_space_offset = 0;
497
498 return get_bvec ();
499 }
500
501 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
502 {
503 current_mark_space = NULL;
504
505 return get_bvec ();
506 }
507
508 res = current_mark_space->bvec_space + current_mark_space_offset;
509 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
510
511 return res;
512}
db4b4ca6
DH
513#undef FUNC_NAME
514
d6884e63
ML
515
516static void
517clear_mark_space ()
518{
92c2555f 519 scm_t_mark_space *ms;
d6884e63
ML
520
521 for (ms = mark_space_head; ms; ms = ms->next)
522 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
523}
524
525
0f2d19dd 526\f
cf2d30f6
JB
527/* Debugging functions. */
528
bb2c57fa 529#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6 530
8ded62a3 531static void
92c2555f 532map_free_list (scm_t_freelist *master, SCM freelist)
8ded62a3 533{
c014a02e 534 long last_seg = -1, count = 0;
8ded62a3 535 SCM f;
a00c95d9 536
3f5d82cd 537 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
8ded62a3 538 {
592996c9 539 long int this_seg = heap_segment (f);
8ded62a3 540
592996c9
DH
541 if (this_seg == -1)
542 {
543 fprintf (stderr,
544 "map_free_list: can't find segment containing cell %lux\n",
545 (unsigned long int) SCM_UNPACK (cell));
546 abort ();
547 }
548 else if (this_seg != last_seg)
8ded62a3
MD
549 {
550 if (last_seg != -1)
1be6b49c
ML
551 fprintf (stderr, " %5ld %d-cells in segment %ld\n",
552 (long) count, master->span, (long) last_seg);
8ded62a3
MD
553 last_seg = this_seg;
554 count = 0;
555 }
556 count++;
557 }
558 if (last_seg != -1)
1be6b49c
ML
559 fprintf (stderr, " %5ld %d-cells in segment %ld\n",
560 (long) count, master->span, (long) last_seg);
8ded62a3 561}
cf2d30f6 562
a00c95d9 563SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c 564 (),
5352393c
MG
565 "Print debugging information about the free-list.\n"
566 "@code{map-free-list} is only included in\n"
567 "@code{--enable-guile-debug} builds of Guile.")
acb0a19c
MD
568#define FUNC_NAME s_scm_map_free_list
569{
592996c9
DH
570 size_t i;
571
1be6b49c
ML
572 fprintf (stderr, "%ld segments total (%d:%ld",
573 (long) scm_n_heap_segs,
4c48ba06 574 scm_heap_table[0].span,
1be6b49c 575 (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));
592996c9
DH
576
577 for (i = 1; i != scm_n_heap_segs; i++)
1be6b49c 578 fprintf (stderr, ", %d:%ld",
4c48ba06 579 scm_heap_table[i].span,
1be6b49c 580 (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
4c48ba06 581 fprintf (stderr, ")\n");
8ded62a3
MD
582 map_free_list (&scm_master_freelist, scm_freelist);
583 map_free_list (&scm_master_freelist2, scm_freelist2);
cf2d30f6
JB
584 fflush (stderr);
585
586 return SCM_UNSPECIFIED;
587}
1bbd0b84 588#undef FUNC_NAME
cf2d30f6 589
c014a02e
ML
590static long last_cluster;
591static long last_size;
4c48ba06 592
c014a02e
ML
593static long
594free_list_length (char *title, long i, SCM freelist)
5384bc5b
MD
595{
596 SCM ls;
c014a02e 597 long n = 0;
3f5d82cd
DH
598 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
599 if (SCM_FREE_CELL_P (ls))
5384bc5b
MD
600 ++n;
601 else
602 {
1be6b49c 603 fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n);
5384bc5b
MD
604 abort ();
605 }
4c48ba06
MD
606 if (n != last_size)
607 {
608 if (i > 0)
609 {
610 if (last_cluster == i - 1)
1be6b49c 611 fprintf (stderr, "\t%ld\n", (long) last_size);
4c48ba06 612 else
1be6b49c 613 fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
4c48ba06
MD
614 }
615 if (i >= 0)
1be6b49c 616 fprintf (stderr, "%s %ld", title, (long) i);
4c48ba06 617 else
1be6b49c 618 fprintf (stderr, "%s\t%ld\n", title, (long) n);
4c48ba06
MD
619 last_cluster = i;
620 last_size = n;
621 }
5384bc5b
MD
622 return n;
623}
624
625static void
92c2555f 626free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
5384bc5b
MD
627{
628 SCM clusters;
c014a02e 629 long i = 0, len, n = 0;
5384bc5b
MD
630 fprintf (stderr, "%s\n\n", title);
631 n += free_list_length ("free list", -1, freelist);
632 for (clusters = master->clusters;
633 SCM_NNULLP (clusters);
634 clusters = SCM_CDR (clusters))
4c48ba06
MD
635 {
636 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
637 n += len;
638 }
639 if (last_cluster == i - 1)
1be6b49c 640 fprintf (stderr, "\t%ld\n", (long) last_size);
4c48ba06 641 else
1be6b49c
ML
642 fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
643 fprintf (stderr, "\ntotal %ld objects\n\n", (long) n);
5384bc5b
MD
644}
645
a00c95d9 646SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b 647 (),
5352393c
MG
648 "Print debugging information about the free-list.\n"
649 "@code{free-list-length} is only included in\n"
650 "@code{--enable-guile-debug} builds of Guile.")
5384bc5b
MD
651#define FUNC_NAME s_scm_free_list_length
652{
b37fe1c5
MD
653 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
654 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 655 return SCM_UNSPECIFIED;
5384bc5b
MD
656}
657#undef FUNC_NAME
658
bb2c57fa
MD
659#endif
660
661#ifdef GUILE_DEBUG_FREELIST
cf2d30f6 662
d3dd80ab
MG
663/* Non-zero if freelist debugging is in effect. Set this via
664 `gc-set-debug-check-freelist!'. */
665static int scm_debug_check_freelist = 0;
666
cf2d30f6 667/* Number of calls to SCM_NEWCELL since startup. */
c014a02e
ML
668static unsigned long scm_newcell_count;
669static unsigned long scm_newcell2_count;
cf2d30f6
JB
670
671/* Search freelist for anything that isn't marked as a free cell.
672 Abort if we find something. */
8ded62a3
MD
673static void
674scm_check_freelist (SCM freelist)
675{
676 SCM f;
c014a02e 677 long i = 0;
8ded62a3 678
3f5d82cd
DH
679 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
680 if (!SCM_FREE_CELL_P (f))
8ded62a3 681 {
1be6b49c
ML
682 fprintf (stderr, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
683 (long) scm_newcell_count, (long) i);
8ded62a3
MD
684 abort ();
685 }
686}
cf2d30f6 687
a00c95d9 688SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 689 (SCM flag),
1e6808ea
MG
690 "If @var{flag} is @code{#t}, check the freelist for consistency\n"
691 "on each cell allocation. This procedure only exists when the\n"
692 "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
1bbd0b84 693#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 694{
d6884e63
ML
695 /* [cmm] I did a double-take when I read this code the first time.
696 well, FWIW. */
945fec60 697 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
698 return SCM_UNSPECIFIED;
699}
1bbd0b84 700#undef FUNC_NAME
25748c78
GB
701
702
4a4c9785
MD
703SCM
704scm_debug_newcell (void)
705{
706 SCM new;
707
708 scm_newcell_count++;
709 if (scm_debug_check_freelist)
710 {
8ded62a3 711 scm_check_freelist (scm_freelist);
4a4c9785
MD
712 scm_gc();
713 }
714
715 /* The rest of this is supposed to be identical to the SCM_NEWCELL
716 macro. */
3f5d82cd 717 if (SCM_NULLP (scm_freelist))
7c33806a
DH
718 {
719 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
720 SCM_GC_SET_ALLOCATED (new);
721 }
4a4c9785
MD
722 else
723 {
724 new = scm_freelist;
3f5d82cd 725 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
7c33806a 726 SCM_GC_SET_ALLOCATED (new);
4a4c9785
MD
727 }
728
729 return new;
730}
731
732SCM
733scm_debug_newcell2 (void)
734{
735 SCM new;
736
737 scm_newcell2_count++;
738 if (scm_debug_check_freelist)
739 {
8ded62a3 740 scm_check_freelist (scm_freelist2);
4a4c9785
MD
741 scm_gc ();
742 }
743
744 /* The rest of this is supposed to be identical to the SCM_NEWCELL
745 macro. */
3f5d82cd 746 if (SCM_NULLP (scm_freelist2))
7c33806a
DH
747 {
748 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
749 SCM_GC_SET_ALLOCATED (new);
750 }
4a4c9785
MD
751 else
752 {
753 new = scm_freelist2;
3f5d82cd 754 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
7c33806a 755 SCM_GC_SET_ALLOCATED (new);
4a4c9785
MD
756 }
757
758 return new;
759}
760
fca7547b 761#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
762
763\f
0f2d19dd 764
c014a02e 765static unsigned long
92c2555f 766master_cells_allocated (scm_t_freelist *master)
b37fe1c5 767{
d6884e63 768 /* the '- 1' below is to ignore the cluster spine cells. */
c014a02e 769 long objects = master->clusters_allocated * (master->cluster_size - 1);
b37fe1c5
MD
770 if (SCM_NULLP (master->clusters))
771 objects -= master->left_to_collect;
772 return master->span * objects;
773}
774
c014a02e 775static unsigned long
b37fe1c5
MD
776freelist_length (SCM freelist)
777{
c014a02e 778 long n;
3f5d82cd 779 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
b37fe1c5
MD
780 ++n;
781 return n;
782}
783
c014a02e 784static unsigned long
b37fe1c5
MD
785compute_cells_allocated ()
786{
787 return (scm_cells_allocated
788 + master_cells_allocated (&scm_master_freelist)
789 + master_cells_allocated (&scm_master_freelist2)
790 - scm_master_freelist.span * freelist_length (scm_freelist)
791 - scm_master_freelist2.span * freelist_length (scm_freelist2));
792}
b37fe1c5 793
0f2d19dd
JB
794/* {Scheme Interface to GC}
795 */
796
a00c95d9 797SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 798 (),
1e6808ea
MG
799 "Return an association list of statistics about Guile's current\n"
800 "use of storage.")
1bbd0b84 801#define FUNC_NAME s_scm_gc_stats
0f2d19dd 802{
c014a02e
ML
803 long i;
804 long n;
0f2d19dd 805 SCM heap_segs;
c014a02e
ML
806 unsigned long int local_scm_mtrigger;
807 unsigned long int local_scm_mallocated;
808 unsigned long int local_scm_heap_size;
809 unsigned long int local_scm_cells_allocated;
810 unsigned long int local_scm_gc_time_taken;
811 unsigned long int local_scm_gc_times;
812 unsigned long int local_scm_gc_mark_time_taken;
813 unsigned long int local_scm_gc_sweep_time_taken;
c9b0d4b0
ML
814 double local_scm_gc_cells_swept;
815 double local_scm_gc_cells_marked;
0f2d19dd
JB
816 SCM answer;
817
818 SCM_DEFER_INTS;
939794ce
DH
819
820 ++scm_block_gc;
821
0f2d19dd
JB
822 retry:
823 heap_segs = SCM_EOL;
824 n = scm_n_heap_segs;
825 for (i = scm_n_heap_segs; i--; )
c014a02e
ML
826 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
827 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
0f2d19dd
JB
828 heap_segs);
829 if (scm_n_heap_segs != n)
830 goto retry;
939794ce
DH
831
832 --scm_block_gc;
0f2d19dd 833
7febb4a2
MD
834 /* Below, we cons to produce the resulting list. We want a snapshot of
835 * the heap situation before consing.
836 */
0f2d19dd
JB
837 local_scm_mtrigger = scm_mtrigger;
838 local_scm_mallocated = scm_mallocated;
b37fe1c5 839 local_scm_heap_size = SCM_HEAP_SIZE;
b37fe1c5 840 local_scm_cells_allocated = compute_cells_allocated ();
0f2d19dd 841 local_scm_gc_time_taken = scm_gc_time_taken;
c9b0d4b0
ML
842 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
843 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
844 local_scm_gc_times = scm_gc_times;
845 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
846 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
0f2d19dd 847
1afff620
KN
848 answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
849 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
850 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
851 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
852 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
853 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
854 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
855 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
856 scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
857 scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
858 scm_cons (sym_heap_segments, heap_segs),
859 SCM_UNDEFINED);
0f2d19dd
JB
860 SCM_ALLOW_INTS;
861 return answer;
862}
1bbd0b84 863#undef FUNC_NAME
0f2d19dd
JB
864
865
/* Open the statistics for one collection run: record the start time and
 * reset the per-collection counters.  WHAT names the trigger of the
 * collection; it is unused here (only the timing side is recorded).  */
static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  /* Keep the previous yield around; adjust_min_yield () looks at the
   * larger of the last two yields.  */
  scm_gc_yield_1 = scm_gc_yield;
  /* Total cells currently accounted as allocated, over both masters.  */
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
879
939794ce 880
c9b0d4b0
ML
881static void
882gc_end_stats ()
0f2d19dd 883{
c9b0d4b0
ML
884 unsigned long t = scm_c_get_internal_run_time ();
885 scm_gc_time_taken += (t - t_before_gc);
886 scm_gc_sweep_time_taken += (t - t_before_sweep);
887 ++scm_gc_times;
888
889 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
890 scm_gc_cells_swept_acc += scm_gc_cells_swept;
0f2d19dd
JB
891}
892
893
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
	    "Return an integer that for the lifetime of @var{obj} is uniquely\n"
	    "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  /* The raw SCM bit pattern serves as the identity; for heap objects this
   * is the cell address (hence the name of the Scheme procedure).  */
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME
0f2d19dd
JB
903
904
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
	    (),
	    "Scans all of SCM objects and reclaims for further use those that are\n"
	    "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  /* Run a full collection with interrupts deferred around the collector.  */
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
0f2d19dd
JB
917
918
919\f
920/* {C Interface For When GC is Triggered}
921 */
922
b37fe1c5 923static void
92c2555f 924adjust_min_yield (scm_t_freelist *freelist)
b37fe1c5 925{
8fef55a8 926 /* min yield is adjusted upwards so that next predicted total yield
bda1446c 927 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
928 * `min_yield_fraction' of total heap size. Note, however, that
929 * the absolute value of min_yield will correspond to `collected'
bda1446c 930 * on one master (the one which currently is triggering GC).
b37fe1c5 931 *
bda1446c
MD
932 * The reason why we look at total yield instead of cells collected
933 * on one list is that we want to take other freelists into account.
934 * On this freelist, we know that (local) yield = collected cells,
935 * but that's probably not the case on the other lists.
b37fe1c5
MD
936 *
937 * (We might consider computing a better prediction, for example
938 * by computing an average over multiple GC:s.)
939 */
8fef55a8 940 if (freelist->min_yield_fraction)
b37fe1c5 941 {
37ddcaf6 942 /* Pick largest of last two yields. */
1be6b49c 943 long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
8fef55a8 944 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5 945#ifdef DEBUGINFO
1be6b49c
ML
946 fprintf (stderr, " after GC = %lu, delta = %ld\n",
947 (long) scm_cells_allocated,
948 (long) delta);
b37fe1c5
MD
949#endif
950 if (delta > 0)
8fef55a8 951 freelist->min_yield += delta;
b37fe1c5
MD
952 }
953}
954
b6efc951 955
4a4c9785 956/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
957 * common while the freelist will be individual for each thread.
958 */
4a4c9785
MD
959
960SCM
92c2555f 961scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
4a4c9785
MD
962{
963 SCM cell;
964 ++scm_ints_disabled;
4c48ba06
MD
965 do
966 {
c7387918 967 if (SCM_NULLP (master->clusters))
4c48ba06 968 {
150c200b 969 if (master->grow_heap_p || scm_block_gc)
4c48ba06 970 {
b6efc951
DH
971 /* In order to reduce gc frequency, try to allocate a new heap
972 * segment first, even if gc might find some free cells. If we
973 * can't obtain a new heap segment, we will try gc later.
974 */
4c48ba06 975 master->grow_heap_p = 0;
b6efc951 976 alloc_some_heap (master, return_on_error);
4c48ba06 977 }
b6efc951 978 if (SCM_NULLP (master->clusters))
b37fe1c5 979 {
b6efc951
DH
980 /* The heap was not grown, either because it wasn't scheduled to
981 * grow, or because there was not enough memory available. In
982 * both cases we have to try gc to get some free cells.
983 */
37ddcaf6 984#ifdef DEBUGINFO
1be6b49c
ML
985 fprintf (stderr, "allocated = %lu, ",
986 (long) (scm_cells_allocated
37ddcaf6 987 + master_cells_allocated (&scm_master_freelist)
c014a02e 988 + master_cells_allocated (&scm_master_freelist2)));
37ddcaf6 989#endif
b37fe1c5 990 scm_igc ("cells");
8fef55a8 991 adjust_min_yield (master);
c7387918
DH
992 if (SCM_NULLP (master->clusters))
993 {
b6efc951
DH
994 /* gc could not free any cells. Now, we _must_ allocate a
995 * new heap segment, because there is no other possibility
996 * to provide a new cell for the caller.
997 */
998 alloc_some_heap (master, abort_on_error);
c7387918 999 }
b37fe1c5 1000 }
4c48ba06
MD
1001 }
1002 cell = SCM_CAR (master->clusters);
1003 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 1004 ++master->clusters_allocated;
4c48ba06
MD
1005 }
1006 while (SCM_NULLP (cell));
d6884e63
ML
1007
1008#ifdef GUILE_DEBUG_FREELIST
1009 scm_check_freelist (cell);
1010#endif
1011
4a4c9785 1012 --scm_ints_disabled;
3f5d82cd 1013 *freelist = SCM_FREE_CELL_CDR (cell);
4a4c9785
MD
1014 return cell;
1015}
1016
b6efc951 1017
4c48ba06
MD
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 *
 * Fix: the function was declared `void' yet ends with `return cell;' —
 * it would not compile if this block were ever enabled.  It returns the
 * reserved cluster head, so the return type must be SCM.
 */

SCM
scm_alloc_cluster (scm_t_freelist *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
1033
801cb5e7 1034
92c2555f
MV
/* C-level hooks run at the successive phases of a collection; they are
 * invoked via scm_c_hook_run from scm_igc.  */
scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
801cb5e7 1040
b6efc951 1041
0f2d19dd 1042void
1bbd0b84 1043scm_igc (const char *what)
0f2d19dd 1044{
c014a02e 1045 long j;
0f2d19dd 1046
406c7d90 1047 ++scm_gc_running_p;
801cb5e7 1048 scm_c_hook_run (&scm_before_gc_c_hook, 0);
4c48ba06
MD
1049#ifdef DEBUGINFO
1050 fprintf (stderr,
1051 SCM_NULLP (scm_freelist)
1052 ? "*"
1053 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
1054#endif
42db06f0 1055 /* During the critical section, only the current thread may run. */
216eedfc 1056 SCM_CRITICAL_SECTION_START;
42db06f0 1057
e242dfd2 1058 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 1059
ab4bef85
JB
1060 if (!scm_stack_base || scm_block_gc)
1061 {
406c7d90 1062 --scm_gc_running_p;
ab4bef85
JB
1063 return;
1064 }
1065
c9b0d4b0
ML
1066 gc_start_stats (what);
1067
ab4bef85
JB
1068 if (scm_gc_heap_lock)
1069 /* We've invoked the collector while a GC is already in progress.
1070 That should never happen. */
1071 abort ();
0f2d19dd
JB
1072
1073 ++scm_gc_heap_lock;
ab4bef85 1074
0f2d19dd
JB
1075 /* flush dead entries from the continuation stack */
1076 {
c014a02e
ML
1077 long x;
1078 long bound;
0f2d19dd
JB
1079 SCM * elts;
1080 elts = SCM_VELTS (scm_continuation_stack);
b5c2579a 1081 bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
0f2d19dd
JB
1082 x = SCM_INUM (scm_continuation_stack_ptr);
1083 while (x < bound)
1084 {
1085 elts[x] = SCM_BOOL_F;
1086 ++x;
1087 }
1088 }
1089
801cb5e7
MD
1090 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1091
d6884e63
ML
1092 clear_mark_space ();
1093
42db06f0 1094#ifndef USE_THREADS
a00c95d9 1095
1b9be268 1096 /* Mark objects on the C stack. */
0f2d19dd
JB
1097 SCM_FLUSH_REGISTER_WINDOWS;
1098 /* This assumes that all registers are saved into the jmp_buf */
1099 setjmp (scm_save_regs_gc_mark);
1100 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
1be6b49c 1101 ( (size_t) (sizeof (SCM_STACKITEM) - 1 +
ce4a361d
JB
1102 sizeof scm_save_regs_gc_mark)
1103 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
1104
1105 {
6b1b030e 1106 unsigned long stack_len = scm_stack_size (scm_stack_base);
0f2d19dd 1107#ifdef SCM_STACK_GROWS_UP
6ba93e5e 1108 scm_mark_locations (scm_stack_base, stack_len);
0f2d19dd 1109#else
6ba93e5e 1110 scm_mark_locations (scm_stack_base - stack_len, stack_len);
0f2d19dd
JB
1111#endif
1112 }
1113
42db06f0
MD
1114#else /* USE_THREADS */
1115
1116 /* Mark every thread's stack and registers */
945fec60 1117 scm_threads_mark_stacks ();
42db06f0
MD
1118
1119#endif /* USE_THREADS */
0f2d19dd 1120
0f2d19dd
JB
1121 j = SCM_NUM_PROTECTS;
1122 while (j--)
1123 scm_gc_mark (scm_sys_protects[j]);
1124
6b1b030e
ML
1125 /* mark the registered roots */
1126 {
592996c9 1127 size_t i;
6b1b030e
ML
1128 for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) {
1129 SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
592996c9 1130 for (; !SCM_NULLP (l); l = SCM_CDR (l)) {
6b1b030e
ML
1131 SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
1132 scm_gc_mark (*p);
1133 }
1134 }
1135 }
1136
9de33deb
MD
1137 /* FIXME: we should have a means to register C functions to be run
1138 * in different phases of GC
a00c95d9 1139 */
9de33deb 1140 scm_mark_subr_table ();
a00c95d9 1141
42db06f0
MD
1142#ifndef USE_THREADS
1143 scm_gc_mark (scm_root->handle);
1144#endif
a00c95d9 1145
c9b0d4b0
ML
1146 t_before_sweep = scm_c_get_internal_run_time ();
1147 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1148
801cb5e7 1149 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
0493cd89 1150
0f2d19dd
JB
1151 scm_gc_sweep ();
1152
801cb5e7
MD
1153 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1154
0f2d19dd 1155 --scm_gc_heap_lock;
c9b0d4b0 1156 gc_end_stats ();
42db06f0 1157
216eedfc 1158 SCM_CRITICAL_SECTION_END;
801cb5e7 1159 scm_c_hook_run (&scm_after_gc_c_hook, 0);
406c7d90 1160 --scm_gc_running_p;
0f2d19dd
JB
1161}
1162
1163\f
939794ce 1164
a00c95d9 1165/* {Mark/Sweep}
0f2d19dd
JB
1166 */
1167
56495472
ML
1168#define MARK scm_gc_mark
1169#define FNAME "scm_gc_mark"
0f2d19dd 1170
56495472 1171#endif /*!MARK_DEPENDENCIES*/
0f2d19dd
JB
1172
/* Mark an object precisely.
 *
 * NOTE: this body is compiled twice — once as scm_gc_mark, and once more
 * (with MARK_DEPENDENCIES defined, via the file including itself) as
 * scm_gc_mark_dependencies, which marks what an object refers to without
 * re-marking the object itself.  Tail references are followed iteratively
 * by re-assigning `ptr' and jumping to gc_mark_loop instead of recursing,
 * so arbitrarily long list/vector chains do not grow the C stack.  */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;
  scm_t_bits cell_type;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

/* A simple hack for debugging.  Chose the second branch to get a
   meaningful backtrace for crashes inside the GC.
*/
#if 1
#define goto_gc_mark_loop goto gc_mark_loop
#define goto_gc_mark_nimp goto gc_mark_nimp
#else
#define goto_gc_mark_loop RECURSE(ptr); return
#define goto_gc_mark_nimp RECURSE(ptr); return
#endif

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
  /* We are in debug mode.  Check the ptr exhaustively. */
  if (!scm_cellp (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
  /* In non-debug mode, do at least some cheap testing. */
  if (!SCM_CELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif

#ifndef MARK_DEPENDENCIES

  /* Already marked: the object (and everything reachable from it) has
   * been visited before.  */
  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  cell_type = SCM_GC_CELL_TYPE (ptr);
  switch (SCM_ITAG7 (cell_type))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto_gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;
    case scm_tc7_pws:
      RECURSE (SCM_SETTER (ptr));
      ptr = SCM_PROCEDURE (ptr);
      goto_gc_mark_loop;
    case scm_tcs_cons_gloc:
      {
        /* Dirk:FIXME:: The following code is super ugly:  ptr may be a
         * struct or a gloc.  If it is a gloc, the cell word #0 of ptr
         * is the address of a scm_tc16_variable smob.  If it is a
         * struct, the cell word #0 of ptr is a pointer to a struct
         * vtable data region. (The fact that these are accessed in
         * the same way restricts the possibilites to change the data
         * layout of structs or heap cells.)  To discriminate between
         * the two, it is guaranteed that the scm_vtable_index_vcell
         * element of the prospective vtable is always zero.  For a
         * gloc, this location has the CDR of the variable smob, which
         * is guaranteed to be non-zero.
         */
        scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_t_bits * vtable_data = (scm_t_bits *) word0; /* access as struct */
        if (vtable_data [scm_vtable_index_vcell] != 0)
          {
            /* ptr is a gloc */
            SCM gloc_car = SCM_PACK (word0);
            RECURSE (gloc_car);
            ptr = SCM_CDR (ptr);
            goto gc_mark_loop;
          }
        else
          {
            /* ptr is a struct */
            SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
            long len = SCM_SYMBOL_LENGTH (layout);
            char * fields_desc = SCM_SYMBOL_CHARS (layout);
            scm_t_bits * struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);

            if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
              {
                RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
                RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
              }
            if (len)
              {
                long x;

                /* Walk the layout descriptor two characters per field;
                 * fields tagged 'p' hold protected (markable) objects.  */
                for (x = 0; x < len - 2; x += 2, ++struct_data)
                  if (fields_desc[x] == 'p')
                    RECURSE (SCM_PACK (*struct_data));
                if (fields_desc[x] == 'p')
                  {
                    if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                      for (x = *struct_data++; x; --x, ++struct_data)
                        RECURSE (SCM_PACK (*struct_data));
                    else
                      RECURSE (SCM_PACK (*struct_data));
                  }
              }
            /* mark vtable */
            ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
            goto_gc_mark_loop;
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_ENV (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_ENV (ptr);
      goto_gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
        break;
      /* Recurse on elements 1..n-1, follow element 0 as a tail call.  */
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto_gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
        size_t i = SCM_CCLO_LENGTH (ptr);
        size_t j;
        for (j = 1; j != i; ++j)
          {
            SCM obj = SCM_CCLO_REF (ptr, j);
            if (!SCM_IMP (obj))
              RECURSE (obj);
          }
        ptr = SCM_CCLO_REF (ptr, 0);
        goto_gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      /* Uniform vectors and strings carry no SCM references.  */
      break;

    case scm_tc7_substring:
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;

    case scm_tc7_wvect:
      /* Chain the weak vector for the post-mark weak-reference scan.  */
      SCM_SET_WVECT_GC_CHAIN (ptr, scm_weak_vectors);
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          long x;
          long len;
          int weak_keys;
          int weak_values;

          len = SCM_VECTOR_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    RECURSE (SCM_CAR (kvpair));
                  if (!weak_values)
                    RECURSE (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                RECURSE (alist);
            }
        }
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto_gc_mark_loop;
    case scm_tc7_variable:
      ptr = SCM_CELL_OBJECT_1 (ptr);
      goto_gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
      if (!(i < scm_numptob))
        SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
      if (SCM_PTAB_ENTRY(ptr))
        RECURSE (SCM_FILENAME (ptr));
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto_gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* We have detected a free cell.  This can happen if non-object data
           * on the C stack points into guile's heap and is scanned during
           * conservative marking.  */
#if (SCM_DEBUG_CELL_ACCESSES == 0)
          /* If cell debugging is disabled, there is a second situation in
           * which a free cell can be encountered, namely if with preemptive
           * threading one thread has just obtained a fresh cell and was
           * preempted before the cell initialization was completed.  In this
           * case, some entries of the cell may already contain objects.
           * Thus, if cell debugging is disabled, free cells are scanned
           * conservatively.  */
          scm_gc_mark_cell_conservatively (ptr);
#else /* SCM_DEBUG_CELL_ACCESSES == 1 */
          /* With cell debugging enabled, a freshly obtained but not fully
           * initialized cell is guaranteed to be of type scm_tc16_allocated.
           * Thus, no conservative scanning for free cells is necessary, but
           * instead cells of type scm_tc16_allocated have to be scanned
           * conservatively.  This is done in the mark function of the
           * scm_tc16_allocated smob type.  */
#endif
          break;
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
          if (!(i < scm_numsmob))
            SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto_gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME
0f2d19dd 1505
56495472
ML
1506#ifndef MARK_DEPENDENCIES
1507
1508#undef MARK
56495472
ML
1509#undef FNAME
1510
1511/* And here we define `scm_gc_mark_dependencies', by including this
1512 * same file in itself.
1513 */
1514#define MARK scm_gc_mark_dependencies
1515#define FNAME "scm_gc_mark_dependencies"
1516#define MARK_DEPENDENCIES
1517#include "gc.c"
1518#undef MARK_DEPENDENCIES
1519#undef MARK
56495472
ML
1520#undef FNAME
1521
0f2d19dd 1522
592996c9
DH
/* Determine whether the given value does actually represent a cell in some
 * heap segment.  If this is the case, the number of the heap segment is
 * returned.  Otherwise, -1 is returned.  Binary search is used in order to
 * determine the heap segment that contains the cell.*/
/* FIXME: To be used within scm_gc_mark_cell_conservatively,
 * scm_mark_locations and scm_cellp this function should be an inline
 * function.  */
static long int
heap_segment (SCM obj)
{
  if (!SCM_CELLP (obj))
    return -1;
  else
    {
      SCM_CELLPTR ptr = SCM2PTR (obj);
      unsigned long int i = 0;
      unsigned long int j = scm_n_heap_segs - 1;

      /* Quick rejection: outside the overall range of all segments.  */
      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
        return -1;
      else if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
        return -1;
      else
        {
          /* Bisect; invariant: ptr lies at or after segment i's start and
           * before segment j's end.  */
          while (i < j)
            {
              if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[1]))
                {
                  break;
                }
              else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                {
                  i = j;
                  break;
                }
              else
                {
                  unsigned long int k = (i + j) / 2;

                  /* k == i means the interval cannot shrink further: ptr
                   * falls into a gap between segments.  */
                  if (k == i)
                    return -1;
                  else if (SCM_PTR_LT (ptr, scm_heap_table[k].bounds[1]))
                    {
                      j = k;
                      ++i;
                      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
                        return -1;
                    }
                  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                    {
                      i = k;
                      --j;
                      if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
                        return -1;
                    }
                }
            }

          /* A pointer into a double-cell segment must be double-cell
           * aligned, and card header cells never hold objects.  */
          if (!DOUBLECELL_ALIGNED_P (obj) && scm_heap_table[i].span == 2)
            return -1;
          else if (SCM_GC_IN_CARD_HEADERP (ptr))
            return -1;
          else
            return i;
        }
    }
}
1590
1591
592996c9
DH
1592/* Mark the entries of a cell conservatively. The given cell is known to be
1593 * on the heap. Still we have to determine its heap segment in order to
1594 * figure out whether it is a single or a double cell. Then, each of the cell
1595 * elements itself is checked and potentially marked. */
1596void
1597scm_gc_mark_cell_conservatively (SCM cell)
2e11a577 1598{
592996c9
DH
1599 unsigned long int cell_segment = heap_segment (cell);
1600 unsigned int span = scm_heap_table[cell_segment].span;
1601 unsigned int i;
1a548472 1602
592996c9
DH
1603 for (i = 1; i != span * 2; ++i)
1604 {
1605 SCM obj = SCM_CELL_OBJECT (cell, i);
1606 long int obj_segment = heap_segment (obj);
1607 if (obj_segment >= 0)
1608 scm_gc_mark (obj);
1609 }
1610}
61045190 1611
592996c9
DH
1612
1613/* Mark a region conservatively */
1614void
1615scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
1616{
1617 unsigned long m;
1618
1619 for (m = 0; m < n; ++m)
1620 {
1621 SCM obj = * (SCM *) &x[m];
1622 long int segment = heap_segment (obj);
1623 if (segment >= 0)
1624 scm_gc_mark (obj);
1a548472 1625 }
592996c9 1626}
2e11a577 1627
592996c9
DH
1628
1629/* The function scm_cellp determines whether an SCM value can be regarded as a
1630 * pointer to a cell on the heap.
1631 */
1632int
1633scm_cellp (SCM value)
1634{
1635 long int segment = heap_segment (value);
1636 return (segment >= 0);
2e11a577
MD
1637}
1638
1639
/* Reset FREELIST's per-sweep state before a collection's sweep phase:
 * empty the cell list and cluster chain, and roll the previous collection
 * count into `collected_1' so the last two yields remain available.  */
static void
gc_sweep_freelist_start (scm_t_freelist *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  /* The cluster chain is rebuilt by appending; clustertail always points
   * at the link where the next full cluster is stored.  */
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}
1651
/* Finish FREELIST's sweep: append the trailing (possibly partial) cluster
 * to the cluster chain, account its cells, and decide whether the heap
 * should be grown before the next collection.  */
static void
gc_sweep_freelist_finish (scm_t_freelist *freelist)
{
  long collected;
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      /* Convert the head of the partial cluster into a cluster link:
       * word 0 becomes the cell chain, word 1 terminates the chain.  */
      SCM c = freelist->cells;
      SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
      SCM_SET_CELL_WORD_1 (c, SCM_EOL);
      freelist->collected +=
	freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}
0f2d19dd 1677
d6884e63
ML
/* Advance PTR to the next data cell of the given SPAN, skipping over the
 * header cells at the start of a card when the increment lands inside one.  */
#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
             CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
             : nxt__); \
  } while (0)
1685
a00c95d9 1686void
0f2d19dd 1687scm_gc_sweep ()
acf4331f 1688#define FUNC_NAME "scm_gc_sweep"
0f2d19dd
JB
1689{
1690 register SCM_CELLPTR ptr;
0f2d19dd 1691 register SCM nfreelist;
92c2555f 1692 register scm_t_freelist *freelist;
c014a02e 1693 register unsigned long m;
0f2d19dd 1694 register int span;
592996c9 1695 size_t i;
1be6b49c 1696 size_t seg_size;
0f2d19dd 1697
0f2d19dd 1698 m = 0;
0f2d19dd 1699
4c48ba06
MD
1700 gc_sweep_freelist_start (&scm_master_freelist);
1701 gc_sweep_freelist_start (&scm_master_freelist2);
a00c95d9 1702
cf2d30f6 1703 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1704 {
c014a02e 1705 register long left_to_collect;
1be6b49c 1706 register size_t j;
15e9d186 1707
cf2d30f6
JB
1708 /* Unmarked cells go onto the front of the freelist this heap
1709 segment points to. Rather than updating the real freelist
1710 pointer as we go along, we accumulate the new head in
1711 nfreelist. Then, if it turns out that the entire segment is
1712 free, we free (i.e., malloc's free) the whole segment, and
1713 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1714 freelist = scm_heap_table[i].freelist;
1715 nfreelist = freelist->cells;
4c48ba06 1716 left_to_collect = freelist->left_to_collect;
945fec60 1717 span = scm_heap_table[i].span;
cf2d30f6 1718
a00c95d9
ML
1719 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1720 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
c9b0d4b0 1721
d6884e63
ML
1722 /* use only data cells in seg_size */
1723 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1724
c9b0d4b0
ML
1725 scm_gc_cells_swept += seg_size;
1726
0f2d19dd
JB
1727 for (j = seg_size + span; j -= span; ptr += span)
1728 {
d6884e63 1729 SCM scmptr;
96f6f4ae 1730
d6884e63 1731 if (SCM_GC_IN_CARD_HEADERP (ptr))
0f2d19dd 1732 {
d6884e63
ML
1733 SCM_CELLPTR nxt;
1734
1735 /* cheat here */
1736 nxt = ptr;
1737 NEXT_DATA_CELL (nxt, span);
1738 j += span;
1739
1740 ptr = nxt - span;
1741 continue;
1742 }
1743
1744 scmptr = PTR2SCM (ptr);
1745
1746 if (SCM_GCMARKP (scmptr))
1747 continue;
7bb8eac7 1748
d6884e63
ML
1749 switch SCM_TYP7 (scmptr)
1750 {
0f2d19dd 1751 case scm_tcs_cons_gloc:
0f2d19dd 1752 {
c8045e8d
DH
1753 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1754 * struct or a gloc. See the corresponding comment in
1755 * scm_gc_mark.
1756 */
92c2555f 1757 scm_t_bits word0 = (SCM_CELL_WORD_0 (scmptr)
7445e0e8
MD
1758 - scm_tc3_cons_gloc);
1759 /* access as struct */
92c2555f 1760 scm_t_bits * vtable_data = (scm_t_bits *) word0;
d6884e63 1761 if (vtable_data[scm_vtable_index_vcell] == 0)
0f2d19dd 1762 {
7445e0e8
MD
1763 /* Structs need to be freed in a special order.
1764 * This is handled by GC C hooks in struct.c.
1765 */
1766 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1767 scm_structs_to_free = scmptr;
7bb8eac7 1768 continue;
c8045e8d 1769 }
7445e0e8 1770 /* fall through so that scmptr gets collected */
0f2d19dd
JB
1771 }
1772 break;
1773 case scm_tcs_cons_imcar:
1774 case scm_tcs_cons_nimcar:
1775 case scm_tcs_closures:
e641afaf 1776 case scm_tc7_pws:
0f2d19dd
JB
1777 break;
1778 case scm_tc7_wvect:
0f2d19dd 1779 case scm_tc7_vector:
1b9be268 1780 {
c014a02e 1781 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1b9be268
DH
1782 if (length > 0)
1783 {
92c2555f 1784 m += length * sizeof (scm_t_bits);
1b9be268
DH
1785 scm_must_free (SCM_VECTOR_BASE (scmptr));
1786 }
1787 break;
1788 }
0f2d19dd
JB
1789#ifdef CCLO
1790 case scm_tc7_cclo:
b5c2579a 1791 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
06ee04b2 1792 scm_must_free (SCM_CCLO_BASE (scmptr));
0f2d19dd 1793 break;
06ee04b2 1794#endif
afe5177e 1795#ifdef HAVE_ARRAYS
0f2d19dd 1796 case scm_tc7_bvect:
93778877 1797 {
c014a02e 1798 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
93778877
DH
1799 if (length > 0)
1800 {
c014a02e 1801 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
93778877
DH
1802 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1803 }
1804 }
06ee04b2 1805 break;
0f2d19dd 1806 case scm_tc7_byvect:
0f2d19dd
JB
1807 case scm_tc7_ivect:
1808 case scm_tc7_uvect:
0f2d19dd 1809 case scm_tc7_svect:
5c11cc9d 1810#ifdef HAVE_LONG_LONGS
0f2d19dd 1811 case scm_tc7_llvect:
0f2d19dd
JB
1812#endif
1813 case scm_tc7_fvect:
0f2d19dd 1814 case scm_tc7_dvect:
0f2d19dd 1815 case scm_tc7_cvect:
d1ca2c64 1816 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
06ee04b2
DH
1817 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1818 break;
afe5177e 1819#endif
0f2d19dd 1820 case scm_tc7_substring:
0f2d19dd
JB
1821 break;
1822 case scm_tc7_string:
b5c2579a 1823 m += SCM_STRING_LENGTH (scmptr) + 1;
f151f912
DH
1824 scm_must_free (SCM_STRING_CHARS (scmptr));
1825 break;
28b06554 1826 case scm_tc7_symbol:
b5c2579a 1827 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
f151f912 1828 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
0f2d19dd 1829 break;
0f2d19dd 1830 case scm_tcs_subrs:
d6884e63 1831 /* the various "subrs" (primitives) are never freed */
0f2d19dd
JB
1832 continue;
1833 case scm_tc7_port:
0f2d19dd
JB
1834 if SCM_OPENP (scmptr)
1835 {
1836 int k = SCM_PTOBNUM (scmptr);
7a7f7c53 1837#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1838 if (!(k < scm_numptob))
7a7f7c53
DH
1839 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1840#endif
0f2d19dd 1841 /* Keep "revealed" ports alive. */
945fec60 1842 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1843 continue;
1844 /* Yes, I really do mean scm_ptobs[k].free */
1845 /* rather than ftobs[k].close. .close */
1846 /* is for explicit CLOSE-PORT by user */
84af0382 1847 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1848 SCM_SETSTREAM (scmptr, 0);
1849 scm_remove_from_port_table (scmptr);
1850 scm_gc_ports_collected++;
22a52da1 1851 SCM_CLR_PORT_OPEN_FLAG (scmptr);
0f2d19dd
JB
1852 }
1853 break;
1854 case scm_tc7_smob:
d6884e63 1855 switch SCM_TYP16 (scmptr)
0f2d19dd
JB
1856 {
1857 case scm_tc_free_cell:
acb0a19c 1858 case scm_tc16_real:
0f2d19dd
JB
1859 break;
1860#ifdef SCM_BIGDIG
acb0a19c 1861 case scm_tc16_big:
0f2d19dd 1862 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
06ee04b2
DH
1863 scm_must_free (SCM_BDIGITS (scmptr));
1864 break;
0f2d19dd 1865#endif /* def SCM_BIGDIG */
acb0a19c 1866 case scm_tc16_complex:
92c2555f 1867 m += sizeof (scm_t_complex);
405aaef9 1868 scm_must_free (SCM_COMPLEX_MEM (scmptr));
06ee04b2 1869 break;
0f2d19dd 1870 default:
0f2d19dd
JB
1871 {
1872 int k;
1873 k = SCM_SMOBNUM (scmptr);
7a7f7c53 1874#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1875 if (!(k < scm_numsmob))
7a7f7c53
DH
1876 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1877#endif
1878 if (scm_smobs[k].free)
1879 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1880 break;
1881 }
1882 }
1883 break;
1884 default:
acf4331f 1885 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1886 }
7bb8eac7 1887
4c48ba06 1888 if (!--left_to_collect)
4a4c9785 1889 {
22a52da1 1890 SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
4c48ba06
MD
1891 *freelist->clustertail = scmptr;
1892 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1893
4a4c9785 1894 nfreelist = SCM_EOL;
4c48ba06
MD
1895 freelist->collected += span * freelist->cluster_size;
1896 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1897 }
1898 else
4a4c9785
MD
1899 {
1900 /* Stick the new cell on the front of nfreelist. It's
1901 critical that we mark this cell as freed; otherwise, the
1902 conservative collector might trace it as some other type
1903 of object. */
54778cd3 1904 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
3f5d82cd 1905 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
4a4c9785
MD
1906 nfreelist = scmptr;
1907 }
0f2d19dd 1908 }
d6884e63 1909
0f2d19dd
JB
1910#ifdef GC_FREE_SEGMENTS
1911 if (n == seg_size)
1912 {
c014a02e 1913 register long j;
15e9d186 1914
4c48ba06 1915 freelist->heap_size -= seg_size;
cf2d30f6
JB
1916 free ((char *) scm_heap_table[i].bounds[0]);
1917 scm_heap_table[i].bounds[0] = 0;
1918 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1919 scm_heap_table[j - 1] = scm_heap_table[j];
1920 scm_n_heap_segs -= 1;
cf2d30f6 1921 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1922 }
1923 else
1924#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1925 {
1926 /* Update the real freelist pointer to point to the head of
1927 the list of free cells we've built for this segment. */
4c48ba06 1928 freelist->cells = nfreelist;
4c48ba06 1929 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1930 }
1931
fca7547b 1932#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
1933 scm_map_free_list ();
1934#endif
4a4c9785 1935 }
a00c95d9 1936
4c48ba06
MD
1937 gc_sweep_freelist_finish (&scm_master_freelist);
1938 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1939
8ded62a3
MD
1940 /* When we move to POSIX threads private freelists should probably
1941 be GC-protected instead. */
1942 scm_freelist = SCM_EOL;
1943 scm_freelist2 = SCM_EOL;
a00c95d9 1944
b37fe1c5 1945 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f 1946 scm_gc_yield -= scm_cells_allocated;
1be6b49c
ML
1947
1948 if (scm_mallocated < m)
1949 /* The byte count of allocated objects has underflowed. This is
1950 probably because you forgot to report the sizes of objects you
1951 have allocated, by calling scm_done_malloc or some such. When
1952 the GC freed them, it subtracted their size from
1953 scm_mallocated, which underflowed. */
1954 abort ();
1955
0f2d19dd
JB
1956 scm_mallocated -= m;
1957 scm_gc_malloc_collected = m;
1958}
acf4331f 1959#undef FUNC_NAME
0f2d19dd
JB
1960
1961
1962\f
0f2d19dd
JB
1963/* {Front end to malloc}
1964 *
9d47a1e6
ML
1965 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1966 * scm_done_free
0f2d19dd 1967 *
c6c79933
GH
1968 * These functions provide services comparable to malloc, realloc, and
1969 * free. They should be used when allocating memory that will be under
1970 * control of the garbage collector, i.e., if the memory may be freed
1971 * during garbage collection.
1972 */
bc9d9bb2 1973
0f2d19dd
JB
1974/* scm_must_malloc
1975 * Return newly malloced storage or throw an error.
1976 *
1977 * The parameter WHAT is a string for error reporting.
a00c95d9 1978 * If the threshold scm_mtrigger will be passed by this
0f2d19dd
JB
1979 * allocation, or if the first call to malloc fails,
1980 * garbage collect -- on the presumption that some objects
1981 * using malloced storage may be collected.
1982 *
1983 * The limit scm_mtrigger may be raised by this allocation.
1984 */
07806695 1985void *
1be6b49c 1986scm_must_malloc (size_t size, const char *what)
0f2d19dd 1987{
07806695 1988 void *ptr;
c014a02e 1989 unsigned long nm = scm_mallocated + size;
1be6b49c
ML
1990
1991 if (nm < size)
1992 /* The byte count of allocated objects has overflowed. This is
1993 probably because you forgot to report the correct size of freed
1994 memory in some of your smob free methods. */
1995 abort ();
e4ef2330
MD
1996
1997 if (nm <= scm_mtrigger)
0f2d19dd 1998 {
07806695 1999 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
2000 if (NULL != ptr)
2001 {
2002 scm_mallocated = nm;
bc9d9bb2
MD
2003#ifdef GUILE_DEBUG_MALLOC
2004 scm_malloc_register (ptr, what);
2005#endif
0f2d19dd
JB
2006 return ptr;
2007 }
2008 }
6064dcc6 2009
0f2d19dd 2010 scm_igc (what);
e4ef2330 2011
0f2d19dd 2012 nm = scm_mallocated + size;
1be6b49c
ML
2013
2014 if (nm < size)
2015 /* The byte count of allocated objects has overflowed. This is
2016 probably because you forgot to report the correct size of freed
2017 memory in some of your smob free methods. */
2018 abort ();
2019
07806695 2020 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
2021 if (NULL != ptr)
2022 {
2023 scm_mallocated = nm;
6064dcc6
MV
2024 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2025 if (nm > scm_mtrigger)
2026 scm_mtrigger = nm + nm / 2;
2027 else
2028 scm_mtrigger += scm_mtrigger / 2;
2029 }
bc9d9bb2
MD
2030#ifdef GUILE_DEBUG_MALLOC
2031 scm_malloc_register (ptr, what);
2032#endif
2033
0f2d19dd
JB
2034 return ptr;
2035 }
e4ef2330 2036
acf4331f 2037 scm_memory_error (what);
0f2d19dd
JB
2038}
2039
2040
2041/* scm_must_realloc
2042 * is similar to scm_must_malloc.
2043 */
07806695
JB
2044void *
2045scm_must_realloc (void *where,
1be6b49c
ML
2046 size_t old_size,
2047 size_t size,
3eeba8d4 2048 const char *what)
0f2d19dd 2049{
07806695 2050 void *ptr;
c014a02e 2051 unsigned long nm;
1be6b49c
ML
2052
2053 if (size <= old_size)
2054 return where;
2055
2056 nm = scm_mallocated + size - old_size;
2057
2058 if (nm < (size - old_size))
2059 /* The byte count of allocated objects has overflowed. This is
2060 probably because you forgot to report the correct size of freed
2061 memory in some of your smob free methods. */
2062 abort ();
e4ef2330
MD
2063
2064 if (nm <= scm_mtrigger)
0f2d19dd 2065 {
07806695 2066 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
2067 if (NULL != ptr)
2068 {
2069 scm_mallocated = nm;
bc9d9bb2
MD
2070#ifdef GUILE_DEBUG_MALLOC
2071 scm_malloc_reregister (where, ptr, what);
2072#endif
0f2d19dd
JB
2073 return ptr;
2074 }
2075 }
e4ef2330 2076
0f2d19dd 2077 scm_igc (what);
e4ef2330
MD
2078
2079 nm = scm_mallocated + size - old_size;
1be6b49c
ML
2080
2081 if (nm < (size - old_size))
2082 /* The byte count of allocated objects has overflowed. This is
2083 probably because you forgot to report the correct size of freed
2084 memory in some of your smob free methods. */
2085 abort ();
2086
07806695 2087 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
2088 if (NULL != ptr)
2089 {
2090 scm_mallocated = nm;
6064dcc6
MV
2091 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2092 if (nm > scm_mtrigger)
2093 scm_mtrigger = nm + nm / 2;
2094 else
2095 scm_mtrigger += scm_mtrigger / 2;
2096 }
bc9d9bb2
MD
2097#ifdef GUILE_DEBUG_MALLOC
2098 scm_malloc_reregister (where, ptr, what);
2099#endif
0f2d19dd
JB
2100 return ptr;
2101 }
e4ef2330 2102
acf4331f 2103 scm_memory_error (what);
0f2d19dd
JB
2104}
2105
e4a7824f 2106char *
1be6b49c 2107scm_must_strndup (const char *str, size_t length)
e4a7824f
MV
2108{
2109 char * dst = scm_must_malloc (length + 1, "scm_must_strndup");
2110 memcpy (dst, str, length);
2111 dst[length] = 0;
2112 return dst;
2113}
2114
2115char *
2116scm_must_strdup (const char *str)
2117{
2118 return scm_must_strndup (str, strlen (str));
2119}
acf4331f 2120
a00c95d9 2121void
07806695 2122scm_must_free (void *obj)
acf4331f 2123#define FUNC_NAME "scm_must_free"
0f2d19dd 2124{
bc9d9bb2
MD
2125#ifdef GUILE_DEBUG_MALLOC
2126 scm_malloc_unregister (obj);
2127#endif
0f2d19dd
JB
2128 if (obj)
2129 free (obj);
2130 else
acf4331f 2131 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
0f2d19dd 2132}
acf4331f
DH
2133#undef FUNC_NAME
2134
0f2d19dd 2135
c68296f8
MV
2136/* Announce that there has been some malloc done that will be freed
2137 * during gc. A typical use is for a smob that uses some malloced
2138 * memory but can not get it from scm_must_malloc (for whatever
2139 * reason). When a new object of this smob is created you call
2140 * scm_done_malloc with the size of the object. When your smob free
2141 * function is called, be sure to include this size in the return
9d47a1e6
ML
2142 * value.
2143 *
2144 * If you can't actually free the memory in the smob free function,
2145 * for whatever reason (like reference counting), you still can (and
2146 * should) report the amount of memory freed when you actually free it.
2147 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2148 * eh? Or even better, call scm_done_free. */
0f2d19dd 2149
c68296f8 2150void
c014a02e 2151scm_done_malloc (long size)
c68296f8 2152{
1be6b49c
ML
2153 if (size < 0) {
2154 if (scm_mallocated < size)
2155 /* The byte count of allocated objects has underflowed. This is
2156 probably because you forgot to report the sizes of objects you
2157 have allocated, by calling scm_done_malloc or some such. When
2158 the GC freed them, it subtracted their size from
2159 scm_mallocated, which underflowed. */
2160 abort ();
2161 } else {
c014a02e 2162 unsigned long nm = scm_mallocated + size;
1be6b49c
ML
2163 if (nm < size)
2164 /* The byte count of allocated objects has overflowed. This is
2165 probably because you forgot to report the correct size of freed
2166 memory in some of your smob free methods. */
2167 abort ();
2168 }
2169
c68296f8
MV
2170 scm_mallocated += size;
2171
2172 if (scm_mallocated > scm_mtrigger)
2173 {
2174 scm_igc ("foreign mallocs");
2175 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2176 {
2177 if (scm_mallocated > scm_mtrigger)
2178 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2179 else
2180 scm_mtrigger += scm_mtrigger / 2;
2181 }
2182 }
2183}
2184
9d47a1e6 2185void
c014a02e 2186scm_done_free (long size)
9d47a1e6 2187{
1be6b49c
ML
2188 if (size >= 0) {
2189 if (scm_mallocated < size)
2190 /* The byte count of allocated objects has underflowed. This is
2191 probably because you forgot to report the sizes of objects you
2192 have allocated, by calling scm_done_malloc or some such. When
2193 the GC freed them, it subtracted their size from
2194 scm_mallocated, which underflowed. */
2195 abort ();
2196 } else {
c014a02e 2197 unsigned long nm = scm_mallocated + size;
1be6b49c
ML
2198 if (nm < size)
2199 /* The byte count of allocated objects has overflowed. This is
2200 probably because you forgot to report the correct size of freed
2201 memory in some of your smob free methods. */
2202 abort ();
2203 }
2204
9d47a1e6
ML
2205 scm_mallocated -= size;
2206}
2207
c68296f8
MV
2208
2209\f
0f2d19dd
JB
2210/* {Heap Segments}
2211 *
2212 * Each heap segment is an array of objects of a particular size.
2213 * Every segment has an associated (possibly shared) freelist.
2214 * A table of segment records is kept that records the upper and
2215 * lower extents of the segment; this is used during the conservative
2216 * phase of gc to identify probably gc roots (because they point
c68296f8 2217 * into valid segments at reasonable offsets). */
0f2d19dd
JB
2218
2219/* scm_expmem
2220 * is true if the first segment was smaller than INIT_HEAP_SEG.
2221 * If scm_expmem is set to one, subsequent segment allocations will
2222 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2223 */
2224int scm_expmem = 0;
2225
1be6b49c 2226size_t scm_max_segment_size;
4c48ba06 2227
0f2d19dd
JB
2228/* scm_heap_org
2229 * is the lowest base address of any heap segment.
2230 */
2231SCM_CELLPTR scm_heap_org;
2232
92c2555f 2233scm_t_heap_seg_data * scm_heap_table = 0;
1be6b49c
ML
2234static size_t heap_segment_table_size = 0;
2235size_t scm_n_heap_segs = 0;
0f2d19dd 2236
0f2d19dd 2237/* init_heap_seg
d6884e63 2238 * initializes a new heap segment and returns the number of objects it contains.
0f2d19dd 2239 *
d6884e63
ML
2240 * The segment origin and segment size in bytes are input parameters.
2241 * The freelist is both input and output.
0f2d19dd 2242 *
d6884e63
ML
2243 * This function presumes that the scm_heap_table has already been expanded
2244 * to accomodate a new segment record and that the markbit space was reserved
2245 * for all the cards in this segment.
0f2d19dd
JB
2246 */
2247
d6884e63
ML
2248#define INIT_CARD(card, span) \
2249 do { \
322ec19d 2250 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
d6884e63
ML
2251 if ((span) == 2) \
2252 SCM_GC_SET_CARD_DOUBLECELL (card); \
2253 } while (0)
0f2d19dd 2254
1be6b49c 2255static size_t
92c2555f 2256init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
0f2d19dd
JB
2257{
2258 register SCM_CELLPTR ptr;
0f2d19dd 2259 SCM_CELLPTR seg_end;
592996c9 2260 size_t new_seg_index;
1be6b49c 2261 ptrdiff_t n_new_cells;
4c48ba06 2262 int span = freelist->span;
a00c95d9 2263
0f2d19dd
JB
2264 if (seg_org == NULL)
2265 return 0;
2266
d6884e63
ML
2267 /* Align the begin ptr up.
2268 */
2269 ptr = SCM_GC_CARD_UP (seg_org);
acb0a19c 2270
a00c95d9 2271 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 2272 */
d6884e63 2273 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
0f2d19dd 2274
a00c95d9 2275 /* Find the right place and insert the segment record.
0f2d19dd 2276 */
592996c9
DH
2277 new_seg_index = 0;
2278 while (new_seg_index < scm_n_heap_segs
2279 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org))
2280 new_seg_index++;
0f2d19dd
JB
2281
2282 {
2283 int i;
2284 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2285 scm_heap_table[i] = scm_heap_table[i - 1];
2286 }
a00c95d9 2287
0f2d19dd
JB
2288 ++scm_n_heap_segs;
2289
945fec60 2290 scm_heap_table[new_seg_index].span = span;
4c48ba06 2291 scm_heap_table[new_seg_index].freelist = freelist;
195e6201
DH
2292 scm_heap_table[new_seg_index].bounds[0] = ptr;
2293 scm_heap_table[new_seg_index].bounds[1] = seg_end;
0f2d19dd 2294
acb0a19c
MD
2295 /*n_new_cells*/
2296 n_new_cells = seg_end - ptr;
0f2d19dd 2297
4c48ba06 2298 freelist->heap_size += n_new_cells;
4a4c9785 2299
a00c95d9 2300 /* Partition objects in this segment into clusters */
4a4c9785
MD
2301 {
2302 SCM clusters;
2303 SCM *clusterp = &clusters;
4a4c9785 2304
d6884e63
ML
2305 NEXT_DATA_CELL (ptr, span);
2306 while (ptr < seg_end)
4a4c9785 2307 {
d6884e63
ML
2308 scm_cell *nxt = ptr;
2309 scm_cell *prv = NULL;
2310 scm_cell *last_card = NULL;
2311 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2312 NEXT_DATA_CELL(nxt, span);
4a4c9785 2313
4c48ba06
MD
2314 /* Allocate cluster spine
2315 */
4a4c9785 2316 *clusterp = PTR2SCM (ptr);
d6884e63 2317 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
4a4c9785 2318 clusterp = SCM_CDRLOC (*clusterp);
d6884e63 2319 ptr = nxt;
a00c95d9 2320
d6884e63 2321 while (n_data_cells--)
4a4c9785 2322 {
d6884e63 2323 scm_cell *card = SCM_GC_CELL_CARD (ptr);
96f6f4ae 2324 SCM scmptr = PTR2SCM (ptr);
d6884e63
ML
2325 nxt = ptr;
2326 NEXT_DATA_CELL (nxt, span);
2327 prv = ptr;
2328
2329 if (card != last_card)
2330 {
2331 INIT_CARD (card, span);
2332 last_card = card;
2333 }
96f6f4ae 2334
54778cd3 2335 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
22a52da1 2336 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
d6884e63
ML
2337
2338 ptr = nxt;
4a4c9785 2339 }
4c48ba06 2340
d6884e63 2341 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
4a4c9785 2342 }
a00c95d9 2343
d6884e63
ML
2344 /* sanity check */
2345 {
2346 scm_cell *ref = seg_end;
2347 NEXT_DATA_CELL (ref, span);
2348 if (ref != ptr)
2349 /* [cmm] looks like the segment size doesn't divide cleanly by
2350 cluster size. bad cmm! */
2351 abort();
2352 }
2353
4a4c9785
MD
2354 /* Patch up the last cluster pointer in the segment
2355 * to join it to the input freelist.
2356 */
4c48ba06
MD
2357 *clusterp = freelist->clusters;
2358 freelist->clusters = clusters;
4a4c9785
MD
2359 }
2360
4c48ba06
MD
2361#ifdef DEBUGINFO
2362 fprintf (stderr, "H");
2363#endif
0f2d19dd 2364 return size;
0f2d19dd
JB
2365}
2366
1be6b49c 2367static size_t
92c2555f 2368round_to_cluster_size (scm_t_freelist *freelist, size_t len)
a00c95d9 2369{
1be6b49c 2370 size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9
ML
2371
2372 return
2373 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2374 + ALIGNMENT_SLACK (freelist);
2375}
2376
a00c95d9 2377static void
92c2555f 2378alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
acf4331f 2379#define FUNC_NAME "alloc_some_heap"
0f2d19dd 2380{
0f2d19dd 2381 SCM_CELLPTR ptr;
1be6b49c 2382 size_t len;
a00c95d9 2383
9d47a1e6 2384 if (scm_gc_heap_lock)
b6efc951
DH
2385 {
2386 /* Critical code sections (such as the garbage collector) aren't
2387 * supposed to add heap segments.
2388 */
2389 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
2390 abort ();
2391 }
0f2d19dd 2392
9d47a1e6 2393 if (scm_n_heap_segs == heap_segment_table_size)
b6efc951
DH
2394 {
2395 /* We have to expand the heap segment table to have room for the new
2396 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2397 * init_heap_seg only if the allocation of the segment itself succeeds.
2398 */
1be6b49c 2399 size_t new_table_size = scm_n_heap_segs + 1;
92c2555f
MV
2400 size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
2401 scm_t_heap_seg_data *new_heap_table;
b6efc951 2402
92c2555f 2403 SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
b6efc951
DH
2404 realloc ((char *)scm_heap_table, size)));
2405 if (!new_heap_table)
2406 {
2407 if (error_policy == abort_on_error)
2408 {
2409 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2410 abort ();
2411 }
2412 else
2413 {
2414 return;
2415 }
2416 }
2417 else
2418 {
2419 scm_heap_table = new_heap_table;
2420 heap_segment_table_size = new_table_size;
2421 }
2422 }
0f2d19dd 2423
0f2d19dd 2424 /* Pick a size for the new heap segment.
a00c95d9 2425 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
2426 * gc.h
2427 */
4c48ba06 2428 {
1811ebce
MD
2429 /* Assure that the new segment is predicted to be large enough.
2430 *
2431 * New yield should at least equal GC fraction of new heap size, i.e.
2432 *
2433 * y + dh > f * (h + dh)
2434 *
2435 * y : yield
8fef55a8 2436 * f : min yield fraction
1811ebce
MD
2437 * h : heap size
2438 * dh : size of new heap segment
2439 *
2440 * This gives dh > (f * h - y) / (1 - f)
bda1446c 2441 */
8fef55a8 2442 int f = freelist->min_yield_fraction;
c014a02e 2443 unsigned long h = SCM_HEAP_SIZE;
1be6b49c 2444 size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2445 len = SCM_EXPHEAP (freelist->heap_size);
2446#ifdef DEBUGINFO
1be6b49c 2447 fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
4c48ba06
MD
2448#endif
2449 if (len < min_cells)
1811ebce 2450 len = min_cells + freelist->cluster_size;
4c48ba06 2451 len *= sizeof (scm_cell);
1811ebce
MD
2452 /* force new sampling */
2453 freelist->collected = LONG_MAX;
4c48ba06 2454 }
a00c95d9 2455
4c48ba06
MD
2456 if (len > scm_max_segment_size)
2457 len = scm_max_segment_size;
0f2d19dd
JB
2458
2459 {
1be6b49c 2460 size_t smallest;
0f2d19dd 2461
a00c95d9 2462 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9 2463
0f2d19dd 2464 if (len < smallest)
a00c95d9 2465 len = smallest;
0f2d19dd
JB
2466
2467 /* Allocate with decaying ambition. */
2468 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2469 && (len >= smallest))
2470 {
1be6b49c 2471 size_t rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2472 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2473 if (ptr)
2474 {
a00c95d9 2475 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2476 return;
2477 }
2478 len /= 2;
2479 }
2480 }
2481
b6efc951
DH
2482 if (error_policy == abort_on_error)
2483 {
2484 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2485 abort ();
2486 }
0f2d19dd 2487}
acf4331f 2488#undef FUNC_NAME
0f2d19dd 2489
0f2d19dd
JB
2490\f
2491/* {GC Protection Helper Functions}
2492 */
2493
2494
5d2b97cd
DH
2495/*
2496 * If within a function you need to protect one or more scheme objects from
2497 * garbage collection, pass them as parameters to one of the
2498 * scm_remember_upto_here* functions below. These functions don't do
2499 * anything, but since the compiler does not know that they are actually
2500 * no-ops, it will generate code that calls these functions with the given
2501 * parameters. Therefore, you can be sure that the compiler will keep those
2502 * scheme values alive (on the stack or in a register) up to the point where
2503 * scm_remember_upto_here* is called. In other words, place the call to
592996c9 2504 * scm_remember_upto_here* _behind_ the last code in your function, that
5d2b97cd
DH
2505 * depends on the scheme object to exist.
2506 *
2507 * Example: We want to make sure, that the string object str does not get
2508 * garbage collected during the execution of 'some_function', because
2509 * otherwise the characters belonging to str would be freed and
2510 * 'some_function' might access freed memory. To make sure that the compiler
2511 * keeps str alive on the stack or in a register such that it is visible to
2512 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2513 * call to 'some_function'. Note that this would not be necessary if str was
2514 * used anyway after the call to 'some_function'.
2515 * char *chars = SCM_STRING_CHARS (str);
2516 * some_function (chars);
2517 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2518 */
2519
2520void
e81d98ec 2521scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
5d2b97cd
DH
2522{
2523 /* Empty. Protects a single object from garbage collection. */
2524}
2525
2526void
e81d98ec 2527scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
5d2b97cd
DH
2528{
2529 /* Empty. Protects two objects from garbage collection. */
2530}
2531
2532void
e81d98ec 2533scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
5d2b97cd
DH
2534{
2535 /* Empty. Protects any number of objects from garbage collection. */
2536}
2537
2538
2539#if (SCM_DEBUG_DEPRECATED == 0)
2540
0f2d19dd 2541void
6e8d25a6 2542scm_remember (SCM *ptr)
b24b5e13 2543{
1be6b49c
ML
2544 scm_c_issue_deprecation_warning ("`scm_remember' is deprecated. "
2545 "Use the `scm_remember_upto_here*' family of functions instead.");
b24b5e13 2546}
0f2d19dd 2547
6b1b030e
ML
2548SCM
2549scm_protect_object (SCM obj)
2550{
2551 scm_c_issue_deprecation_warning ("`scm_protect_object' is deprecated. "
2552 "Use `scm_gc_protect_object' instead.");
2553 return scm_gc_protect_object (obj);
2554}
2555
2556SCM
2557scm_unprotect_object (SCM obj)
2558{
2559 scm_c_issue_deprecation_warning ("`scm_unprotect_object' is deprecated. "
2560 "Use `scm_gc_unprotect_object' instead.");
2561 return scm_gc_unprotect_object (obj);
2562}
2563
5d2b97cd 2564#endif /* SCM_DEBUG_DEPRECATED == 0 */
1cc91f1b 2565
c209c88e 2566/*
41b0806d
GB
2567 These crazy functions prevent garbage collection
2568 of arguments after the first argument by
2569 ensuring they remain live throughout the
2570 function because they are used in the last
2571 line of the code block.
2572 It'd be better to have a nice compiler hint to
2573 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2574SCM
2575scm_return_first (SCM elt, ...)
0f2d19dd
JB
2576{
2577 return elt;
2578}
2579
41b0806d
GB
2580int
2581scm_return_first_int (int i, ...)
2582{
2583 return i;
2584}
2585
0f2d19dd 2586
0f2d19dd 2587SCM
6e8d25a6 2588scm_permanent_object (SCM obj)
0f2d19dd
JB
2589{
2590 SCM_REDEFER_INTS;
2591 scm_permobjs = scm_cons (obj, scm_permobjs);
2592 SCM_REALLOW_INTS;
2593 return obj;
2594}
2595
2596
7bd4fbe2
MD
2597/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2598 other references are dropped, until the object is unprotected by calling
6b1b030e 2599 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
7bd4fbe2
MD
2600 i. e. it is possible to protect the same object several times, but it is
2601 necessary to unprotect the object the same number of times to actually get
2602 the object unprotected. It is an error to unprotect an object more often
2603 than it has been protected before. The function scm_protect_object returns
2604 OBJ.
2605*/
2606
2607/* Implementation note: For every object X, there is a counter which
6b1b030e 2608 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
7bd4fbe2 2609*/
686765af 2610
ef290276 2611SCM
6b1b030e 2612scm_gc_protect_object (SCM obj)
ef290276 2613{
686765af 2614 SCM handle;
9d47a1e6 2615
686765af 2616 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2617 SCM_REDEFER_INTS;
9d47a1e6 2618
0f0f0899 2619 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
1be6b49c 2620 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
9d47a1e6 2621
2dd6a83a 2622 SCM_REALLOW_INTS;
9d47a1e6 2623
ef290276
JB
2624 return obj;
2625}
2626
2627
2628/* Remove any protection for OBJ established by a prior call to
dab7f566 2629 scm_protect_object. This function returns OBJ.
ef290276 2630
dab7f566 2631 See scm_protect_object for more information. */
ef290276 2632SCM
6b1b030e 2633scm_gc_unprotect_object (SCM obj)
ef290276 2634{
686765af 2635 SCM handle;
9d47a1e6 2636
686765af 2637 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2638 SCM_REDEFER_INTS;
9d47a1e6 2639
686765af 2640 handle = scm_hashq_get_handle (scm_protects, obj);
9d47a1e6 2641
22a52da1 2642 if (SCM_FALSEP (handle))
686765af 2643 {
0f0f0899
MD
2644 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2645 abort ();
686765af 2646 }
6a199940
DH
2647 else
2648 {
1be6b49c
ML
2649 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2650 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
6a199940
DH
2651 scm_hashq_remove_x (scm_protects, obj);
2652 else
1be6b49c 2653 SCM_SETCDR (handle, count);
6a199940 2654 }
686765af 2655
2dd6a83a 2656 SCM_REALLOW_INTS;
ef290276
JB
2657
2658 return obj;
2659}
2660
6b1b030e
ML
2661void
2662scm_gc_register_root (SCM *p)
2663{
2664 SCM handle;
2665 SCM key = scm_long2num ((long) p);
2666
2667 /* This critical section barrier will be replaced by a mutex. */
2668 SCM_REDEFER_INTS;
2669
2670 handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
2671 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2672
2673 SCM_REALLOW_INTS;
2674}
2675
2676void
2677scm_gc_unregister_root (SCM *p)
2678{
2679 SCM handle;
2680 SCM key = scm_long2num ((long) p);
2681
2682 /* This critical section barrier will be replaced by a mutex. */
2683 SCM_REDEFER_INTS;
2684
2685 handle = scm_hashv_get_handle (scm_gc_registered_roots, key);
2686
2687 if (SCM_FALSEP (handle))
2688 {
2689 fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
2690 abort ();
2691 }
2692 else
2693 {
2694 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2695 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2696 scm_hashv_remove_x (scm_gc_registered_roots, key);
2697 else
2698 SCM_SETCDR (handle, count);
2699 }
2700
2701 SCM_REALLOW_INTS;
2702}
2703
2704void
2705scm_gc_register_roots (SCM *b, unsigned long n)
2706{
2707 SCM *p = b;
2708 for (; p < b + n; ++p)
2709 scm_gc_register_root (p);
2710}
2711
2712void
2713scm_gc_unregister_roots (SCM *b, unsigned long n)
2714{
2715 SCM *p = b;
2716 for (; p < b + n; ++p)
2717 scm_gc_unregister_root (p);
2718}
2719
c45acc34
JB
2720int terminating;
2721
2722/* called on process termination. */
e52ceaac
MD
2723#ifdef HAVE_ATEXIT
2724static void
2725cleanup (void)
2726#else
2727#ifdef HAVE_ON_EXIT
51157deb
MD
2728extern int on_exit (void (*procp) (), int arg);
2729
e52ceaac
MD
2730static void
2731cleanup (int status, void *arg)
2732#else
2733#error Dont know how to setup a cleanup handler on your system.
2734#endif
2735#endif
c45acc34
JB
2736{
2737 terminating = 1;
2738 scm_flush_all_ports ();
2739}
ef290276 2740
0f2d19dd 2741\f
acb0a19c 2742static int
92c2555f 2743make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
acb0a19c 2744{
1be6b49c 2745 size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);
d6884e63 2746
a00c95d9
ML
2747 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2748 rounded_size,
4c48ba06 2749 freelist))
acb0a19c 2750 {
a00c95d9
ML
2751 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2752 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2753 rounded_size,
4c48ba06 2754 freelist))
acb0a19c
MD
2755 return 1;
2756 }
2757 else
2758 scm_expmem = 1;
2759
8fef55a8
MD
2760 if (freelist->min_yield_fraction)
2761 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
b37fe1c5 2762 / 100);
8fef55a8 2763 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
a00c95d9 2764
acb0a19c
MD
2765 return 0;
2766}
2767
2768\f
4c48ba06 2769static void
92c2555f 2770init_freelist (scm_t_freelist *freelist,
4c48ba06 2771 int span,
c014a02e 2772 long cluster_size,
8fef55a8 2773 int min_yield)
4c48ba06
MD
2774{
2775 freelist->clusters = SCM_EOL;
2776 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2777 freelist->left_to_collect = 0;
2778 freelist->clusters_allocated = 0;
8fef55a8
MD
2779 freelist->min_yield = 0;
2780 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2781 freelist->span = span;
2782 freelist->collected = 0;
1811ebce 2783 freelist->collected_1 = 0;
4c48ba06
MD
2784 freelist->heap_size = 0;
2785}
2786
85db4a2c
DH
2787
2788/* Get an integer from an environment variable. */
2789static int
2790scm_i_getenv_int (const char *var, int def)
2791{
2792 char *end, *val = getenv (var);
2793 long res;
2794 if (!val)
2795 return def;
2796 res = strtol (val, &end, 10);
2797 if (end == val)
2798 return def;
2799 return res;
2800}
2801
2802
/* One-time bootstrap of the cell heap and GC bookkeeping.
 *
 * Reads tuning parameters from the environment (GUILE_MIN_YIELD_1/2,
 * GUILE_INIT_SEGMENT_SIZE_1/2, GUILE_MAX_SEGMENT_SIZE), initializes
 * both master freelists, allocates the initial heap segments and the
 * heap-segment table, sets up the GC C-level hooks and the port table,
 * and registers the exit-time cleanup handler.
 *
 * Returns 0 on success, 1 when an initial heap segment or the port
 * table could not be allocated.  The ordering of the steps below is
 * significant: the freelists must exist before the initial segments
 * are created, and the segments before scm_heap_org is derived.  */
int
scm_init_storage ()
{
  unsigned long gc_trigger_1;
  unsigned long gc_trigger_2;
  size_t init_heap_size_1;
  size_t init_heap_size_2;
  size_t j;

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  /* Cell-access debugging: register a smob type used to tag cells that
     are allocated but not yet initialized.  */
  scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
  scm_set_smob_mark (scm_tc16_allocated, allocated_mark);
#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

  /* Clear the table of protected objects before anything can be
     allocated, so the marker never sees garbage entries.  */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  /* Keep GC disabled until initialization is complete.  */
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
  init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
  gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
  init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
  scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Room for two segments up front — one per freelist span.  The table
     grows later as segments are added.  */
  scm_heap_table = ((scm_t_heap_seg_data *)
		    scm_must_malloc (sizeof (scm_t_heap_seg_data) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
  init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  /* C-level GC hooks, all initially empty.  */
  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  */
  scm_t_portable = (scm_t_port **)
    malloc (sizeof (scm_t_port *) * scm_t_portable_room);
  if (!scm_t_portable)
    return 1;

  /* Arrange for heap cleanup at process exit, using whichever
     registration mechanism the platform provides.  */
#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}
939794ce 2876
0f2d19dd
JB
2877\f
2878
939794ce
DH
/* The Scheme-level after-gc-hook; bound to "after-gc-hook" and run
 * (via gc_async_thunk below) once the asyncs are processed after a
 * collection.  */
SCM scm_after_gc_hook;

/* System async whose marking schedules gc_async_thunk.  */
static SCM gc_async;
2882
939794ce
DH
/* The function gc_async_thunk causes the execution of the after-gc-hook. It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.  Always returns SCM_UNSPECIFIED, as required of an async
 * thunk.
 */
static SCM
gc_async_thunk (void)
{
  /* Run the Scheme-level hook with no arguments.  */
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}
2893
2894
/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 *
 * All three hook parameters are unused (see SCM_UNUSED).  The NULL
 * return is the conventional "no result" for a C hook function —
 * NOTE(review): presumably ignored by the hook runner; confirm in
 * scm_c_hook_run.
 */
static void *
mark_gc_async (void * hook_data SCM_UNUSED,
	       void *func_data SCM_UNUSED,
	       void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collecter.  When running guile with cell access debugging and the
   * execution of the after-gc-hook is desired, then it is necessary to run
   * the hook explicitly from the user code.  This has the effect, that from
   * the scheme level point of view it seems that garbage collection is
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one to one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */
#if (SCM_DEBUG_CELL_ACCESSES == 1)
  /* Only schedule the hook when no debug-interval GCs are in effect.  */
  if (debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  return NULL;
}
2938
2939
0f2d19dd
JB
/* Register the GC's Scheme-visible interface: the after-gc-hook
 * variable, the internal %gc-thunk subr that runs it, the async that
 * schedules that thunk, and the C hook that marks the async after each
 * collection.  The snarfed definitions from gc.x are installed last.  */
void
scm_init_gc ()
{
  SCM after_gc_thunk;

  /* Create the hook object, protect it from GC, and expose it to
     Scheme as "after-gc-hook".  */
  scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  /* Wrap gc_async_thunk as a zero-argument subr and hang it on a
     system async.  */
  after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
				    gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */

  /* After every collection, mark_gc_async schedules the async above.  */
  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#ifndef SCM_MAGIC_SNARFER
#include "libguile/gc.x"
#endif
}
89e00824 2958
56495472
ML
2959#endif /*MARK_DEPENDENCIES*/
2960
89e00824
ML
2961/*
2962 Local Variables:
2963 c-file-style: "gnu"
2964 End:
2965*/