/* libguile/gc.c — Guile's mark/sweep garbage collector. */
/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */

/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
   gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
37ddcaf6
MD
45/* #define DEBUGINFO */
46
56495472
ML
47/* SECTION: This code is compiled once.
48 */
49
50#ifndef MARK_DEPENDENCIES
51
0f2d19dd
JB
#include <stdio.h>
#include <errno.h>
#include <string.h>

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

/* Portable variadic-argument startup: ANSI <stdarg.h> vs. K&R <varargs.h>. */
#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif
96
0f2d19dd 97\f
406c7d90
DH
98
99unsigned int scm_gc_running_p = 0;
100
101\f
102
103#if (SCM_DEBUG_CELL_ACCESSES == 1)
104
92c2555f 105scm_t_bits scm_tc16_allocated;
61045190
DH
106
107/* Set this to != 0 if every cell that is accessed shall be checked:
108 */
109unsigned int scm_debug_cell_accesses_p = 1;
406c7d90 110
e81d98ec
DH
111/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
112 * the number of cell accesses after which a gc shall be called.
113 */
114static unsigned int debug_cells_gc_interval = 0;
115
406c7d90
DH
116
117/* Assert that the given object is a valid reference to a valid cell. This
118 * test involves to determine whether the object is a cell pointer, whether
119 * this pointer actually points into a heap segment and whether the cell
e81d98ec
DH
120 * pointed to is not a free cell. Further, additional garbage collections may
121 * get executed after a user defined number of cell accesses. This helps to
122 * find places in the C code where references are dropped for extremely short
123 * periods.
406c7d90
DH
124 */
125void
126scm_assert_cell_valid (SCM cell)
127{
61045190
DH
128 static unsigned int already_running = 0;
129
130 if (scm_debug_cell_accesses_p && !already_running)
406c7d90 131 {
61045190 132 already_running = 1; /* set to avoid recursion */
406c7d90 133
9d47a1e6 134 if (!scm_cellp (cell))
406c7d90 135 {
1be6b49c
ML
136 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lux\n",
137 (unsigned long) SCM_UNPACK (cell));
406c7d90
DH
138 abort ();
139 }
140 else if (!scm_gc_running_p)
141 {
142 /* Dirk::FIXME:: During garbage collection there occur references to
143 free cells. This is allright during conservative marking, but
144 should not happen otherwise (I think). The case of free cells
145 accessed during conservative marking is handled in function
146 scm_mark_locations. However, there still occur accesses to free
147 cells during gc. I don't understand why this happens. If it is
148 a bug and gets fixed, the following test should also work while
149 gc is running.
150 */
151 if (SCM_FREE_CELL_P (cell))
152 {
1be6b49c
ML
153 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lux\n",
154 (unsigned long) SCM_UNPACK (cell));
406c7d90
DH
155 abort ();
156 }
e81d98ec
DH
157
158 /* If desired, perform additional garbage collections after a user
159 * defined number of cell accesses.
160 */
161 if (debug_cells_gc_interval)
162 {
163 static unsigned int counter = 0;
164
165 if (counter != 0)
166 {
167 --counter;
168 }
169 else
170 {
171 counter = debug_cells_gc_interval;
172 scm_igc ("scm_assert_cell_valid");
173 }
174 }
406c7d90 175 }
61045190 176 already_running = 0; /* re-enable */
406c7d90
DH
177 }
178}
179
180
181SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
182 (SCM flag),
1e6808ea 183 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
e81d98ec
DH
184 "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
185 "but no additional calls to garbage collection are issued.\n"
186 "If @var{flag} is a number, cell access checking is enabled,\n"
187 "with an additional garbage collection after the given\n"
188 "number of cell accesses.\n"
1e6808ea
MG
189 "This procedure only exists when the compile-time flag\n"
190 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
406c7d90
DH
191#define FUNC_NAME s_scm_set_debug_cell_accesses_x
192{
193 if (SCM_FALSEP (flag)) {
194 scm_debug_cell_accesses_p = 0;
195 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
e81d98ec
DH
196 debug_cells_gc_interval = 0;
197 scm_debug_cell_accesses_p = 1;
198 } else if (SCM_INUMP (flag)) {
199 long int f = SCM_INUM (flag);
200 if (f <= 0) SCM_OUT_OF_RANGE (1, flag);
201 debug_cells_gc_interval = f;
406c7d90
DH
202 scm_debug_cell_accesses_p = 1;
203 } else {
204 SCM_WRONG_TYPE_ARG (1, flag);
205 }
206 return SCM_UNSPECIFIED;
207}
208#undef FUNC_NAME
209
210#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
211
212\f
213
0f2d19dd 214/* {heap tuning parameters}
a00c95d9 215 *
0f2d19dd
JB
216 * These are parameters for controlling memory allocation. The heap
217 * is the area out of which scm_cons, and object headers are allocated.
218 *
219 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
220 * 64 bit machine. The units of the _SIZE parameters are bytes.
221 * Cons pairs and object headers occupy one heap cell.
222 *
223 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
224 * allocated initially the heap will grow by half its current size
225 * each subsequent time more heap is needed.
226 *
227 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
228 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
1be6b49c 229 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type size_t. This code
0f2d19dd 230 * is in scm_init_storage() and alloc_some_heap() in sys.c
a00c95d9 231 *
0f2d19dd
JB
232 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
233 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
234 *
235 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
236 * is needed.
237 *
238 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
a00c95d9 239 * trigger a GC.
6064dcc6
MV
240 *
241 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
242 * reclaimed by a GC triggered by must_malloc. If less than this is
243 * reclaimed, the trigger threshold is raised. [I don't know what a
244 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
a00c95d9 245 * work around a oscillation that caused almost constant GC.]
0f2d19dd
JB
246 */
247
8fef55a8
MD
248/*
249 * Heap size 45000 and 40% min yield gives quick startup and no extra
250 * heap allocation. Having higher values on min yield may lead to
251 * large heaps, especially if code behaviour is varying its
252 * maximum consumption between different freelists.
253 */
d6884e63
ML
254
255#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
256#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
257#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
1be6b49c 258size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
d6884e63 259 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
aeacfc8f 260int scm_default_min_yield_1 = 40;
4c48ba06 261
d6884e63 262#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
1be6b49c 263size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
d6884e63 264 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
4c48ba06
MD
265/* The following value may seem large, but note that if we get to GC at
266 * all, this means that we have a numerically intensive application
267 */
aeacfc8f 268int scm_default_min_yield_2 = 40;
4c48ba06 269
1be6b49c 270size_t scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
4c48ba06 271
d6884e63 272#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
0f2d19dd
JB
273#ifdef _QC
274# define SCM_HEAP_SEG_SIZE 32768L
275#else
276# ifdef sequent
4c48ba06 277# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
0f2d19dd 278# else
4c48ba06 279# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
0f2d19dd
JB
280# endif
281#endif
4c48ba06 282/* Make heap grow with factor 1.5 */
4a4c9785 283#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
0f2d19dd 284#define SCM_INIT_MALLOC_LIMIT 100000
6064dcc6 285#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
0f2d19dd 286
/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
   aligned inner bounds for allocated storage */

#ifdef PROT386
/*in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */

/* True iff X is aligned on a double-cell (2 * sizeof (scm_cell)) boundary. */
#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)

#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
0f2d19dd
JB
309
310\f
945fec60 311/* scm_freelists
0f2d19dd 312 */
945fec60 313
92c2555f 314typedef struct scm_t_freelist {
a00c95d9
ML
315 /* collected cells */
316 SCM cells;
a00c95d9
ML
317 /* number of cells left to collect before cluster is full */
318 unsigned int left_to_collect;
b37fe1c5
MD
319 /* number of clusters which have been allocated */
320 unsigned int clusters_allocated;
8fef55a8
MD
321 /* a list of freelists, each of size cluster_size,
322 * except the last one which may be shorter
323 */
a00c95d9
ML
324 SCM clusters;
325 SCM *clustertail;
b37fe1c5 326 /* this is the number of objects in each cluster, including the spine cell */
1be6b49c 327 unsigned int cluster_size;
8fef55a8 328 /* indicates that we should grow heap instead of GC:ing
a00c95d9
ML
329 */
330 int grow_heap_p;
8fef55a8 331 /* minimum yield on this list in order not to grow the heap
a00c95d9 332 */
8fef55a8
MD
333 long min_yield;
334 /* defines min_yield as percent of total heap size
a00c95d9 335 */
8fef55a8 336 int min_yield_fraction;
a00c95d9
ML
337 /* number of cells per object on this list */
338 int span;
339 /* number of collected cells during last GC */
c014a02e 340 unsigned long collected;
1811ebce 341 /* number of collected cells during penultimate GC */
c014a02e 342 unsigned long collected_1;
a00c95d9
ML
343 /* total number of cells in heap segments
344 * belonging to this list.
345 */
c014a02e 346 unsigned long heap_size;
92c2555f 347} scm_t_freelist;
a00c95d9 348
4a4c9785 349SCM scm_freelist = SCM_EOL;
92c2555f 350scm_t_freelist scm_master_freelist = {
729dbac3 351 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
4a4c9785
MD
352};
353SCM scm_freelist2 = SCM_EOL;
92c2555f 354scm_t_freelist scm_master_freelist2 = {
729dbac3 355 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
4a4c9785 356};
0f2d19dd
JB
357
358/* scm_mtrigger
359 * is the number of bytes of must_malloc allocation needed to trigger gc.
360 */
c014a02e 361unsigned long scm_mtrigger;
0f2d19dd 362
0f2d19dd
JB
363/* scm_gc_heap_lock
364 * If set, don't expand the heap. Set only during gc, during which no allocation
365 * is supposed to take place anyway.
366 */
367int scm_gc_heap_lock = 0;
368
369/* GC Blocking
370 * Don't pause for collection if this is set -- just
371 * expand the heap.
372 */
0f2d19dd
JB
373int scm_block_gc = 1;
374
0f2d19dd
JB
375/* During collection, this accumulates objects holding
376 * weak references.
377 */
ab4bef85 378SCM scm_weak_vectors;
0f2d19dd 379
7445e0e8
MD
380/* During collection, this accumulates structures which are to be freed.
381 */
382SCM scm_structs_to_free;
383
0f2d19dd
JB
384/* GC Statistics Keeping
385 */
c014a02e
ML
386unsigned long scm_cells_allocated = 0;
387unsigned long scm_mallocated = 0;
388unsigned long scm_gc_cells_collected;
389unsigned long scm_gc_yield;
390static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
391unsigned long scm_gc_malloc_collected;
392unsigned long scm_gc_ports_collected;
0f2d19dd 393unsigned long scm_gc_time_taken = 0;
c014a02e
ML
394static unsigned long t_before_gc;
395static unsigned long t_before_sweep;
c9b0d4b0
ML
396unsigned long scm_gc_mark_time_taken = 0;
397unsigned long scm_gc_sweep_time_taken = 0;
c014a02e
ML
398unsigned long scm_gc_times = 0;
399unsigned long scm_gc_cells_swept = 0;
c9b0d4b0
ML
400double scm_gc_cells_marked_acc = 0.;
401double scm_gc_cells_swept_acc = 0.;
0f2d19dd
JB
402
403SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
404SCM_SYMBOL (sym_heap_size, "cell-heap-size");
405SCM_SYMBOL (sym_mallocated, "bytes-malloced");
406SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
407SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
408SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
c9b0d4b0
ML
409SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
410SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
411SCM_SYMBOL (sym_times, "gc-times");
412SCM_SYMBOL (sym_cells_marked, "cells-marked");
413SCM_SYMBOL (sym_cells_swept, "cells-swept");
0f2d19dd 414
92c2555f 415typedef struct scm_t_heap_seg_data
0f2d19dd 416{
cf2d30f6
JB
417 /* lower and upper bounds of the segment */
418 SCM_CELLPTR bounds[2];
419
420 /* address of the head-of-freelist pointer for this segment's cells.
421 All segments usually point to the same one, scm_freelist. */
92c2555f 422 scm_t_freelist *freelist;
cf2d30f6 423
fe517a7d 424 /* number of cells per object in this segment */
945fec60 425 int span;
92c2555f 426} scm_t_heap_seg_data;
0f2d19dd
JB
427
428
429
92c2555f 430static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);
b6efc951
DH
431
432typedef enum { return_on_error, abort_on_error } policy_on_error;
92c2555f 433static void alloc_some_heap (scm_t_freelist *, policy_on_error);
0f2d19dd
JB
434
435
d6884e63
ML
436#define SCM_HEAP_SIZE \
437 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
438#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
439
440#define BVEC_GROW_SIZE 256
441#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
92c2555f 442#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))
d6884e63
ML
443
444/* mark space allocation */
445
92c2555f 446typedef struct scm_t_mark_space
d6884e63 447{
92c2555f
MV
448 scm_t_c_bvec_limb *bvec_space;
449 struct scm_t_mark_space *next;
450} scm_t_mark_space;
d6884e63 451
92c2555f
MV
452static scm_t_mark_space *current_mark_space;
453static scm_t_mark_space **mark_space_ptr;
1be6b49c 454static ptrdiff_t current_mark_space_offset;
92c2555f 455static scm_t_mark_space *mark_space_head;
d6884e63 456
92c2555f 457static scm_t_c_bvec_limb *
d6884e63 458get_bvec ()
db4b4ca6 459#define FUNC_NAME "get_bvec"
d6884e63 460{
92c2555f 461 scm_t_c_bvec_limb *res;
d6884e63
ML
462
463 if (!current_mark_space)
464 {
92c2555f 465 SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
d6884e63 466 if (!current_mark_space)
db4b4ca6 467 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
468
469 current_mark_space->bvec_space = NULL;
470 current_mark_space->next = NULL;
471
472 *mark_space_ptr = current_mark_space;
473 mark_space_ptr = &(current_mark_space->next);
474
475 return get_bvec ();
476 }
477
478 if (!(current_mark_space->bvec_space))
479 {
480 SCM_SYSCALL (current_mark_space->bvec_space =
92c2555f 481 (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
d6884e63 482 if (!(current_mark_space->bvec_space))
db4b4ca6 483 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
484
485 current_mark_space_offset = 0;
486
487 return get_bvec ();
488 }
489
490 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
491 {
492 current_mark_space = NULL;
493
494 return get_bvec ();
495 }
496
497 res = current_mark_space->bvec_space + current_mark_space_offset;
498 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
499
500 return res;
501}
db4b4ca6
DH
502#undef FUNC_NAME
503
d6884e63
ML
504
505static void
506clear_mark_space ()
507{
92c2555f 508 scm_t_mark_space *ms;
d6884e63
ML
509
510 for (ms = mark_space_head; ms; ms = ms->next)
511 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
512}
513
514
0f2d19dd 515\f
cf2d30f6
JB
516/* Debugging functions. */
517
bb2c57fa 518#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6
JB
519
520/* Return the number of the heap segment containing CELL. */
c014a02e 521static long
cf2d30f6
JB
522which_seg (SCM cell)
523{
c014a02e 524 long i;
cf2d30f6
JB
525
526 for (i = 0; i < scm_n_heap_segs; i++)
195e6201
DH
527 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
528 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
cf2d30f6 529 return i;
1be6b49c
ML
530 fprintf (stderr, "which_seg: can't find segment containing cell %lux\n",
531 (unsigned long) SCM_UNPACK (cell));
cf2d30f6
JB
532 abort ();
533}
534
535
8ded62a3 536static void
92c2555f 537map_free_list (scm_t_freelist *master, SCM freelist)
8ded62a3 538{
c014a02e 539 long last_seg = -1, count = 0;
8ded62a3 540 SCM f;
a00c95d9 541
3f5d82cd 542 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
8ded62a3 543 {
c014a02e 544 long this_seg = which_seg (f);
8ded62a3
MD
545
546 if (this_seg != last_seg)
547 {
548 if (last_seg != -1)
1be6b49c
ML
549 fprintf (stderr, " %5ld %d-cells in segment %ld\n",
550 (long) count, master->span, (long) last_seg);
8ded62a3
MD
551 last_seg = this_seg;
552 count = 0;
553 }
554 count++;
555 }
556 if (last_seg != -1)
1be6b49c
ML
557 fprintf (stderr, " %5ld %d-cells in segment %ld\n",
558 (long) count, master->span, (long) last_seg);
8ded62a3 559}
cf2d30f6 560
a00c95d9 561SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c 562 (),
5352393c
MG
563 "Print debugging information about the free-list.\n"
564 "@code{map-free-list} is only included in\n"
565 "@code{--enable-guile-debug} builds of Guile.")
acb0a19c
MD
566#define FUNC_NAME s_scm_map_free_list
567{
c014a02e 568 long i;
1be6b49c
ML
569 fprintf (stderr, "%ld segments total (%d:%ld",
570 (long) scm_n_heap_segs,
4c48ba06 571 scm_heap_table[0].span,
1be6b49c 572 (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));
4c48ba06 573 for (i = 1; i < scm_n_heap_segs; i++)
1be6b49c 574 fprintf (stderr, ", %d:%ld",
4c48ba06 575 scm_heap_table[i].span,
1be6b49c 576 (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
4c48ba06 577 fprintf (stderr, ")\n");
8ded62a3
MD
578 map_free_list (&scm_master_freelist, scm_freelist);
579 map_free_list (&scm_master_freelist2, scm_freelist2);
cf2d30f6
JB
580 fflush (stderr);
581
582 return SCM_UNSPECIFIED;
583}
1bbd0b84 584#undef FUNC_NAME
cf2d30f6 585
c014a02e
ML
586static long last_cluster;
587static long last_size;
4c48ba06 588
c014a02e
ML
589static long
590free_list_length (char *title, long i, SCM freelist)
5384bc5b
MD
591{
592 SCM ls;
c014a02e 593 long n = 0;
3f5d82cd
DH
594 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
595 if (SCM_FREE_CELL_P (ls))
5384bc5b
MD
596 ++n;
597 else
598 {
1be6b49c 599 fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n);
5384bc5b
MD
600 abort ();
601 }
4c48ba06
MD
602 if (n != last_size)
603 {
604 if (i > 0)
605 {
606 if (last_cluster == i - 1)
1be6b49c 607 fprintf (stderr, "\t%ld\n", (long) last_size);
4c48ba06 608 else
1be6b49c 609 fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
4c48ba06
MD
610 }
611 if (i >= 0)
1be6b49c 612 fprintf (stderr, "%s %ld", title, (long) i);
4c48ba06 613 else
1be6b49c 614 fprintf (stderr, "%s\t%ld\n", title, (long) n);
4c48ba06
MD
615 last_cluster = i;
616 last_size = n;
617 }
5384bc5b
MD
618 return n;
619}
620
621static void
92c2555f 622free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
5384bc5b
MD
623{
624 SCM clusters;
c014a02e 625 long i = 0, len, n = 0;
5384bc5b
MD
626 fprintf (stderr, "%s\n\n", title);
627 n += free_list_length ("free list", -1, freelist);
628 for (clusters = master->clusters;
629 SCM_NNULLP (clusters);
630 clusters = SCM_CDR (clusters))
4c48ba06
MD
631 {
632 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
633 n += len;
634 }
635 if (last_cluster == i - 1)
1be6b49c 636 fprintf (stderr, "\t%ld\n", (long) last_size);
4c48ba06 637 else
1be6b49c
ML
638 fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
639 fprintf (stderr, "\ntotal %ld objects\n\n", (long) n);
5384bc5b
MD
640}
641
a00c95d9 642SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b 643 (),
5352393c
MG
644 "Print debugging information about the free-list.\n"
645 "@code{free-list-length} is only included in\n"
646 "@code{--enable-guile-debug} builds of Guile.")
5384bc5b
MD
647#define FUNC_NAME s_scm_free_list_length
648{
b37fe1c5
MD
649 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
650 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 651 return SCM_UNSPECIFIED;
5384bc5b
MD
652}
653#undef FUNC_NAME
654
bb2c57fa
MD
655#endif
656
657#ifdef GUILE_DEBUG_FREELIST
cf2d30f6 658
d3dd80ab
MG
659/* Non-zero if freelist debugging is in effect. Set this via
660 `gc-set-debug-check-freelist!'. */
661static int scm_debug_check_freelist = 0;
662
cf2d30f6 663/* Number of calls to SCM_NEWCELL since startup. */
c014a02e
ML
664static unsigned long scm_newcell_count;
665static unsigned long scm_newcell2_count;
cf2d30f6
JB
666
667/* Search freelist for anything that isn't marked as a free cell.
668 Abort if we find something. */
8ded62a3
MD
669static void
670scm_check_freelist (SCM freelist)
671{
672 SCM f;
c014a02e 673 long i = 0;
8ded62a3 674
3f5d82cd
DH
675 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
676 if (!SCM_FREE_CELL_P (f))
8ded62a3 677 {
1be6b49c
ML
678 fprintf (stderr, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
679 (long) scm_newcell_count, (long) i);
8ded62a3
MD
680 abort ();
681 }
682}
cf2d30f6 683
a00c95d9 684SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 685 (SCM flag),
1e6808ea
MG
686 "If @var{flag} is @code{#t}, check the freelist for consistency\n"
687 "on each cell allocation. This procedure only exists when the\n"
688 "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
1bbd0b84 689#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 690{
d6884e63
ML
691 /* [cmm] I did a double-take when I read this code the first time.
692 well, FWIW. */
945fec60 693 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
694 return SCM_UNSPECIFIED;
695}
1bbd0b84 696#undef FUNC_NAME
25748c78
GB
697
698
4a4c9785
MD
699SCM
700scm_debug_newcell (void)
701{
702 SCM new;
703
704 scm_newcell_count++;
705 if (scm_debug_check_freelist)
706 {
8ded62a3 707 scm_check_freelist (scm_freelist);
4a4c9785
MD
708 scm_gc();
709 }
710
711 /* The rest of this is supposed to be identical to the SCM_NEWCELL
712 macro. */
3f5d82cd 713 if (SCM_NULLP (scm_freelist))
7c33806a
DH
714 {
715 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
716 SCM_GC_SET_ALLOCATED (new);
717 }
4a4c9785
MD
718 else
719 {
720 new = scm_freelist;
3f5d82cd 721 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
7c33806a 722 SCM_GC_SET_ALLOCATED (new);
4a4c9785
MD
723 }
724
725 return new;
726}
727
728SCM
729scm_debug_newcell2 (void)
730{
731 SCM new;
732
733 scm_newcell2_count++;
734 if (scm_debug_check_freelist)
735 {
8ded62a3 736 scm_check_freelist (scm_freelist2);
4a4c9785
MD
737 scm_gc ();
738 }
739
740 /* The rest of this is supposed to be identical to the SCM_NEWCELL
741 macro. */
3f5d82cd 742 if (SCM_NULLP (scm_freelist2))
7c33806a
DH
743 {
744 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
745 SCM_GC_SET_ALLOCATED (new);
746 }
4a4c9785
MD
747 else
748 {
749 new = scm_freelist2;
3f5d82cd 750 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
7c33806a 751 SCM_GC_SET_ALLOCATED (new);
4a4c9785
MD
752 }
753
754 return new;
755}
756
fca7547b 757#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
758
759\f
0f2d19dd 760
c014a02e 761static unsigned long
92c2555f 762master_cells_allocated (scm_t_freelist *master)
b37fe1c5 763{
d6884e63 764 /* the '- 1' below is to ignore the cluster spine cells. */
c014a02e 765 long objects = master->clusters_allocated * (master->cluster_size - 1);
b37fe1c5
MD
766 if (SCM_NULLP (master->clusters))
767 objects -= master->left_to_collect;
768 return master->span * objects;
769}
770
c014a02e 771static unsigned long
b37fe1c5
MD
772freelist_length (SCM freelist)
773{
c014a02e 774 long n;
3f5d82cd 775 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
b37fe1c5
MD
776 ++n;
777 return n;
778}
779
c014a02e 780static unsigned long
b37fe1c5
MD
781compute_cells_allocated ()
782{
783 return (scm_cells_allocated
784 + master_cells_allocated (&scm_master_freelist)
785 + master_cells_allocated (&scm_master_freelist2)
786 - scm_master_freelist.span * freelist_length (scm_freelist)
787 - scm_master_freelist2.span * freelist_length (scm_freelist2));
788}
b37fe1c5 789
0f2d19dd
JB
790/* {Scheme Interface to GC}
791 */
792
a00c95d9 793SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 794 (),
1e6808ea
MG
795 "Return an association list of statistics about Guile's current\n"
796 "use of storage.")
1bbd0b84 797#define FUNC_NAME s_scm_gc_stats
0f2d19dd 798{
c014a02e
ML
799 long i;
800 long n;
0f2d19dd 801 SCM heap_segs;
c014a02e
ML
802 unsigned long int local_scm_mtrigger;
803 unsigned long int local_scm_mallocated;
804 unsigned long int local_scm_heap_size;
805 unsigned long int local_scm_cells_allocated;
806 unsigned long int local_scm_gc_time_taken;
807 unsigned long int local_scm_gc_times;
808 unsigned long int local_scm_gc_mark_time_taken;
809 unsigned long int local_scm_gc_sweep_time_taken;
c9b0d4b0
ML
810 double local_scm_gc_cells_swept;
811 double local_scm_gc_cells_marked;
0f2d19dd
JB
812 SCM answer;
813
814 SCM_DEFER_INTS;
939794ce
DH
815
816 ++scm_block_gc;
817
0f2d19dd
JB
818 retry:
819 heap_segs = SCM_EOL;
820 n = scm_n_heap_segs;
821 for (i = scm_n_heap_segs; i--; )
c014a02e
ML
822 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
823 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
0f2d19dd
JB
824 heap_segs);
825 if (scm_n_heap_segs != n)
826 goto retry;
939794ce
DH
827
828 --scm_block_gc;
0f2d19dd 829
7febb4a2
MD
830 /* Below, we cons to produce the resulting list. We want a snapshot of
831 * the heap situation before consing.
832 */
0f2d19dd
JB
833 local_scm_mtrigger = scm_mtrigger;
834 local_scm_mallocated = scm_mallocated;
b37fe1c5 835 local_scm_heap_size = SCM_HEAP_SIZE;
b37fe1c5 836 local_scm_cells_allocated = compute_cells_allocated ();
0f2d19dd 837 local_scm_gc_time_taken = scm_gc_time_taken;
c9b0d4b0
ML
838 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
839 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
840 local_scm_gc_times = scm_gc_times;
841 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
842 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
0f2d19dd 843
1afff620
KN
844 answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
845 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
846 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
847 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
848 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
849 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
850 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
851 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
852 scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
853 scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
854 scm_cons (sym_heap_segments, heap_segs),
855 SCM_UNDEFINED);
0f2d19dd
JB
856 SCM_ALLOW_INTS;
857 return answer;
858}
1bbd0b84 859#undef FUNC_NAME
0f2d19dd
JB
860
861
/* Reset the per-collection GC statistics counters and record the start
 * time of this collection run.  Called from scm_igc before marking
 * begins.  `what' names the GC trigger but is unused here. */
c9b0d4b0 862static void
e81d98ec 863gc_start_stats (const char *what SCM_UNUSED)
0f2d19dd 864{
c9b0d4b0
ML
865 t_before_gc = scm_c_get_internal_run_time ();
866 scm_gc_cells_swept = 0;
b37fe1c5 867 scm_gc_cells_collected = 0;
37ddcaf6 868 scm_gc_yield_1 = scm_gc_yield;
/* Snapshot of total cells handed out: cells allocated plus cells still
 * sitting on the two master freelists.  NOTE(review): presumably this
 * is compared after the sweep to compute the collection's yield --
 * confirm against adjust_min_yield. */
8b0d194f
MD
869 scm_gc_yield = (scm_cells_allocated
870 + master_cells_allocated (&scm_master_freelist)
871 + master_cells_allocated (&scm_master_freelist2));
0f2d19dd
JB
872 scm_gc_malloc_collected = 0;
873 scm_gc_ports_collected = 0;
874}
875
939794ce 876
c9b0d4b0
ML
877static void
878gc_end_stats ()
0f2d19dd 879{
c9b0d4b0
ML
880 unsigned long t = scm_c_get_internal_run_time ();
881 scm_gc_time_taken += (t - t_before_gc);
882 scm_gc_sweep_time_taken += (t - t_before_sweep);
883 ++scm_gc_times;
884
885 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
886 scm_gc_cells_swept_acc += scm_gc_cells_swept;
0f2d19dd
JB
887}
888
889
a00c95d9 890SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 891 (SCM obj),
b380b885
MD
892 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
893 "returned by this function for @var{obj}")
1bbd0b84 894#define FUNC_NAME s_scm_object_address
0f2d19dd 895{
/* The raw SCM bits of the object (its heap address for non-immediates)
 * serve as the unique integer. */
c014a02e 896 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 897}
1bbd0b84 898#undef FUNC_NAME
0f2d19dd
JB
899
900
a00c95d9 901SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
1bbd0b84 902 (),
b380b885
MD
903 "Scans all of SCM objects and reclaims for further use those that are\n"
904 "no longer accessible.")
1bbd0b84 905#define FUNC_NAME s_scm_gc
0f2d19dd
JB
906{
/* Run a full collection with interrupts deferred; "call" tags this
 * GC as explicitly user-requested in scm_igc's trigger argument. */
907 SCM_DEFER_INTS;
908 scm_igc ("call");
909 SCM_ALLOW_INTS;
910 return SCM_UNSPECIFIED;
911}
1bbd0b84 912#undef FUNC_NAME
0f2d19dd
JB
913
914
915\f
916/* {C Interface For When GC is Triggered}
917 */
918
b37fe1c5 919static void
92c2555f 920adjust_min_yield (scm_t_freelist *freelist)
b37fe1c5 921{
8fef55a8 922 /* min yield is adjusted upwards so that next predicted total yield
bda1446c 923 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
924 * `min_yield_fraction' of total heap size. Note, however, that
925 * the absolute value of min_yield will correspond to `collected'
bda1446c 926 * on one master (the one which currently is triggering GC).
b37fe1c5 927 *
bda1446c
MD
928 * The reason why we look at total yield instead of cells collected
929 * on one list is that we want to take other freelists into account.
930 * On this freelist, we know that (local) yield = collected cells,
931 * but that's probably not the case on the other lists.
b37fe1c5
MD
932 *
933 * (We might consider computing a better prediction, for example
934 * by computing an average over multiple GC:s.)
935 */
8fef55a8 936 if (freelist->min_yield_fraction)
b37fe1c5 937 {
37ddcaf6 938 /* Pick largest of last two yields. */
1be6b49c 939 long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
8fef55a8 940 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5 941#ifdef DEBUGINFO
1be6b49c
ML
942 fprintf (stderr, " after GC = %lu, delta = %ld\n",
943 (long) scm_cells_allocated,
944 (long) delta);
b37fe1c5
MD
945#endif
946 if (delta > 0)
8fef55a8 947 freelist->min_yield += delta;
b37fe1c5
MD
948 }
949}
950
b6efc951 951
4a4c9785 952/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
953 * common while the freelist will be individual for each thread.
954 */
4a4c9785
MD
955
/* Refill path for cell allocation: take the next cluster off MASTER's
 * cluster list -- growing the heap and/or running GC first if the
 * cluster list is empty -- return the cluster's first cell, and leave
 * the remaining cells of the cluster in *FREELIST. */
956 SCM
92c2555f 957 scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
4a4c9785
MD
958{
959 SCM cell;
960 ++scm_ints_disabled;
4c48ba06
MD
961 do
962 {
c7387918 963 if (SCM_NULLP (master->clusters))
4c48ba06 964 {
150c200b 965 if (master->grow_heap_p || scm_block_gc)
4c48ba06 966 {
b6efc951
DH
967 /* In order to reduce gc frequency, try to allocate a new heap
968 * segment first, even if gc might find some free cells. If we
969 * can't obtain a new heap segment, we will try gc later.
970 */
4c48ba06 971 master->grow_heap_p = 0;
b6efc951 972 alloc_some_heap (master, return_on_error);
4c48ba06 973 }
b6efc951 974 if (SCM_NULLP (master->clusters))
b37fe1c5 975 {
b6efc951
DH
976 /* The heap was not grown, either because it wasn't scheduled to
977 * grow, or because there was not enough memory available. In
978 * both cases we have to try gc to get some free cells.
979 */
37ddcaf6 980#ifdef DEBUGINFO
1be6b49c
ML
981 fprintf (stderr, "allocated = %lu, ",
982 (long) (scm_cells_allocated
37ddcaf6 983 + master_cells_allocated (&scm_master_freelist)
c014a02e 984 + master_cells_allocated (&scm_master_freelist2)));
37ddcaf6 985#endif
b37fe1c5 986 scm_igc ("cells");
8fef55a8 987 adjust_min_yield (master);
c7387918
DH
988 if (SCM_NULLP (master->clusters))
989 {
b6efc951
DH
990 /* gc could not free any cells. Now, we _must_ allocate a
991 * new heap segment, because there is no other possibility
992 * to provide a new cell for the caller.
993 */
994 alloc_some_heap (master, abort_on_error);
c7387918 995 }
b37fe1c5 996 }
4c48ba06
MD
997 }
998 cell = SCM_CAR (master->clusters);
999 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 1000 ++master->clusters_allocated;
4c48ba06
MD
1001 }
/* An empty cluster (cell == SCM_EOL) forces another round of the loop. */
1002 while (SCM_NULLP (cell));
d6884e63
ML
1003
1004#ifdef GUILE_DEBUG_FREELIST
1005 scm_check_freelist (cell);
1006#endif
1007
4a4c9785 1008 --scm_ints_disabled;
/* Hand the rest of the cluster to the caller's thread-local freelist. */
3f5d82cd 1009 *freelist = SCM_FREE_CELL_CDR (cell);
4a4c9785
MD
1010 return cell;
1011}
1012
b6efc951 1013
4c48ba06
MD
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 *
 * Grabs a fresh cluster from MASTER, re-links the remainder of the
 * cluster onto the first cell's cdr, and returns that first cell.
 *
 * Fix: the original declared this function `void' yet ended with
 * `return cell;' -- a constraint violation (C99 6.8.6.4) that would
 * fail to compile if this block were ever enabled.  The return type
 * must be SCM, matching what the routine actually produces. */
SCM
scm_alloc_cluster (scm_t_freelist *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
1029
801cb5e7 1030
92c2555f
MV
1031scm_t_c_hook scm_before_gc_c_hook;
1032scm_t_c_hook scm_before_mark_c_hook;
1033scm_t_c_hook scm_before_sweep_c_hook;
1034scm_t_c_hook scm_after_sweep_c_hook;
1035scm_t_c_hook scm_after_gc_c_hook;
801cb5e7 1036
b6efc951 1037
/* Run one full mark/sweep collection.  WHAT names the trigger (e.g.
 * "call", "cells") and is passed through to the statistics code.
 * Runs the before/after C hooks, marks all roots (C stacks, protected
 * objects, registered roots, the subr table), then sweeps.  Bails out
 * early if the stack base is unset or GC is blocked. */
0f2d19dd 1038void
1bbd0b84 1039scm_igc (const char *what)
0f2d19dd 1040{
c014a02e 1041 long j;
0f2d19dd 1042
406c7d90 1043 ++scm_gc_running_p;
801cb5e7 1044 scm_c_hook_run (&scm_before_gc_c_hook, 0);
4c48ba06
MD
1045#ifdef DEBUGINFO
1046 fprintf (stderr,
1047 SCM_NULLP (scm_freelist)
1048 ? "*"
1049 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
1050#endif
42db06f0 1051 /* During the critical section, only the current thread may run. */
216eedfc 1052 SCM_CRITICAL_SECTION_START;
42db06f0 1053
e242dfd2 1054 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 1055
/* NOTE(review): the early return below leaves the critical section
 * without an explicit SCM_CRITICAL_SECTION_END here -- presumably the
 * macro pair tolerates this; confirm against its definition. */
ab4bef85
JB
1056 if (!scm_stack_base || scm_block_gc)
1057 {
406c7d90 1058 --scm_gc_running_p;
ab4bef85
JB
1059 return;
1060 }
1061
c9b0d4b0
ML
1062 gc_start_stats (what);
1063
ab4bef85
JB
1064 if (scm_gc_heap_lock)
1065 /* We've invoked the collector while a GC is already in progress.
1066 That should never happen. */
1067 abort ();
0f2d19dd
JB
1068
1069 ++scm_gc_heap_lock;
ab4bef85 1070
0f2d19dd
JB
1071 /* flush dead entries from the continuation stack */
1072 {
c014a02e
ML
1073 long x;
1074 long bound;
0f2d19dd
JB
1075 SCM * elts;
1076 elts = SCM_VELTS (scm_continuation_stack);
b5c2579a 1077 bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
0f2d19dd
JB
1078 x = SCM_INUM (scm_continuation_stack_ptr);
1079 while (x < bound)
1080 {
1081 elts[x] = SCM_BOOL_F;
1082 ++x;
1083 }
1084 }
1085
801cb5e7
MD
1086 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1087
d6884e63
ML
1088 clear_mark_space ();
1089
42db06f0 1090#ifndef USE_THREADS
a00c95d9 1091
1b9be268 1092 /* Mark objects on the C stack. */
0f2d19dd
JB
1093 SCM_FLUSH_REGISTER_WINDOWS;
1094 /* This assumes that all registers are saved into the jmp_buf */
1095 setjmp (scm_save_regs_gc_mark);
1096 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
1be6b49c 1097 ( (size_t) (sizeof (SCM_STACKITEM) - 1 +
ce4a361d
JB
1098 sizeof scm_save_regs_gc_mark)
1099 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
1100
1101 {
6b1b030e 1102 unsigned long stack_len = scm_stack_size (scm_stack_base);
0f2d19dd 1103#ifdef SCM_STACK_GROWS_UP
6ba93e5e 1104 scm_mark_locations (scm_stack_base, stack_len);
0f2d19dd 1105#else
6ba93e5e 1106 scm_mark_locations (scm_stack_base - stack_len, stack_len);
0f2d19dd
JB
1107#endif
1108 }
1109
42db06f0
MD
1110#else /* USE_THREADS */
1111
1112 /* Mark every thread's stack and registers */
945fec60 1113 scm_threads_mark_stacks ();
42db06f0
MD
1114
1115#endif /* USE_THREADS */
0f2d19dd 1116
0f2d19dd
JB
1117 j = SCM_NUM_PROTECTS;
1118 while (j--)
1119 scm_gc_mark (scm_sys_protects[j]);
1120
6b1b030e
ML
1121 /* mark the registered roots */
1122 {
1123 long i;
1124 for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) {
1125 SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
1126 for (; ! SCM_NULLP (l); l = SCM_CDR (l)) {
/* Each entry's caar holds a raw SCM* address encoded as a number. */
1127 SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
1128 scm_gc_mark (*p);
1129 }
1130 }
1131 }
1132
9de33deb
MD
1133 /* FIXME: we should have a means to register C functions to be run
1134 * in different phases of GC
a00c95d9 1135 */
9de33deb 1136 scm_mark_subr_table ();
a00c95d9 1137
42db06f0
MD
1138#ifndef USE_THREADS
1139 scm_gc_mark (scm_root->handle);
1140#endif
a00c95d9 1141
/* Marking is done; everything after this point is charged to sweep. */
c9b0d4b0
ML
1142 t_before_sweep = scm_c_get_internal_run_time ();
1143 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1144
801cb5e7 1145 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
0493cd89 1146
0f2d19dd
JB
1147 scm_gc_sweep ();
1148
801cb5e7
MD
1149 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1150
0f2d19dd 1151 --scm_gc_heap_lock;
c9b0d4b0 1152 gc_end_stats ();
42db06f0 1153
216eedfc 1154 SCM_CRITICAL_SECTION_END;
801cb5e7 1155 scm_c_hook_run (&scm_after_gc_c_hook, 0);
406c7d90 1156 --scm_gc_running_p;
0f2d19dd
JB
1157}
1158
1159\f
939794ce 1160
a00c95d9 1161/* {Mark/Sweep}
0f2d19dd
JB
1162 */
1163
56495472
ML
1164#define MARK scm_gc_mark
1165#define FNAME "scm_gc_mark"
0f2d19dd 1166
56495472 1167#endif /*!MARK_DEPENDENCIES*/
0f2d19dd
JB
1168
1169/* Mark an object precisely.
1170 */
a00c95d9 1171void
56495472
ML
1172MARK (SCM p)
1173#define FUNC_NAME FNAME
0f2d19dd 1174{
c014a02e 1175 register long i;
0f2d19dd 1176 register SCM ptr;
92c2555f 1177 scm_t_bits cell_type;
0f2d19dd 1178
56495472
ML
1179#ifndef MARK_DEPENDENCIES
1180# define RECURSE scm_gc_mark
1181#else
1182 /* go through the usual marking, but not for self-cycles. */
1183# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1184#endif
0f2d19dd
JB
1185 ptr = p;
1186
56495472
ML
1187#ifdef MARK_DEPENDENCIES
1188 goto gc_mark_loop_first_time;
1189#endif
1190
86d31dfe
MV
1191/* A simple hack for debugging. Chose the second branch to get a
1192 meaningful backtrace for crashes inside the GC.
1193*/
1194#if 1
1195#define goto_gc_mark_loop goto gc_mark_loop
1196#define goto_gc_mark_nimp goto gc_mark_nimp
1197#else
1198#define goto_gc_mark_loop RECURSE(ptr); return
1199#define goto_gc_mark_nimp RECURSE(ptr); return
1200#endif
1201
0f2d19dd
JB
1202gc_mark_loop:
1203 if (SCM_IMP (ptr))
1204 return;
1205
1206gc_mark_nimp:
56495472
ML
1207
1208#ifdef MARK_DEPENDENCIES
0209177b 1209 if (SCM_EQ_P (ptr, p))
56495472
ML
1210 return;
1211
1212 scm_gc_mark (ptr);
0209177b 1213 return;
56495472
ML
1214
1215gc_mark_loop_first_time:
1216#endif
9a6976cd 1217
61045190 1218#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
9a6976cd 1219 /* We are in debug mode. Check the ptr exhaustively. */
61045190 1220 if (!scm_cellp (ptr))
db4b4ca6 1221 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
9a6976cd
DH
1222#else
1223 /* In non-debug mode, do at least some cheap testing. */
1224 if (!SCM_CELLP (ptr))
1225 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
d6884e63
ML
1226#endif
1227
56495472
ML
1228#ifndef MARK_DEPENDENCIES
1229
d6884e63
ML
1230 if (SCM_GCMARKP (ptr))
1231 return;
56495472 1232
d6884e63
ML
1233 SCM_SETGCMARK (ptr);
1234
56495472
ML
1235#endif
1236
61045190
DH
1237 cell_type = SCM_GC_CELL_TYPE (ptr);
1238 switch (SCM_ITAG7 (cell_type))
0f2d19dd
JB
1239 {
1240 case scm_tcs_cons_nimcar:
d6884e63 1241 if (SCM_IMP (SCM_CDR (ptr)))
0f2d19dd
JB
1242 {
1243 ptr = SCM_CAR (ptr);
86d31dfe 1244 goto_gc_mark_nimp;
0f2d19dd 1245 }
56495472 1246 RECURSE (SCM_CAR (ptr));
d6884e63 1247 ptr = SCM_CDR (ptr);
86d31dfe 1248 goto_gc_mark_nimp;
0f2d19dd 1249 case scm_tcs_cons_imcar:
d6884e63 1250 ptr = SCM_CDR (ptr);
86d31dfe 1251 goto_gc_mark_loop;
e641afaf 1252 case scm_tc7_pws:
22a52da1
DH
1253 RECURSE (SCM_SETTER (ptr));
1254 ptr = SCM_PROCEDURE (ptr);
86d31dfe 1255 goto_gc_mark_loop;
0f2d19dd 1256 case scm_tcs_cons_gloc:
0f2d19dd 1257 {
86d31dfe
MV
1258 /* Dirk:FIXME:: The following code is super ugly: ptr may be a
1259 * struct or a gloc. If it is a gloc, the cell word #0 of ptr
1260 * is the address of a scm_tc16_variable smob. If it is a
1261 * struct, the cell word #0 of ptr is a pointer to a struct
1262 * vtable data region. (The fact that these are accessed in
1263 * the same way restricts the possibilites to change the data
1264 * layout of structs or heap cells.) To discriminate between
1265 * the two, it is guaranteed that the scm_vtable_index_vcell
1266 * element of the prospective vtable is always zero. For a
1267 * gloc, this location has the CDR of the variable smob, which
1268 * is guaranteed to be non-zero.
c8045e8d 1269 */
92c2555f
MV
1270 scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1271 scm_t_bits * vtable_data = (scm_t_bits *) word0; /* access as struct */
7445e0e8 1272 if (vtable_data [scm_vtable_index_vcell] != 0)
0f2d19dd 1273 {
d6884e63
ML
1274 /* ptr is a gloc */
1275 SCM gloc_car = SCM_PACK (word0);
56495472 1276 RECURSE (gloc_car);
d6884e63
ML
1277 ptr = SCM_CDR (ptr);
1278 goto gc_mark_loop;
1279 }
1280 else
1281 {
1282 /* ptr is a struct */
1283 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
c014a02e 1284 long len = SCM_SYMBOL_LENGTH (layout);
06ee04b2 1285 char * fields_desc = SCM_SYMBOL_CHARS (layout);
92c2555f 1286 scm_t_bits * struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);
7bb8eac7 1287
d6884e63
ML
1288 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1289 {
56495472
ML
1290 RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
1291 RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
d6884e63
ML
1292 }
1293 if (len)
1294 {
c014a02e 1295 long x;
7bb8eac7 1296
d6884e63
ML
1297 for (x = 0; x < len - 2; x += 2, ++struct_data)
1298 if (fields_desc[x] == 'p')
56495472 1299 RECURSE (SCM_PACK (*struct_data));
d6884e63
ML
1300 if (fields_desc[x] == 'p')
1301 {
1302 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
56495472
ML
1303 for (x = *struct_data++; x; --x, ++struct_data)
1304 RECURSE (SCM_PACK (*struct_data));
d6884e63 1305 else
56495472 1306 RECURSE (SCM_PACK (*struct_data));
d6884e63
ML
1307 }
1308 }
1309 /* mark vtable */
1310 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
86d31dfe 1311 goto_gc_mark_loop;
0f2d19dd
JB
1312 }
1313 }
1314 break;
1315 case scm_tcs_closures:
22a52da1 1316 if (SCM_IMP (SCM_ENV (ptr)))
0f2d19dd
JB
1317 {
1318 ptr = SCM_CLOSCAR (ptr);
86d31dfe 1319 goto_gc_mark_nimp;
0f2d19dd 1320 }
56495472 1321 RECURSE (SCM_CLOSCAR (ptr));
22a52da1 1322 ptr = SCM_ENV (ptr);
86d31dfe 1323 goto_gc_mark_nimp;
0f2d19dd 1324 case scm_tc7_vector:
b5c2579a
DH
1325 i = SCM_VECTOR_LENGTH (ptr);
1326 if (i == 0)
1327 break;
1328 while (--i > 0)
1329 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
56495472 1330 RECURSE (SCM_VELTS (ptr)[i]);
b5c2579a 1331 ptr = SCM_VELTS (ptr)[0];
86d31dfe 1332 goto_gc_mark_loop;
0f2d19dd
JB
1333#ifdef CCLO
1334 case scm_tc7_cclo:
362306b9 1335 {
1be6b49c
ML
1336 size_t i = SCM_CCLO_LENGTH (ptr);
1337 size_t j;
362306b9
DH
1338 for (j = 1; j != i; ++j)
1339 {
1340 SCM obj = SCM_CCLO_REF (ptr, j);
1341 if (!SCM_IMP (obj))
56495472 1342 RECURSE (obj);
362306b9
DH
1343 }
1344 ptr = SCM_CCLO_REF (ptr, 0);
86d31dfe 1345 goto_gc_mark_loop;
362306b9 1346 }
b5c2579a 1347#endif
afe5177e 1348#ifdef HAVE_ARRAYS
0f2d19dd
JB
1349 case scm_tc7_bvect:
1350 case scm_tc7_byvect:
1351 case scm_tc7_ivect:
1352 case scm_tc7_uvect:
1353 case scm_tc7_fvect:
1354 case scm_tc7_dvect:
1355 case scm_tc7_cvect:
1356 case scm_tc7_svect:
5c11cc9d 1357#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1358 case scm_tc7_llvect:
1359#endif
afe5177e 1360#endif
0f2d19dd 1361 case scm_tc7_string:
0f2d19dd
JB
1362 break;
1363
1364 case scm_tc7_substring:
0f2d19dd 1365 ptr = SCM_CDR (ptr);
86d31dfe 1366 goto_gc_mark_loop;
0f2d19dd
JB
1367
1368 case scm_tc7_wvect:
ab4bef85
JB
1369 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1370 scm_weak_vectors = ptr;
0f2d19dd
JB
1371 if (SCM_IS_WHVEC_ANY (ptr))
1372 {
c014a02e
ML
1373 long x;
1374 long len;
0f2d19dd
JB
1375 int weak_keys;
1376 int weak_values;
1377
b5c2579a 1378 len = SCM_VECTOR_LENGTH (ptr);
0f2d19dd
JB
1379 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1380 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
a00c95d9 1381
0f2d19dd
JB
1382 for (x = 0; x < len; ++x)
1383 {
1384 SCM alist;
1385 alist = SCM_VELTS (ptr)[x];
46408039
JB
1386
1387 /* mark everything on the alist except the keys or
1388 * values, according to weak_values and weak_keys. */
0b5f3f34 1389 while ( SCM_CONSP (alist)
0f2d19dd 1390 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1391 && SCM_CONSP (SCM_CAR (alist)))
1392 {
1393 SCM kvpair;
1394 SCM next_alist;
1395
1396 kvpair = SCM_CAR (alist);
1397 next_alist = SCM_CDR (alist);
a00c95d9 1398 /*
0f2d19dd
JB
1399 * Do not do this:
1400 * SCM_SETGCMARK (alist);
1401 * SCM_SETGCMARK (kvpair);
1402 *
1403 * It may be that either the key or value is protected by
1404 * an escaped reference to part of the spine of this alist.
1405 * If we mark the spine here, and only mark one or neither of the
1406 * key and value, they may never be properly marked.
1407 * This leads to a horrible situation in which an alist containing
1408 * freelist cells is exported.
1409 *
1410 * So only mark the spines of these arrays last of all marking.
1411 * If somebody confuses us by constructing a weak vector
1412 * with a circular alist then we are hosed, but at least we
1413 * won't prematurely drop table entries.
1414 */
1415 if (!weak_keys)
56495472 1416 RECURSE (SCM_CAR (kvpair));
0f2d19dd 1417 if (!weak_values)
56495472 1418 RECURSE (SCM_CDR (kvpair));
0f2d19dd
JB
1419 alist = next_alist;
1420 }
1421 if (SCM_NIMP (alist))
56495472 1422 RECURSE (alist);
0f2d19dd
JB
1423 }
1424 }
1425 break;
1426
28b06554
DH
1427 case scm_tc7_symbol:
1428 ptr = SCM_PROP_SLOTS (ptr);
86d31dfe 1429 goto_gc_mark_loop;
0f2d19dd 1430 case scm_tcs_subrs:
9de33deb 1431 break;
0f2d19dd
JB
1432 case scm_tc7_port:
1433 i = SCM_PTOBNUM (ptr);
7a7f7c53 1434#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1435 if (!(i < scm_numptob))
7a7f7c53
DH
1436 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1437#endif
ebf7394e 1438 if (SCM_PTAB_ENTRY(ptr))
56495472 1439 RECURSE (SCM_FILENAME (ptr));
dc53f026
JB
1440 if (scm_ptobs[i].mark)
1441 {
1442 ptr = (scm_ptobs[i].mark) (ptr);
86d31dfe 1443 goto_gc_mark_loop;
dc53f026
JB
1444 }
1445 else
1446 return;
0f2d19dd
JB
1447 break;
1448 case scm_tc7_smob:
d6884e63 1449 switch (SCM_TYP16 (ptr))
0f2d19dd
JB
1450 { /* should be faster than going through scm_smobs */
1451 case scm_tc_free_cell:
1452 /* printf("found free_cell %X ", ptr); fflush(stdout); */
acb0a19c
MD
1453 case scm_tc16_big:
1454 case scm_tc16_real:
1455 case scm_tc16_complex:
0f2d19dd
JB
1456 break;
1457 default:
1458 i = SCM_SMOBNUM (ptr);
7a7f7c53 1459#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1460 if (!(i < scm_numsmob))
7a7f7c53
DH
1461 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1462#endif
dc53f026
JB
1463 if (scm_smobs[i].mark)
1464 {
1465 ptr = (scm_smobs[i].mark) (ptr);
86d31dfe 1466 goto_gc_mark_loop;
dc53f026
JB
1467 }
1468 else
1469 return;
0f2d19dd
JB
1470 }
1471 break;
1472 default:
acf4331f 1473 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1474 }
0209177b 1475#undef RECURSE
0f2d19dd 1476}
acf4331f 1477#undef FUNC_NAME
0f2d19dd 1478
56495472
ML
1479#ifndef MARK_DEPENDENCIES
1480
1481#undef MARK
56495472
ML
1482#undef FNAME
1483
1484/* And here we define `scm_gc_mark_dependencies', by including this
1485 * same file in itself.
1486 */
1487#define MARK scm_gc_mark_dependencies
1488#define FNAME "scm_gc_mark_dependencies"
1489#define MARK_DEPENDENCIES
1490#include "gc.c"
1491#undef MARK_DEPENDENCIES
1492#undef MARK
56495472
ML
1493#undef FNAME
1494
0f2d19dd
JB
1495
1496/* Mark a Region Conservatively
1497 */
1498
/* Conservatively scan the N words starting at X: any word whose bit
 * pattern could be a heap cell pointer is located in the heap segment
 * table by binary search and, if it points at a properly aligned data
 * cell, marked as live via scm_gc_mark. */
a00c95d9 1499void
c014a02e 1500scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
0f2d19dd 1501{
c014a02e 1502 unsigned long m;
0f2d19dd 1503
c4da09e2
DH
1504 for (m = 0; m < n; ++m)
1505 {
1506 SCM obj = * (SCM *) &x[m];
1507 if (SCM_CELLP (obj))
1508 {
1509 SCM_CELLPTR ptr = SCM2PTR (obj);
c014a02e
ML
1510 long i = 0;
1511 long j = scm_n_heap_segs - 1;
/* Quick reject: pointer must lie within the overall heap bounds. */
c4da09e2
DH
1512 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1513 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1514 {
/* Binary search for the segment containing ptr; seg_id ends up as
 * the matching segment index, or the search breaks out early. */
1515 while (i <= j)
1516 {
c014a02e 1517 long seg_id;
c4da09e2
DH
1518 seg_id = -1;
1519 if ((i == j)
1520 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1521 seg_id = i;
1522 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1523 seg_id = j;
1524 else
1525 {
c014a02e 1526 long k;
c4da09e2
DH
1527 k = (i + j) / 2;
1528 if (k == i)
1529 break;
1530 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1531 {
1532 j = k;
1533 ++i;
1534 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1535 continue;
1536 else
1537 break;
1538 }
1539 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1540 {
1541 i = k;
1542 --j;
1543 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1544 continue;
1545 else
1546 break;
1547 }
1548 }
7bb8eac7 1549
/* Card header cells hold GC bookkeeping, never live objects. */
d6884e63
ML
1550 if (SCM_GC_IN_CARD_HEADERP (ptr))
1551 break;
7bb8eac7 1552
/* Double-cell segments require double-cell alignment of obj. */
c4da09e2 1553 if (scm_heap_table[seg_id].span == 1
ecf470a2 1554 || DOUBLECELL_ALIGNED_P (obj))
3731149d
ML
1555 scm_gc_mark (obj);
1556
c4da09e2
DH
1557 break;
1558 }
1559 }
1560 }
1561 }
0f2d19dd
JB
1562}
1563
1564
1a548472
DH
1565/* The function scm_cellp determines whether an SCM value can be regarded as a
1566 * pointer to a cell on the heap. Binary search is used in order to determine
1567 * the heap segment that contains the cell.
1568 */
/* Return 1 if VALUE can be regarded as a pointer to a live-able cell
 * on the heap, 0 otherwise.  Binary search over the (sorted) heap
 * segment table locates the candidate segment; the final check also
 * rejects card-header cells and misaligned double-cell pointers. */
2e11a577 1569int
6e8d25a6 1570scm_cellp (SCM value)
2e11a577 1571{
1a548472
DH
1572 if (SCM_CELLP (value)) {
1573 scm_cell * ptr = SCM2PTR (value);
c014a02e
ML
1574 unsigned long i = 0;
1575 unsigned long j = scm_n_heap_segs - 1;
1a548472 1576
61045190
DH
1577 if (SCM_GC_IN_CARD_HEADERP (ptr))
1578 return 0;
1579
/* Narrow [i, j] to the single segment that could contain ptr. */
1a548472 1580 while (i < j) {
c014a02e 1581 long k = (i + j) / 2;
1a548472
DH
1582 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1583 j = k;
1584 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1585 i = k + 1;
1586 }
1587 }
2e11a577 1588
9d47a1e6 1589 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1a548472 1590 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
ecf470a2 1591 && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
d6884e63
ML
1592 && !SCM_GC_IN_CARD_HEADERP (ptr)
1593 )
1a548472 1594 return 1;
d6884e63 1595 else
1a548472 1596 return 0;
d6884e63 1597 } else
1a548472 1598 return 0;
2e11a577
MD
1599}
1600
1601
/* Reset FREELIST's per-sweep state before a sweep pass: empty the cell
 * and cluster chains, reset the cluster fill counter, and rotate the
 * `collected' counter into `collected_1' (last sweep's count). */
4c48ba06 1602static void
92c2555f 1603gc_sweep_freelist_start (scm_t_freelist *freelist)
4c48ba06
MD
1604{
1605 freelist->cells = SCM_EOL;
1606 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1607 freelist->clusters_allocated = 0;
4c48ba06
MD
1608 freelist->clusters = SCM_EOL;
1609 freelist->clustertail = &freelist->clusters;
1811ebce 1610 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1611 freelist->collected = 0;
1612}
1613
/* Finish a sweep pass over FREELIST: append any partial final cluster,
 * fold its cells into the collected count, accumulate into the global
 * counter, and decide whether the heap should grow before the next
 * collection (grow when even the better of the last two sweeps fell
 * short of min_yield). */
1614static void
92c2555f 1615gc_sweep_freelist_finish (scm_t_freelist *freelist)
4c48ba06 1616{
c014a02e 1617 long collected;
4c48ba06 1618 *freelist->clustertail = freelist->cells;
3f5d82cd 1619 if (!SCM_NULLP (freelist->cells))
4c48ba06
MD
1620 {
/* Turn the leftover cell chain into a proper final cluster node. */
1621 SCM c = freelist->cells;
22a52da1
DH
1622 SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
1623 SCM_SET_CELL_WORD_1 (c, SCM_EOL);
4c48ba06
MD
1624 freelist->collected +=
1625 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1626 }
b37fe1c5 1627 scm_gc_cells_collected += freelist->collected;
a00c95d9 1628
8fef55a8 1629 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1630 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1631 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1632 * size. This means that a too low yield is compensated by more
1633 * heap on the list which is currently doing most work, which is
1634 * just what we want.
1635 */
1811ebce 1636 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1637 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06 1638}
0f2d19dd 1639
/* Advance PTR to the next data cell of the given SPAN, skipping over
 * GC card headers: if the naive next cell lands inside a card header,
 * jump past the header cells of that card instead. */
d6884e63
ML
1640#define NEXT_DATA_CELL(ptr, span) \
1641 do { \
1642 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1643 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1644 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1645 : nxt__); \
1646 } while (0)
1647
a00c95d9 1648void
0f2d19dd 1649scm_gc_sweep ()
acf4331f 1650#define FUNC_NAME "scm_gc_sweep"
0f2d19dd
JB
1651{
1652 register SCM_CELLPTR ptr;
0f2d19dd 1653 register SCM nfreelist;
92c2555f 1654 register scm_t_freelist *freelist;
c014a02e 1655 register unsigned long m;
0f2d19dd 1656 register int span;
c014a02e 1657 long i;
1be6b49c 1658 size_t seg_size;
0f2d19dd 1659
0f2d19dd 1660 m = 0;
0f2d19dd 1661
4c48ba06
MD
1662 gc_sweep_freelist_start (&scm_master_freelist);
1663 gc_sweep_freelist_start (&scm_master_freelist2);
a00c95d9 1664
cf2d30f6 1665 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1666 {
c014a02e 1667 register long left_to_collect;
1be6b49c 1668 register size_t j;
15e9d186 1669
cf2d30f6
JB
1670 /* Unmarked cells go onto the front of the freelist this heap
1671 segment points to. Rather than updating the real freelist
1672 pointer as we go along, we accumulate the new head in
1673 nfreelist. Then, if it turns out that the entire segment is
1674 free, we free (i.e., malloc's free) the whole segment, and
1675 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1676 freelist = scm_heap_table[i].freelist;
1677 nfreelist = freelist->cells;
4c48ba06 1678 left_to_collect = freelist->left_to_collect;
945fec60 1679 span = scm_heap_table[i].span;
cf2d30f6 1680
a00c95d9
ML
1681 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1682 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
c9b0d4b0 1683
d6884e63
ML
1684 /* use only data cells in seg_size */
1685 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1686
c9b0d4b0
ML
1687 scm_gc_cells_swept += seg_size;
1688
0f2d19dd
JB
1689 for (j = seg_size + span; j -= span; ptr += span)
1690 {
d6884e63 1691 SCM scmptr;
96f6f4ae 1692
d6884e63 1693 if (SCM_GC_IN_CARD_HEADERP (ptr))
0f2d19dd 1694 {
d6884e63
ML
1695 SCM_CELLPTR nxt;
1696
1697 /* cheat here */
1698 nxt = ptr;
1699 NEXT_DATA_CELL (nxt, span);
1700 j += span;
1701
1702 ptr = nxt - span;
1703 continue;
1704 }
1705
1706 scmptr = PTR2SCM (ptr);
1707
1708 if (SCM_GCMARKP (scmptr))
1709 continue;
7bb8eac7 1710
d6884e63
ML
1711 switch SCM_TYP7 (scmptr)
1712 {
0f2d19dd 1713 case scm_tcs_cons_gloc:
0f2d19dd 1714 {
c8045e8d
DH
1715 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1716 * struct or a gloc. See the corresponding comment in
1717 * scm_gc_mark.
1718 */
92c2555f 1719 scm_t_bits word0 = (SCM_CELL_WORD_0 (scmptr)
7445e0e8
MD
1720 - scm_tc3_cons_gloc);
1721 /* access as struct */
92c2555f 1722 scm_t_bits * vtable_data = (scm_t_bits *) word0;
d6884e63 1723 if (vtable_data[scm_vtable_index_vcell] == 0)
0f2d19dd 1724 {
7445e0e8
MD
1725 /* Structs need to be freed in a special order.
1726 * This is handled by GC C hooks in struct.c.
1727 */
1728 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1729 scm_structs_to_free = scmptr;
7bb8eac7 1730 continue;
c8045e8d 1731 }
7445e0e8 1732 /* fall through so that scmptr gets collected */
0f2d19dd
JB
1733 }
1734 break;
1735 case scm_tcs_cons_imcar:
1736 case scm_tcs_cons_nimcar:
1737 case scm_tcs_closures:
e641afaf 1738 case scm_tc7_pws:
0f2d19dd
JB
1739 break;
1740 case scm_tc7_wvect:
b5c2579a 1741 m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
06ee04b2 1742 scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
d6884e63 1743 break;
0f2d19dd 1744 case scm_tc7_vector:
1b9be268 1745 {
c014a02e 1746 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1b9be268
DH
1747 if (length > 0)
1748 {
92c2555f 1749 m += length * sizeof (scm_t_bits);
1b9be268
DH
1750 scm_must_free (SCM_VECTOR_BASE (scmptr));
1751 }
1752 break;
1753 }
0f2d19dd
JB
1754#ifdef CCLO
1755 case scm_tc7_cclo:
b5c2579a 1756 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
06ee04b2 1757 scm_must_free (SCM_CCLO_BASE (scmptr));
0f2d19dd 1758 break;
06ee04b2 1759#endif
afe5177e 1760#ifdef HAVE_ARRAYS
0f2d19dd 1761 case scm_tc7_bvect:
93778877 1762 {
c014a02e 1763 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
93778877
DH
1764 if (length > 0)
1765 {
c014a02e 1766 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
93778877
DH
1767 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1768 }
1769 }
06ee04b2 1770 break;
0f2d19dd 1771 case scm_tc7_byvect:
0f2d19dd
JB
1772 case scm_tc7_ivect:
1773 case scm_tc7_uvect:
0f2d19dd 1774 case scm_tc7_svect:
5c11cc9d 1775#ifdef HAVE_LONG_LONGS
0f2d19dd 1776 case scm_tc7_llvect:
0f2d19dd
JB
1777#endif
1778 case scm_tc7_fvect:
0f2d19dd 1779 case scm_tc7_dvect:
0f2d19dd 1780 case scm_tc7_cvect:
d1ca2c64 1781 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
06ee04b2
DH
1782 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1783 break;
afe5177e 1784#endif
0f2d19dd 1785 case scm_tc7_substring:
0f2d19dd
JB
1786 break;
1787 case scm_tc7_string:
b5c2579a 1788 m += SCM_STRING_LENGTH (scmptr) + 1;
f151f912
DH
1789 scm_must_free (SCM_STRING_CHARS (scmptr));
1790 break;
28b06554 1791 case scm_tc7_symbol:
b5c2579a 1792 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
f151f912 1793 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
0f2d19dd 1794 break;
0f2d19dd 1795 case scm_tcs_subrs:
d6884e63 1796 /* the various "subrs" (primitives) are never freed */
0f2d19dd
JB
1797 continue;
1798 case scm_tc7_port:
0f2d19dd
JB
1799 if SCM_OPENP (scmptr)
1800 {
1801 int k = SCM_PTOBNUM (scmptr);
7a7f7c53 1802#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1803 if (!(k < scm_numptob))
7a7f7c53
DH
1804 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1805#endif
0f2d19dd 1806 /* Keep "revealed" ports alive. */
945fec60 1807 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1808 continue;
1809 /* Yes, I really do mean scm_ptobs[k].free */
1810 /* rather than ftobs[k].close. .close */
1811 /* is for explicit CLOSE-PORT by user */
84af0382 1812 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1813 SCM_SETSTREAM (scmptr, 0);
1814 scm_remove_from_port_table (scmptr);
1815 scm_gc_ports_collected++;
22a52da1 1816 SCM_CLR_PORT_OPEN_FLAG (scmptr);
0f2d19dd
JB
1817 }
1818 break;
1819 case scm_tc7_smob:
d6884e63 1820 switch SCM_TYP16 (scmptr)
0f2d19dd
JB
1821 {
1822 case scm_tc_free_cell:
acb0a19c 1823 case scm_tc16_real:
0f2d19dd
JB
1824 break;
1825#ifdef SCM_BIGDIG
acb0a19c 1826 case scm_tc16_big:
0f2d19dd 1827 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
06ee04b2
DH
1828 scm_must_free (SCM_BDIGITS (scmptr));
1829 break;
0f2d19dd 1830#endif /* def SCM_BIGDIG */
acb0a19c 1831 case scm_tc16_complex:
92c2555f 1832 m += sizeof (scm_t_complex);
405aaef9 1833 scm_must_free (SCM_COMPLEX_MEM (scmptr));
06ee04b2 1834 break;
0f2d19dd 1835 default:
0f2d19dd
JB
1836 {
1837 int k;
1838 k = SCM_SMOBNUM (scmptr);
7a7f7c53 1839#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1840 if (!(k < scm_numsmob))
7a7f7c53
DH
1841 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1842#endif
1843 if (scm_smobs[k].free)
1844 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1845 break;
1846 }
1847 }
1848 break;
1849 default:
acf4331f 1850 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1851 }
7bb8eac7 1852
4c48ba06 1853 if (!--left_to_collect)
4a4c9785 1854 {
22a52da1 1855 SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
4c48ba06
MD
1856 *freelist->clustertail = scmptr;
1857 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1858
4a4c9785 1859 nfreelist = SCM_EOL;
4c48ba06
MD
1860 freelist->collected += span * freelist->cluster_size;
1861 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1862 }
1863 else
4a4c9785
MD
1864 {
1865 /* Stick the new cell on the front of nfreelist. It's
1866 critical that we mark this cell as freed; otherwise, the
1867 conservative collector might trace it as some other type
1868 of object. */
54778cd3 1869 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
3f5d82cd 1870 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
4a4c9785
MD
1871 nfreelist = scmptr;
1872 }
0f2d19dd 1873 }
d6884e63 1874
0f2d19dd
JB
1875#ifdef GC_FREE_SEGMENTS
1876 if (n == seg_size)
1877 {
c014a02e 1878 register long j;
15e9d186 1879
4c48ba06 1880 freelist->heap_size -= seg_size;
cf2d30f6
JB
1881 free ((char *) scm_heap_table[i].bounds[0]);
1882 scm_heap_table[i].bounds[0] = 0;
1883 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1884 scm_heap_table[j - 1] = scm_heap_table[j];
1885 scm_n_heap_segs -= 1;
cf2d30f6 1886 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1887 }
1888 else
1889#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1890 {
1891 /* Update the real freelist pointer to point to the head of
1892 the list of free cells we've built for this segment. */
4c48ba06 1893 freelist->cells = nfreelist;
4c48ba06 1894 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1895 }
1896
fca7547b 1897#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
1898 scm_map_free_list ();
1899#endif
4a4c9785 1900 }
a00c95d9 1901
4c48ba06
MD
1902 gc_sweep_freelist_finish (&scm_master_freelist);
1903 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1904
8ded62a3
MD
1905 /* When we move to POSIX threads private freelists should probably
1906 be GC-protected instead. */
1907 scm_freelist = SCM_EOL;
1908 scm_freelist2 = SCM_EOL;
a00c95d9 1909
b37fe1c5 1910 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f 1911 scm_gc_yield -= scm_cells_allocated;
1be6b49c
ML
1912
1913 if (scm_mallocated < m)
1914 /* The byte count of allocated objects has underflowed. This is
1915 probably because you forgot to report the sizes of objects you
1916 have allocated, by calling scm_done_malloc or some such. When
1917 the GC freed them, it subtracted their size from
1918 scm_mallocated, which underflowed. */
1919 abort ();
1920
0f2d19dd
JB
1921 scm_mallocated -= m;
1922 scm_gc_malloc_collected = m;
1923}
acf4331f 1924#undef FUNC_NAME
0f2d19dd
JB
1925
1926
1927\f
0f2d19dd
JB
1928/* {Front end to malloc}
1929 *
9d47a1e6
ML
1930 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1931 * scm_done_free
0f2d19dd 1932 *
c6c79933
GH
1933 * These functions provide services comparable to malloc, realloc, and
1934 * free. They should be used when allocating memory that will be under
1935 * control of the garbage collector, i.e., if the memory may be freed
1936 * during garbage collection.
1937 */
bc9d9bb2 1938
0f2d19dd
JB
1939/* scm_must_malloc
1940 * Return newly malloced storage or throw an error.
1941 *
1942 * The parameter WHAT is a string for error reporting.
a00c95d9 1943 * If the threshold scm_mtrigger will be passed by this
0f2d19dd
JB
1944 * allocation, or if the first call to malloc fails,
1945 * garbage collect -- on the presumption that some objects
1946 * using malloced storage may be collected.
1947 *
1948 * The limit scm_mtrigger may be raised by this allocation.
1949 */
/* Allocate SIZE bytes or signal an out-of-memory error.
 *
 * WHAT is a short label used in error reports and malloc debugging.
 * The running total scm_mallocated is updated on success; if the
 * request would push it past scm_mtrigger, or if the first malloc
 * fails, a garbage collection is run and the allocation retried --
 * on the presumption that collectable objects may release malloced
 * storage.  On success the trigger scm_mtrigger may be raised so the
 * next GC does not fire immediately.  Never returns NULL: the final
 * failure path calls scm_memory_error, which does not return.
 */
void *
scm_must_malloc (size_t size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  if (nm < size)
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  /* Fast path: still under the GC trigger, try malloc directly.  */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
	{
	  scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
	  scm_malloc_register (ptr, what);
#endif
	  return ptr;
	}
    }

  /* Either the trigger was passed or malloc failed: collect garbage
     and recompute the projected total (a sweep may have lowered
     scm_mallocated).  */
  scm_igc (what);

  nm = scm_mallocated + size;

  if (nm < size)
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger with hysteresis so that an allocation that
         lands just past it does not cause a GC on every call.  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
	if (nm > scm_mtrigger)
	  scm_mtrigger = nm + nm / 2;
	else
	  scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  scm_memory_error (what);
}
2004
2005
/* Grow the block WHERE from OLD_SIZE to SIZE bytes, or signal an
 * out-of-memory error.  Behaves like scm_must_malloc: the projected
 * byte total is checked against scm_mtrigger, a GC is run and the
 * realloc retried if needed, and the trigger may be raised on
 * success.  Shrinking requests (SIZE <= OLD_SIZE) return WHERE
 * unchanged.  OLD_SIZE must be the size previously reported for
 * WHERE, since only the delta is added to scm_mallocated.
 */
void *
scm_must_realloc (void *where,
		  size_t old_size,
		  size_t size,
		  const char *what)
{
  void *ptr;
  unsigned long nm;

  if (size <= old_size)
    return where;

  nm = scm_mallocated + size - old_size;

  if (nm < (size - old_size))
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  /* Fast path: still under the GC trigger.  */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
	{
	  scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
	  scm_malloc_reregister (where, ptr, what);
#endif
	  return ptr;
	}
    }

  /* Collect and retry once, as in scm_must_malloc.  */
  scm_igc (what);

  nm = scm_mallocated + size - old_size;

  if (nm < (size - old_size))
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger with hysteresis (see scm_must_malloc).  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
	if (nm > scm_mtrigger)
	  scm_mtrigger = nm + nm / 2;
	else
	  scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  scm_memory_error (what);
}
2070
/* Copy the first LENGTH bytes of STR into freshly allocated storage
   and NUL-terminate the copy.  The result is obtained through
   scm_must_malloc, so allocation failure is handled there and the
   byte count is reported to the GC accounting.  */
char *
scm_must_strndup (const char *str, size_t length)
{
  char *copy = scm_must_malloc (length + 1, "scm_must_strndup");

  memcpy (copy, str, length);
  copy[length] = '\0';
  return copy;
}
2079
/* Duplicate the NUL-terminated string STR via scm_must_strndup; the
   copy is allocated with scm_must_malloc and participates in GC
   malloc accounting.  */
char *
scm_must_strdup (const char *str)
{
  size_t len = strlen (str);

  return scm_must_strndup (str, len);
}
acf4331f 2085
/* Release storage obtained from scm_must_malloc/scm_must_realloc.
 * Passing NULL is treated as a caller bug and raises an error instead
 * of being silently ignored.  Note that this does NOT adjust
 * scm_mallocated; the freed byte count must be reported separately
 * (e.g. via scm_done_free or a smob free method's return value).
 */
void
scm_must_free (void *obj)
#define FUNC_NAME "scm_must_free"
{
#ifdef GUILE_DEBUG_MALLOC
  /* Drop the debug-registry entry before the memory goes away.  */
  scm_malloc_unregister (obj);
#endif
  if (obj)
    free (obj);
  else
    SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
}
#undef FUNC_NAME
2099
0f2d19dd 2100
c68296f8
MV
2101/* Announce that there has been some malloc done that will be freed
2102 * during gc. A typical use is for a smob that uses some malloced
2103 * memory but can not get it from scm_must_malloc (for whatever
2104 * reason). When a new object of this smob is created you call
2105 * scm_done_malloc with the size of the object. When your smob free
2106 * function is called, be sure to include this size in the return
9d47a1e6
ML
2107 * value.
2108 *
2109 * If you can't actually free the memory in the smob free function,
2110 * for whatever reason (like reference counting), you still can (and
2111 * should) report the amount of memory freed when you actually free it.
2112 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2113 * eh? Or even better, call scm_done_free. */
0f2d19dd 2114
c68296f8 2115void
c014a02e 2116scm_done_malloc (long size)
c68296f8 2117{
1be6b49c
ML
2118 if (size < 0) {
2119 if (scm_mallocated < size)
2120 /* The byte count of allocated objects has underflowed. This is
2121 probably because you forgot to report the sizes of objects you
2122 have allocated, by calling scm_done_malloc or some such. When
2123 the GC freed them, it subtracted their size from
2124 scm_mallocated, which underflowed. */
2125 abort ();
2126 } else {
c014a02e 2127 unsigned long nm = scm_mallocated + size;
1be6b49c
ML
2128 if (nm < size)
2129 /* The byte count of allocated objects has overflowed. This is
2130 probably because you forgot to report the correct size of freed
2131 memory in some of your smob free methods. */
2132 abort ();
2133 }
2134
c68296f8
MV
2135 scm_mallocated += size;
2136
2137 if (scm_mallocated > scm_mtrigger)
2138 {
2139 scm_igc ("foreign mallocs");
2140 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2141 {
2142 if (scm_mallocated > scm_mtrigger)
2143 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2144 else
2145 scm_mtrigger += scm_mtrigger / 2;
2146 }
2147 }
2148}
2149
9d47a1e6 2150void
c014a02e 2151scm_done_free (long size)
9d47a1e6 2152{
1be6b49c
ML
2153 if (size >= 0) {
2154 if (scm_mallocated < size)
2155 /* The byte count of allocated objects has underflowed. This is
2156 probably because you forgot to report the sizes of objects you
2157 have allocated, by calling scm_done_malloc or some such. When
2158 the GC freed them, it subtracted their size from
2159 scm_mallocated, which underflowed. */
2160 abort ();
2161 } else {
c014a02e 2162 unsigned long nm = scm_mallocated + size;
1be6b49c
ML
2163 if (nm < size)
2164 /* The byte count of allocated objects has overflowed. This is
2165 probably because you forgot to report the correct size of freed
2166 memory in some of your smob free methods. */
2167 abort ();
2168 }
2169
9d47a1e6
ML
2170 scm_mallocated -= size;
2171}
2172
c68296f8
MV
2173
2174\f
0f2d19dd
JB
2175/* {Heap Segments}
2176 *
2177 * Each heap segment is an array of objects of a particular size.
2178 * Every segment has an associated (possibly shared) freelist.
2179 * A table of segment records is kept that records the upper and
2180 * lower extents of the segment; this is used during the conservative
2181 * phase of gc to identify probably gc roots (because they point
c68296f8 2182 * into valid segments at reasonable offsets). */
0f2d19dd
JB
2183
/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* Upper bound, in bytes, on the size of any single heap segment;
   initialised from GUILE_MAX_SEGMENT_SIZE in scm_init_storage.  */
size_t scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Table of heap segment records, kept sorted by base address (see
   init_heap_seg).  scm_n_heap_segs entries are in use out of
   heap_segment_table_size allocated slots; the table is grown by
   alloc_some_heap.  */
scm_t_heap_seg_data * scm_heap_table = 0;
static size_t heap_segment_table_size = 0;
size_t scm_n_heap_segs = 0;
0f2d19dd 2201
0f2d19dd 2202/* init_heap_seg
d6884e63 2203 * initializes a new heap segment and returns the number of objects it contains.
0f2d19dd 2204 *
d6884e63
ML
2205 * The segment origin and segment size in bytes are input parameters.
2206 * The freelist is both input and output.
0f2d19dd 2207 *
d6884e63
ML
2208 * This function presumes that the scm_heap_table has already been expanded
2209 * to accomodate a new segment record and that the markbit space was reserved
2210 * for all the cards in this segment.
0f2d19dd
JB
2211 */
2212
d6884e63
ML
/* Initialize one fresh GC card: attach a mark bitvector to it and,
   when the owning freelist holds two-cell objects, tag the card as a
   double-cell card.  */
#define INIT_CARD(card, span) \
  do { \
    SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
    if ((span) == 2) \
      SCM_GC_SET_CARD_DOUBLECELL (card); \
  } while (0)
0f2d19dd 2219
/* Initialise a newly allocated heap segment of SIZE bytes starting at
 * SEG_ORG, register it in scm_heap_table (which must already have a
 * free slot), card-align it, carve it into clusters of free cells
 * threaded onto FREELIST, and return SIZE.  Returns 0 if SEG_ORG is
 * NULL (allocation failed upstream).
 */
static size_t
init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  long new_seg_index;
  ptrdiff_t n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Align the begin ptr up.
   */
  ptr = SCM_GC_CARD_UP (seg_org);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);

  /* Find the right place and insert the segment record.  The table is
   * kept sorted by base address so the conservative scan can binary
   * search it; shift the later records up by one slot.
   */
  for (new_seg_index = 0;
       ( (new_seg_index < scm_n_heap_segs)
	 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;

    NEXT_DATA_CELL (ptr, span);
    while (ptr < seg_end)
      {
	scm_cell *nxt = ptr;
	scm_cell *prv = NULL;
	scm_cell *last_card = NULL;
	int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
	NEXT_DATA_CELL(nxt, span);

	/* Allocate cluster spine: the first cell of each cluster links
	 * the clusters together via its cdr slot.
	 */
	*clusterp = PTR2SCM (ptr);
	SCM_SETCAR (*clusterp, PTR2SCM (nxt));
	clusterp = SCM_CDRLOC (*clusterp);
	ptr = nxt;

	/* Thread the remaining data cells of the cluster into a free
	 * list, initialising each card the first time it is touched.
	 */
	while (n_data_cells--)
	  {
	    scm_cell *card = SCM_GC_CELL_CARD (ptr);
	    SCM scmptr = PTR2SCM (ptr);
	    nxt = ptr;
	    NEXT_DATA_CELL (nxt, span);
	    prv = ptr;

	    if (card != last_card)
	      {
		INIT_CARD (card, span);
		last_card = card;
	      }

	    /* Mark the cell as free so the conservative collector never
	       mistakes it for a live object.  */
	    SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	    SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));

	    ptr = nxt;
	  }

	SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
      }

    /* sanity check */
    {
      scm_cell *ref = seg_end;
      NEXT_DATA_CELL (ref, span);
      if (ref != ptr)
	/* [cmm] looks like the segment size doesn't divide cleanly by
	   cluster size. bad cmm! */
	abort();
    }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
2333
1be6b49c 2334static size_t
92c2555f 2335round_to_cluster_size (scm_t_freelist *freelist, size_t len)
a00c95d9 2336{
1be6b49c 2337 size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9
ML
2338
2339 return
2340 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2341 + ALIGNMENT_SLACK (freelist);
2342}
2343
/* Add a new heap segment for FREELIST.  Grows the segment record
 * table if necessary, picks a segment size large enough that the next
 * GC is predicted to meet the freelist's minimum yield, clamps it to
 * scm_max_segment_size, and allocates with decaying ambition (halving
 * the request until malloc succeeds).  On failure either aborts or
 * returns silently, according to ERROR_POLICY.
 */
static void
alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
#define FUNC_NAME "alloc_some_heap"
{
  SCM_CELLPTR ptr;
  size_t len;

  if (scm_gc_heap_lock)
    {
      /* Critical code sections (such as the garbage collector) aren't
       * supposed to add heap segments.
       */
      fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
      abort ();
    }

  if (scm_n_heap_segs == heap_segment_table_size)
    {
      /* We have to expand the heap segment table to have room for the new
       * segment.  Do not yet increment scm_n_heap_segs -- that is done by
       * init_heap_seg only if the allocation of the segment itself succeeds.
       */
      size_t new_table_size = scm_n_heap_segs + 1;
      size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
      scm_t_heap_seg_data *new_heap_table;

      SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
				     realloc ((char *)scm_heap_table, size)));
      if (!new_heap_table)
	{
	  if (error_policy == abort_on_error)
	    {
	      fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
	      abort ();
	    }
	  else
	    {
	      return;
	    }
	}
      else
	{
	  scm_heap_table = new_heap_table;
	  heap_segment_table_size = new_table_size;
	}
    }

  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  {
    /* Assure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f)
     */
    int f = freelist->min_yield_fraction;
    unsigned long h = SCM_HEAP_SIZE;
    size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    size_t smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition: halve the request on each
     * failure until it drops below the minimum useful segment size.
     */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
	   && (len >= smallest))
      {
	size_t rounded_len = round_to_cluster_size (freelist, len);
	SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
	if (ptr)
	  {
	    init_heap_seg (ptr, rounded_len, freelist);
	    return;
	  }
	len /= 2;
      }
  }

  if (error_policy == abort_on_error)
    {
      fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
      abort ();
    }
}
#undef FUNC_NAME
0f2d19dd 2456
0f2d19dd
JB
2457\f
2458/* {GC Protection Helper Functions}
2459 */
2460
2461
5d2b97cd
DH
2462/*
2463 * If within a function you need to protect one or more scheme objects from
2464 * garbage collection, pass them as parameters to one of the
2465 * scm_remember_upto_here* functions below. These functions don't do
2466 * anything, but since the compiler does not know that they are actually
2467 * no-ops, it will generate code that calls these functions with the given
2468 * parameters. Therefore, you can be sure that the compiler will keep those
2469 * scheme values alive (on the stack or in a register) up to the point where
2470 * scm_remember_upto_here* is called. In other words, place the call to
 * scm_remember_upto_here* _behind_ the last code in your function that
 * depends on the scheme object to exist.
2473 *
2474 * Example: We want to make sure, that the string object str does not get
2475 * garbage collected during the execution of 'some_function', because
2476 * otherwise the characters belonging to str would be freed and
2477 * 'some_function' might access freed memory. To make sure that the compiler
2478 * keeps str alive on the stack or in a register such that it is visible to
2479 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2480 * call to 'some_function'. Note that this would not be necessary if str was
2481 * used anyway after the call to 'some_function'.
2482 * char *chars = SCM_STRING_CHARS (str);
2483 * some_function (chars);
2484 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2485 */
2486
/* Deliberately empty: calling this keeps OBJ live (on the stack or in
   a register, hence visible to the conservative GC) up to the call
   site.  See the explanatory comment above.  */
void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty. Protects a single object from garbage collection. */
}
2492
/* Deliberately empty: keeps OBJ1 and OBJ2 live up to the call site
   for the conservative GC.  See the explanatory comment above.  */
void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty. Protects two objects from garbage collection. */
}
2498
/* Deliberately empty: variadic form that keeps any number of objects
   live up to the call site.  See the explanatory comment above.  */
void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty. Protects any number of objects from garbage collection. */
}
2504
2505
2506#if (SCM_DEBUG_DEPRECATED == 0)
2507
/* Deprecated no-op kept for source compatibility; emits a runtime
   deprecation warning directing callers to scm_remember_upto_here*.
   PTR is intentionally unused.  */
void
scm_remember (SCM *ptr)
{
  scm_c_issue_deprecation_warning ("`scm_remember' is deprecated. "
                                   "Use the `scm_remember_upto_here*' family of functions instead.");
}
0f2d19dd 2514
6b1b030e
ML
/* Deprecated alias: warns, then forwards to scm_gc_protect_object and
   returns its result (OBJ).  */
SCM
scm_protect_object (SCM obj)
{
  scm_c_issue_deprecation_warning ("`scm_protect_object' is deprecated. "
                                   "Use `scm_gc_protect_object' instead.");
  return scm_gc_protect_object (obj);
}
2522
/* Deprecated alias: warns, then forwards to scm_gc_unprotect_object
   and returns its result (OBJ).  */
SCM
scm_unprotect_object (SCM obj)
{
  scm_c_issue_deprecation_warning ("`scm_unprotect_object' is deprecated. "
                                   "Use `scm_gc_unprotect_object' instead.");
  return scm_gc_unprotect_object (obj);
}
2530
5d2b97cd 2531#endif /* SCM_DEBUG_DEPRECATED == 0 */
1cc91f1b 2532
c209c88e 2533/*
41b0806d
GB
2534 These crazy functions prevent garbage collection
2535 of arguments after the first argument by
2536 ensuring they remain live throughout the
2537 function because they are used in the last
2538 line of the code block.
2539 It'd be better to have a nice compiler hint to
2540 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
/* Return ELT unchanged; the trailing variadic arguments exist only so
   the caller's objects stay live (visible to the conservative GC) up
   to this call.  See the comment above.  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
2546
41b0806d
GB
/* Same GC-protection trick as scm_return_first, for an int result:
   returns I, keeping the variadic arguments live up to this call.  */
int
scm_return_first_int (int i, ...)
{
  return i;
}
2552
0f2d19dd 2553
/* Protect OBJ permanently by consing it onto the scm_permobjs root
   list.  There is no way to undo this.  Returns OBJ.  */
SCM
scm_permanent_object (SCM obj)
{
  /* Interrupt deferral guards the shared list update.  */
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2562
2563
7bd4fbe2
MD
/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_gc_unprotect_object (OBJ).  Calls to scm_gc_protect/unprotect_object nest,
   i. e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more often
   than it has been protected before.  Returns OBJ.
*/

/* Implementation note: For every object X, there is a counter which
   scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
*/

SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  /* The nesting counter lives as the cdr of OBJ's handle in the
     scm_protects hash table: created at 0 on first protection, then
     incremented on every call.  */
  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));

  SCM_REALLOW_INTS;

  return obj;
}
2593
2594
/* Remove one level of protection for OBJ established by a prior call
   to scm_gc_protect_object.  This function returns OBJ.

   See scm_gc_protect_object for more information. */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_FALSEP (handle))
    {
      /* Unbalanced unprotect: fail hard rather than silently corrupt
         the protection counts.  */
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* Decrement the nesting count; remove the table entry entirely
         when it reaches zero, making OBJ collectable again.  */
      SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
      if (SCM_EQ_P (count, SCM_MAKINUM (0)))
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, count);
    }

  SCM_REALLOW_INTS;

  return obj;
}
2627
6b1b030e
ML
/* Register the location P as an extra GC root.  Registrations nest:
   the address is used (cast to a number) as a key into
   scm_gc_registered_roots with a per-address counter, symmetric to
   scm_gc_protect_object.  */
void
scm_gc_register_root (SCM *p)
{
  SCM handle;
  SCM key = scm_long2num ((long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));

  SCM_REALLOW_INTS;
}
2642
/* Undo one scm_gc_register_root (P).  Aborts if P was never
   registered; removes the table entry when the nesting count for P
   drops to zero.  */
void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_long2num ((long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (SCM_FALSEP (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      /* Decrement the nesting count, dropping the entry at zero.  */
      SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
      if (SCM_EQ_P (count, SCM_MAKINUM (0)))
	scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
	SCM_SETCDR (handle, count);
    }

  SCM_REALLOW_INTS;
}
2670
2671void
2672scm_gc_register_roots (SCM *b, unsigned long n)
2673{
2674 SCM *p = b;
2675 for (; p < b + n; ++p)
2676 scm_gc_register_root (p);
2677}
2678
2679void
2680scm_gc_unregister_roots (SCM *b, unsigned long n)
2681{
2682 SCM *p = b;
2683 for (; p < b + n; ++p)
2684 scm_gc_unregister_root (p);
2685}
2686
c45acc34
JB
/* Set once the process starts shutting down; other code can test it
   to skip work that is pointless during termination.  */
int terminating;

/* called on process termination: flush buffered port output so it is
   not lost.  The signature depends on which exit-hook API the
   platform provides (atexit vs. on_exit).  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}
ef290276 2707
0f2d19dd 2708\f
/* Allocate and initialise FREELIST's first heap segment of (roughly)
 * INIT_HEAP_SIZE bytes, falling back to the default SCM_HEAP_SEG_SIZE
 * if that fails.  Also derives the freelist's min_yield from its
 * yield fraction.  Returns 0 on success, 1 if both allocations fail.
 * Note: init_heap_seg tolerates a NULL pointer from malloc and
 * returns 0 in that case, which drives the fallback here.
 */
static int
make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
{
  size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);

  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      /* Requested size failed; retry with the default segment size.  */
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    scm_expmem = 1;

  /* Derive the absolute minimum yield from the configured fraction of
     the (now known) heap size.  */
  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
			   / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
2734
2735\f
4c48ba06 2736static void
92c2555f 2737init_freelist (scm_t_freelist *freelist,
4c48ba06 2738 int span,
c014a02e 2739 long cluster_size,
8fef55a8 2740 int min_yield)
4c48ba06
MD
2741{
2742 freelist->clusters = SCM_EOL;
2743 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2744 freelist->left_to_collect = 0;
2745 freelist->clusters_allocated = 0;
8fef55a8
MD
2746 freelist->min_yield = 0;
2747 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2748 freelist->span = span;
2749 freelist->collected = 0;
1811ebce 2750 freelist->collected_1 = 0;
4c48ba06
MD
2751 freelist->heap_size = 0;
2752}
2753
85db4a2c
DH
2754
/* Fetch VAR from the environment and parse it as a base-10 integer.
   Falls back to DEF when VAR is unset or its value does not begin
   with a number.  */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *tail;
  char *text = getenv (var);
  long value;

  if (text == NULL)
    return def;
  value = strtol (text, &tail, 10);
  if (tail == text)
    return def;  /* no digits were consumed */
  return value;
}
2768
2769
/* One-time initialisation of the storage subsystem: protection roots,
 * the two master freelists (single- and double-cell), the heap
 * segment table and initial segments, GC hooks, the port table and
 * the process-exit cleanup handler.  Several tuning knobs can be
 * overridden from the environment (GUILE_MIN_YIELD_*,
 * GUILE_INIT_SEGMENT_SIZE_*, GUILE_MAX_SEGMENT_SIZE).  Returns 0 on
 * success, 1 if an initial allocation failed.
 */
int
scm_init_storage ()
{
  unsigned long gc_trigger_1;
  unsigned long gc_trigger_2;
  size_t init_heap_size_1;
  size_t init_heap_size_2;
  size_t j;

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

  /* Clear the static protection roots and keep GC blocked until the
     heap is usable.  */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
  init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
  gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
  init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
  scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Room for two segment records; alloc_some_heap grows the table
     later as needed.  */
  scm_heap_table = ((scm_t_heap_seg_data *)
		    scm_must_malloc (sizeof (scm_t_heap_seg_data) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
  init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  NOTE(review): the identifier
     `scm_t_portable' looks like a mechanical rename of the old port
     table variable -- confirm against its declaration.  */
  scm_t_portable = (scm_t_port **)
    malloc (sizeof (scm_t_port *) * scm_t_portable_room);
  if (!scm_t_portable)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}
939794ce 2842
0f2d19dd
JB
2843\f
2844
939794ce
DH
/* Scheme-level hook object bound to "after-gc-hook"; run (via gc_async_thunk)
 * after a garbage collection, once the evaluator handles pending asyncs.  */
SCM scm_after_gc_hook;

/* The async whose thunk is gc_async_thunk; marked by mark_gc_async at the
 * end of each GC so the after-gc-hook eventually runs.  */
static SCM gc_async;
2848
939794ce
DH
2849/* The function gc_async_thunk causes the execution of the after-gc-hook. It
2850 * is run after the gc, as soon as the asynchronous events are handled by the
2851 * evaluator.
2852 */
2853static SCM
2854gc_async_thunk (void)
2855{
2856 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
939794ce
DH
2857 return SCM_UNSPECIFIED;
2858}
2859
2860
2861/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2862 * the garbage collection. The only purpose of this function is to mark the
2863 * gc_async (which will eventually lead to the execution of the
2864 * gc_async_thunk).
2865 */
2866static void *
e81d98ec
DH
2867mark_gc_async (void * hook_data SCM_UNUSED,
2868 void *func_data SCM_UNUSED,
2869 void *data SCM_UNUSED)
2870{
2871 /* If cell access debugging is enabled, the user may choose to perform
2872 * additional garbage collections after an arbitrary number of cell
2873 * accesses. We don't want the scheme level after-gc-hook to be performed
2874 * for each of these garbage collections for the following reason: The
2875 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
2876 * after-gc-hook was performed with every gc, and if the gc was performed
2877 * after a very small number of cell accesses, then the number of cell
2878 * accesses during the execution of the after-gc-hook will suffice to cause
2879 * the execution of the next gc. Then, guile would keep executing the
2880 * after-gc-hook over and over again, and would never come to do other
2881 * things.
2882 *
2883 * To overcome this problem, if cell access debugging with additional
2884 * garbage collections is enabled, the after-gc-hook is never run by the
2885 * garbage collecter. When running guile with cell access debugging and the
2886 * execution of the after-gc-hook is desired, then it is necessary to run
2887 * the hook explicitly from the user code. This has the effect, that from
2888 * the scheme level point of view it seems that garbage collection is
2889 * performed with a much lower frequency than it actually is. Obviously,
2890 * this will not work for code that depends on a fixed one to one
2891 * relationship between the execution counts of the C level garbage
2892 * collection hooks and the execution count of the scheme level
2893 * after-gc-hook.
2894 */
2895#if (SCM_DEBUG_CELL_ACCESSES == 1)
2896 if (debug_cells_gc_interval == 0)
2897 scm_system_async_mark (gc_async);
2898#else
939794ce 2899 scm_system_async_mark (gc_async);
e81d98ec
DH
2900#endif
2901
939794ce
DH
2902 return NULL;
2903}
2904
2905
0f2d19dd
JB
2906void
2907scm_init_gc ()
0f2d19dd 2908{
939794ce
DH
2909 SCM after_gc_thunk;
2910
fde50407
ML
2911 scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
2912 scm_c_define ("after-gc-hook", scm_after_gc_hook);
939794ce 2913
9a441ddb
MV
2914 after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
2915 gc_async_thunk);
23670993 2916 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
939794ce
DH
2917
2918 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2919
8dc9439f 2920#ifndef SCM_MAGIC_SNARFER
a0599745 2921#include "libguile/gc.x"
8dc9439f 2922#endif
0f2d19dd 2923}
89e00824 2924
56495472
ML
2925#endif /*MARK_DEPENDENCIES*/
2926
89e00824
ML
2927/*
2928 Local Variables:
2929 c-file-style: "gnu"
2930 End:
2931*/