* scheme-options.texi, scheme-procedures.texi,
[bpt/guile.git] / libguile / gc.c
CommitLineData
22a52da1 1/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
a00c95d9 2 *
0f2d19dd
JB
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
a00c95d9 7 *
0f2d19dd
JB
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
a00c95d9 12 *
0f2d19dd
JB
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
82892bed
JB
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
0f2d19dd
JB
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
82892bed 40 * If you do not wish that, delete this exception notice. */
1bbd0b84
GB
41
42/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
37ddcaf6
MD
45/* #define DEBUGINFO */
46
56495472
ML
47/* SECTION: This code is compiled once.
48 */
49
50#ifndef MARK_DEPENDENCIES
51
0f2d19dd
JB
52\f
53#include <stdio.h>
e6e2e95a 54#include <errno.h>
783e7774 55#include <string.h>
e6e2e95a 56
a0599745 57#include "libguile/_scm.h"
0a7a7445 58#include "libguile/eval.h"
a0599745
MD
59#include "libguile/stime.h"
60#include "libguile/stackchk.h"
61#include "libguile/struct.h"
a0599745
MD
62#include "libguile/smob.h"
63#include "libguile/unif.h"
64#include "libguile/async.h"
65#include "libguile/ports.h"
66#include "libguile/root.h"
67#include "libguile/strings.h"
68#include "libguile/vectors.h"
801cb5e7 69#include "libguile/weaks.h"
686765af 70#include "libguile/hashtab.h"
ecf470a2 71#include "libguile/tags.h"
a0599745
MD
72
73#include "libguile/validate.h"
74#include "libguile/gc.h"
fce59c93 75
bc9d9bb2 76#ifdef GUILE_DEBUG_MALLOC
a0599745 77#include "libguile/debug-malloc.h"
bc9d9bb2
MD
78#endif
79
0f2d19dd 80#ifdef HAVE_MALLOC_H
95b88819 81#include <malloc.h>
0f2d19dd
JB
82#endif
83
84#ifdef HAVE_UNISTD_H
95b88819 85#include <unistd.h>
0f2d19dd
JB
86#endif
87
1cc91f1b
JB
88#ifdef __STDC__
89#include <stdarg.h>
90#define var_start(x, y) va_start(x, y)
91#else
92#include <varargs.h>
93#define var_start(x, y) va_start(x)
94#endif
95
0f2d19dd 96\f
406c7d90
DH
97
98unsigned int scm_gc_running_p = 0;
99
100\f
101
102#if (SCM_DEBUG_CELL_ACCESSES == 1)
103
61045190
DH
104scm_bits_t scm_tc16_allocated;
105
106/* Set this to != 0 if every cell that is accessed shall be checked:
107 */
108unsigned int scm_debug_cell_accesses_p = 1;
406c7d90
DH
109
110
111/* Assert that the given object is a valid reference to a valid cell. This
112 * test involves to determine whether the object is a cell pointer, whether
113 * this pointer actually points into a heap segment and whether the cell
114 * pointed to is not a free cell.
115 */
116void
117scm_assert_cell_valid (SCM cell)
118{
61045190
DH
119 static unsigned int already_running = 0;
120
121 if (scm_debug_cell_accesses_p && !already_running)
406c7d90 122 {
61045190 123 already_running = 1; /* set to avoid recursion */
406c7d90 124
9d47a1e6 125 if (!scm_cellp (cell))
406c7d90
DH
126 {
127 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
128 abort ();
129 }
130 else if (!scm_gc_running_p)
131 {
132 /* Dirk::FIXME:: During garbage collection there occur references to
133 free cells. This is allright during conservative marking, but
134 should not happen otherwise (I think). The case of free cells
135 accessed during conservative marking is handled in function
136 scm_mark_locations. However, there still occur accesses to free
137 cells during gc. I don't understand why this happens. If it is
138 a bug and gets fixed, the following test should also work while
139 gc is running.
140 */
141 if (SCM_FREE_CELL_P (cell))
142 {
143 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
144 abort ();
145 }
146 }
61045190 147 already_running = 0; /* re-enable */
406c7d90
DH
148 }
149}
150
151
152SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
153 (SCM flag),
154 "If FLAG is #f, cell access checking is disabled.\n"
155 "If FLAG is #t, cell access checking is enabled.\n"
156 "This procedure only exists because the compile-time flag\n"
157 "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
158#define FUNC_NAME s_scm_set_debug_cell_accesses_x
159{
160 if (SCM_FALSEP (flag)) {
161 scm_debug_cell_accesses_p = 0;
162 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
163 scm_debug_cell_accesses_p = 1;
164 } else {
165 SCM_WRONG_TYPE_ARG (1, flag);
166 }
167 return SCM_UNSPECIFIED;
168}
169#undef FUNC_NAME
170
171#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
172
173\f
174
0f2d19dd 175/* {heap tuning parameters}
a00c95d9 176 *
0f2d19dd
JB
177 * These are parameters for controlling memory allocation. The heap
178 * is the area out of which scm_cons, and object headers are allocated.
179 *
180 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
181 * 64 bit machine. The units of the _SIZE parameters are bytes.
182 * Cons pairs and object headers occupy one heap cell.
183 *
184 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
185 * allocated initially the heap will grow by half its current size
186 * each subsequent time more heap is needed.
187 *
188 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
189 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
190 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
191 * is in scm_init_storage() and alloc_some_heap() in sys.c
a00c95d9 192 *
0f2d19dd
JB
193 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
194 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
195 *
196 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
197 * is needed.
198 *
199 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
a00c95d9 200 * trigger a GC.
6064dcc6
MV
201 *
202 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
203 * reclaimed by a GC triggered by must_malloc. If less than this is
204 * reclaimed, the trigger threshold is raised. [I don't know what a
205 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
a00c95d9 206 * work around a oscillation that caused almost constant GC.]
0f2d19dd
JB
207 */
208
8fef55a8
MD
209/*
210 * Heap size 45000 and 40% min yield gives quick startup and no extra
211 * heap allocation. Having higher values on min yield may lead to
212 * large heaps, especially if code behaviour is varying its
213 * maximum consumption between different freelists.
214 */
d6884e63
ML
215
216#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
217#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
218#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
219int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
220 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
aeacfc8f 221int scm_default_min_yield_1 = 40;
4c48ba06 222
d6884e63
ML
223#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
224int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
225 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
4c48ba06
MD
226/* The following value may seem large, but note that if we get to GC at
227 * all, this means that we have a numerically intensive application
228 */
aeacfc8f 229int scm_default_min_yield_2 = 40;
4c48ba06 230
aeacfc8f 231int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
4c48ba06 232
d6884e63 233#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
0f2d19dd
JB
234#ifdef _QC
235# define SCM_HEAP_SEG_SIZE 32768L
236#else
237# ifdef sequent
4c48ba06 238# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
0f2d19dd 239# else
4c48ba06 240# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
0f2d19dd
JB
241# endif
242#endif
4c48ba06 243/* Make heap grow with factor 1.5 */
4a4c9785 244#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
0f2d19dd 245#define SCM_INIT_MALLOC_LIMIT 100000
6064dcc6 246#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
0f2d19dd 247
d6884e63
ML
248/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
249 aligned inner bounds for allocated storage */
0f2d19dd
JB
250
251#ifdef PROT386
252/*in 386 protected mode we must only adjust the offset */
a00c95d9
ML
253# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
254# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
0f2d19dd
JB
255#else
256# ifdef _UNICOS
a00c95d9
ML
257# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
258# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
0f2d19dd 259# else
a00c95d9
ML
260# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
261# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
0f2d19dd
JB
262# endif /* UNICOS */
263#endif /* PROT386 */
264
ecf470a2
ML
265#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
266
d6884e63
ML
267#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
268#define CLUSTER_SIZE_IN_BYTES(freelist) \
269 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
0f2d19dd
JB
270
271\f
945fec60 272/* scm_freelists
0f2d19dd 273 */
945fec60 274
a00c95d9
ML
275typedef struct scm_freelist_t {
276 /* collected cells */
277 SCM cells;
a00c95d9
ML
278 /* number of cells left to collect before cluster is full */
279 unsigned int left_to_collect;
b37fe1c5
MD
280 /* number of clusters which have been allocated */
281 unsigned int clusters_allocated;
8fef55a8
MD
282 /* a list of freelists, each of size cluster_size,
283 * except the last one which may be shorter
284 */
a00c95d9
ML
285 SCM clusters;
286 SCM *clustertail;
b37fe1c5 287 /* this is the number of objects in each cluster, including the spine cell */
a00c95d9 288 int cluster_size;
8fef55a8 289 /* indicates that we should grow heap instead of GC:ing
a00c95d9
ML
290 */
291 int grow_heap_p;
8fef55a8 292 /* minimum yield on this list in order not to grow the heap
a00c95d9 293 */
8fef55a8
MD
294 long min_yield;
295 /* defines min_yield as percent of total heap size
a00c95d9 296 */
8fef55a8 297 int min_yield_fraction;
a00c95d9
ML
298 /* number of cells per object on this list */
299 int span;
300 /* number of collected cells during last GC */
1811ebce
MD
301 long collected;
302 /* number of collected cells during penultimate GC */
303 long collected_1;
a00c95d9
ML
304 /* total number of cells in heap segments
305 * belonging to this list.
306 */
1811ebce 307 long heap_size;
a00c95d9
ML
308} scm_freelist_t;
309
4a4c9785
MD
310SCM scm_freelist = SCM_EOL;
311scm_freelist_t scm_master_freelist = {
b37fe1c5 312 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
4a4c9785
MD
313};
314SCM scm_freelist2 = SCM_EOL;
315scm_freelist_t scm_master_freelist2 = {
b37fe1c5 316 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
4a4c9785 317};
0f2d19dd
JB
318
319/* scm_mtrigger
320 * is the number of bytes of must_malloc allocation needed to trigger gc.
321 */
15e9d186 322unsigned long scm_mtrigger;
0f2d19dd 323
0f2d19dd
JB
324/* scm_gc_heap_lock
325 * If set, don't expand the heap. Set only during gc, during which no allocation
326 * is supposed to take place anyway.
327 */
328int scm_gc_heap_lock = 0;
329
330/* GC Blocking
331 * Don't pause for collection if this is set -- just
332 * expand the heap.
333 */
0f2d19dd
JB
334int scm_block_gc = 1;
335
0f2d19dd
JB
336/* During collection, this accumulates objects holding
337 * weak references.
338 */
ab4bef85 339SCM scm_weak_vectors;
0f2d19dd 340
7445e0e8
MD
341/* During collection, this accumulates structures which are to be freed.
342 */
343SCM scm_structs_to_free;
344
0f2d19dd
JB
345/* GC Statistics Keeping
346 */
347unsigned long scm_cells_allocated = 0;
a5c314c8 348long scm_mallocated = 0;
b37fe1c5 349unsigned long scm_gc_cells_collected;
8b0d194f 350unsigned long scm_gc_yield;
37ddcaf6 351static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
0f2d19dd
JB
352unsigned long scm_gc_malloc_collected;
353unsigned long scm_gc_ports_collected;
0f2d19dd 354unsigned long scm_gc_time_taken = 0;
c9b0d4b0
ML
355static unsigned long t_before_gc;
356static unsigned long t_before_sweep;
357unsigned long scm_gc_mark_time_taken = 0;
358unsigned long scm_gc_sweep_time_taken = 0;
359unsigned long scm_gc_times = 0;
360unsigned long scm_gc_cells_swept = 0;
361double scm_gc_cells_marked_acc = 0.;
362double scm_gc_cells_swept_acc = 0.;
0f2d19dd
JB
363
364SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
365SCM_SYMBOL (sym_heap_size, "cell-heap-size");
366SCM_SYMBOL (sym_mallocated, "bytes-malloced");
367SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
368SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
369SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
c9b0d4b0
ML
370SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
371SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
372SCM_SYMBOL (sym_times, "gc-times");
373SCM_SYMBOL (sym_cells_marked, "cells-marked");
374SCM_SYMBOL (sym_cells_swept, "cells-swept");
0f2d19dd 375
a00c95d9 376typedef struct scm_heap_seg_data_t
0f2d19dd 377{
cf2d30f6
JB
378 /* lower and upper bounds of the segment */
379 SCM_CELLPTR bounds[2];
380
381 /* address of the head-of-freelist pointer for this segment's cells.
382 All segments usually point to the same one, scm_freelist. */
4c48ba06 383 scm_freelist_t *freelist;
cf2d30f6 384
fe517a7d 385 /* number of cells per object in this segment */
945fec60 386 int span;
a00c95d9 387} scm_heap_seg_data_t;
0f2d19dd
JB
388
389
390
945fec60 391static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
b6efc951
DH
392
393typedef enum { return_on_error, abort_on_error } policy_on_error;
394static void alloc_some_heap (scm_freelist_t *, policy_on_error);
0f2d19dd
JB
395
396
d6884e63
ML
397#define SCM_HEAP_SIZE \
398 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
399#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
400
401#define BVEC_GROW_SIZE 256
402#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
403#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
404
405/* mark space allocation */
406
407typedef struct scm_mark_space_t
408{
409 scm_c_bvec_limb_t *bvec_space;
410 struct scm_mark_space_t *next;
411} scm_mark_space_t;
412
413static scm_mark_space_t *current_mark_space;
414static scm_mark_space_t **mark_space_ptr;
415static int current_mark_space_offset;
416static scm_mark_space_t *mark_space_head;
417
418static scm_c_bvec_limb_t *
419get_bvec ()
db4b4ca6 420#define FUNC_NAME "get_bvec"
d6884e63
ML
421{
422 scm_c_bvec_limb_t *res;
423
424 if (!current_mark_space)
425 {
426 SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
427 if (!current_mark_space)
db4b4ca6 428 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
429
430 current_mark_space->bvec_space = NULL;
431 current_mark_space->next = NULL;
432
433 *mark_space_ptr = current_mark_space;
434 mark_space_ptr = &(current_mark_space->next);
435
436 return get_bvec ();
437 }
438
439 if (!(current_mark_space->bvec_space))
440 {
441 SCM_SYSCALL (current_mark_space->bvec_space =
442 (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
443 if (!(current_mark_space->bvec_space))
db4b4ca6 444 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
445
446 current_mark_space_offset = 0;
447
448 return get_bvec ();
449 }
450
451 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
452 {
453 current_mark_space = NULL;
454
455 return get_bvec ();
456 }
457
458 res = current_mark_space->bvec_space + current_mark_space_offset;
459 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
460
461 return res;
462}
db4b4ca6
DH
463#undef FUNC_NAME
464
d6884e63
ML
465
466static void
467clear_mark_space ()
468{
469 scm_mark_space_t *ms;
470
471 for (ms = mark_space_head; ms; ms = ms->next)
472 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
473}
474
475
0f2d19dd 476\f
cf2d30f6
JB
477/* Debugging functions. */
478
bb2c57fa 479#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6
JB
480
481/* Return the number of the heap segment containing CELL. */
482static int
483which_seg (SCM cell)
484{
485 int i;
486
487 for (i = 0; i < scm_n_heap_segs; i++)
195e6201
DH
488 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
489 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
cf2d30f6
JB
490 return i;
491 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
945fec60 492 SCM_UNPACK (cell));
cf2d30f6
JB
493 abort ();
494}
495
496
8ded62a3
MD
497static void
498map_free_list (scm_freelist_t *master, SCM freelist)
499{
500 int last_seg = -1, count = 0;
501 SCM f;
a00c95d9 502
3f5d82cd 503 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
8ded62a3
MD
504 {
505 int this_seg = which_seg (f);
506
507 if (this_seg != last_seg)
508 {
509 if (last_seg != -1)
510 fprintf (stderr, " %5d %d-cells in segment %d\n",
511 count, master->span, last_seg);
512 last_seg = this_seg;
513 count = 0;
514 }
515 count++;
516 }
517 if (last_seg != -1)
518 fprintf (stderr, " %5d %d-cells in segment %d\n",
519 count, master->span, last_seg);
520}
cf2d30f6 521
a00c95d9 522SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c 523 (),
5352393c
MG
524 "Print debugging information about the free-list.\n"
525 "@code{map-free-list} is only included in\n"
526 "@code{--enable-guile-debug} builds of Guile.")
acb0a19c
MD
527#define FUNC_NAME s_scm_map_free_list
528{
4c48ba06
MD
529 int i;
530 fprintf (stderr, "%d segments total (%d:%d",
531 scm_n_heap_segs,
532 scm_heap_table[0].span,
533 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
534 for (i = 1; i < scm_n_heap_segs; i++)
535 fprintf (stderr, ", %d:%d",
536 scm_heap_table[i].span,
537 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
538 fprintf (stderr, ")\n");
8ded62a3
MD
539 map_free_list (&scm_master_freelist, scm_freelist);
540 map_free_list (&scm_master_freelist2, scm_freelist2);
cf2d30f6
JB
541 fflush (stderr);
542
543 return SCM_UNSPECIFIED;
544}
1bbd0b84 545#undef FUNC_NAME
cf2d30f6 546
4c48ba06
MD
547static int last_cluster;
548static int last_size;
549
5384bc5b
MD
550static int
551free_list_length (char *title, int i, SCM freelist)
552{
553 SCM ls;
554 int n = 0;
3f5d82cd
DH
555 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
556 if (SCM_FREE_CELL_P (ls))
5384bc5b
MD
557 ++n;
558 else
559 {
560 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
561 abort ();
562 }
4c48ba06
MD
563 if (n != last_size)
564 {
565 if (i > 0)
566 {
567 if (last_cluster == i - 1)
568 fprintf (stderr, "\t%d\n", last_size);
569 else
570 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
571 }
572 if (i >= 0)
573 fprintf (stderr, "%s %d", title, i);
574 else
575 fprintf (stderr, "%s\t%d\n", title, n);
576 last_cluster = i;
577 last_size = n;
578 }
5384bc5b
MD
579 return n;
580}
581
582static void
583free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
584{
585 SCM clusters;
4c48ba06 586 int i = 0, len, n = 0;
5384bc5b
MD
587 fprintf (stderr, "%s\n\n", title);
588 n += free_list_length ("free list", -1, freelist);
589 for (clusters = master->clusters;
590 SCM_NNULLP (clusters);
591 clusters = SCM_CDR (clusters))
4c48ba06
MD
592 {
593 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
594 n += len;
595 }
596 if (last_cluster == i - 1)
597 fprintf (stderr, "\t%d\n", last_size);
598 else
599 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
600 fprintf (stderr, "\ntotal %d objects\n\n", n);
5384bc5b
MD
601}
602
a00c95d9 603SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b 604 (),
5352393c
MG
605 "Print debugging information about the free-list.\n"
606 "@code{free-list-length} is only included in\n"
607 "@code{--enable-guile-debug} builds of Guile.")
5384bc5b
MD
608#define FUNC_NAME s_scm_free_list_length
609{
b37fe1c5
MD
610 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
611 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 612 return SCM_UNSPECIFIED;
5384bc5b
MD
613}
614#undef FUNC_NAME
615
bb2c57fa
MD
616#endif
617
618#ifdef GUILE_DEBUG_FREELIST
cf2d30f6 619
d3dd80ab
MG
620/* Non-zero if freelist debugging is in effect. Set this via
621 `gc-set-debug-check-freelist!'. */
622static int scm_debug_check_freelist = 0;
623
cf2d30f6
JB
624/* Number of calls to SCM_NEWCELL since startup. */
625static unsigned long scm_newcell_count;
acb0a19c 626static unsigned long scm_newcell2_count;
cf2d30f6
JB
627
628/* Search freelist for anything that isn't marked as a free cell.
629 Abort if we find something. */
8ded62a3
MD
630static void
631scm_check_freelist (SCM freelist)
632{
633 SCM f;
634 int i = 0;
635
3f5d82cd
DH
636 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
637 if (!SCM_FREE_CELL_P (f))
8ded62a3
MD
638 {
639 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
640 scm_newcell_count, i);
8ded62a3
MD
641 abort ();
642 }
643}
cf2d30f6 644
a00c95d9 645SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 646 (SCM flag),
da4a1dba
GB
647 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
648 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
649 "compile-time flag was selected.\n")
1bbd0b84 650#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 651{
d6884e63
ML
652 /* [cmm] I did a double-take when I read this code the first time.
653 well, FWIW. */
945fec60 654 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
655 return SCM_UNSPECIFIED;
656}
1bbd0b84 657#undef FUNC_NAME
25748c78
GB
658
659
4a4c9785
MD
660SCM
661scm_debug_newcell (void)
662{
663 SCM new;
664
665 scm_newcell_count++;
666 if (scm_debug_check_freelist)
667 {
8ded62a3 668 scm_check_freelist (scm_freelist);
4a4c9785
MD
669 scm_gc();
670 }
671
672 /* The rest of this is supposed to be identical to the SCM_NEWCELL
673 macro. */
3f5d82cd 674 if (SCM_NULLP (scm_freelist))
4a4c9785
MD
675 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
676 else
677 {
678 new = scm_freelist;
3f5d82cd 679 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
4a4c9785
MD
680 }
681
682 return new;
683}
684
685SCM
686scm_debug_newcell2 (void)
687{
688 SCM new;
689
690 scm_newcell2_count++;
691 if (scm_debug_check_freelist)
692 {
8ded62a3 693 scm_check_freelist (scm_freelist2);
4a4c9785
MD
694 scm_gc ();
695 }
696
697 /* The rest of this is supposed to be identical to the SCM_NEWCELL
698 macro. */
3f5d82cd 699 if (SCM_NULLP (scm_freelist2))
4a4c9785
MD
700 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
701 else
702 {
703 new = scm_freelist2;
3f5d82cd 704 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
4a4c9785
MD
705 }
706
707 return new;
708}
709
fca7547b 710#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
711
712\f
0f2d19dd 713
b37fe1c5
MD
714static unsigned long
715master_cells_allocated (scm_freelist_t *master)
716{
d6884e63 717 /* the '- 1' below is to ignore the cluster spine cells. */
b37fe1c5
MD
718 int objects = master->clusters_allocated * (master->cluster_size - 1);
719 if (SCM_NULLP (master->clusters))
720 objects -= master->left_to_collect;
721 return master->span * objects;
722}
723
724static unsigned long
725freelist_length (SCM freelist)
726{
727 int n;
3f5d82cd 728 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
b37fe1c5
MD
729 ++n;
730 return n;
731}
732
733static unsigned long
734compute_cells_allocated ()
735{
736 return (scm_cells_allocated
737 + master_cells_allocated (&scm_master_freelist)
738 + master_cells_allocated (&scm_master_freelist2)
739 - scm_master_freelist.span * freelist_length (scm_freelist)
740 - scm_master_freelist2.span * freelist_length (scm_freelist2));
741}
b37fe1c5 742
0f2d19dd
JB
743/* {Scheme Interface to GC}
744 */
745
a00c95d9 746SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 747 (),
b380b885 748 "Returns an association list of statistics about Guile's current use of storage. ")
1bbd0b84 749#define FUNC_NAME s_scm_gc_stats
0f2d19dd
JB
750{
751 int i;
752 int n;
753 SCM heap_segs;
c209c88e
GB
754 long int local_scm_mtrigger;
755 long int local_scm_mallocated;
756 long int local_scm_heap_size;
757 long int local_scm_cells_allocated;
758 long int local_scm_gc_time_taken;
c9b0d4b0
ML
759 long int local_scm_gc_times;
760 long int local_scm_gc_mark_time_taken;
761 long int local_scm_gc_sweep_time_taken;
762 double local_scm_gc_cells_swept;
763 double local_scm_gc_cells_marked;
0f2d19dd
JB
764 SCM answer;
765
766 SCM_DEFER_INTS;
939794ce
DH
767
768 ++scm_block_gc;
769
0f2d19dd
JB
770 retry:
771 heap_segs = SCM_EOL;
772 n = scm_n_heap_segs;
773 for (i = scm_n_heap_segs; i--; )
774 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
775 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
776 heap_segs);
777 if (scm_n_heap_segs != n)
778 goto retry;
939794ce
DH
779
780 --scm_block_gc;
0f2d19dd 781
7febb4a2
MD
782 /* Below, we cons to produce the resulting list. We want a snapshot of
783 * the heap situation before consing.
784 */
0f2d19dd
JB
785 local_scm_mtrigger = scm_mtrigger;
786 local_scm_mallocated = scm_mallocated;
b37fe1c5 787 local_scm_heap_size = SCM_HEAP_SIZE;
b37fe1c5 788 local_scm_cells_allocated = compute_cells_allocated ();
0f2d19dd 789 local_scm_gc_time_taken = scm_gc_time_taken;
c9b0d4b0
ML
790 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
791 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
792 local_scm_gc_times = scm_gc_times;
793 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
794 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
0f2d19dd
JB
795
796 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
797 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
798 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
799 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
800 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
c9b0d4b0
ML
801 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
802 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
803 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
804 scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
805 scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
0f2d19dd
JB
806 scm_cons (sym_heap_segments, heap_segs),
807 SCM_UNDEFINED);
808 SCM_ALLOW_INTS;
809 return answer;
810}
1bbd0b84 811#undef FUNC_NAME
0f2d19dd
JB
812
813
c9b0d4b0
ML
814static void
815gc_start_stats (const char *what)
0f2d19dd 816{
c9b0d4b0
ML
817 t_before_gc = scm_c_get_internal_run_time ();
818 scm_gc_cells_swept = 0;
b37fe1c5 819 scm_gc_cells_collected = 0;
37ddcaf6 820 scm_gc_yield_1 = scm_gc_yield;
8b0d194f
MD
821 scm_gc_yield = (scm_cells_allocated
822 + master_cells_allocated (&scm_master_freelist)
823 + master_cells_allocated (&scm_master_freelist2));
0f2d19dd
JB
824 scm_gc_malloc_collected = 0;
825 scm_gc_ports_collected = 0;
826}
827
939794ce 828
c9b0d4b0
ML
829static void
830gc_end_stats ()
0f2d19dd 831{
c9b0d4b0
ML
832 unsigned long t = scm_c_get_internal_run_time ();
833 scm_gc_time_taken += (t - t_before_gc);
834 scm_gc_sweep_time_taken += (t - t_before_sweep);
835 ++scm_gc_times;
836
837 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
838 scm_gc_cells_swept_acc += scm_gc_cells_swept;
0f2d19dd
JB
839}
840
841
a00c95d9 842SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 843 (SCM obj),
b380b885
MD
844 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
845 "returned by this function for @var{obj}")
1bbd0b84 846#define FUNC_NAME s_scm_object_address
0f2d19dd 847{
54778cd3 848 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 849}
1bbd0b84 850#undef FUNC_NAME
0f2d19dd
JB
851
852
a00c95d9 853SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
1bbd0b84 854 (),
b380b885
MD
855 "Scans all of SCM objects and reclaims for further use those that are\n"
856 "no longer accessible.")
1bbd0b84 857#define FUNC_NAME s_scm_gc
0f2d19dd
JB
858{
859 SCM_DEFER_INTS;
860 scm_igc ("call");
861 SCM_ALLOW_INTS;
862 return SCM_UNSPECIFIED;
863}
1bbd0b84 864#undef FUNC_NAME
0f2d19dd
JB
865
866
867\f
868/* {C Interface For When GC is Triggered}
869 */
870
b37fe1c5 871static void
8fef55a8 872adjust_min_yield (scm_freelist_t *freelist)
b37fe1c5 873{
8fef55a8 874 /* min yield is adjusted upwards so that next predicted total yield
bda1446c 875 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
876 * `min_yield_fraction' of total heap size. Note, however, that
877 * the absolute value of min_yield will correspond to `collected'
bda1446c 878 * on one master (the one which currently is triggering GC).
b37fe1c5 879 *
bda1446c
MD
880 * The reason why we look at total yield instead of cells collected
881 * on one list is that we want to take other freelists into account.
882 * On this freelist, we know that (local) yield = collected cells,
883 * but that's probably not the case on the other lists.
b37fe1c5
MD
884 *
885 * (We might consider computing a better prediction, for example
886 * by computing an average over multiple GC:s.)
887 */
8fef55a8 888 if (freelist->min_yield_fraction)
b37fe1c5 889 {
37ddcaf6 890 /* Pick largest of last two yields. */
8fef55a8
MD
891 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
892 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5
MD
893#ifdef DEBUGINFO
894 fprintf (stderr, " after GC = %d, delta = %d\n",
895 scm_cells_allocated,
896 delta);
897#endif
898 if (delta > 0)
8fef55a8 899 freelist->min_yield += delta;
b37fe1c5
MD
900 }
901}
902
b6efc951 903
4a4c9785 904/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
905 * common while the freelist will be individual for each thread.
906 */
4a4c9785
MD
907
/* Refill an empty (thread-local) freelist from MASTER and return one
 * fresh cell.  Strategy, in order: take an existing cluster; else grow
 * the heap (if scheduled or GC is blocked); else collect; else grow
 * the heap unconditionally (abort on failure).  The remainder of the
 * cluster is stored through FREELIST; the first cell is returned.
 * Interrupts are disabled (scm_ints_disabled) for the duration.  */
SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
	{
	  if (master->grow_heap_p || scm_block_gc)
	    {
	      /* In order to reduce gc frequency, try to allocate a new heap
	       * segment first, even if gc might find some free cells.  If we
	       * can't obtain a new heap segment, we will try gc later.
	       */
	      master->grow_heap_p = 0;
	      alloc_some_heap (master, return_on_error);
	    }
	  if (SCM_NULLP (master->clusters))
	    {
	      /* The heap was not grown, either because it wasn't scheduled to
	       * grow, or because there was not enough memory available.  In
	       * both cases we have to try gc to get some free cells.
	       */
#ifdef DEBUGINFO
	      fprintf (stderr, "allocated = %d, ",
		       scm_cells_allocated
		       + master_cells_allocated (&scm_master_freelist)
		       + master_cells_allocated (&scm_master_freelist2));
#endif
	      scm_igc ("cells");
	      adjust_min_yield (master);
	      if (SCM_NULLP (master->clusters))
		{
		  /* gc could not free any cells.  Now, we _must_ allocate a
		   * new heap segment, because there is no other possibility
		   * to provide a new cell for the caller.
		   */
		  alloc_some_heap (master, abort_on_error);
		}
	    }
	}
      /* Detach the head cluster from the master list.  */
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));   /* retry if we got an empty cluster */

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  /* Hand the rest of the cluster to the caller's freelist; the first
     cell is the one we return.  */
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}
964
b6efc951 965
4c48ba06
MD
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 *
 * Returns the first cell of the reserved cluster, with the remainder
 * of the cluster chained through its cdr.  (The previous declaration
 * said `void', which contradicted the `return cell;' below and would
 * not have compiled had this block ever been enabled.)
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
981
801cb5e7
MD
982
/* C-level hooks run by scm_igc at the phases of a collection:
 * before GC starts, before marking, before sweeping, after sweeping,
 * and after the whole collection has finished.  */
scm_c_hook_t scm_before_gc_c_hook;
scm_c_hook_t scm_before_mark_c_hook;
scm_c_hook_t scm_before_sweep_c_hook;
scm_c_hook_t scm_after_sweep_c_hook;
scm_c_hook_t scm_after_gc_c_hook;
988
b6efc951 989
/* Perform one garbage collection.  WHAT is a label recorded by the
 * statistics code (gc_start_stats).  Phases, each bracketed by the
 * corresponding scm_*_c_hook: flush dead continuation-stack entries,
 * clear mark space, mark all roots (C stack(s) and registers,
 * scm_sys_protects, the subr table, and — without threads — the root
 * object), then sweep.  Returns early, doing nothing, if the stack
 * base is not yet set or GC is blocked.  */
void
scm_igc (const char *what)
{
  int j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
	   SCM_NULLP (scm_freelist)
	   ? "*"
	   : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
#ifdef USE_THREADS
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  if (!scm_stack_base || scm_block_gc)
    {
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    /* Clear everything above the current stack pointer so dead
       continuations don't keep objects alive.  */
    while (x < bound)
      {
	elts[x] = SCM_BOOL_F;
	++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Mark objects on the C stack. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
		      (   (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
				       sizeof scm_save_regs_gc_mark)
			  / sizeof (SCM_STACKITEM)));

  {
    scm_sizet stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* Mark the table of objects protected from GC.  */
  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  /* Account the time spent marking.  */
  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}
1110
1111\f
939794ce 1112
a00c95d9 1113/* {Mark/Sweep}
0f2d19dd
JB
1114 */
1115
56495472
ML
1116#define MARK scm_gc_mark
1117#define FNAME "scm_gc_mark"
0f2d19dd 1118
56495472 1119#endif /*!MARK_DEPENDENCIES*/
0f2d19dd
JB
1120
/* Mark an object precisely.
 *
 * This function body is compiled twice: once as scm_gc_mark (normal
 * marking) and once, with MARK_DEPENDENCIES defined, as
 * scm_gc_mark_dependencies via `#include "gc.c"' below.  In the
 * dependencies variant, RECURSE skips self-cycles and the function
 * marks only the objects P refers to, not P itself.
 *
 * The main loop is a hand-made tail call: instead of recursing on the
 * last reference of each object, `ptr' is overwritten and control
 * jumps back to gc_mark_loop / gc_mark_nimp.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;
  scm_bits_t cell_type;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;   /* immediates carry no references */

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  /* After the first iteration the dependencies variant delegates to
     the ordinary marker (unless we looped back to P itself).  */
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
  /* We are in debug mode.  Check the ptr exhaustively. */
  if (!scm_cellp (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
  /* In non-debug mode, do at least some cheap testing. */
  if (!SCM_CELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif

#ifndef MARK_DEPENDENCIES

  /* Already marked: the object and everything it references have been
     (or are being) processed.  */
  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  /* Dispatch on the object's type tag and mark its references.  */
  cell_type = SCM_GC_CELL_TYPE (ptr);
  switch (SCM_ITAG7 (cell_type))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
	{
	  ptr = SCM_CAR (ptr);
	  goto gc_mark_nimp;
	}
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);   /* tail-iterate on the cdr */
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      /* procedure-with-setter: mark setter, iterate on procedure */
      RECURSE (SCM_SETTER (ptr));
      ptr = SCM_PROCEDURE (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      {
	/* Dirk:FIXME:: The following code is super ugly:  ptr may be a struct
	 * or a gloc.  If it is a gloc, the cell word #0 of ptr is a pointer
	 * to a heap cell.  If it is a struct, the cell word #0 of ptr is a
	 * pointer to a struct vtable data region.  The fact that these are
	 * accessed in the same way restricts the possibilities to change the
	 * data layout of structs or heap cells.
	 */
	scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
	scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
	if (vtable_data [scm_vtable_index_vcell] != 0)
	  {
	    /* ptr is a gloc */
	    SCM gloc_car = SCM_PACK (word0);
	    RECURSE (gloc_car);
	    ptr = SCM_CDR (ptr);
	    goto gc_mark_loop;
	  }
	else
	  {
	    /* ptr is a struct */
	    SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
	    int len = SCM_SYMBOL_LENGTH (layout);
	    char * fields_desc = SCM_SYMBOL_CHARS (layout);
	    scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);

	    if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
	      {
		RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
		RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
	      }
	    if (len)
	      {
		int x;

		/* Mark every pointer ('p') field per the layout
		   descriptor; the final field may be a tail array.  */
		for (x = 0; x < len - 2; x += 2, ++struct_data)
		  if (fields_desc[x] == 'p')
		    RECURSE (SCM_PACK (*struct_data));
		if (fields_desc[x] == 'p')
		  {
		    if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
		      for (x = *struct_data++; x; --x, ++struct_data)
			RECURSE (SCM_PACK (*struct_data));
		    else
		      RECURSE (SCM_PACK (*struct_data));
		  }
	      }
	    /* mark vtable */
	    ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
	    goto gc_mark_loop;
	  }
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_ENV (ptr)))
	{
	  ptr = SCM_CLOSCAR (ptr);
	  goto gc_mark_nimp;
	}
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_ENV (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
	break;
      /* Mark elements 1..n-1, then iterate on element 0.  */
      while (--i > 0)
	if (SCM_NIMP (SCM_VELTS (ptr)[i]))
	  RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
	unsigned long int i = SCM_CCLO_LENGTH (ptr);
	unsigned long int j;
	for (j = 1; j != i; ++j)
	  {
	    SCM obj = SCM_CCLO_REF (ptr, j);
	    if (!SCM_IMP (obj))
	      RECURSE (obj);
	  }
	ptr = SCM_CCLO_REF (ptr, 0);
	goto gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    /* Uniform vectors hold no SCM references — nothing to mark.  */
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_substring:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      /* Weak vectors are chained for post-mark processing; their
	 alist spines are NOT marked here (see comment below).  */
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
	{
	  int x;
	  int len;
	  int weak_keys;
	  int weak_values;

	  len = SCM_VECTOR_LENGTH (ptr);
	  weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
	  weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

	  for (x = 0; x < len; ++x)
	    {
	      SCM alist;
	      alist = SCM_VELTS (ptr)[x];

	      /* mark everything on the alist except the keys or
	       * values, according to weak_values and weak_keys.  */
	      while (   SCM_CONSP (alist)
		     && !SCM_GCMARKP (alist)
		     && SCM_CONSP (SCM_CAR (alist)))
		{
		  SCM kvpair;
		  SCM next_alist;

		  kvpair = SCM_CAR (alist);
		  next_alist = SCM_CDR (alist);
		  /*
		   * Do not do this:
		   *   SCM_SETGCMARK (alist);
		   *   SCM_SETGCMARK (kvpair);
		   *
		   * It may be that either the key or value is protected by
		   * an escaped reference to part of the spine of this alist.
		   * If we mark the spine here, and only mark one or neither of the
		   * key and value, they may never be properly marked.
		   * This leads to a horrible situation in which an alist containing
		   * freelist cells is exported.
		   *
		   * So only mark the spines of these arrays last of all marking.
		   * If somebody confuses us by constructing a weak vector
		   * with a circular alist then we are hosed, but at least we
		   * won't prematurely drop table entries.
		   */
		  if (!weak_keys)
		    RECURSE (SCM_CAR (kvpair));
		  if (!weak_values)
		    RECURSE (SCM_CDR (kvpair));
		  alist = next_alist;
		}
	      if (SCM_NIMP (alist))
		RECURSE (alist);
	    }
	}
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto gc_mark_loop;
    case scm_tcs_subrs:
      break;   /* subrs reference nothing collectable here */
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
	goto def;
      if (SCM_PTAB_ENTRY(ptr))
	RECURSE (SCM_FILENAME (ptr));
      /* Delegate to the port type's own mark function, if any.  */
      if (scm_ptobs[i].mark)
	{
	  ptr = (scm_ptobs[i].mark) (ptr);
	  goto gc_mark_loop;
	}
      else
	return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
	{ /* should be faster than going through scm_smobs */
	case scm_tc_free_cell:
	  /* printf("found free_cell %X ", ptr); fflush(stdout); */
	case scm_tc16_big:
	case scm_tc16_real:
	case scm_tc16_complex:
	  break;   /* numeric smobs hold no SCM references */
	default:
	  i = SCM_SMOBNUM (ptr);
	  if (!(i < scm_numsmob))
	    goto def;
	  if (scm_smobs[i].mark)
	    {
	      ptr = (scm_smobs[i].mark) (ptr);
	      goto gc_mark_loop;
	    }
	  else
	    return;
	}
      break;
    default:
    def:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME
0f2d19dd 1411
56495472
ML
1412#ifndef MARK_DEPENDENCIES
1413
1414#undef MARK
56495472
ML
1415#undef FNAME
1416
1417/* And here we define `scm_gc_mark_dependencies', by including this
1418 * same file in itself.
1419 */
1420#define MARK scm_gc_mark_dependencies
1421#define FNAME "scm_gc_mark_dependencies"
1422#define MARK_DEPENDENCIES
1423#include "gc.c"
1424#undef MARK_DEPENDENCIES
1425#undef MARK
56495472
ML
1426#undef FNAME
1427
0f2d19dd
JB
1428
/* Mark a Region Conservatively.
 *
 * Scan the N stack items at X, treating every word that could be a
 * valid cell pointer as a root.  For each candidate, a binary search
 * over scm_heap_table locates its segment; the word is marked only if
 * it points inside a segment, is not a card header cell, and is
 * suitably aligned for the segment's cell span.  False positives only
 * retain garbage; false negatives would be fatal, hence the care.
 */
void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = * (SCM *) &x[m];
      if (SCM_CELLP (obj))
	{
	  SCM_CELLPTR ptr = SCM2PTR (obj);
	  int i = 0;
	  int j = scm_n_heap_segs - 1;
	  /* Quick reject: outside the whole heap range.  */
	  if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
	      && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
	    {
	      /* Binary search for the segment containing ptr; every
		 path either narrows [i, j] or breaks out.  */
	      while (i <= j)
		{
		  int seg_id;
		  seg_id = -1;
		  if ((i == j)
		      || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
		    seg_id = i;
		  else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
		    seg_id = j;
		  else
		    {
		      int k;
		      k = (i + j) / 2;
		      if (k == i)
			break;
		      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
			{
			  j = k;
			  ++i;
			  if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
			    continue;
			  else
			    break;
			}
		      else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
			{
			  i = k;
			  --j;
			  if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
			    continue;
			  else
			    break;
			}
		    }

		  /* Candidate found (seg_id set on this path): ignore
		     card headers, check span alignment, then mark.  */
		  if (SCM_GC_IN_CARD_HEADERP (ptr))
		    break;

		  if (scm_heap_table[seg_id].span == 1
		      || DOUBLECELL_ALIGNED_P (obj))
		    scm_gc_mark (obj);

		  break;
		}
	    }
	}
    }
}
1496
1497
1a548472
DH
1498/* The function scm_cellp determines whether an SCM value can be regarded as a
1499 * pointer to a cell on the heap. Binary search is used in order to determine
1500 * the heap segment that contains the cell.
1501 */
2e11a577 1502int
6e8d25a6 1503scm_cellp (SCM value)
2e11a577 1504{
1a548472
DH
1505 if (SCM_CELLP (value)) {
1506 scm_cell * ptr = SCM2PTR (value);
1507 unsigned int i = 0;
1508 unsigned int j = scm_n_heap_segs - 1;
1509
61045190
DH
1510 if (SCM_GC_IN_CARD_HEADERP (ptr))
1511 return 0;
1512
1a548472
DH
1513 while (i < j) {
1514 int k = (i + j) / 2;
1515 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1516 j = k;
1517 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1518 i = k + 1;
1519 }
1520 }
2e11a577 1521
9d47a1e6 1522 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1a548472 1523 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
ecf470a2 1524 && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
d6884e63
ML
1525 && !SCM_GC_IN_CARD_HEADERP (ptr)
1526 )
1a548472 1527 return 1;
d6884e63 1528 else
1a548472 1529 return 0;
d6884e63 1530 } else
1a548472 1531 return 0;
2e11a577
MD
1532}
1533
1534
4c48ba06
MD
1535static void
1536gc_sweep_freelist_start (scm_freelist_t *freelist)
1537{
1538 freelist->cells = SCM_EOL;
1539 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1540 freelist->clusters_allocated = 0;
4c48ba06
MD
1541 freelist->clusters = SCM_EOL;
1542 freelist->clustertail = &freelist->clusters;
1811ebce 1543 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1544 freelist->collected = 0;
1545}
1546
/* Finish a sweep for FREELIST: attach the partial cluster collected
 * last, fold its cell count into the totals, and decide whether the
 * heap should grow before the next collection.  */
static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  int collected;
  /* Append the leftover (partial) cluster to the cluster chain.  */
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      SCM c = freelist->cells;
      SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
      SCM_SET_CELL_WORD_1 (c, SCM_EOL);
      /* Count the cells of the partial cluster.  */
      freelist->collected +=
	freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}
0f2d19dd 1572
d6884e63
ML
/* Advance PTR to the next data cell of the given SPAN, skipping over
 * card header cells (SCM_GC_IN_CARD_HEADERP) when the increment lands
 * in a card's header area.  */
#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
	     CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
	     : nxt__); \
  } while (0)
1580
/* Sweep every heap segment: reclaim unmarked cells onto their
 * segment's freelist (built in clusters), free any malloc'd storage
 * the dead objects own, and update the global allocation statistics.
 * `m' accumulates the number of malloc'd bytes released; it is
 * subtracted from scm_mallocated at the end.  */
void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
	 segment points to.  Rather than updating the real freelist
	 pointer as we go along, we accumulate the new head in
	 nfreelist.  Then, if it turns out that the entire segment is
	 free, we free (i.e., malloc's free) the whole segment, and
	 simply don't assign nfreelist back into the real freelist. */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      for (j = seg_size + span; j -= span; ptr += span)
	{
	  SCM scmptr;

	  if (SCM_GC_IN_CARD_HEADERP (ptr))
	    {
	      SCM_CELLPTR nxt;

	      /* cheat here */
	      nxt = ptr;
	      NEXT_DATA_CELL (nxt, span);
	      j += span;

	      ptr = nxt - span;   /* compensated by the loop's ptr += span */
	      continue;
	    }

	  scmptr = PTR2SCM (ptr);

	  /* Marked cells are alive — leave them be.  */
	  if (SCM_GCMARKP (scmptr))
	    continue;

	  /* Dead cell: release any malloc'd storage it owns, counting
	     freed bytes into m, then thread it onto the freelist.  */
	  switch SCM_TYP7 (scmptr)
	    {
	    case scm_tcs_cons_gloc:
	      {
		/* Dirk:FIXME:: Again, super ugly code:  scmptr may be a
		 * struct or a gloc.  See the corresponding comment in
		 * scm_gc_mark.
		 */
		scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
				    - scm_tc3_cons_gloc);
		/* access as struct */
		scm_bits_t * vtable_data = (scm_bits_t *) word0;
		if (vtable_data[scm_vtable_index_vcell] == 0)
		  {
		    /* Structs need to be freed in a special order.
		     * This is handled by GC C hooks in struct.c.
		     */
		    SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
		    scm_structs_to_free = scmptr;
		    continue;
		  }
		/* fall through so that scmptr gets collected */
	      }
	      break;
	    case scm_tcs_cons_imcar:
	    case scm_tcs_cons_nimcar:
	    case scm_tcs_closures:
	    case scm_tc7_pws:
	      break;   /* nothing malloc'd to free */
	    case scm_tc7_wvect:
	      m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
	      scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
	      break;
	    case scm_tc7_vector:
	      {
		unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
		if (length > 0)
		  {
		    m += length * sizeof (scm_bits_t);
		    scm_must_free (SCM_VECTOR_BASE (scmptr));
		  }
		break;
	      }
#ifdef CCLO
	    case scm_tc7_cclo:
	      m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
	      scm_must_free (SCM_CCLO_BASE (scmptr));
	      break;
#endif
#ifdef HAVE_ARRAYS
	    case scm_tc7_bvect:
	      {
		unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
		if (length > 0)
		  {
		    m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
		    scm_must_free (SCM_BITVECTOR_BASE (scmptr));
		  }
	      }
	      break;
	    case scm_tc7_byvect:
	    case scm_tc7_ivect:
	    case scm_tc7_uvect:
	    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
	    case scm_tc7_llvect:
#endif
	    case scm_tc7_fvect:
	    case scm_tc7_dvect:
	    case scm_tc7_cvect:
	      m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
	      scm_must_free (SCM_UVECTOR_BASE (scmptr));
	      break;
#endif
	    case scm_tc7_substring:
	      break;   /* shares its parent string's storage */
	    case scm_tc7_string:
	      m += SCM_STRING_LENGTH (scmptr) + 1;
	      scm_must_free (SCM_STRING_CHARS (scmptr));
	      break;
	    case scm_tc7_symbol:
	      m += SCM_SYMBOL_LENGTH (scmptr) + 1;
	      scm_must_free (SCM_SYMBOL_CHARS (scmptr));
	      break;
	    case scm_tcs_subrs:
	      /* the various "subrs" (primitives) are never freed */
	      continue;
	    case scm_tc7_port:
	      if SCM_OPENP (scmptr)
		{
		  int k = SCM_PTOBNUM (scmptr);
		  if (!(k < scm_numptob))
		    goto sweeperr;
		  /* Keep "revealed" ports alive.  */
		  if (scm_revealed_count (scmptr) > 0)
		    continue;
		  /* Yes, I really do mean scm_ptobs[k].free */
		  /* rather than ftobs[k].close.  .close */
		  /* is for explicit CLOSE-PORT by user */
		  m += (scm_ptobs[k].free) (scmptr);
		  SCM_SETSTREAM (scmptr, 0);
		  scm_remove_from_port_table (scmptr);
		  scm_gc_ports_collected++;
		  SCM_CLR_PORT_OPEN_FLAG (scmptr);
		}
	      break;
	    case scm_tc7_smob:
	      switch SCM_TYP16 (scmptr)
		{
		case scm_tc_free_cell:
		case scm_tc16_real:
		  break;
#ifdef SCM_BIGDIG
		case scm_tc16_big:
		  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
		  scm_must_free (SCM_BDIGITS (scmptr));
		  break;
#endif /* def SCM_BIGDIG */
		case scm_tc16_complex:
		  m += sizeof (scm_complex_t);
		  scm_must_free (SCM_COMPLEX_MEM (scmptr));
		  break;
		default:
		  {
		    int k;
		    k = SCM_SMOBNUM (scmptr);
		    if (!(k < scm_numsmob))
		      goto sweeperr;
		    /* Delegate to the smob type's own free function.  */
		    m += (scm_smobs[k].free) (scmptr);
		    break;
		  }
		}
	      break;
	    default:
	    sweeperr:
	      SCM_MISC_ERROR ("unknown type", SCM_EOL);
	    }

	  if (!--left_to_collect)
	    {
	      /* Cluster complete: close it and chain it onto the
		 freelist's cluster list.  */
	      SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
	      *freelist->clustertail = scmptr;
	      freelist->clustertail = SCM_CDRLOC (scmptr);

	      nfreelist = SCM_EOL;
	      freelist->collected += span * freelist->cluster_size;
	      left_to_collect = freelist->cluster_size;
	    }
	  else
	    {
	      /* Stick the new cell on the front of nfreelist.  It's
		 critical that we mark this cell as freed; otherwise, the
		 conservative collector might trace it as some other type
		 of object. */
	      SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	      SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
	      nfreelist = scmptr;
	    }
	}

#ifdef GC_FREE_SEGMENTS
      /* NOTE(review): `n' is not declared anywhere in this scope, so
	 this branch looks bit-rotted — verify before ever enabling
	 GC_FREE_SEGMENTS.  */
      if (n == seg_size)
	{
	  register long j;

	  freelist->heap_size -= seg_size;
	  free ((char *) scm_heap_table[i].bounds[0]);
	  scm_heap_table[i].bounds[0] = 0;
	  for (j = i + 1; j < scm_n_heap_segs; j++)
	    scm_heap_table[j - 1] = scm_heap_table[j];
	  scm_n_heap_segs -= 1;
	  i--;    /* We need to scan the segment just moved.  */
	}
      else
#endif /* ifdef GC_FREE_SEGMENTS */
	{
	  /* Update the real freelist pointer to point to the head of
	     the list of free cells we've built for this segment. */
	  freelist->cells = nfreelist;
	  freelist->left_to_collect = left_to_collect;
	}

#ifdef GUILE_DEBUG_FREELIST
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME
0f2d19dd
JB
1845
1846
1847\f
0f2d19dd
JB
1848/* {Front end to malloc}
1849 *
9d47a1e6
ML
1850 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1851 * scm_done_free
0f2d19dd 1852 *
c6c79933
GH
1853 * These functions provide services comparable to malloc, realloc, and
1854 * free. They should be used when allocating memory that will be under
1855 * control of the garbage collector, i.e., if the memory may be freed
1856 * during garbage collection.
1857 */
bc9d9bb2 1858
0f2d19dd
JB
1859/* scm_must_malloc
1860 * Return newly malloced storage or throw an error.
1861 *
1862 * The parameter WHAT is a string for error reporting.
a00c95d9 1863 * If the threshold scm_mtrigger will be passed by this
0f2d19dd
JB
1864 * allocation, or if the first call to malloc fails,
1865 * garbage collect -- on the presumption that some objects
1866 * using malloced storage may be collected.
1867 *
1868 * The limit scm_mtrigger may be raised by this allocation.
1869 */
/* Allocate SIZE bytes or signal an error.  WHAT is a label for error
 * reporting and the malloc debugging registry.  If the projected
 * total (scm_mallocated + size) stays under scm_mtrigger, try malloc
 * directly; otherwise (or if that malloc fails) run a GC first and
 * retry, possibly raising scm_mtrigger.  On final failure,
 * scm_memory_error reports the error (control must not return here,
 * or the function would fall off the end — NOTE(review): relies on
 * scm_memory_error not returning).  */
void *
scm_must_malloc (scm_sizet size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
	{
	  scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
	  scm_malloc_register (ptr, what);
#endif
	  return ptr;
	}
    }

  /* Over the trigger (or first malloc failed): collect, then retry.  */
  scm_igc (what);

  nm = scm_mallocated + size;   /* recompute: GC changed scm_mallocated */
  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Grow the trigger so we don't GC on every allocation near it.  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
	if (nm > scm_mtrigger)
	  scm_mtrigger = nm + nm / 2;
	else
	  scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  scm_memory_error (what);
}
1911
1912
1913/* scm_must_realloc
1914 * is similar to scm_must_malloc.
1915 */
07806695
JB
1916void *
1917scm_must_realloc (void *where,
e4ef2330
MD
1918 scm_sizet old_size,
1919 scm_sizet size,
3eeba8d4 1920 const char *what)
0f2d19dd 1921{
07806695 1922 void *ptr;
e4ef2330
MD
1923 scm_sizet nm = scm_mallocated + size - old_size;
1924
1925 if (nm <= scm_mtrigger)
0f2d19dd 1926 {
07806695 1927 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1928 if (NULL != ptr)
1929 {
1930 scm_mallocated = nm;
bc9d9bb2
MD
1931#ifdef GUILE_DEBUG_MALLOC
1932 scm_malloc_reregister (where, ptr, what);
1933#endif
0f2d19dd
JB
1934 return ptr;
1935 }
1936 }
e4ef2330 1937
0f2d19dd 1938 scm_igc (what);
e4ef2330
MD
1939
1940 nm = scm_mallocated + size - old_size;
07806695 1941 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1942 if (NULL != ptr)
1943 {
1944 scm_mallocated = nm;
6064dcc6
MV
1945 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1946 if (nm > scm_mtrigger)
1947 scm_mtrigger = nm + nm / 2;
1948 else
1949 scm_mtrigger += scm_mtrigger / 2;
1950 }
bc9d9bb2
MD
1951#ifdef GUILE_DEBUG_MALLOC
1952 scm_malloc_reregister (where, ptr, what);
1953#endif
0f2d19dd
JB
1954 return ptr;
1955 }
e4ef2330 1956
acf4331f 1957 scm_memory_error (what);
0f2d19dd
JB
1958}
1959
acf4331f 1960
a00c95d9 1961void
07806695 1962scm_must_free (void *obj)
acf4331f 1963#define FUNC_NAME "scm_must_free"
0f2d19dd 1964{
bc9d9bb2
MD
1965#ifdef GUILE_DEBUG_MALLOC
1966 scm_malloc_unregister (obj);
1967#endif
0f2d19dd
JB
1968 if (obj)
1969 free (obj);
1970 else
acf4331f 1971 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
0f2d19dd 1972}
acf4331f
DH
1973#undef FUNC_NAME
1974
0f2d19dd 1975
c68296f8
MV
1976/* Announce that there has been some malloc done that will be freed
1977 * during gc. A typical use is for a smob that uses some malloced
1978 * memory but can not get it from scm_must_malloc (for whatever
1979 * reason). When a new object of this smob is created you call
1980 * scm_done_malloc with the size of the object. When your smob free
1981 * function is called, be sure to include this size in the return
9d47a1e6
ML
1982 * value.
1983 *
1984 * If you can't actually free the memory in the smob free function,
1985 * for whatever reason (like reference counting), you still can (and
1986 * should) report the amount of memory freed when you actually free it.
1987 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1988 * eh? Or even better, call scm_done_free. */
0f2d19dd 1989
c68296f8 1990void
6e8d25a6 1991scm_done_malloc (long size)
c68296f8
MV
1992{
1993 scm_mallocated += size;
1994
1995 if (scm_mallocated > scm_mtrigger)
1996 {
1997 scm_igc ("foreign mallocs");
1998 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1999 {
2000 if (scm_mallocated > scm_mtrigger)
2001 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2002 else
2003 scm_mtrigger += scm_mtrigger / 2;
2004 }
2005 }
2006}
2007
9d47a1e6
ML
2008void
2009scm_done_free (long size)
2010{
2011 scm_mallocated -= size;
2012}
2013
c68296f8
MV
2014
2015\f
0f2d19dd
JB
/* {Heap Segments}
 *
 * Each heap segment is an array of objects of a particular size.
 * Every segment has an associated (possibly shared) freelist.
 * A table of segment records is kept that records the upper and
 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
 * into valid segments at reasonable offsets). */

/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* Upper bound on the byte size of any single heap segment; set in
 * scm_init_storage (from GUILE_MAX_SEGMENT_SIZE or its default). */
scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Segment record table, kept sorted by segment base address (see
 * init_heap_seg).  heap_segment_table_size is the allocated capacity
 * of the table; scm_n_heap_segs is the number of entries in use. */
scm_heap_seg_data_t * scm_heap_table = 0;
static unsigned int heap_segment_table_size = 0;
int scm_n_heap_segs = 0;
2042
0f2d19dd 2043/* init_heap_seg
d6884e63 2044 * initializes a new heap segment and returns the number of objects it contains.
0f2d19dd 2045 *
d6884e63
ML
2046 * The segment origin and segment size in bytes are input parameters.
2047 * The freelist is both input and output.
0f2d19dd 2048 *
d6884e63
ML
2049 * This function presumes that the scm_heap_table has already been expanded
2050 * to accomodate a new segment record and that the markbit space was reserved
2051 * for all the cards in this segment.
0f2d19dd
JB
2052 */
2053
d6884e63
ML
2054#define INIT_CARD(card, span) \
2055 do { \
322ec19d 2056 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
d6884e63
ML
2057 if ((span) == 2) \
2058 SCM_GC_SET_CARD_DOUBLECELL (card); \
2059 } while (0)
0f2d19dd 2060
a00c95d9 2061static scm_sizet
4c48ba06 2062init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
0f2d19dd
JB
2063{
2064 register SCM_CELLPTR ptr;
0f2d19dd 2065 SCM_CELLPTR seg_end;
15e9d186 2066 int new_seg_index;
acb0a19c 2067 int n_new_cells;
4c48ba06 2068 int span = freelist->span;
a00c95d9 2069
0f2d19dd
JB
2070 if (seg_org == NULL)
2071 return 0;
2072
d6884e63
ML
2073 /* Align the begin ptr up.
2074 */
2075 ptr = SCM_GC_CARD_UP (seg_org);
acb0a19c 2076
a00c95d9 2077 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 2078 */
d6884e63 2079 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
0f2d19dd 2080
a00c95d9 2081 /* Find the right place and insert the segment record.
0f2d19dd
JB
2082 *
2083 */
2084 for (new_seg_index = 0;
2085 ( (new_seg_index < scm_n_heap_segs)
2086 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2087 new_seg_index++)
2088 ;
2089
2090 {
2091 int i;
2092 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2093 scm_heap_table[i] = scm_heap_table[i - 1];
2094 }
a00c95d9 2095
0f2d19dd
JB
2096 ++scm_n_heap_segs;
2097
945fec60 2098 scm_heap_table[new_seg_index].span = span;
4c48ba06 2099 scm_heap_table[new_seg_index].freelist = freelist;
195e6201
DH
2100 scm_heap_table[new_seg_index].bounds[0] = ptr;
2101 scm_heap_table[new_seg_index].bounds[1] = seg_end;
0f2d19dd 2102
acb0a19c
MD
2103 /*n_new_cells*/
2104 n_new_cells = seg_end - ptr;
0f2d19dd 2105
4c48ba06 2106 freelist->heap_size += n_new_cells;
4a4c9785 2107
a00c95d9 2108 /* Partition objects in this segment into clusters */
4a4c9785
MD
2109 {
2110 SCM clusters;
2111 SCM *clusterp = &clusters;
4a4c9785 2112
d6884e63
ML
2113 NEXT_DATA_CELL (ptr, span);
2114 while (ptr < seg_end)
4a4c9785 2115 {
d6884e63
ML
2116 scm_cell *nxt = ptr;
2117 scm_cell *prv = NULL;
2118 scm_cell *last_card = NULL;
2119 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2120 NEXT_DATA_CELL(nxt, span);
4a4c9785 2121
4c48ba06
MD
2122 /* Allocate cluster spine
2123 */
4a4c9785 2124 *clusterp = PTR2SCM (ptr);
d6884e63 2125 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
4a4c9785 2126 clusterp = SCM_CDRLOC (*clusterp);
d6884e63 2127 ptr = nxt;
a00c95d9 2128
d6884e63 2129 while (n_data_cells--)
4a4c9785 2130 {
d6884e63 2131 scm_cell *card = SCM_GC_CELL_CARD (ptr);
96f6f4ae 2132 SCM scmptr = PTR2SCM (ptr);
d6884e63
ML
2133 nxt = ptr;
2134 NEXT_DATA_CELL (nxt, span);
2135 prv = ptr;
2136
2137 if (card != last_card)
2138 {
2139 INIT_CARD (card, span);
2140 last_card = card;
2141 }
96f6f4ae 2142
54778cd3 2143 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
22a52da1 2144 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
d6884e63
ML
2145
2146 ptr = nxt;
4a4c9785 2147 }
4c48ba06 2148
d6884e63 2149 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
4a4c9785 2150 }
a00c95d9 2151
d6884e63
ML
2152 /* sanity check */
2153 {
2154 scm_cell *ref = seg_end;
2155 NEXT_DATA_CELL (ref, span);
2156 if (ref != ptr)
2157 /* [cmm] looks like the segment size doesn't divide cleanly by
2158 cluster size. bad cmm! */
2159 abort();
2160 }
2161
4a4c9785
MD
2162 /* Patch up the last cluster pointer in the segment
2163 * to join it to the input freelist.
2164 */
4c48ba06
MD
2165 *clusterp = freelist->clusters;
2166 freelist->clusters = clusters;
4a4c9785
MD
2167 }
2168
4c48ba06
MD
2169#ifdef DEBUGINFO
2170 fprintf (stderr, "H");
2171#endif
0f2d19dd 2172 return size;
0f2d19dd
JB
2173}
2174
a00c95d9
ML
2175static scm_sizet
2176round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2177{
2178 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2179
2180 return
2181 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2182 + ALIGNMENT_SLACK (freelist);
2183}
2184
a00c95d9 2185static void
b6efc951 2186alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
acf4331f 2187#define FUNC_NAME "alloc_some_heap"
0f2d19dd 2188{
0f2d19dd 2189 SCM_CELLPTR ptr;
b37fe1c5 2190 long len;
a00c95d9 2191
9d47a1e6 2192 if (scm_gc_heap_lock)
b6efc951
DH
2193 {
2194 /* Critical code sections (such as the garbage collector) aren't
2195 * supposed to add heap segments.
2196 */
2197 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
2198 abort ();
2199 }
0f2d19dd 2200
9d47a1e6 2201 if (scm_n_heap_segs == heap_segment_table_size)
b6efc951
DH
2202 {
2203 /* We have to expand the heap segment table to have room for the new
2204 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2205 * init_heap_seg only if the allocation of the segment itself succeeds.
2206 */
2207 unsigned int new_table_size = scm_n_heap_segs + 1;
2208 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2209 scm_heap_seg_data_t * new_heap_table;
2210
2211 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2212 realloc ((char *)scm_heap_table, size)));
2213 if (!new_heap_table)
2214 {
2215 if (error_policy == abort_on_error)
2216 {
2217 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2218 abort ();
2219 }
2220 else
2221 {
2222 return;
2223 }
2224 }
2225 else
2226 {
2227 scm_heap_table = new_heap_table;
2228 heap_segment_table_size = new_table_size;
2229 }
2230 }
0f2d19dd 2231
0f2d19dd 2232 /* Pick a size for the new heap segment.
a00c95d9 2233 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
2234 * gc.h
2235 */
4c48ba06 2236 {
1811ebce
MD
2237 /* Assure that the new segment is predicted to be large enough.
2238 *
2239 * New yield should at least equal GC fraction of new heap size, i.e.
2240 *
2241 * y + dh > f * (h + dh)
2242 *
2243 * y : yield
8fef55a8 2244 * f : min yield fraction
1811ebce
MD
2245 * h : heap size
2246 * dh : size of new heap segment
2247 *
2248 * This gives dh > (f * h - y) / (1 - f)
bda1446c 2249 */
8fef55a8 2250 int f = freelist->min_yield_fraction;
1811ebce
MD
2251 long h = SCM_HEAP_SIZE;
2252 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2253 len = SCM_EXPHEAP (freelist->heap_size);
2254#ifdef DEBUGINFO
2255 fprintf (stderr, "(%d < %d)", len, min_cells);
2256#endif
2257 if (len < min_cells)
1811ebce 2258 len = min_cells + freelist->cluster_size;
4c48ba06 2259 len *= sizeof (scm_cell);
1811ebce
MD
2260 /* force new sampling */
2261 freelist->collected = LONG_MAX;
4c48ba06 2262 }
a00c95d9 2263
4c48ba06
MD
2264 if (len > scm_max_segment_size)
2265 len = scm_max_segment_size;
0f2d19dd
JB
2266
2267 {
2268 scm_sizet smallest;
2269
a00c95d9 2270 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9 2271
0f2d19dd 2272 if (len < smallest)
a00c95d9 2273 len = smallest;
0f2d19dd
JB
2274
2275 /* Allocate with decaying ambition. */
2276 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2277 && (len >= smallest))
2278 {
1811ebce 2279 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2280 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2281 if (ptr)
2282 {
a00c95d9 2283 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2284 return;
2285 }
2286 len /= 2;
2287 }
2288 }
2289
b6efc951
DH
2290 if (error_policy == abort_on_error)
2291 {
2292 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2293 abort ();
2294 }
0f2d19dd 2295}
acf4331f 2296#undef FUNC_NAME
0f2d19dd
JB
2297
2298
/* Scheme primitive unhash-name: walk every cell of every heap segment
 * and rewrite matching glocs so their bindings will be looked up
 * afresh.  NOTE(review): the docstring says NAME may be #t, but
 * SCM_VALIDATE_SYMBOL is applied to NAME first -- confirm whether #t
 * actually passes that validation. */
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
	    "Flushes the glocs for @var{name}, or all glocs if @var{name}\n"
	    "is @code{#t}.")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p  = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM cell = PTR2SCM (p);
	  if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
	    {
	      /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
	       * struct cell.  See the corresponding comment in scm_gc_mark.
	       */
	      scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
	      SCM gloc_car = SCM_PACK (word0); /* access as gloc */
	      SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
	      /* Skip vcells holding the sentinel values 0 and 1. */
	      if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
		  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
		{
		  SCM_SET_CELL_OBJECT_0 (cell, name);
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
0f2d19dd
JB
2340
2341
2342\f
/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _behind_ the last code in your function that
 * depends on the scheme object to exist.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function', because
 * otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory.  To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
 * call to 'some_function'.  Note that this would not be necessary if str was
 * used anyway after the call to 'some_function'.
 *   char *chars = SCM_STRING_CHARS (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

void
scm_remember_upto_here_1 (SCM obj)
{
  /* Empty. Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1, SCM obj2)
{
  /* Empty. Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj, ...)
{
  /* Empty. Protects any number of objects from garbage collection. */
}
2389
2390
#if (SCM_DEBUG_DEPRECATED == 0)

/* Deprecated predecessor of the scm_remember_upto_here* family; kept
 * only while SCM_DEBUG_DEPRECATED is off.  Same no-op trick: the call
 * itself keeps *ptr visible to the conservative GC. */
void
scm_remember (SCM *ptr)
{
  /* empty */
}

#endif  /* SCM_DEBUG_DEPRECATED == 0 */
1cc91f1b 2400
/*
  These crazy functions prevent garbage collection
  of arguments after the first argument by
  ensuring they remain live throughout the
  function because they are used in the last
  line of the code block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */

/* Return ELT unchanged; the trailing varargs exist only to keep their
 * values live for the conservative GC for the duration of the call. */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

/* Integer-returning variant of scm_return_first. */
int
scm_return_first_int (int i, ...)
{
  return i;
}
2420
0f2d19dd 2421
/* Protect OBJ from garbage collection for the lifetime of the process
 * by consing it onto the scm_permobjs root list.  There is no way to
 * undo this; use scm_protect_object for revocable protection.
 * Returns OBJ. */
SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;      /* guard the list update against interrupts */
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2430
2431
7bd4fbe2
MD
/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_unprotect_object (OBJ).  Calls to scm_protect/unprotect_object nest,
   i. e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more often
   than it has been protected before.  The function scm_protect_object returns
   OBJ.
*/

/* Implementation note:  For every object X, there is a counter which
   scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
*/

SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  /* Find or create the (obj . count) entry, then bump the count. */
  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}
2461
2462
/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_FALSEP (handle))
    {
      /* Unbalanced unprotect: treated as a fatal programming error. */
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* Decrement the nesting count; drop the entry when it hits zero
       * so the object becomes collectable again. */
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}
2495
c45acc34
JB
/* Set to 1 once process shutdown has begun (checked elsewhere to
 * avoid work during termination). */
int terminating;

/* called on process termination.  The signature depends on which
 * registration mechanism the platform provides: atexit handlers take
 * no arguments, on_exit handlers receive the exit status and an
 * argument pointer. */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}
ef290276 2516
0f2d19dd 2517\f
/* Allocate the first heap segment for FREELIST, trying the requested
 * INIT_HEAP_SIZE first and falling back to SCM_HEAP_SEG_SIZE.
 * Returns 0 on success, 1 if both allocations failed.  Also derives
 * the freelist's min_yield from its yield fraction. */
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);

  /* init_heap_seg returns 0 when handed a NULL pointer (malloc
   * failure), in which case we retry with the default segment size. */
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    scm_expmem = 1;

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
			   / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
2543
2544\f
4c48ba06
MD
2545static void
2546init_freelist (scm_freelist_t *freelist,
2547 int span,
2548 int cluster_size,
8fef55a8 2549 int min_yield)
4c48ba06
MD
2550{
2551 freelist->clusters = SCM_EOL;
2552 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2553 freelist->left_to_collect = 0;
2554 freelist->clusters_allocated = 0;
8fef55a8
MD
2555 freelist->min_yield = 0;
2556 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2557 freelist->span = span;
2558 freelist->collected = 0;
1811ebce 2559 freelist->collected_1 = 0;
4c48ba06
MD
2560 freelist->heap_size = 0;
2561}
2562
85db4a2c
DH
2563
/* Get an integer from an environment variable.
 * Returns DEF when VAR is unset, when its value does not begin with a
 * decimal number, or when the parsed value does not fit in an int
 * (previously such values were silently truncated by the implicit
 * long-to-int conversion, which is implementation-defined). */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *end, *val = getenv (var);
  long res;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  if (res > INT_MAX || res < INT_MIN)
    return def;
  return res;
}
2577
2578
/* One-time initialization of the storage subsystem: protect roots,
 * configure the two master freelists (single and double cells) from
 * environment variables, allocate the initial heap segments, install
 * GC hooks, the port table, the termination handler, and the core
 * root objects.  Returns 0 on success, 1 on allocation failure. */
int
scm_init_storage ()
{
  scm_sizet gc_trigger_1;
  scm_sizet gc_trigger_2;
  scm_sizet init_heap_size_1;
  scm_sizet init_heap_size_2;
  scm_sizet j;

  /* Clear the static protection roots and block GC until the heap is
   * ready. */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
  init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
  gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
  init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
  scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);

  scm_expmem = 0;

  /* NOTE(review): this assignment to j appears to be a dead store --
   * j is not read again before the function returns. */
  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Segment table starts with room for the two initial segments. */
  scm_heap_table = ((scm_heap_seg_data_t *)
		    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
  init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  /* scm_undefineds is a circular list so CDR-ing off its end is safe. */
  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_allocate_string (0);
  scm_nullvect = scm_c_make_vector (0, SCM_UNDEFINED);

#define DEFAULT_SYMHASH_SIZE 277
  scm_symhash = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
  scm_symhash_vars = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);

  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);

  return 0;
}
939794ce 2657
0f2d19dd
JB
2658\f
2659
939794ce
DH
/* Scheme-level hook run after each GC (via the gc_async mechanism). */
SCM scm_after_gc_hook;

#if (SCM_DEBUG_DEPRECATED == 0)
static SCM scm_gc_vcell;                /* the vcell for gc-thunk. */
#endif /* SCM_DEBUG_DEPRECATED == 0 */
/* The system async whose marking triggers gc_async_thunk. */
static SCM gc_async;
2666
2667
/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);

#if (SCM_DEBUG_DEPRECATED == 0)

  /* The following code will be removed in Guile 1.5.  It invokes the
   * deprecated gc-thunk binding, if one has been set. */
  if (SCM_NFALSEP (scm_gc_vcell))
    {
      SCM proc = SCM_CDR (scm_gc_vcell);

      if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
	scm_apply (proc, SCM_EOL, SCM_EOL);
    }

#endif /* SCM_DEBUG_DEPRECATED == 0 */

  return SCM_UNSPECIFIED;
}
2692
2693
/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).  The three hook parameters are unused.
 */
static void *
mark_gc_async (void * hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}
2705
2706
0f2d19dd
JB
/* Register the GC's Scheme-visible pieces: the after-gc-hook, the
 * async that runs it, and (via the snarfed gc.x include) the
 * primitives defined in this file. */
void
scm_init_gc ()
{
  SCM after_gc_thunk;

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

#if (SCM_DEBUG_DEPRECATED == 0)
  scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
#endif /* SCM_DEBUG_DEPRECATED == 0 */
  after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
  gc_async = scm_system_async (after_gc_thunk);  /* protected via scm_asyncs */

  /* Arrange for the async to be marked at the end of every GC. */
  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#ifndef SCM_MAGIC_SNARFER
#include "libguile/gc.x"
#endif
}
89e00824 2730
56495472
ML
2731#endif /*MARK_DEPENDENCIES*/
2732
89e00824
ML
2733/*
2734 Local Variables:
2735 c-file-style: "gnu"
2736 End:
2737*/