* scheme-io.texi: Removed obsolete section Binary IO. Added
[bpt/guile.git] / libguile / gc.c
CommitLineData
22a52da1 1/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
a00c95d9 2 *
0f2d19dd
JB
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
a00c95d9 7 *
0f2d19dd
JB
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
a00c95d9 12 *
0f2d19dd
JB
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
82892bed
JB
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
0f2d19dd
JB
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
82892bed 40 * If you do not wish that, delete this exception notice. */
1bbd0b84
GB
41
42/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
37ddcaf6
MD
45/* #define DEBUGINFO */
46
56495472
ML
47/* SECTION: This code is compiled once.
48 */
49
50#ifndef MARK_DEPENDENCIES
51
0f2d19dd
JB
52\f
53#include <stdio.h>
e6e2e95a 54#include <errno.h>
783e7774 55#include <string.h>
e6e2e95a 56
a0599745 57#include "libguile/_scm.h"
0a7a7445 58#include "libguile/eval.h"
a0599745
MD
59#include "libguile/stime.h"
60#include "libguile/stackchk.h"
61#include "libguile/struct.h"
a0599745
MD
62#include "libguile/smob.h"
63#include "libguile/unif.h"
64#include "libguile/async.h"
65#include "libguile/ports.h"
66#include "libguile/root.h"
67#include "libguile/strings.h"
68#include "libguile/vectors.h"
801cb5e7 69#include "libguile/weaks.h"
686765af 70#include "libguile/hashtab.h"
ecf470a2 71#include "libguile/tags.h"
a0599745
MD
72
73#include "libguile/validate.h"
74#include "libguile/gc.h"
fce59c93 75
bc9d9bb2 76#ifdef GUILE_DEBUG_MALLOC
a0599745 77#include "libguile/debug-malloc.h"
bc9d9bb2
MD
78#endif
79
0f2d19dd 80#ifdef HAVE_MALLOC_H
95b88819 81#include <malloc.h>
0f2d19dd
JB
82#endif
83
84#ifdef HAVE_UNISTD_H
95b88819 85#include <unistd.h>
0f2d19dd
JB
86#endif
87
1cc91f1b
JB
88#ifdef __STDC__
89#include <stdarg.h>
90#define var_start(x, y) va_start(x, y)
91#else
92#include <varargs.h>
93#define var_start(x, y) va_start(x)
94#endif
95
0f2d19dd 96\f
406c7d90
DH
97
/* Non-zero while the garbage collector itself is running.  */
unsigned int scm_gc_running_p = 0;
99
100\f
101
102#if (SCM_DEBUG_CELL_ACCESSES == 1)
103
61045190
DH
104scm_bits_t scm_tc16_allocated;
105
106/* Set this to != 0 if every cell that is accessed shall be checked:
107 */
108unsigned int scm_debug_cell_accesses_p = 1;
406c7d90
DH
109
110
111/* Assert that the given object is a valid reference to a valid cell. This
112 * test involves to determine whether the object is a cell pointer, whether
113 * this pointer actually points into a heap segment and whether the cell
114 * pointed to is not a free cell.
115 */
116void
117scm_assert_cell_valid (SCM cell)
118{
61045190
DH
119 static unsigned int already_running = 0;
120
121 if (scm_debug_cell_accesses_p && !already_running)
406c7d90 122 {
61045190 123 already_running = 1; /* set to avoid recursion */
406c7d90 124
9d47a1e6 125 if (!scm_cellp (cell))
406c7d90
DH
126 {
127 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
128 abort ();
129 }
130 else if (!scm_gc_running_p)
131 {
132 /* Dirk::FIXME:: During garbage collection there occur references to
133 free cells. This is allright during conservative marking, but
134 should not happen otherwise (I think). The case of free cells
135 accessed during conservative marking is handled in function
136 scm_mark_locations. However, there still occur accesses to free
137 cells during gc. I don't understand why this happens. If it is
138 a bug and gets fixed, the following test should also work while
139 gc is running.
140 */
141 if (SCM_FREE_CELL_P (cell))
142 {
143 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
144 abort ();
145 }
146 }
61045190 147 already_running = 0; /* re-enable */
406c7d90
DH
148 }
149}
150
151
152SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
153 (SCM flag),
1e6808ea
MG
154 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
155 "If @var{flag} is @code{#t}, cell access checking is enabled.\n"
156 "This procedure only exists when the compile-time flag\n"
157 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
406c7d90
DH
158#define FUNC_NAME s_scm_set_debug_cell_accesses_x
159{
160 if (SCM_FALSEP (flag)) {
161 scm_debug_cell_accesses_p = 0;
162 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
163 scm_debug_cell_accesses_p = 1;
164 } else {
165 SCM_WRONG_TYPE_ARG (1, flag);
166 }
167 return SCM_UNSPECIFIED;
168}
169#undef FUNC_NAME
170
171#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
172
173\f
174
0f2d19dd 175/* {heap tuning parameters}
a00c95d9 176 *
0f2d19dd
JB
177 * These are parameters for controlling memory allocation. The heap
178 * is the area out of which scm_cons, and object headers are allocated.
179 *
180 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
181 * 64 bit machine. The units of the _SIZE parameters are bytes.
182 * Cons pairs and object headers occupy one heap cell.
183 *
184 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
185 * allocated initially the heap will grow by half its current size
186 * each subsequent time more heap is needed.
187 *
188 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
189 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
190 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
191 * is in scm_init_storage() and alloc_some_heap() in sys.c
a00c95d9 192 *
0f2d19dd
JB
193 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
194 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
195 *
196 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
197 * is needed.
198 *
199 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
a00c95d9 200 * trigger a GC.
6064dcc6
MV
201 *
202 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
203 * reclaimed by a GC triggered by must_malloc. If less than this is
204 * reclaimed, the trigger threshold is raised. [I don't know what a
205 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
a00c95d9 206 * work around a oscillation that caused almost constant GC.]
0f2d19dd
JB
207 */
208
8fef55a8
MD
209/*
210 * Heap size 45000 and 40% min yield gives quick startup and no extra
211 * heap allocation. Having higher values on min yield may lead to
212 * large heaps, especially if code behaviour is varying its
213 * maximum consumption between different freelists.
214 */
d6884e63
ML
215
216#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
217#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
218#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
219int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
220 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
aeacfc8f 221int scm_default_min_yield_1 = 40;
4c48ba06 222
d6884e63
ML
223#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
224int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
225 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
4c48ba06
MD
226/* The following value may seem large, but note that if we get to GC at
227 * all, this means that we have a numerically intensive application
228 */
aeacfc8f 229int scm_default_min_yield_2 = 40;
4c48ba06 230
aeacfc8f 231int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
4c48ba06 232
d6884e63 233#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
0f2d19dd
JB
234#ifdef _QC
235# define SCM_HEAP_SEG_SIZE 32768L
236#else
237# ifdef sequent
4c48ba06 238# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
0f2d19dd 239# else
4c48ba06 240# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
0f2d19dd
JB
241# endif
242#endif
4c48ba06 243/* Make heap grow with factor 1.5 */
4a4c9785 244#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
0f2d19dd 245#define SCM_INIT_MALLOC_LIMIT 100000
6064dcc6 246#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
0f2d19dd 247
d6884e63
ML
/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
   aligned inner bounds for allocated storage */

#ifdef PROT386
/*in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */

/* True iff X is aligned on a double-cell (2 * sizeof (scm_cell)) boundary.  */
#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)

#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
0f2d19dd
JB
270
271\f
945fec60 272/* scm_freelists
0f2d19dd 273 */
945fec60 274
a00c95d9
ML
275typedef struct scm_freelist_t {
276 /* collected cells */
277 SCM cells;
a00c95d9
ML
278 /* number of cells left to collect before cluster is full */
279 unsigned int left_to_collect;
b37fe1c5
MD
280 /* number of clusters which have been allocated */
281 unsigned int clusters_allocated;
8fef55a8
MD
282 /* a list of freelists, each of size cluster_size,
283 * except the last one which may be shorter
284 */
a00c95d9
ML
285 SCM clusters;
286 SCM *clustertail;
b37fe1c5 287 /* this is the number of objects in each cluster, including the spine cell */
a00c95d9 288 int cluster_size;
8fef55a8 289 /* indicates that we should grow heap instead of GC:ing
a00c95d9
ML
290 */
291 int grow_heap_p;
8fef55a8 292 /* minimum yield on this list in order not to grow the heap
a00c95d9 293 */
8fef55a8
MD
294 long min_yield;
295 /* defines min_yield as percent of total heap size
a00c95d9 296 */
8fef55a8 297 int min_yield_fraction;
a00c95d9
ML
298 /* number of cells per object on this list */
299 int span;
300 /* number of collected cells during last GC */
1811ebce
MD
301 long collected;
302 /* number of collected cells during penultimate GC */
303 long collected_1;
a00c95d9
ML
304 /* total number of cells in heap segments
305 * belonging to this list.
306 */
1811ebce 307 long heap_size;
a00c95d9
ML
308} scm_freelist_t;
309
4a4c9785
MD
310SCM scm_freelist = SCM_EOL;
311scm_freelist_t scm_master_freelist = {
b37fe1c5 312 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
4a4c9785
MD
313};
314SCM scm_freelist2 = SCM_EOL;
315scm_freelist_t scm_master_freelist2 = {
b37fe1c5 316 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
4a4c9785 317};
0f2d19dd
JB
318
319/* scm_mtrigger
320 * is the number of bytes of must_malloc allocation needed to trigger gc.
321 */
15e9d186 322unsigned long scm_mtrigger;
0f2d19dd 323
0f2d19dd
JB
324/* scm_gc_heap_lock
325 * If set, don't expand the heap. Set only during gc, during which no allocation
326 * is supposed to take place anyway.
327 */
328int scm_gc_heap_lock = 0;
329
330/* GC Blocking
331 * Don't pause for collection if this is set -- just
332 * expand the heap.
333 */
0f2d19dd
JB
334int scm_block_gc = 1;
335
0f2d19dd
JB
336/* During collection, this accumulates objects holding
337 * weak references.
338 */
ab4bef85 339SCM scm_weak_vectors;
0f2d19dd 340
7445e0e8
MD
341/* During collection, this accumulates structures which are to be freed.
342 */
343SCM scm_structs_to_free;
344
0f2d19dd
JB
345/* GC Statistics Keeping
346 */
347unsigned long scm_cells_allocated = 0;
a5c314c8 348long scm_mallocated = 0;
b37fe1c5 349unsigned long scm_gc_cells_collected;
8b0d194f 350unsigned long scm_gc_yield;
37ddcaf6 351static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
0f2d19dd
JB
352unsigned long scm_gc_malloc_collected;
353unsigned long scm_gc_ports_collected;
0f2d19dd 354unsigned long scm_gc_time_taken = 0;
c9b0d4b0
ML
355static unsigned long t_before_gc;
356static unsigned long t_before_sweep;
357unsigned long scm_gc_mark_time_taken = 0;
358unsigned long scm_gc_sweep_time_taken = 0;
359unsigned long scm_gc_times = 0;
360unsigned long scm_gc_cells_swept = 0;
361double scm_gc_cells_marked_acc = 0.;
362double scm_gc_cells_swept_acc = 0.;
0f2d19dd
JB
363
364SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
365SCM_SYMBOL (sym_heap_size, "cell-heap-size");
366SCM_SYMBOL (sym_mallocated, "bytes-malloced");
367SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
368SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
369SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
c9b0d4b0
ML
370SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
371SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
372SCM_SYMBOL (sym_times, "gc-times");
373SCM_SYMBOL (sym_cells_marked, "cells-marked");
374SCM_SYMBOL (sym_cells_swept, "cells-swept");
0f2d19dd 375
a00c95d9 376typedef struct scm_heap_seg_data_t
0f2d19dd 377{
cf2d30f6
JB
378 /* lower and upper bounds of the segment */
379 SCM_CELLPTR bounds[2];
380
381 /* address of the head-of-freelist pointer for this segment's cells.
382 All segments usually point to the same one, scm_freelist. */
4c48ba06 383 scm_freelist_t *freelist;
cf2d30f6 384
fe517a7d 385 /* number of cells per object in this segment */
945fec60 386 int span;
a00c95d9 387} scm_heap_seg_data_t;
0f2d19dd
JB
388
389
390
945fec60 391static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
b6efc951
DH
392
393typedef enum { return_on_error, abort_on_error } policy_on_error;
394static void alloc_some_heap (scm_freelist_t *, policy_on_error);
0f2d19dd
JB
395
396
d6884e63
ML
397#define SCM_HEAP_SIZE \
398 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
399#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
400
401#define BVEC_GROW_SIZE 256
402#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
403#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
404
405/* mark space allocation */
406
407typedef struct scm_mark_space_t
408{
409 scm_c_bvec_limb_t *bvec_space;
410 struct scm_mark_space_t *next;
411} scm_mark_space_t;
412
413static scm_mark_space_t *current_mark_space;
414static scm_mark_space_t **mark_space_ptr;
415static int current_mark_space_offset;
416static scm_mark_space_t *mark_space_head;
417
418static scm_c_bvec_limb_t *
419get_bvec ()
db4b4ca6 420#define FUNC_NAME "get_bvec"
d6884e63
ML
421{
422 scm_c_bvec_limb_t *res;
423
424 if (!current_mark_space)
425 {
426 SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
427 if (!current_mark_space)
db4b4ca6 428 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
429
430 current_mark_space->bvec_space = NULL;
431 current_mark_space->next = NULL;
432
433 *mark_space_ptr = current_mark_space;
434 mark_space_ptr = &(current_mark_space->next);
435
436 return get_bvec ();
437 }
438
439 if (!(current_mark_space->bvec_space))
440 {
441 SCM_SYSCALL (current_mark_space->bvec_space =
442 (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
443 if (!(current_mark_space->bvec_space))
db4b4ca6 444 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
445
446 current_mark_space_offset = 0;
447
448 return get_bvec ();
449 }
450
451 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
452 {
453 current_mark_space = NULL;
454
455 return get_bvec ();
456 }
457
458 res = current_mark_space->bvec_space + current_mark_space_offset;
459 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
460
461 return res;
462}
db4b4ca6
DH
463#undef FUNC_NAME
464
d6884e63
ML
465
466static void
467clear_mark_space ()
468{
469 scm_mark_space_t *ms;
470
471 for (ms = mark_space_head; ms; ms = ms->next)
472 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
473}
474
475
0f2d19dd 476\f
cf2d30f6
JB
477/* Debugging functions. */
478
bb2c57fa 479#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6
JB
480
481/* Return the number of the heap segment containing CELL. */
482static int
483which_seg (SCM cell)
484{
485 int i;
486
487 for (i = 0; i < scm_n_heap_segs; i++)
195e6201
DH
488 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
489 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
cf2d30f6
JB
490 return i;
491 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
945fec60 492 SCM_UNPACK (cell));
cf2d30f6
JB
493 abort ();
494}
495
496
8ded62a3
MD
497static void
498map_free_list (scm_freelist_t *master, SCM freelist)
499{
500 int last_seg = -1, count = 0;
501 SCM f;
a00c95d9 502
3f5d82cd 503 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
8ded62a3
MD
504 {
505 int this_seg = which_seg (f);
506
507 if (this_seg != last_seg)
508 {
509 if (last_seg != -1)
510 fprintf (stderr, " %5d %d-cells in segment %d\n",
511 count, master->span, last_seg);
512 last_seg = this_seg;
513 count = 0;
514 }
515 count++;
516 }
517 if (last_seg != -1)
518 fprintf (stderr, " %5d %d-cells in segment %d\n",
519 count, master->span, last_seg);
520}
cf2d30f6 521
a00c95d9 522SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c 523 (),
5352393c
MG
524 "Print debugging information about the free-list.\n"
525 "@code{map-free-list} is only included in\n"
526 "@code{--enable-guile-debug} builds of Guile.")
acb0a19c
MD
527#define FUNC_NAME s_scm_map_free_list
528{
4c48ba06
MD
529 int i;
530 fprintf (stderr, "%d segments total (%d:%d",
531 scm_n_heap_segs,
532 scm_heap_table[0].span,
533 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
534 for (i = 1; i < scm_n_heap_segs; i++)
535 fprintf (stderr, ", %d:%d",
536 scm_heap_table[i].span,
537 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
538 fprintf (stderr, ")\n");
8ded62a3
MD
539 map_free_list (&scm_master_freelist, scm_freelist);
540 map_free_list (&scm_master_freelist2, scm_freelist2);
cf2d30f6
JB
541 fflush (stderr);
542
543 return SCM_UNSPECIFIED;
544}
1bbd0b84 545#undef FUNC_NAME
cf2d30f6 546
4c48ba06
MD
547static int last_cluster;
548static int last_size;
549
5384bc5b
MD
550static int
551free_list_length (char *title, int i, SCM freelist)
552{
553 SCM ls;
554 int n = 0;
3f5d82cd
DH
555 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
556 if (SCM_FREE_CELL_P (ls))
5384bc5b
MD
557 ++n;
558 else
559 {
560 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
561 abort ();
562 }
4c48ba06
MD
563 if (n != last_size)
564 {
565 if (i > 0)
566 {
567 if (last_cluster == i - 1)
568 fprintf (stderr, "\t%d\n", last_size);
569 else
570 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
571 }
572 if (i >= 0)
573 fprintf (stderr, "%s %d", title, i);
574 else
575 fprintf (stderr, "%s\t%d\n", title, n);
576 last_cluster = i;
577 last_size = n;
578 }
5384bc5b
MD
579 return n;
580}
581
582static void
583free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
584{
585 SCM clusters;
4c48ba06 586 int i = 0, len, n = 0;
5384bc5b
MD
587 fprintf (stderr, "%s\n\n", title);
588 n += free_list_length ("free list", -1, freelist);
589 for (clusters = master->clusters;
590 SCM_NNULLP (clusters);
591 clusters = SCM_CDR (clusters))
4c48ba06
MD
592 {
593 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
594 n += len;
595 }
596 if (last_cluster == i - 1)
597 fprintf (stderr, "\t%d\n", last_size);
598 else
599 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
600 fprintf (stderr, "\ntotal %d objects\n\n", n);
5384bc5b
MD
601}
602
a00c95d9 603SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b 604 (),
5352393c
MG
605 "Print debugging information about the free-list.\n"
606 "@code{free-list-length} is only included in\n"
607 "@code{--enable-guile-debug} builds of Guile.")
5384bc5b
MD
608#define FUNC_NAME s_scm_free_list_length
609{
b37fe1c5
MD
610 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
611 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 612 return SCM_UNSPECIFIED;
5384bc5b
MD
613}
614#undef FUNC_NAME
615
bb2c57fa
MD
616#endif
617
618#ifdef GUILE_DEBUG_FREELIST
cf2d30f6 619
d3dd80ab
MG
620/* Non-zero if freelist debugging is in effect. Set this via
621 `gc-set-debug-check-freelist!'. */
622static int scm_debug_check_freelist = 0;
623
cf2d30f6
JB
624/* Number of calls to SCM_NEWCELL since startup. */
625static unsigned long scm_newcell_count;
acb0a19c 626static unsigned long scm_newcell2_count;
cf2d30f6
JB
627
628/* Search freelist for anything that isn't marked as a free cell.
629 Abort if we find something. */
8ded62a3
MD
630static void
631scm_check_freelist (SCM freelist)
632{
633 SCM f;
634 int i = 0;
635
3f5d82cd
DH
636 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
637 if (!SCM_FREE_CELL_P (f))
8ded62a3
MD
638 {
639 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
640 scm_newcell_count, i);
8ded62a3
MD
641 abort ();
642 }
643}
cf2d30f6 644
a00c95d9 645SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 646 (SCM flag),
1e6808ea
MG
647 "If @var{flag} is @code{#t}, check the freelist for consistency\n"
648 "on each cell allocation. This procedure only exists when the\n"
649 "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
1bbd0b84 650#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 651{
d6884e63
ML
652 /* [cmm] I did a double-take when I read this code the first time.
653 well, FWIW. */
945fec60 654 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
655 return SCM_UNSPECIFIED;
656}
1bbd0b84 657#undef FUNC_NAME
25748c78
GB
658
659
4a4c9785
MD
660SCM
661scm_debug_newcell (void)
662{
663 SCM new;
664
665 scm_newcell_count++;
666 if (scm_debug_check_freelist)
667 {
8ded62a3 668 scm_check_freelist (scm_freelist);
4a4c9785
MD
669 scm_gc();
670 }
671
672 /* The rest of this is supposed to be identical to the SCM_NEWCELL
673 macro. */
3f5d82cd 674 if (SCM_NULLP (scm_freelist))
4a4c9785
MD
675 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
676 else
677 {
678 new = scm_freelist;
3f5d82cd 679 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
4a4c9785
MD
680 }
681
682 return new;
683}
684
685SCM
686scm_debug_newcell2 (void)
687{
688 SCM new;
689
690 scm_newcell2_count++;
691 if (scm_debug_check_freelist)
692 {
8ded62a3 693 scm_check_freelist (scm_freelist2);
4a4c9785
MD
694 scm_gc ();
695 }
696
697 /* The rest of this is supposed to be identical to the SCM_NEWCELL
698 macro. */
3f5d82cd 699 if (SCM_NULLP (scm_freelist2))
4a4c9785
MD
700 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
701 else
702 {
703 new = scm_freelist2;
3f5d82cd 704 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
4a4c9785
MD
705 }
706
707 return new;
708}
709
fca7547b 710#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
711
712\f
0f2d19dd 713
b37fe1c5
MD
714static unsigned long
715master_cells_allocated (scm_freelist_t *master)
716{
d6884e63 717 /* the '- 1' below is to ignore the cluster spine cells. */
b37fe1c5
MD
718 int objects = master->clusters_allocated * (master->cluster_size - 1);
719 if (SCM_NULLP (master->clusters))
720 objects -= master->left_to_collect;
721 return master->span * objects;
722}
723
724static unsigned long
725freelist_length (SCM freelist)
726{
727 int n;
3f5d82cd 728 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
b37fe1c5
MD
729 ++n;
730 return n;
731}
732
733static unsigned long
734compute_cells_allocated ()
735{
736 return (scm_cells_allocated
737 + master_cells_allocated (&scm_master_freelist)
738 + master_cells_allocated (&scm_master_freelist2)
739 - scm_master_freelist.span * freelist_length (scm_freelist)
740 - scm_master_freelist2.span * freelist_length (scm_freelist2));
741}
b37fe1c5 742
0f2d19dd
JB
743/* {Scheme Interface to GC}
744 */
745
a00c95d9 746SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 747 (),
1e6808ea
MG
748 "Return an association list of statistics about Guile's current\n"
749 "use of storage.")
1bbd0b84 750#define FUNC_NAME s_scm_gc_stats
0f2d19dd
JB
751{
752 int i;
753 int n;
754 SCM heap_segs;
c209c88e
GB
755 long int local_scm_mtrigger;
756 long int local_scm_mallocated;
757 long int local_scm_heap_size;
758 long int local_scm_cells_allocated;
759 long int local_scm_gc_time_taken;
c9b0d4b0
ML
760 long int local_scm_gc_times;
761 long int local_scm_gc_mark_time_taken;
762 long int local_scm_gc_sweep_time_taken;
763 double local_scm_gc_cells_swept;
764 double local_scm_gc_cells_marked;
0f2d19dd
JB
765 SCM answer;
766
767 SCM_DEFER_INTS;
939794ce
DH
768
769 ++scm_block_gc;
770
0f2d19dd
JB
771 retry:
772 heap_segs = SCM_EOL;
773 n = scm_n_heap_segs;
774 for (i = scm_n_heap_segs; i--; )
775 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
776 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
777 heap_segs);
778 if (scm_n_heap_segs != n)
779 goto retry;
939794ce
DH
780
781 --scm_block_gc;
0f2d19dd 782
7febb4a2
MD
783 /* Below, we cons to produce the resulting list. We want a snapshot of
784 * the heap situation before consing.
785 */
0f2d19dd
JB
786 local_scm_mtrigger = scm_mtrigger;
787 local_scm_mallocated = scm_mallocated;
b37fe1c5 788 local_scm_heap_size = SCM_HEAP_SIZE;
b37fe1c5 789 local_scm_cells_allocated = compute_cells_allocated ();
0f2d19dd 790 local_scm_gc_time_taken = scm_gc_time_taken;
c9b0d4b0
ML
791 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
792 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
793 local_scm_gc_times = scm_gc_times;
794 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
795 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
0f2d19dd
JB
796
797 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
798 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
799 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
800 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
801 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
c9b0d4b0
ML
802 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
803 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
804 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
805 scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
806 scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
0f2d19dd
JB
807 scm_cons (sym_heap_segments, heap_segs),
808 SCM_UNDEFINED);
809 SCM_ALLOW_INTS;
810 return answer;
811}
1bbd0b84 812#undef FUNC_NAME
0f2d19dd
JB
813
814
c9b0d4b0
ML
815static void
816gc_start_stats (const char *what)
0f2d19dd 817{
c9b0d4b0
ML
818 t_before_gc = scm_c_get_internal_run_time ();
819 scm_gc_cells_swept = 0;
b37fe1c5 820 scm_gc_cells_collected = 0;
37ddcaf6 821 scm_gc_yield_1 = scm_gc_yield;
8b0d194f
MD
822 scm_gc_yield = (scm_cells_allocated
823 + master_cells_allocated (&scm_master_freelist)
824 + master_cells_allocated (&scm_master_freelist2));
0f2d19dd
JB
825 scm_gc_malloc_collected = 0;
826 scm_gc_ports_collected = 0;
827}
828
939794ce 829
c9b0d4b0
ML
830static void
831gc_end_stats ()
0f2d19dd 832{
c9b0d4b0
ML
833 unsigned long t = scm_c_get_internal_run_time ();
834 scm_gc_time_taken += (t - t_before_gc);
835 scm_gc_sweep_time_taken += (t - t_before_sweep);
836 ++scm_gc_times;
837
838 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
839 scm_gc_cells_swept_acc += scm_gc_cells_swept;
0f2d19dd
JB
840}
841
842
a00c95d9 843SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 844 (SCM obj),
b380b885
MD
845 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
846 "returned by this function for @var{obj}")
1bbd0b84 847#define FUNC_NAME s_scm_object_address
0f2d19dd 848{
54778cd3 849 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 850}
1bbd0b84 851#undef FUNC_NAME
0f2d19dd
JB
852
853
a00c95d9 854SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
1bbd0b84 855 (),
b380b885
MD
856 "Scans all of SCM objects and reclaims for further use those that are\n"
857 "no longer accessible.")
1bbd0b84 858#define FUNC_NAME s_scm_gc
0f2d19dd
JB
859{
860 SCM_DEFER_INTS;
861 scm_igc ("call");
862 SCM_ALLOW_INTS;
863 return SCM_UNSPECIFIED;
864}
1bbd0b84 865#undef FUNC_NAME
0f2d19dd
JB
866
867
868\f
869/* {C Interface For When GC is Triggered}
870 */
871
b37fe1c5 872static void
8fef55a8 873adjust_min_yield (scm_freelist_t *freelist)
b37fe1c5 874{
8fef55a8 875 /* min yield is adjusted upwards so that next predicted total yield
bda1446c 876 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
877 * `min_yield_fraction' of total heap size. Note, however, that
878 * the absolute value of min_yield will correspond to `collected'
bda1446c 879 * on one master (the one which currently is triggering GC).
b37fe1c5 880 *
bda1446c
MD
881 * The reason why we look at total yield instead of cells collected
882 * on one list is that we want to take other freelists into account.
883 * On this freelist, we know that (local) yield = collected cells,
884 * but that's probably not the case on the other lists.
b37fe1c5
MD
885 *
886 * (We might consider computing a better prediction, for example
887 * by computing an average over multiple GC:s.)
888 */
8fef55a8 889 if (freelist->min_yield_fraction)
b37fe1c5 890 {
37ddcaf6 891 /* Pick largest of last two yields. */
8fef55a8
MD
892 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
893 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5
MD
894#ifdef DEBUGINFO
895 fprintf (stderr, " after GC = %d, delta = %d\n",
896 scm_cells_allocated,
897 delta);
898#endif
899 if (delta > 0)
8fef55a8 900 freelist->min_yield += delta;
b37fe1c5
MD
901 }
902}
903
b6efc951 904
4a4c9785 905/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
906 * common while the freelist will be individual for each thread.
907 */
4a4c9785
MD
908
909SCM
910scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
911{
912 SCM cell;
913 ++scm_ints_disabled;
4c48ba06
MD
914 do
915 {
c7387918 916 if (SCM_NULLP (master->clusters))
4c48ba06 917 {
150c200b 918 if (master->grow_heap_p || scm_block_gc)
4c48ba06 919 {
b6efc951
DH
920 /* In order to reduce gc frequency, try to allocate a new heap
921 * segment first, even if gc might find some free cells. If we
922 * can't obtain a new heap segment, we will try gc later.
923 */
4c48ba06 924 master->grow_heap_p = 0;
b6efc951 925 alloc_some_heap (master, return_on_error);
4c48ba06 926 }
b6efc951 927 if (SCM_NULLP (master->clusters))
b37fe1c5 928 {
b6efc951
DH
929 /* The heap was not grown, either because it wasn't scheduled to
930 * grow, or because there was not enough memory available. In
931 * both cases we have to try gc to get some free cells.
932 */
37ddcaf6
MD
933#ifdef DEBUGINFO
934 fprintf (stderr, "allocated = %d, ",
935 scm_cells_allocated
936 + master_cells_allocated (&scm_master_freelist)
937 + master_cells_allocated (&scm_master_freelist2));
938#endif
b37fe1c5 939 scm_igc ("cells");
8fef55a8 940 adjust_min_yield (master);
c7387918
DH
941 if (SCM_NULLP (master->clusters))
942 {
b6efc951
DH
943 /* gc could not free any cells. Now, we _must_ allocate a
944 * new heap segment, because there is no other possibility
945 * to provide a new cell for the caller.
946 */
947 alloc_some_heap (master, abort_on_error);
c7387918 948 }
b37fe1c5 949 }
4c48ba06
MD
950 }
951 cell = SCM_CAR (master->clusters);
952 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 953 ++master->clusters_allocated;
4c48ba06
MD
954 }
955 while (SCM_NULLP (cell));
d6884e63
ML
956
957#ifdef GUILE_DEBUG_FREELIST
958 scm_check_freelist (cell);
959#endif
960
4a4c9785 961 --scm_ints_disabled;
3f5d82cd 962 *freelist = SCM_FREE_CELL_CDR (cell);
4a4c9785
MD
963 return cell;
964}
965
b6efc951 966
4c48ba06
MD
967#if 0
968/* This is a support routine which can be used to reserve a cluster
969 * for some special use, such as debugging. It won't be useful until
970 * free cells are preserved between garbage collections.
971 */
972
973void
974scm_alloc_cluster (scm_freelist_t *master)
975{
976 SCM freelist, cell;
977 cell = scm_gc_for_newcell (master, &freelist);
978 SCM_SETCDR (cell, freelist);
979 return cell;
980}
981#endif
982
801cb5e7
MD
983
984scm_c_hook_t scm_before_gc_c_hook;
985scm_c_hook_t scm_before_mark_c_hook;
986scm_c_hook_t scm_before_sweep_c_hook;
987scm_c_hook_t scm_after_sweep_c_hook;
988scm_c_hook_t scm_after_gc_c_hook;
989
b6efc951 990
0f2d19dd 991void
1bbd0b84 992scm_igc (const char *what)
0f2d19dd
JB
993{
994 int j;
995
406c7d90 996 ++scm_gc_running_p;
801cb5e7 997 scm_c_hook_run (&scm_before_gc_c_hook, 0);
4c48ba06
MD
998#ifdef DEBUGINFO
999 fprintf (stderr,
1000 SCM_NULLP (scm_freelist)
1001 ? "*"
1002 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
1003#endif
42db06f0 1004 /* During the critical section, only the current thread may run. */
216eedfc 1005 SCM_CRITICAL_SECTION_START;
42db06f0 1006
e242dfd2 1007 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 1008
ab4bef85
JB
1009 if (!scm_stack_base || scm_block_gc)
1010 {
406c7d90 1011 --scm_gc_running_p;
ab4bef85
JB
1012 return;
1013 }
1014
c9b0d4b0
ML
1015 gc_start_stats (what);
1016
a5c314c8
JB
1017 if (scm_mallocated < 0)
1018 /* The byte count of allocated objects has underflowed. This is
1019 probably because you forgot to report the sizes of objects you
1020 have allocated, by calling scm_done_malloc or some such. When
1021 the GC freed them, it subtracted their size from
1022 scm_mallocated, which underflowed. */
1023 abort ();
c45acc34 1024
ab4bef85
JB
1025 if (scm_gc_heap_lock)
1026 /* We've invoked the collector while a GC is already in progress.
1027 That should never happen. */
1028 abort ();
0f2d19dd
JB
1029
1030 ++scm_gc_heap_lock;
ab4bef85 1031
0f2d19dd
JB
1032 /* flush dead entries from the continuation stack */
1033 {
1034 int x;
1035 int bound;
1036 SCM * elts;
1037 elts = SCM_VELTS (scm_continuation_stack);
b5c2579a 1038 bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
0f2d19dd
JB
1039 x = SCM_INUM (scm_continuation_stack_ptr);
1040 while (x < bound)
1041 {
1042 elts[x] = SCM_BOOL_F;
1043 ++x;
1044 }
1045 }
1046
801cb5e7
MD
1047 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1048
d6884e63
ML
1049 clear_mark_space ();
1050
42db06f0 1051#ifndef USE_THREADS
a00c95d9 1052
1b9be268 1053 /* Mark objects on the C stack. */
0f2d19dd
JB
1054 SCM_FLUSH_REGISTER_WINDOWS;
1055 /* This assumes that all registers are saved into the jmp_buf */
1056 setjmp (scm_save_regs_gc_mark);
1057 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
ce4a361d
JB
1058 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
1059 sizeof scm_save_regs_gc_mark)
1060 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
1061
1062 {
6ba93e5e 1063 scm_sizet stack_len = scm_stack_size (scm_stack_base);
0f2d19dd 1064#ifdef SCM_STACK_GROWS_UP
6ba93e5e 1065 scm_mark_locations (scm_stack_base, stack_len);
0f2d19dd 1066#else
6ba93e5e 1067 scm_mark_locations (scm_stack_base - stack_len, stack_len);
0f2d19dd
JB
1068#endif
1069 }
1070
42db06f0
MD
1071#else /* USE_THREADS */
1072
1073 /* Mark every thread's stack and registers */
945fec60 1074 scm_threads_mark_stacks ();
42db06f0
MD
1075
1076#endif /* USE_THREADS */
0f2d19dd 1077
0f2d19dd
JB
1078 j = SCM_NUM_PROTECTS;
1079 while (j--)
1080 scm_gc_mark (scm_sys_protects[j]);
1081
9de33deb
MD
1082 /* FIXME: we should have a means to register C functions to be run
1083 * in different phases of GC
a00c95d9 1084 */
9de33deb 1085 scm_mark_subr_table ();
a00c95d9 1086
42db06f0
MD
1087#ifndef USE_THREADS
1088 scm_gc_mark (scm_root->handle);
1089#endif
a00c95d9 1090
c9b0d4b0
ML
1091 t_before_sweep = scm_c_get_internal_run_time ();
1092 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1093
801cb5e7 1094 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
0493cd89 1095
0f2d19dd
JB
1096 scm_gc_sweep ();
1097
801cb5e7
MD
1098 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1099
0f2d19dd 1100 --scm_gc_heap_lock;
c9b0d4b0 1101 gc_end_stats ();
42db06f0 1102
216eedfc 1103 SCM_CRITICAL_SECTION_END;
801cb5e7 1104 scm_c_hook_run (&scm_after_gc_c_hook, 0);
406c7d90 1105 --scm_gc_running_p;
0f2d19dd
JB
1106}
1107
1108\f
939794ce 1109
a00c95d9 1110/* {Mark/Sweep}
0f2d19dd
JB
1111 */
1112
56495472
ML
1113#define MARK scm_gc_mark
1114#define FNAME "scm_gc_mark"
0f2d19dd 1115
56495472 1116#endif /*!MARK_DEPENDENCIES*/
0f2d19dd
JB
1117
1118/* Mark an object precisely.
1119 */
a00c95d9 1120void
56495472
ML
1121MARK (SCM p)
1122#define FUNC_NAME FNAME
0f2d19dd
JB
1123{
1124 register long i;
1125 register SCM ptr;
61045190 1126 scm_bits_t cell_type;
0f2d19dd 1127
56495472
ML
1128#ifndef MARK_DEPENDENCIES
1129# define RECURSE scm_gc_mark
1130#else
1131 /* go through the usual marking, but not for self-cycles. */
1132# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1133#endif
0f2d19dd
JB
1134 ptr = p;
1135
56495472
ML
1136#ifdef MARK_DEPENDENCIES
1137 goto gc_mark_loop_first_time;
1138#endif
1139
0f2d19dd
JB
1140gc_mark_loop:
1141 if (SCM_IMP (ptr))
1142 return;
1143
1144gc_mark_nimp:
56495472
ML
1145
1146#ifdef MARK_DEPENDENCIES
0209177b 1147 if (SCM_EQ_P (ptr, p))
56495472
ML
1148 return;
1149
1150 scm_gc_mark (ptr);
0209177b 1151 return;
56495472
ML
1152
1153gc_mark_loop_first_time:
1154#endif
9a6976cd 1155
61045190 1156#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
9a6976cd 1157 /* We are in debug mode. Check the ptr exhaustively. */
61045190 1158 if (!scm_cellp (ptr))
db4b4ca6 1159 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
9a6976cd
DH
1160#else
1161 /* In non-debug mode, do at least some cheap testing. */
1162 if (!SCM_CELLP (ptr))
1163 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
d6884e63
ML
1164#endif
1165
56495472
ML
1166#ifndef MARK_DEPENDENCIES
1167
d6884e63
ML
1168 if (SCM_GCMARKP (ptr))
1169 return;
56495472 1170
d6884e63
ML
1171 SCM_SETGCMARK (ptr);
1172
56495472
ML
1173#endif
1174
61045190
DH
1175 cell_type = SCM_GC_CELL_TYPE (ptr);
1176 switch (SCM_ITAG7 (cell_type))
0f2d19dd
JB
1177 {
1178 case scm_tcs_cons_nimcar:
d6884e63 1179 if (SCM_IMP (SCM_CDR (ptr)))
0f2d19dd
JB
1180 {
1181 ptr = SCM_CAR (ptr);
1182 goto gc_mark_nimp;
1183 }
56495472 1184 RECURSE (SCM_CAR (ptr));
d6884e63 1185 ptr = SCM_CDR (ptr);
0f2d19dd
JB
1186 goto gc_mark_nimp;
1187 case scm_tcs_cons_imcar:
d6884e63 1188 ptr = SCM_CDR (ptr);
acb0a19c 1189 goto gc_mark_loop;
e641afaf 1190 case scm_tc7_pws:
22a52da1
DH
1191 RECURSE (SCM_SETTER (ptr));
1192 ptr = SCM_PROCEDURE (ptr);
0f2d19dd
JB
1193 goto gc_mark_loop;
1194 case scm_tcs_cons_gloc:
0f2d19dd 1195 {
c8045e8d
DH
1196 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1197 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1198 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1199 * pointer to a struct vtable data region. The fact that these are
1200 * accessed in the same way restricts the possibilites to change the
9d47a1e6 1201 * data layout of structs or heap cells.
c8045e8d
DH
1202 */
1203 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1204 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
7445e0e8 1205 if (vtable_data [scm_vtable_index_vcell] != 0)
0f2d19dd 1206 {
d6884e63
ML
1207 /* ptr is a gloc */
1208 SCM gloc_car = SCM_PACK (word0);
56495472 1209 RECURSE (gloc_car);
d6884e63
ML
1210 ptr = SCM_CDR (ptr);
1211 goto gc_mark_loop;
1212 }
1213 else
1214 {
1215 /* ptr is a struct */
1216 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
b5c2579a 1217 int len = SCM_SYMBOL_LENGTH (layout);
06ee04b2 1218 char * fields_desc = SCM_SYMBOL_CHARS (layout);
d6884e63 1219 scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);
7bb8eac7 1220
d6884e63
ML
1221 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1222 {
56495472
ML
1223 RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
1224 RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
d6884e63
ML
1225 }
1226 if (len)
1227 {
1228 int x;
7bb8eac7 1229
d6884e63
ML
1230 for (x = 0; x < len - 2; x += 2, ++struct_data)
1231 if (fields_desc[x] == 'p')
56495472 1232 RECURSE (SCM_PACK (*struct_data));
d6884e63
ML
1233 if (fields_desc[x] == 'p')
1234 {
1235 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
56495472
ML
1236 for (x = *struct_data++; x; --x, ++struct_data)
1237 RECURSE (SCM_PACK (*struct_data));
d6884e63 1238 else
56495472 1239 RECURSE (SCM_PACK (*struct_data));
d6884e63
ML
1240 }
1241 }
1242 /* mark vtable */
1243 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1244 goto gc_mark_loop;
0f2d19dd
JB
1245 }
1246 }
1247 break;
1248 case scm_tcs_closures:
22a52da1 1249 if (SCM_IMP (SCM_ENV (ptr)))
0f2d19dd
JB
1250 {
1251 ptr = SCM_CLOSCAR (ptr);
1252 goto gc_mark_nimp;
1253 }
56495472 1254 RECURSE (SCM_CLOSCAR (ptr));
22a52da1 1255 ptr = SCM_ENV (ptr);
0f2d19dd
JB
1256 goto gc_mark_nimp;
1257 case scm_tc7_vector:
b5c2579a
DH
1258 i = SCM_VECTOR_LENGTH (ptr);
1259 if (i == 0)
1260 break;
1261 while (--i > 0)
1262 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
56495472 1263 RECURSE (SCM_VELTS (ptr)[i]);
b5c2579a
DH
1264 ptr = SCM_VELTS (ptr)[0];
1265 goto gc_mark_loop;
0f2d19dd
JB
1266#ifdef CCLO
1267 case scm_tc7_cclo:
362306b9
DH
1268 {
1269 unsigned long int i = SCM_CCLO_LENGTH (ptr);
1270 unsigned long int j;
1271 for (j = 1; j != i; ++j)
1272 {
1273 SCM obj = SCM_CCLO_REF (ptr, j);
1274 if (!SCM_IMP (obj))
56495472 1275 RECURSE (obj);
362306b9
DH
1276 }
1277 ptr = SCM_CCLO_REF (ptr, 0);
1278 goto gc_mark_loop;
1279 }
b5c2579a 1280#endif
afe5177e 1281#ifdef HAVE_ARRAYS
0f2d19dd
JB
1282 case scm_tc7_bvect:
1283 case scm_tc7_byvect:
1284 case scm_tc7_ivect:
1285 case scm_tc7_uvect:
1286 case scm_tc7_fvect:
1287 case scm_tc7_dvect:
1288 case scm_tc7_cvect:
1289 case scm_tc7_svect:
5c11cc9d 1290#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1291 case scm_tc7_llvect:
1292#endif
afe5177e 1293#endif
0f2d19dd 1294 case scm_tc7_string:
0f2d19dd
JB
1295 break;
1296
1297 case scm_tc7_substring:
0f2d19dd
JB
1298 ptr = SCM_CDR (ptr);
1299 goto gc_mark_loop;
1300
1301 case scm_tc7_wvect:
ab4bef85
JB
1302 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1303 scm_weak_vectors = ptr;
0f2d19dd
JB
1304 if (SCM_IS_WHVEC_ANY (ptr))
1305 {
1306 int x;
1307 int len;
1308 int weak_keys;
1309 int weak_values;
1310
b5c2579a 1311 len = SCM_VECTOR_LENGTH (ptr);
0f2d19dd
JB
1312 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1313 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
a00c95d9 1314
0f2d19dd
JB
1315 for (x = 0; x < len; ++x)
1316 {
1317 SCM alist;
1318 alist = SCM_VELTS (ptr)[x];
46408039
JB
1319
1320 /* mark everything on the alist except the keys or
1321 * values, according to weak_values and weak_keys. */
0b5f3f34 1322 while ( SCM_CONSP (alist)
0f2d19dd 1323 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1324 && SCM_CONSP (SCM_CAR (alist)))
1325 {
1326 SCM kvpair;
1327 SCM next_alist;
1328
1329 kvpair = SCM_CAR (alist);
1330 next_alist = SCM_CDR (alist);
a00c95d9 1331 /*
0f2d19dd
JB
1332 * Do not do this:
1333 * SCM_SETGCMARK (alist);
1334 * SCM_SETGCMARK (kvpair);
1335 *
1336 * It may be that either the key or value is protected by
1337 * an escaped reference to part of the spine of this alist.
1338 * If we mark the spine here, and only mark one or neither of the
1339 * key and value, they may never be properly marked.
1340 * This leads to a horrible situation in which an alist containing
1341 * freelist cells is exported.
1342 *
1343 * So only mark the spines of these arrays last of all marking.
1344 * If somebody confuses us by constructing a weak vector
1345 * with a circular alist then we are hosed, but at least we
1346 * won't prematurely drop table entries.
1347 */
1348 if (!weak_keys)
56495472 1349 RECURSE (SCM_CAR (kvpair));
0f2d19dd 1350 if (!weak_values)
56495472 1351 RECURSE (SCM_CDR (kvpair));
0f2d19dd
JB
1352 alist = next_alist;
1353 }
1354 if (SCM_NIMP (alist))
56495472 1355 RECURSE (alist);
0f2d19dd
JB
1356 }
1357 }
1358 break;
1359
28b06554
DH
1360 case scm_tc7_symbol:
1361 ptr = SCM_PROP_SLOTS (ptr);
0f2d19dd 1362 goto gc_mark_loop;
0f2d19dd 1363 case scm_tcs_subrs:
9de33deb 1364 break;
0f2d19dd
JB
1365 case scm_tc7_port:
1366 i = SCM_PTOBNUM (ptr);
7a7f7c53 1367#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1368 if (!(i < scm_numptob))
7a7f7c53
DH
1369 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1370#endif
ebf7394e 1371 if (SCM_PTAB_ENTRY(ptr))
56495472 1372 RECURSE (SCM_FILENAME (ptr));
dc53f026
JB
1373 if (scm_ptobs[i].mark)
1374 {
1375 ptr = (scm_ptobs[i].mark) (ptr);
1376 goto gc_mark_loop;
1377 }
1378 else
1379 return;
0f2d19dd
JB
1380 break;
1381 case scm_tc7_smob:
d6884e63 1382 switch (SCM_TYP16 (ptr))
0f2d19dd
JB
1383 { /* should be faster than going through scm_smobs */
1384 case scm_tc_free_cell:
1385 /* printf("found free_cell %X ", ptr); fflush(stdout); */
acb0a19c
MD
1386 case scm_tc16_big:
1387 case scm_tc16_real:
1388 case scm_tc16_complex:
0f2d19dd
JB
1389 break;
1390 default:
1391 i = SCM_SMOBNUM (ptr);
7a7f7c53 1392#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1393 if (!(i < scm_numsmob))
7a7f7c53
DH
1394 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1395#endif
dc53f026
JB
1396 if (scm_smobs[i].mark)
1397 {
1398 ptr = (scm_smobs[i].mark) (ptr);
1399 goto gc_mark_loop;
1400 }
1401 else
1402 return;
0f2d19dd
JB
1403 }
1404 break;
1405 default:
acf4331f 1406 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1407 }
0209177b 1408#undef RECURSE
0f2d19dd 1409}
acf4331f 1410#undef FUNC_NAME
0f2d19dd 1411
56495472
ML
1412#ifndef MARK_DEPENDENCIES
1413
1414#undef MARK
56495472
ML
1415#undef FNAME
1416
1417/* And here we define `scm_gc_mark_dependencies', by including this
1418 * same file in itself.
1419 */
1420#define MARK scm_gc_mark_dependencies
1421#define FNAME "scm_gc_mark_dependencies"
1422#define MARK_DEPENDENCIES
1423#include "gc.c"
1424#undef MARK_DEPENDENCIES
1425#undef MARK
56495472
ML
1426#undef FNAME
1427
0f2d19dd
JB
1428
1429/* Mark a Region Conservatively
1430 */
1431
a00c95d9 1432void
6e8d25a6 1433scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
0f2d19dd 1434{
c4da09e2 1435 unsigned long m;
0f2d19dd 1436
c4da09e2
DH
1437 for (m = 0; m < n; ++m)
1438 {
1439 SCM obj = * (SCM *) &x[m];
1440 if (SCM_CELLP (obj))
1441 {
1442 SCM_CELLPTR ptr = SCM2PTR (obj);
1443 int i = 0;
1444 int j = scm_n_heap_segs - 1;
1445 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1446 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1447 {
1448 while (i <= j)
1449 {
1450 int seg_id;
1451 seg_id = -1;
1452 if ((i == j)
1453 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1454 seg_id = i;
1455 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1456 seg_id = j;
1457 else
1458 {
1459 int k;
1460 k = (i + j) / 2;
1461 if (k == i)
1462 break;
1463 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1464 {
1465 j = k;
1466 ++i;
1467 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1468 continue;
1469 else
1470 break;
1471 }
1472 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1473 {
1474 i = k;
1475 --j;
1476 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1477 continue;
1478 else
1479 break;
1480 }
1481 }
7bb8eac7 1482
d6884e63
ML
1483 if (SCM_GC_IN_CARD_HEADERP (ptr))
1484 break;
7bb8eac7 1485
c4da09e2 1486 if (scm_heap_table[seg_id].span == 1
ecf470a2 1487 || DOUBLECELL_ALIGNED_P (obj))
3731149d
ML
1488 scm_gc_mark (obj);
1489
c4da09e2
DH
1490 break;
1491 }
1492 }
1493 }
1494 }
0f2d19dd
JB
1495}
1496
1497
1a548472
DH
1498/* The function scm_cellp determines whether an SCM value can be regarded as a
1499 * pointer to a cell on the heap. Binary search is used in order to determine
1500 * the heap segment that contains the cell.
1501 */
2e11a577 1502int
6e8d25a6 1503scm_cellp (SCM value)
2e11a577 1504{
1a548472
DH
1505 if (SCM_CELLP (value)) {
1506 scm_cell * ptr = SCM2PTR (value);
1507 unsigned int i = 0;
1508 unsigned int j = scm_n_heap_segs - 1;
1509
61045190
DH
1510 if (SCM_GC_IN_CARD_HEADERP (ptr))
1511 return 0;
1512
1a548472
DH
1513 while (i < j) {
1514 int k = (i + j) / 2;
1515 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1516 j = k;
1517 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1518 i = k + 1;
1519 }
1520 }
2e11a577 1521
9d47a1e6 1522 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1a548472 1523 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
ecf470a2 1524 && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
d6884e63
ML
1525 && !SCM_GC_IN_CARD_HEADERP (ptr)
1526 )
1a548472 1527 return 1;
d6884e63 1528 else
1a548472 1529 return 0;
d6884e63 1530 } else
1a548472 1531 return 0;
2e11a577
MD
1532}
1533
1534
4c48ba06
MD
1535static void
1536gc_sweep_freelist_start (scm_freelist_t *freelist)
1537{
1538 freelist->cells = SCM_EOL;
1539 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1540 freelist->clusters_allocated = 0;
4c48ba06
MD
1541 freelist->clusters = SCM_EOL;
1542 freelist->clustertail = &freelist->clusters;
1811ebce 1543 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1544 freelist->collected = 0;
1545}
1546
1547static void
1548gc_sweep_freelist_finish (scm_freelist_t *freelist)
1549{
1811ebce 1550 int collected;
4c48ba06 1551 *freelist->clustertail = freelist->cells;
3f5d82cd 1552 if (!SCM_NULLP (freelist->cells))
4c48ba06
MD
1553 {
1554 SCM c = freelist->cells;
22a52da1
DH
1555 SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
1556 SCM_SET_CELL_WORD_1 (c, SCM_EOL);
4c48ba06
MD
1557 freelist->collected +=
1558 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1559 }
b37fe1c5 1560 scm_gc_cells_collected += freelist->collected;
a00c95d9 1561
8fef55a8 1562 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1563 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1564 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1565 * size. This means that a too low yield is compensated by more
1566 * heap on the list which is currently doing most work, which is
1567 * just what we want.
1568 */
1811ebce 1569 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1570 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06 1571}
0f2d19dd 1572
d6884e63
ML
1573#define NEXT_DATA_CELL(ptr, span) \
1574 do { \
1575 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1576 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1577 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1578 : nxt__); \
1579 } while (0)
1580
a00c95d9 1581void
0f2d19dd 1582scm_gc_sweep ()
acf4331f 1583#define FUNC_NAME "scm_gc_sweep"
0f2d19dd
JB
1584{
1585 register SCM_CELLPTR ptr;
0f2d19dd 1586 register SCM nfreelist;
4c48ba06 1587 register scm_freelist_t *freelist;
0f2d19dd 1588 register long m;
0f2d19dd 1589 register int span;
15e9d186 1590 long i;
0f2d19dd
JB
1591 scm_sizet seg_size;
1592
0f2d19dd 1593 m = 0;
0f2d19dd 1594
4c48ba06
MD
1595 gc_sweep_freelist_start (&scm_master_freelist);
1596 gc_sweep_freelist_start (&scm_master_freelist2);
a00c95d9 1597
cf2d30f6 1598 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1599 {
4c48ba06 1600 register unsigned int left_to_collect;
4c48ba06 1601 register scm_sizet j;
15e9d186 1602
cf2d30f6
JB
1603 /* Unmarked cells go onto the front of the freelist this heap
1604 segment points to. Rather than updating the real freelist
1605 pointer as we go along, we accumulate the new head in
1606 nfreelist. Then, if it turns out that the entire segment is
1607 free, we free (i.e., malloc's free) the whole segment, and
1608 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1609 freelist = scm_heap_table[i].freelist;
1610 nfreelist = freelist->cells;
4c48ba06 1611 left_to_collect = freelist->left_to_collect;
945fec60 1612 span = scm_heap_table[i].span;
cf2d30f6 1613
a00c95d9
ML
1614 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1615 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
c9b0d4b0 1616
d6884e63
ML
1617 /* use only data cells in seg_size */
1618 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1619
c9b0d4b0
ML
1620 scm_gc_cells_swept += seg_size;
1621
0f2d19dd
JB
1622 for (j = seg_size + span; j -= span; ptr += span)
1623 {
d6884e63 1624 SCM scmptr;
96f6f4ae 1625
d6884e63 1626 if (SCM_GC_IN_CARD_HEADERP (ptr))
0f2d19dd 1627 {
d6884e63
ML
1628 SCM_CELLPTR nxt;
1629
1630 /* cheat here */
1631 nxt = ptr;
1632 NEXT_DATA_CELL (nxt, span);
1633 j += span;
1634
1635 ptr = nxt - span;
1636 continue;
1637 }
1638
1639 scmptr = PTR2SCM (ptr);
1640
1641 if (SCM_GCMARKP (scmptr))
1642 continue;
7bb8eac7 1643
d6884e63
ML
1644 switch SCM_TYP7 (scmptr)
1645 {
0f2d19dd 1646 case scm_tcs_cons_gloc:
0f2d19dd 1647 {
c8045e8d
DH
1648 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1649 * struct or a gloc. See the corresponding comment in
1650 * scm_gc_mark.
1651 */
7445e0e8
MD
1652 scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
1653 - scm_tc3_cons_gloc);
1654 /* access as struct */
1655 scm_bits_t * vtable_data = (scm_bits_t *) word0;
d6884e63 1656 if (vtable_data[scm_vtable_index_vcell] == 0)
0f2d19dd 1657 {
7445e0e8
MD
1658 /* Structs need to be freed in a special order.
1659 * This is handled by GC C hooks in struct.c.
1660 */
1661 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1662 scm_structs_to_free = scmptr;
7bb8eac7 1663 continue;
c8045e8d 1664 }
7445e0e8 1665 /* fall through so that scmptr gets collected */
0f2d19dd
JB
1666 }
1667 break;
1668 case scm_tcs_cons_imcar:
1669 case scm_tcs_cons_nimcar:
1670 case scm_tcs_closures:
e641afaf 1671 case scm_tc7_pws:
0f2d19dd
JB
1672 break;
1673 case scm_tc7_wvect:
b5c2579a 1674 m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
06ee04b2 1675 scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
d6884e63 1676 break;
0f2d19dd 1677 case scm_tc7_vector:
1b9be268
DH
1678 {
1679 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1680 if (length > 0)
1681 {
1682 m += length * sizeof (scm_bits_t);
1683 scm_must_free (SCM_VECTOR_BASE (scmptr));
1684 }
1685 break;
1686 }
0f2d19dd
JB
1687#ifdef CCLO
1688 case scm_tc7_cclo:
b5c2579a 1689 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
06ee04b2 1690 scm_must_free (SCM_CCLO_BASE (scmptr));
0f2d19dd 1691 break;
06ee04b2 1692#endif
afe5177e 1693#ifdef HAVE_ARRAYS
0f2d19dd 1694 case scm_tc7_bvect:
93778877
DH
1695 {
1696 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
1697 if (length > 0)
1698 {
1699 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1700 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1701 }
1702 }
06ee04b2 1703 break;
0f2d19dd 1704 case scm_tc7_byvect:
0f2d19dd
JB
1705 case scm_tc7_ivect:
1706 case scm_tc7_uvect:
0f2d19dd 1707 case scm_tc7_svect:
5c11cc9d 1708#ifdef HAVE_LONG_LONGS
0f2d19dd 1709 case scm_tc7_llvect:
0f2d19dd
JB
1710#endif
1711 case scm_tc7_fvect:
0f2d19dd 1712 case scm_tc7_dvect:
0f2d19dd 1713 case scm_tc7_cvect:
d1ca2c64 1714 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
06ee04b2
DH
1715 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1716 break;
afe5177e 1717#endif
0f2d19dd 1718 case scm_tc7_substring:
0f2d19dd
JB
1719 break;
1720 case scm_tc7_string:
b5c2579a 1721 m += SCM_STRING_LENGTH (scmptr) + 1;
f151f912
DH
1722 scm_must_free (SCM_STRING_CHARS (scmptr));
1723 break;
28b06554 1724 case scm_tc7_symbol:
b5c2579a 1725 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
f151f912 1726 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
0f2d19dd 1727 break;
0f2d19dd 1728 case scm_tcs_subrs:
d6884e63 1729 /* the various "subrs" (primitives) are never freed */
0f2d19dd
JB
1730 continue;
1731 case scm_tc7_port:
0f2d19dd
JB
1732 if SCM_OPENP (scmptr)
1733 {
1734 int k = SCM_PTOBNUM (scmptr);
7a7f7c53 1735#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1736 if (!(k < scm_numptob))
7a7f7c53
DH
1737 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1738#endif
0f2d19dd 1739 /* Keep "revealed" ports alive. */
945fec60 1740 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1741 continue;
1742 /* Yes, I really do mean scm_ptobs[k].free */
1743 /* rather than ftobs[k].close. .close */
1744 /* is for explicit CLOSE-PORT by user */
84af0382 1745 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1746 SCM_SETSTREAM (scmptr, 0);
1747 scm_remove_from_port_table (scmptr);
1748 scm_gc_ports_collected++;
22a52da1 1749 SCM_CLR_PORT_OPEN_FLAG (scmptr);
0f2d19dd
JB
1750 }
1751 break;
1752 case scm_tc7_smob:
d6884e63 1753 switch SCM_TYP16 (scmptr)
0f2d19dd
JB
1754 {
1755 case scm_tc_free_cell:
acb0a19c 1756 case scm_tc16_real:
0f2d19dd
JB
1757 break;
1758#ifdef SCM_BIGDIG
acb0a19c 1759 case scm_tc16_big:
0f2d19dd 1760 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
06ee04b2
DH
1761 scm_must_free (SCM_BDIGITS (scmptr));
1762 break;
0f2d19dd 1763#endif /* def SCM_BIGDIG */
acb0a19c 1764 case scm_tc16_complex:
06ee04b2 1765 m += sizeof (scm_complex_t);
405aaef9 1766 scm_must_free (SCM_COMPLEX_MEM (scmptr));
06ee04b2 1767 break;
0f2d19dd 1768 default:
0f2d19dd
JB
1769 {
1770 int k;
1771 k = SCM_SMOBNUM (scmptr);
7a7f7c53 1772#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
0f2d19dd 1773 if (!(k < scm_numsmob))
7a7f7c53
DH
1774 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1775#endif
1776 if (scm_smobs[k].free)
1777 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1778 break;
1779 }
1780 }
1781 break;
1782 default:
acf4331f 1783 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1784 }
7bb8eac7 1785
4c48ba06 1786 if (!--left_to_collect)
4a4c9785 1787 {
22a52da1 1788 SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
4c48ba06
MD
1789 *freelist->clustertail = scmptr;
1790 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1791
4a4c9785 1792 nfreelist = SCM_EOL;
4c48ba06
MD
1793 freelist->collected += span * freelist->cluster_size;
1794 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1795 }
1796 else
4a4c9785
MD
1797 {
1798 /* Stick the new cell on the front of nfreelist. It's
1799 critical that we mark this cell as freed; otherwise, the
1800 conservative collector might trace it as some other type
1801 of object. */
54778cd3 1802 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
3f5d82cd 1803 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
4a4c9785
MD
1804 nfreelist = scmptr;
1805 }
0f2d19dd 1806 }
d6884e63 1807
0f2d19dd
JB
1808#ifdef GC_FREE_SEGMENTS
1809 if (n == seg_size)
1810 {
15e9d186
JB
1811 register long j;
1812
4c48ba06 1813 freelist->heap_size -= seg_size;
cf2d30f6
JB
1814 free ((char *) scm_heap_table[i].bounds[0]);
1815 scm_heap_table[i].bounds[0] = 0;
1816 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1817 scm_heap_table[j - 1] = scm_heap_table[j];
1818 scm_n_heap_segs -= 1;
cf2d30f6 1819 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1820 }
1821 else
1822#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1823 {
1824 /* Update the real freelist pointer to point to the head of
1825 the list of free cells we've built for this segment. */
4c48ba06 1826 freelist->cells = nfreelist;
4c48ba06 1827 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1828 }
1829
fca7547b 1830#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
1831 scm_map_free_list ();
1832#endif
4a4c9785 1833 }
a00c95d9 1834
4c48ba06
MD
1835 gc_sweep_freelist_finish (&scm_master_freelist);
1836 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1837
8ded62a3
MD
1838 /* When we move to POSIX threads private freelists should probably
1839 be GC-protected instead. */
1840 scm_freelist = SCM_EOL;
1841 scm_freelist2 = SCM_EOL;
a00c95d9 1842
b37fe1c5 1843 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f 1844 scm_gc_yield -= scm_cells_allocated;
0f2d19dd
JB
1845 scm_mallocated -= m;
1846 scm_gc_malloc_collected = m;
1847}
acf4331f 1848#undef FUNC_NAME
0f2d19dd
JB
1849
1850
1851\f
0f2d19dd
JB
1852/* {Front end to malloc}
1853 *
9d47a1e6
ML
1854 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1855 * scm_done_free
0f2d19dd 1856 *
c6c79933
GH
1857 * These functions provide services comparable to malloc, realloc, and
1858 * free. They should be used when allocating memory that will be under
1859 * control of the garbage collector, i.e., if the memory may be freed
1860 * during garbage collection.
1861 */
bc9d9bb2 1862
0f2d19dd
JB
/* scm_must_malloc
 * Return newly malloced storage or throw an error.
 *
 * The parameter WHAT is a string for error reporting.
 * If the threshold scm_mtrigger will be passed by this
 * allocation, or if the first call to malloc fails,
 * garbage collect -- on the presumption that some objects
 * using malloced storage may be collected.
 *
 * The limit scm_mtrigger may be raised by this allocation.
 */
void *
scm_must_malloc (scm_sizet size, const char *what)
{
  void *ptr;
  /* Projected total of malloced bytes once this allocation succeeds. */
  unsigned long nm = scm_mallocated + size;

  /* Fast path: still below the GC trigger -- just try to allocate. */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_register (ptr, what);
#endif
          return ptr;
        }
    }

  /* The trigger would be passed, or malloc failed: collect garbage in
   * the hope that some malloced storage is released, then retry.  */
  scm_igc (what);

  /* The collection may have changed scm_mallocated -- recompute. */
  nm = scm_mallocated + size;
  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger (with hysteresis) so the very next small
       * allocation does not force another collection.  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  /* Out of memory even after a full collection.  scm_memory_error
   * raises an error and does not return.  */
  scm_memory_error (what);
}
1915
1916
/* scm_must_realloc
 * is similar to scm_must_malloc.
 *
 * WHERE points to storage previously obtained from scm_must_malloc (or
 * scm_must_realloc); OLD_SIZE is its current size in bytes and SIZE the
 * requested new size.  WHAT is a string for error reporting.
 */
void *
scm_must_realloc (void *where,
                  scm_sizet old_size,
                  scm_sizet size,
                  const char *what)
{
  void *ptr;
  /* Projected malloc total after the resize (only the delta counts). */
  scm_sizet nm = scm_mallocated + size - old_size;

  /* Fast path: still below the GC trigger. */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_reregister (where, ptr, what);
#endif
          return ptr;
        }
    }

  /* Trigger passed or realloc failed: collect and retry. */
  scm_igc (what);

  /* The collection may have changed scm_mallocated -- recompute. */
  nm = scm_mallocated + size - old_size;
  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger with hysteresis, as in scm_must_malloc. */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  /* scm_memory_error raises an error and does not return. */
  scm_memory_error (what);
}
1963
acf4331f 1964
a00c95d9 1965void
07806695 1966scm_must_free (void *obj)
acf4331f 1967#define FUNC_NAME "scm_must_free"
0f2d19dd 1968{
bc9d9bb2
MD
1969#ifdef GUILE_DEBUG_MALLOC
1970 scm_malloc_unregister (obj);
1971#endif
0f2d19dd
JB
1972 if (obj)
1973 free (obj);
1974 else
acf4331f 1975 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
0f2d19dd 1976}
acf4331f
DH
1977#undef FUNC_NAME
1978
0f2d19dd 1979
c68296f8
MV
1980/* Announce that there has been some malloc done that will be freed
1981 * during gc. A typical use is for a smob that uses some malloced
1982 * memory but can not get it from scm_must_malloc (for whatever
1983 * reason). When a new object of this smob is created you call
1984 * scm_done_malloc with the size of the object. When your smob free
1985 * function is called, be sure to include this size in the return
9d47a1e6
ML
1986 * value.
1987 *
1988 * If you can't actually free the memory in the smob free function,
1989 * for whatever reason (like reference counting), you still can (and
1990 * should) report the amount of memory freed when you actually free it.
1991 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1992 * eh? Or even better, call scm_done_free. */
0f2d19dd 1993
c68296f8 1994void
6e8d25a6 1995scm_done_malloc (long size)
c68296f8
MV
1996{
1997 scm_mallocated += size;
1998
1999 if (scm_mallocated > scm_mtrigger)
2000 {
2001 scm_igc ("foreign mallocs");
2002 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2003 {
2004 if (scm_mallocated > scm_mtrigger)
2005 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2006 else
2007 scm_mtrigger += scm_mtrigger / 2;
2008 }
2009 }
2010}
2011
9d47a1e6
ML
/* Report that SIZE bytes previously announced with scm_done_malloc
 * have now actually been released back to the system.  */
void
scm_done_free (long size)
{
  scm_mallocated -= size;
}
2017
c68296f8
MV
2018
2019\f
0f2d19dd
JB
/* {Heap Segments}
 *
 * Each heap segment is an array of objects of a particular size.
 * Every segment has an associated (possibly shared) freelist.
 * A table of segment records is kept that records the upper and
 * lower extents of the segment;  this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
 * into valid segments at reasonable offsets). */

/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* Upper bound (in bytes) on the size of any single heap segment. */
scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Table of records describing each heap segment. */
scm_heap_seg_data_t * scm_heap_table = 0;
/* Capacity of scm_heap_table (number of slots allocated, not in use). */
static unsigned int heap_segment_table_size = 0;
/* Number of slots of scm_heap_table currently in use. */
int scm_n_heap_segs = 0;
2046
/* init_heap_seg
 * initializes a new heap segment and returns the number of objects it contains.
 *
 * The segment origin and segment size in bytes are input parameters.
 * The freelist is both input and output.
 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record and that the markbit space was reserved
 * for all the cards in this segment.
 */

/* Attach a fresh mark bit-vector to CARD and, for two-cell segments,
 * flag the card as holding double cells.  */
#define INIT_CARD(card, span) \
  do { \
    SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
    if ((span) == 2) \
      SCM_GC_SET_CARD_DOUBLECELL (card); \
  } while (0)

static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Align the begin ptr up.
   */
  ptr = SCM_GC_CARD_UP (seg_org);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);

  /* Find the right place and insert the segment record.  The table is
   * kept sorted by segment base address.
   */
  for (new_seg_index = 0;
       ( (new_seg_index < scm_n_heap_segs)
         && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    int i;
    /* Shift later records up one slot to make room. */
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;

    NEXT_DATA_CELL (ptr, span);
    while (ptr < seg_end)
      {
        scm_cell *nxt = ptr;
        scm_cell *prv = NULL;
        scm_cell *last_card = NULL;
        int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
        NEXT_DATA_CELL(nxt, span);

        /* Allocate cluster spine
         */
        *clusterp = PTR2SCM (ptr);
        SCM_SETCAR (*clusterp, PTR2SCM (nxt));
        clusterp = SCM_CDRLOC (*clusterp);
        ptr = nxt;

        /* Thread the cluster's data cells into a free list, initializing
         * each card header the first time a cell on that card is seen.  */
        while (n_data_cells--)
          {
            scm_cell *card = SCM_GC_CELL_CARD (ptr);
            SCM scmptr = PTR2SCM (ptr);
            nxt = ptr;
            NEXT_DATA_CELL (nxt, span);
            prv = ptr;

            if (card != last_card)
              {
                INIT_CARD (card, span);
                last_card = card;
              }

            SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
            SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));

            ptr = nxt;
          }

        /* Terminate this cluster's free list. */
        SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
      }

    /* sanity check */
    {
      scm_cell *ref = seg_end;
      NEXT_DATA_CELL (ref, span);
      if (ref != ptr)
        /* [cmm] looks like the segment size doesn't divide cleanly by
           cluster size. bad cmm! */
        abort();
    }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
2178
a00c95d9
ML
2179static scm_sizet
2180round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2181{
2182 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2183
2184 return
2185 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2186 + ALIGNMENT_SLACK (freelist);
2187}
2188
a00c95d9 2189static void
b6efc951 2190alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
acf4331f 2191#define FUNC_NAME "alloc_some_heap"
0f2d19dd 2192{
0f2d19dd 2193 SCM_CELLPTR ptr;
b37fe1c5 2194 long len;
a00c95d9 2195
9d47a1e6 2196 if (scm_gc_heap_lock)
b6efc951
DH
2197 {
2198 /* Critical code sections (such as the garbage collector) aren't
2199 * supposed to add heap segments.
2200 */
2201 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
2202 abort ();
2203 }
0f2d19dd 2204
9d47a1e6 2205 if (scm_n_heap_segs == heap_segment_table_size)
b6efc951
DH
2206 {
2207 /* We have to expand the heap segment table to have room for the new
2208 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2209 * init_heap_seg only if the allocation of the segment itself succeeds.
2210 */
2211 unsigned int new_table_size = scm_n_heap_segs + 1;
2212 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2213 scm_heap_seg_data_t * new_heap_table;
2214
2215 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2216 realloc ((char *)scm_heap_table, size)));
2217 if (!new_heap_table)
2218 {
2219 if (error_policy == abort_on_error)
2220 {
2221 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2222 abort ();
2223 }
2224 else
2225 {
2226 return;
2227 }
2228 }
2229 else
2230 {
2231 scm_heap_table = new_heap_table;
2232 heap_segment_table_size = new_table_size;
2233 }
2234 }
0f2d19dd 2235
0f2d19dd 2236 /* Pick a size for the new heap segment.
a00c95d9 2237 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
2238 * gc.h
2239 */
4c48ba06 2240 {
1811ebce
MD
2241 /* Assure that the new segment is predicted to be large enough.
2242 *
2243 * New yield should at least equal GC fraction of new heap size, i.e.
2244 *
2245 * y + dh > f * (h + dh)
2246 *
2247 * y : yield
8fef55a8 2248 * f : min yield fraction
1811ebce
MD
2249 * h : heap size
2250 * dh : size of new heap segment
2251 *
2252 * This gives dh > (f * h - y) / (1 - f)
bda1446c 2253 */
8fef55a8 2254 int f = freelist->min_yield_fraction;
1811ebce
MD
2255 long h = SCM_HEAP_SIZE;
2256 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2257 len = SCM_EXPHEAP (freelist->heap_size);
2258#ifdef DEBUGINFO
2259 fprintf (stderr, "(%d < %d)", len, min_cells);
2260#endif
2261 if (len < min_cells)
1811ebce 2262 len = min_cells + freelist->cluster_size;
4c48ba06 2263 len *= sizeof (scm_cell);
1811ebce
MD
2264 /* force new sampling */
2265 freelist->collected = LONG_MAX;
4c48ba06 2266 }
a00c95d9 2267
4c48ba06
MD
2268 if (len > scm_max_segment_size)
2269 len = scm_max_segment_size;
0f2d19dd
JB
2270
2271 {
2272 scm_sizet smallest;
2273
a00c95d9 2274 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9 2275
0f2d19dd 2276 if (len < smallest)
a00c95d9 2277 len = smallest;
0f2d19dd
JB
2278
2279 /* Allocate with decaying ambition. */
2280 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2281 && (len >= smallest))
2282 {
1811ebce 2283 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2284 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2285 if (ptr)
2286 {
a00c95d9 2287 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2288 return;
2289 }
2290 len /= 2;
2291 }
2292 }
2293
b6efc951
DH
2294 if (error_policy == abort_on_error)
2295 {
2296 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2297 abort ();
2298 }
0f2d19dd 2299}
acf4331f 2300#undef FUNC_NAME
0f2d19dd
JB
2301
2302
/* Walk every cell of every heap segment and flush gloc caches that
 * refer to NAME (or all glocs when NAME is #t), by rewriting the gloc
 * cell's first word back to the plain symbol.  */
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
            "Flushes the glocs for @var{name}, or all glocs if @var{name}\n"
            "is @code{#t}.")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
        {
          SCM cell = PTR2SCM (p);
          if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
            {
              /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
               * struct cell.  See the corresponding comment in scm_gc_mark.
               */
              scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
              SCM gloc_car = SCM_PACK (word0); /* access as gloc */
              SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
              /* vcell values 0 and 1 mark struct cells, not glocs --
               * NOTE(review): inferred from the FIXME above; confirm
               * against scm_gc_mark.  */
              if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
                  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
                {
                  SCM_SET_CELL_OBJECT_0 (cell, name);
                }
            }
          ++p;
        }
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
0f2d19dd
JB
2344
2345
2346\f
/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _behind_ the last code in your function, that
 * depends on the scheme object to exist.
 *
 * Example: We want to make sure, that the string object str does not get
 * garbage collected during the execution of 'some_function', because
 * otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory.  To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
 * call to 'some_function'.  Note that this would not be necessary if str was
 * used anyway after the call to 'some_function'.
 *   char *chars = SCM_STRING_CHARS (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

void
scm_remember_upto_here_1 (SCM obj)
{
  /* Empty. Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1, SCM obj2)
{
  /* Empty. Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj, ...)
{
  /* Empty. Protects any number of objects from garbage collection. */
}
2393
2394
#if (SCM_DEBUG_DEPRECATED == 0)

/* Deprecated predecessor of the scm_remember_upto_here* family; kept
 * only while SCM_DEBUG_DEPRECATED is 0.  */
void
scm_remember (SCM *ptr)
{
  /* empty */
}

#endif /* SCM_DEBUG_DEPRECATED == 0 */
1cc91f1b 2404
/*
  These crazy functions prevent garbage collection
  of arguments after the first argument by
  ensuring they remain live throughout the
  function because they are used in the last
  line of the code block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */

/* Return ELT; the trailing varargs exist solely to keep their values
 * live for the conservative GC until this call.  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

/* Same trick as scm_return_first, for a leading int. */
int
scm_return_first_int (int i, ...)
{
  return i;
}
2424
0f2d19dd 2425
/* Protect OBJ from garbage collection for the remaining lifetime of
 * the process by consing it onto the permanent-objects list (which is
 * itself a GC root).  Returns OBJ.  */
SCM
scm_permanent_object (SCM obj)
{
  /* Interrupt barrier guards the shared scm_permobjs list. */
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2434
2435
7bd4fbe2
MD
2436/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2437 other references are dropped, until the object is unprotected by calling
2438 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2439 i. e. it is possible to protect the same object several times, but it is
2440 necessary to unprotect the object the same number of times to actually get
2441 the object unprotected. It is an error to unprotect an object more often
2442 than it has been protected before. The function scm_protect_object returns
2443 OBJ.
2444*/
2445
2446/* Implementation note: For every object X, there is a counter which
2447 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2448*/
686765af 2449
ef290276 2450SCM
6e8d25a6 2451scm_protect_object (SCM obj)
ef290276 2452{
686765af 2453 SCM handle;
9d47a1e6 2454
686765af 2455 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2456 SCM_REDEFER_INTS;
9d47a1e6 2457
0f0f0899
MD
2458 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2459 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
9d47a1e6 2460
2dd6a83a 2461 SCM_REALLOW_INTS;
9d47a1e6 2462
ef290276
JB
2463 return obj;
2464}
2465
2466
2467/* Remove any protection for OBJ established by a prior call to
dab7f566 2468 scm_protect_object. This function returns OBJ.
ef290276 2469
dab7f566 2470 See scm_protect_object for more information. */
ef290276 2471SCM
6e8d25a6 2472scm_unprotect_object (SCM obj)
ef290276 2473{
686765af 2474 SCM handle;
9d47a1e6 2475
686765af 2476 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2477 SCM_REDEFER_INTS;
9d47a1e6 2478
686765af 2479 handle = scm_hashq_get_handle (scm_protects, obj);
9d47a1e6 2480
22a52da1 2481 if (SCM_FALSEP (handle))
686765af 2482 {
0f0f0899
MD
2483 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2484 abort ();
686765af 2485 }
6a199940
DH
2486 else
2487 {
2488 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2489 if (count == 0)
2490 scm_hashq_remove_x (scm_protects, obj);
2491 else
2492 SCM_SETCDR (handle, SCM_MAKINUM (count));
2493 }
686765af 2494
2dd6a83a 2495 SCM_REALLOW_INTS;
ef290276
JB
2496
2497 return obj;
2498}
2499
c45acc34
JB
2500int terminating;
2501
2502/* called on process termination. */
e52ceaac
MD
2503#ifdef HAVE_ATEXIT
2504static void
2505cleanup (void)
2506#else
2507#ifdef HAVE_ON_EXIT
51157deb
MD
2508extern int on_exit (void (*procp) (), int arg);
2509
e52ceaac
MD
2510static void
2511cleanup (int status, void *arg)
2512#else
2513#error Dont know how to setup a cleanup handler on your system.
2514#endif
2515#endif
c45acc34
JB
2516{
2517 terminating = 1;
2518 scm_flush_all_ports ();
2519}
ef290276 2520
0f2d19dd 2521\f
acb0a19c 2522static int
4c48ba06 2523make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
acb0a19c 2524{
a00c95d9 2525 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
d6884e63 2526
a00c95d9
ML
2527 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2528 rounded_size,
4c48ba06 2529 freelist))
acb0a19c 2530 {
a00c95d9
ML
2531 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2532 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2533 rounded_size,
4c48ba06 2534 freelist))
acb0a19c
MD
2535 return 1;
2536 }
2537 else
2538 scm_expmem = 1;
2539
8fef55a8
MD
2540 if (freelist->min_yield_fraction)
2541 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
b37fe1c5 2542 / 100);
8fef55a8 2543 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
a00c95d9 2544
acb0a19c
MD
2545 return 0;
2546}
2547
2548\f
4c48ba06
MD
2549static void
2550init_freelist (scm_freelist_t *freelist,
2551 int span,
2552 int cluster_size,
8fef55a8 2553 int min_yield)
4c48ba06
MD
2554{
2555 freelist->clusters = SCM_EOL;
2556 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2557 freelist->left_to_collect = 0;
2558 freelist->clusters_allocated = 0;
8fef55a8
MD
2559 freelist->min_yield = 0;
2560 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2561 freelist->span = span;
2562 freelist->collected = 0;
1811ebce 2563 freelist->collected_1 = 0;
4c48ba06
MD
2564 freelist->heap_size = 0;
2565}
2566
85db4a2c
DH
2567
/* Fetch environment variable VAR and parse it as a base-10 integer.
   Fall back to DEF when VAR is unset or does not begin with a number;
   trailing non-numeric characters are ignored.  */
static int
scm_i_getenv_int (const char *var, int def)
{
  const char *val = getenv (var);
  char *tail;
  long parsed;

  if (val == NULL)
    return def;
  parsed = strtol (val, &tail, 10);
  return (tail == val) ? def : parsed;
}
2581
2582
/* One-time initialization of the storage subsystem: protects roots,
 * configures the freelists from environment variables, allocates the
 * first heap segments, the port table and the bootstrap objects.
 * Returns 0 on success, 1 when an initial allocation failed.  */
int
scm_init_storage ()
{
  scm_sizet gc_trigger_1;
  scm_sizet gc_trigger_2;
  scm_sizet init_heap_size_1;
  scm_sizet init_heap_size_2;
  scm_sizet j;

  /* Clear the protected-roots array and block GC until boot is done. */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  /* Configure the two master freelists (single and double cells) from
   * the environment, falling back to compiled-in defaults.  */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
  init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
  gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
  init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
  scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Room for the two initial segments; grown later by alloc_some_heap. */
  scm_heap_table = ((scm_heap_seg_data_t *)
                    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
  init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports. */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

  /* Register the shutdown handler with whichever mechanism exists. */
#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  /* A circular list marking undefined values. */
  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_allocate_string (0);
  scm_nullvect = scm_c_make_vector (0, SCM_UNDEFINED);

#define DEFAULT_SYMHASH_SIZE 277
  scm_symhash = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
  scm_symhash_vars = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);

  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);

  return 0;
}
939794ce 2661
0f2d19dd
JB
2662\f
2663
939794ce
DH
/* Scheme-level hook run after every garbage collection. */
SCM scm_after_gc_hook;

/* The async object whose marking triggers gc_async_thunk. */
static SCM gc_async;

/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void * hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}
2691
2692
0f2d19dd
JB
/* Initialize the GC module: create the after-gc-hook and the async
 * that runs it, and register the C hook that marks that async after
 * each collection.  Also snarfs this file's Scheme bindings.  */
void
scm_init_gc ()
{
  SCM after_gc_thunk;

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */

  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

  after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
  gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#ifndef SCM_MAGIC_SNARFER
#include "libguile/gc.x"
#endif
}
89e00824 2713
56495472
ML
2714#endif /*MARK_DEPENDENCIES*/
2715
89e00824
ML
2716/*
2717 Local Variables:
2718 c-file-style: "gnu"
2719 End:
2720*/