Correct, update, improve and clean up a lot of docstrings in order to make
[bpt/guile.git] / libguile / gc.c
CommitLineData
22a52da1 1/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
a00c95d9 2 *
0f2d19dd
JB
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
a00c95d9 7 *
0f2d19dd
JB
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
a00c95d9 12 *
0f2d19dd
JB
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
82892bed
JB
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
0f2d19dd
JB
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
82892bed 40 * If you do not wish that, delete this exception notice. */
1bbd0b84
GB
41
42/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
37ddcaf6
MD
45/* #define DEBUGINFO */
46
56495472
ML
47/* SECTION: This code is compiled once.
48 */
49
50#ifndef MARK_DEPENDENCIES
51
0f2d19dd
JB
52\f
53#include <stdio.h>
e6e2e95a 54#include <errno.h>
783e7774 55#include <string.h>
e6e2e95a 56
a0599745 57#include "libguile/_scm.h"
0a7a7445 58#include "libguile/eval.h"
a0599745
MD
59#include "libguile/stime.h"
60#include "libguile/stackchk.h"
61#include "libguile/struct.h"
a0599745
MD
62#include "libguile/smob.h"
63#include "libguile/unif.h"
64#include "libguile/async.h"
65#include "libguile/ports.h"
66#include "libguile/root.h"
67#include "libguile/strings.h"
68#include "libguile/vectors.h"
801cb5e7 69#include "libguile/weaks.h"
686765af 70#include "libguile/hashtab.h"
ecf470a2 71#include "libguile/tags.h"
a0599745
MD
72
73#include "libguile/validate.h"
74#include "libguile/gc.h"
fce59c93 75
bc9d9bb2 76#ifdef GUILE_DEBUG_MALLOC
a0599745 77#include "libguile/debug-malloc.h"
bc9d9bb2
MD
78#endif
79
0f2d19dd 80#ifdef HAVE_MALLOC_H
95b88819 81#include <malloc.h>
0f2d19dd
JB
82#endif
83
84#ifdef HAVE_UNISTD_H
95b88819 85#include <unistd.h>
0f2d19dd
JB
86#endif
87
1cc91f1b
JB
88#ifdef __STDC__
89#include <stdarg.h>
90#define var_start(x, y) va_start(x, y)
91#else
92#include <varargs.h>
93#define var_start(x, y) va_start(x)
94#endif
95
0f2d19dd 96\f
406c7d90
DH
97
98unsigned int scm_gc_running_p = 0;
99
100\f
101
102#if (SCM_DEBUG_CELL_ACCESSES == 1)
103
61045190
DH
104scm_bits_t scm_tc16_allocated;
105
106/* Set this to != 0 if every cell that is accessed shall be checked:
107 */
108unsigned int scm_debug_cell_accesses_p = 1;
406c7d90
DH
109
110
111/* Assert that the given object is a valid reference to a valid cell. This
112 * test involves to determine whether the object is a cell pointer, whether
113 * this pointer actually points into a heap segment and whether the cell
114 * pointed to is not a free cell.
115 */
116void
117scm_assert_cell_valid (SCM cell)
118{
61045190
DH
119 static unsigned int already_running = 0;
120
121 if (scm_debug_cell_accesses_p && !already_running)
406c7d90 122 {
61045190 123 already_running = 1; /* set to avoid recursion */
406c7d90 124
9d47a1e6 125 if (!scm_cellp (cell))
406c7d90
DH
126 {
127 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
128 abort ();
129 }
130 else if (!scm_gc_running_p)
131 {
132 /* Dirk::FIXME:: During garbage collection there occur references to
133 free cells. This is allright during conservative marking, but
134 should not happen otherwise (I think). The case of free cells
135 accessed during conservative marking is handled in function
136 scm_mark_locations. However, there still occur accesses to free
137 cells during gc. I don't understand why this happens. If it is
138 a bug and gets fixed, the following test should also work while
139 gc is running.
140 */
141 if (SCM_FREE_CELL_P (cell))
142 {
143 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
144 abort ();
145 }
146 }
61045190 147 already_running = 0; /* re-enable */
406c7d90
DH
148 }
149}
150
151
152SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
153 (SCM flag),
1e6808ea
MG
154 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
155 "If @var{flag} is @code{#t}, cell access checking is enabled.\n"
156 "This procedure only exists when the compile-time flag\n"
157 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
406c7d90
DH
158#define FUNC_NAME s_scm_set_debug_cell_accesses_x
159{
160 if (SCM_FALSEP (flag)) {
161 scm_debug_cell_accesses_p = 0;
162 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
163 scm_debug_cell_accesses_p = 1;
164 } else {
165 SCM_WRONG_TYPE_ARG (1, flag);
166 }
167 return SCM_UNSPECIFIED;
168}
169#undef FUNC_NAME
170
171#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
172
173\f
174
0f2d19dd 175/* {heap tuning parameters}
a00c95d9 176 *
0f2d19dd
JB
177 * These are parameters for controlling memory allocation. The heap
178 * is the area out of which scm_cons, and object headers are allocated.
179 *
180 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
181 * 64 bit machine. The units of the _SIZE parameters are bytes.
182 * Cons pairs and object headers occupy one heap cell.
183 *
184 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
185 * allocated initially the heap will grow by half its current size
186 * each subsequent time more heap is needed.
187 *
188 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
189 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
190 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
191 * is in scm_init_storage() and alloc_some_heap() in sys.c
a00c95d9 192 *
0f2d19dd
JB
193 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
194 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
195 *
196 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
197 * is needed.
198 *
199 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
a00c95d9 200 * trigger a GC.
6064dcc6
MV
201 *
202 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
203 * reclaimed by a GC triggered by must_malloc. If less than this is
204 * reclaimed, the trigger threshold is raised. [I don't know what a
205 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
a00c95d9 206 * work around a oscillation that caused almost constant GC.]
0f2d19dd
JB
207 */
208
8fef55a8
MD
209/*
210 * Heap size 45000 and 40% min yield gives quick startup and no extra
211 * heap allocation. Having higher values on min yield may lead to
212 * large heaps, especially if code behaviour is varying its
213 * maximum consumption between different freelists.
214 */
d6884e63
ML
215
216#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
217#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
218#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
219int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
220 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
aeacfc8f 221int scm_default_min_yield_1 = 40;
4c48ba06 222
d6884e63
ML
223#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
224int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
225 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
4c48ba06
MD
226/* The following value may seem large, but note that if we get to GC at
227 * all, this means that we have a numerically intensive application
228 */
aeacfc8f 229int scm_default_min_yield_2 = 40;
4c48ba06 230
aeacfc8f 231int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
4c48ba06 232
d6884e63 233#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
0f2d19dd
JB
234#ifdef _QC
235# define SCM_HEAP_SEG_SIZE 32768L
236#else
237# ifdef sequent
4c48ba06 238# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
0f2d19dd 239# else
4c48ba06 240# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
0f2d19dd
JB
241# endif
242#endif
4c48ba06 243/* Make heap grow with factor 1.5 */
4a4c9785 244#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
0f2d19dd 245#define SCM_INIT_MALLOC_LIMIT 100000
6064dcc6 246#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
0f2d19dd 247
d6884e63
ML
248/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
249 aligned inner bounds for allocated storage */
0f2d19dd
JB
250
251#ifdef PROT386
252/*in 386 protected mode we must only adjust the offset */
a00c95d9
ML
253# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
254# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
0f2d19dd
JB
255#else
256# ifdef _UNICOS
a00c95d9
ML
257# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
258# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
0f2d19dd 259# else
a00c95d9
ML
260# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
261# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
0f2d19dd
JB
262# endif /* UNICOS */
263#endif /* PROT386 */
264
ecf470a2
ML
265#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
266
d6884e63
ML
267#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
268#define CLUSTER_SIZE_IN_BYTES(freelist) \
269 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
0f2d19dd
JB
270
271\f
945fec60 272/* scm_freelists
0f2d19dd 273 */
945fec60 274
a00c95d9
ML
/* Book-keeping for one freelist of heap cells.  A "master" freelist
 * owns clusters of cells and per-GC statistics; the per-interpreter
 * freelist variables (scm_freelist, scm_freelist2) hold the cells
 * currently being handed out. */
typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;
309
4a4c9785
MD
310SCM scm_freelist = SCM_EOL;
311scm_freelist_t scm_master_freelist = {
b37fe1c5 312 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
4a4c9785
MD
313};
314SCM scm_freelist2 = SCM_EOL;
315scm_freelist_t scm_master_freelist2 = {
b37fe1c5 316 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
4a4c9785 317};
0f2d19dd
JB
318
319/* scm_mtrigger
320 * is the number of bytes of must_malloc allocation needed to trigger gc.
321 */
15e9d186 322unsigned long scm_mtrigger;
0f2d19dd 323
0f2d19dd
JB
324/* scm_gc_heap_lock
325 * If set, don't expand the heap. Set only during gc, during which no allocation
326 * is supposed to take place anyway.
327 */
328int scm_gc_heap_lock = 0;
329
330/* GC Blocking
331 * Don't pause for collection if this is set -- just
332 * expand the heap.
333 */
0f2d19dd
JB
334int scm_block_gc = 1;
335
0f2d19dd
JB
336/* During collection, this accumulates objects holding
337 * weak references.
338 */
ab4bef85 339SCM scm_weak_vectors;
0f2d19dd 340
7445e0e8
MD
341/* During collection, this accumulates structures which are to be freed.
342 */
343SCM scm_structs_to_free;
344
0f2d19dd
JB
345/* GC Statistics Keeping
346 */
347unsigned long scm_cells_allocated = 0;
a5c314c8 348long scm_mallocated = 0;
b37fe1c5 349unsigned long scm_gc_cells_collected;
8b0d194f 350unsigned long scm_gc_yield;
37ddcaf6 351static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
0f2d19dd
JB
352unsigned long scm_gc_malloc_collected;
353unsigned long scm_gc_ports_collected;
0f2d19dd 354unsigned long scm_gc_time_taken = 0;
c9b0d4b0
ML
355static unsigned long t_before_gc;
356static unsigned long t_before_sweep;
357unsigned long scm_gc_mark_time_taken = 0;
358unsigned long scm_gc_sweep_time_taken = 0;
359unsigned long scm_gc_times = 0;
360unsigned long scm_gc_cells_swept = 0;
361double scm_gc_cells_marked_acc = 0.;
362double scm_gc_cells_swept_acc = 0.;
0f2d19dd
JB
363
364SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
365SCM_SYMBOL (sym_heap_size, "cell-heap-size");
366SCM_SYMBOL (sym_mallocated, "bytes-malloced");
367SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
368SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
369SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
c9b0d4b0
ML
370SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
371SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
372SCM_SYMBOL (sym_times, "gc-times");
373SCM_SYMBOL (sym_cells_marked, "cells-marked");
374SCM_SYMBOL (sym_cells_swept, "cells-swept");
0f2d19dd 375
/* Book-keeping record for one contiguous segment of heap cells. */
typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;
0f2d19dd
JB
388
389
390
945fec60 391static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
b6efc951
DH
392
393typedef enum { return_on_error, abort_on_error } policy_on_error;
394static void alloc_some_heap (scm_freelist_t *, policy_on_error);
0f2d19dd
JB
395
396
d6884e63
ML
397#define SCM_HEAP_SIZE \
398 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
399#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
400
401#define BVEC_GROW_SIZE 256
402#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
403#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
404
405/* mark space allocation */
406
407typedef struct scm_mark_space_t
408{
409 scm_c_bvec_limb_t *bvec_space;
410 struct scm_mark_space_t *next;
411} scm_mark_space_t;
412
413static scm_mark_space_t *current_mark_space;
414static scm_mark_space_t **mark_space_ptr;
415static int current_mark_space_offset;
416static scm_mark_space_t *mark_space_head;
417
418static scm_c_bvec_limb_t *
419get_bvec ()
db4b4ca6 420#define FUNC_NAME "get_bvec"
d6884e63
ML
421{
422 scm_c_bvec_limb_t *res;
423
424 if (!current_mark_space)
425 {
426 SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
427 if (!current_mark_space)
db4b4ca6 428 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
429
430 current_mark_space->bvec_space = NULL;
431 current_mark_space->next = NULL;
432
433 *mark_space_ptr = current_mark_space;
434 mark_space_ptr = &(current_mark_space->next);
435
436 return get_bvec ();
437 }
438
439 if (!(current_mark_space->bvec_space))
440 {
441 SCM_SYSCALL (current_mark_space->bvec_space =
442 (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
443 if (!(current_mark_space->bvec_space))
db4b4ca6 444 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
d6884e63
ML
445
446 current_mark_space_offset = 0;
447
448 return get_bvec ();
449 }
450
451 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
452 {
453 current_mark_space = NULL;
454
455 return get_bvec ();
456 }
457
458 res = current_mark_space->bvec_space + current_mark_space_offset;
459 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
460
461 return res;
462}
db4b4ca6
DH
463#undef FUNC_NAME
464
d6884e63
ML
465
466static void
467clear_mark_space ()
468{
469 scm_mark_space_t *ms;
470
471 for (ms = mark_space_head; ms; ms = ms->next)
472 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
473}
474
475
0f2d19dd 476\f
cf2d30f6
JB
477/* Debugging functions. */
478
bb2c57fa 479#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6
JB
480
481/* Return the number of the heap segment containing CELL. */
482static int
483which_seg (SCM cell)
484{
485 int i;
486
487 for (i = 0; i < scm_n_heap_segs; i++)
195e6201
DH
488 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
489 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
cf2d30f6
JB
490 return i;
491 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
945fec60 492 SCM_UNPACK (cell));
cf2d30f6
JB
493 abort ();
494}
495
496
8ded62a3
MD
497static void
498map_free_list (scm_freelist_t *master, SCM freelist)
499{
500 int last_seg = -1, count = 0;
501 SCM f;
a00c95d9 502
3f5d82cd 503 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
8ded62a3
MD
504 {
505 int this_seg = which_seg (f);
506
507 if (this_seg != last_seg)
508 {
509 if (last_seg != -1)
510 fprintf (stderr, " %5d %d-cells in segment %d\n",
511 count, master->span, last_seg);
512 last_seg = this_seg;
513 count = 0;
514 }
515 count++;
516 }
517 if (last_seg != -1)
518 fprintf (stderr, " %5d %d-cells in segment %d\n",
519 count, master->span, last_seg);
520}
cf2d30f6 521
SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{map-free-list} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;

  /* Summarize the heap segments: "span:size" for each, size measured
     in cells (difference of the segment's bound pointers). */
  fprintf (stderr, "%d segments total (%d:%d",
           scm_n_heap_segs,
           scm_heap_table[0].span,
           scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
             scm_heap_table[i].span,
             scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
  /* Then dump per-segment free-cell counts for both freelists. */
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
cf2d30f6 546
4c48ba06
MD
547static int last_cluster;
548static int last_size;
549
5384bc5b
MD
550static int
551free_list_length (char *title, int i, SCM freelist)
552{
553 SCM ls;
554 int n = 0;
3f5d82cd
DH
555 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
556 if (SCM_FREE_CELL_P (ls))
5384bc5b
MD
557 ++n;
558 else
559 {
560 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
561 abort ();
562 }
4c48ba06
MD
563 if (n != last_size)
564 {
565 if (i > 0)
566 {
567 if (last_cluster == i - 1)
568 fprintf (stderr, "\t%d\n", last_size);
569 else
570 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
571 }
572 if (i >= 0)
573 fprintf (stderr, "%s %d", title, i);
574 else
575 fprintf (stderr, "%s\t%d\n", title, n);
576 last_cluster = i;
577 last_size = n;
578 }
5384bc5b
MD
579 return n;
580}
581
582static void
583free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
584{
585 SCM clusters;
4c48ba06 586 int i = 0, len, n = 0;
5384bc5b
MD
587 fprintf (stderr, "%s\n\n", title);
588 n += free_list_length ("free list", -1, freelist);
589 for (clusters = master->clusters;
590 SCM_NNULLP (clusters);
591 clusters = SCM_CDR (clusters))
4c48ba06
MD
592 {
593 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
594 n += len;
595 }
596 if (last_cluster == i - 1)
597 fprintf (stderr, "\t%d\n", last_size);
598 else
599 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
600 fprintf (stderr, "\ntotal %d objects\n\n", n);
5384bc5b
MD
601}
602
SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{free-list-length} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  /* Dump cluster-by-cluster lengths of both master freelists. */
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
615
bb2c57fa
MD
616#endif
617
618#ifdef GUILE_DEBUG_FREELIST
cf2d30f6 619
d3dd80ab
MG
620/* Non-zero if freelist debugging is in effect. Set this via
621 `gc-set-debug-check-freelist!'. */
622static int scm_debug_check_freelist = 0;
623
cf2d30f6
JB
624/* Number of calls to SCM_NEWCELL since startup. */
625static unsigned long scm_newcell_count;
acb0a19c 626static unsigned long scm_newcell2_count;
cf2d30f6
JB
627
628/* Search freelist for anything that isn't marked as a free cell.
629 Abort if we find something. */
8ded62a3
MD
630static void
631scm_check_freelist (SCM freelist)
632{
633 SCM f;
634 int i = 0;
635
3f5d82cd
DH
636 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
637 if (!SCM_FREE_CELL_P (f))
8ded62a3
MD
638 {
639 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
640 scm_newcell_count, i);
8ded62a3
MD
641 abort ();
642 }
643}
cf2d30f6 644
SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#t}, check the freelist for consistency\n"
            "on each cell allocation.  This procedure only exists when the\n"
            "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* [cmm] I did a double-take when I read this code the first time.
     well, FWIW. */
  /* The VALIDATE macro both type-checks FLAG as a boolean and copies
     its C value into scm_debug_check_freelist. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
25748c78
GB
658
659
4a4c9785
MD
660SCM
661scm_debug_newcell (void)
662{
663 SCM new;
664
665 scm_newcell_count++;
666 if (scm_debug_check_freelist)
667 {
8ded62a3 668 scm_check_freelist (scm_freelist);
4a4c9785
MD
669 scm_gc();
670 }
671
672 /* The rest of this is supposed to be identical to the SCM_NEWCELL
673 macro. */
3f5d82cd 674 if (SCM_NULLP (scm_freelist))
4a4c9785
MD
675 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
676 else
677 {
678 new = scm_freelist;
3f5d82cd 679 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
4a4c9785
MD
680 }
681
682 return new;
683}
684
685SCM
686scm_debug_newcell2 (void)
687{
688 SCM new;
689
690 scm_newcell2_count++;
691 if (scm_debug_check_freelist)
692 {
8ded62a3 693 scm_check_freelist (scm_freelist2);
4a4c9785
MD
694 scm_gc ();
695 }
696
697 /* The rest of this is supposed to be identical to the SCM_NEWCELL
698 macro. */
3f5d82cd 699 if (SCM_NULLP (scm_freelist2))
4a4c9785
MD
700 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
701 else
702 {
703 new = scm_freelist2;
3f5d82cd 704 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
4a4c9785
MD
705 }
706
707 return new;
708}
709
fca7547b 710#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
711
712\f
0f2d19dd 713
b37fe1c5
MD
714static unsigned long
715master_cells_allocated (scm_freelist_t *master)
716{
d6884e63 717 /* the '- 1' below is to ignore the cluster spine cells. */
b37fe1c5
MD
718 int objects = master->clusters_allocated * (master->cluster_size - 1);
719 if (SCM_NULLP (master->clusters))
720 objects -= master->left_to_collect;
721 return master->span * objects;
722}
723
724static unsigned long
725freelist_length (SCM freelist)
726{
727 int n;
3f5d82cd 728 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
b37fe1c5
MD
729 ++n;
730 return n;
731}
732
733static unsigned long
734compute_cells_allocated ()
735{
736 return (scm_cells_allocated
737 + master_cells_allocated (&scm_master_freelist)
738 + master_cells_allocated (&scm_master_freelist2)
739 - scm_master_freelist.span * freelist_length (scm_freelist)
740 - scm_master_freelist2.span * freelist_length (scm_freelist2));
741}
b37fe1c5 742
0f2d19dd
JB
743/* {Scheme Interface to GC}
744 */
745
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  long int local_scm_gc_times;
  long int local_scm_gc_mark_time_taken;
  long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  /* Block GC while we build the segment list: the conses below could
     otherwise trigger a collection that adds heap segments under us. */
  ++scm_block_gc;

 retry:
  /* Build a list of (upper-bound . lower-bound) pairs, one per heap
     segment.  If the segment count changed while consing (heap grew),
     start over so the snapshot is consistent. */
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                        scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                        scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                        scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                        scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                        scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                        scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                        scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                        scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
                        scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
                        scm_cons (sym_heap_segments, heap_segs),
                        SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
0f2d19dd
JB
813
814
c9b0d4b0
ML
/* Reset the per-collection statistics counters at the start of a GC.
   WHAT names the reason for the collection; it is unused here
   (presumably consumed elsewhere -- TODO confirm against scm_igc). */
static void
gc_start_stats (const char *what)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  /* Save the previous yield so adjust_min_yield can take the larger of
     the last two. */
  scm_gc_yield_1 = scm_gc_yield;
  /* Yield baseline: cells allocated before this collection (global
     count plus the masters' cluster output). */
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
828
939794ce 829
c9b0d4b0
ML
/* Accumulate timing and sweep statistics at the end of a collection.
   Relies on t_before_gc set by gc_start_stats and t_before_sweep being
   set when the sweep phase began (outside this view -- TODO confirm). */
static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  /* Swept cells that were not collected were marked, i.e. still live. */
  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}
841
842
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  /* The raw SCM word of OBJ (its address, for heap objects) serves as
     the unique integer. */
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME
0f2d19dd
JB
852
853
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all of SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  /* Block interrupts around the collection proper. */
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
0f2d19dd
JB
866
867
868\f
869/* {C Interface For When GC is Triggered}
870 */
871
/* After a collection, raise FREELIST's min_yield when the total yield
 * fell short of min_yield_fraction percent of the heap, so a future
 * shortfall grows the heap instead of triggering more unproductive
 * collections. */
static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   *  by computing an average over multiple GC:s.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
               scm_cells_allocated,
               delta);
#endif
      /* Only ratchet min_yield upward; a negative delta (yield was
         ample) leaves it unchanged. */
      if (delta > 0)
        freelist->min_yield += delta;
    }
}
903
b6efc951 904
4a4c9785 905/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
906 * common while the freelist will be individual for each thread.
907 */
4a4c9785
MD
908
909SCM
910scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
911{
912 SCM cell;
913 ++scm_ints_disabled;
4c48ba06
MD
914 do
915 {
c7387918 916 if (SCM_NULLP (master->clusters))
4c48ba06 917 {
150c200b 918 if (master->grow_heap_p || scm_block_gc)
4c48ba06 919 {
b6efc951
DH
920 /* In order to reduce gc frequency, try to allocate a new heap
921 * segment first, even if gc might find some free cells. If we
922 * can't obtain a new heap segment, we will try gc later.
923 */
4c48ba06 924 master->grow_heap_p = 0;
b6efc951 925 alloc_some_heap (master, return_on_error);
4c48ba06 926 }
b6efc951 927 if (SCM_NULLP (master->clusters))
b37fe1c5 928 {
b6efc951
DH
929 /* The heap was not grown, either because it wasn't scheduled to
930 * grow, or because there was not enough memory available. In
931 * both cases we have to try gc to get some free cells.
932 */
37ddcaf6
MD
933#ifdef DEBUGINFO
934 fprintf (stderr, "allocated = %d, ",
935 scm_cells_allocated
936 + master_cells_allocated (&scm_master_freelist)
937 + master_cells_allocated (&scm_master_freelist2));
938#endif
b37fe1c5 939 scm_igc ("cells");
8fef55a8 940 adjust_min_yield (master);
c7387918
DH
941 if (SCM_NULLP (master->clusters))
942 {
b6efc951
DH
943 /* gc could not free any cells. Now, we _must_ allocate a
944 * new heap segment, because there is no other possibility
945 * to provide a new cell for the caller.
946 */
947 alloc_some_heap (master, abort_on_error);
c7387918 948 }
b37fe1c5 949 }
4c48ba06
MD
950 }
951 cell = SCM_CAR (master->clusters);
952 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 953 ++master->clusters_allocated;
4c48ba06
MD
954 }
955 while (SCM_NULLP (cell));
d6884e63
ML
956
957#ifdef GUILE_DEBUG_FREELIST
958 scm_check_freelist (cell);
959#endif
960
4a4c9785 961 --scm_ints_disabled;
3f5d82cd 962 *freelist = SCM_FREE_CELL_CDR (cell);
4a4c9785
MD
963 return cell;
964}
965
b6efc951 966
4c48ba06
MD
967#if 0
968/* This is a support routine which can be used to reserve a cluster
969 * for some special use, such as debugging. It won't be useful until
970 * free cells are preserved between garbage collections.
971 */
972
973void
974scm_alloc_cluster (scm_freelist_t *master)
975{
976 SCM freelist, cell;
977 cell = scm_gc_for_newcell (master, &freelist);
978 SCM_SETCDR (cell, freelist);
979 return cell;
980}
981#endif
982
801cb5e7
MD
983
984scm_c_hook_t scm_before_gc_c_hook;
985scm_c_hook_t scm_before_mark_c_hook;
986scm_c_hook_t scm_before_sweep_c_hook;
987scm_c_hook_t scm_after_sweep_c_hook;
988scm_c_hook_t scm_after_gc_c_hook;
989
b6efc951 990
0f2d19dd 991void
1bbd0b84 992scm_igc (const char *what)
0f2d19dd
JB
993{
994 int j;
995
406c7d90 996 ++scm_gc_running_p;
801cb5e7 997 scm_c_hook_run (&scm_before_gc_c_hook, 0);
4c48ba06
MD
998#ifdef DEBUGINFO
999 fprintf (stderr,
1000 SCM_NULLP (scm_freelist)
1001 ? "*"
1002 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
1003#endif
42db06f0
MD
1004#ifdef USE_THREADS
1005 /* During the critical section, only the current thread may run. */
1006 SCM_THREAD_CRITICAL_SECTION_START;
1007#endif
1008
e242dfd2 1009 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 1010
ab4bef85
JB
1011 if (!scm_stack_base || scm_block_gc)
1012 {
406c7d90 1013 --scm_gc_running_p;
ab4bef85
JB
1014 return;
1015 }
1016
c9b0d4b0
ML
1017 gc_start_stats (what);
1018
a5c314c8
JB
1019 if (scm_mallocated < 0)
1020 /* The byte count of allocated objects has underflowed. This is
1021 probably because you forgot to report the sizes of objects you
1022 have allocated, by calling scm_done_malloc or some such. When
1023 the GC freed them, it subtracted their size from
1024 scm_mallocated, which underflowed. */
1025 abort ();
c45acc34 1026
ab4bef85
JB
1027 if (scm_gc_heap_lock)
1028 /* We've invoked the collector while a GC is already in progress.
1029 That should never happen. */
1030 abort ();
0f2d19dd
JB
1031
1032 ++scm_gc_heap_lock;
ab4bef85 1033
0f2d19dd
JB
1034 /* flush dead entries from the continuation stack */
1035 {
1036 int x;
1037 int bound;
1038 SCM * elts;
1039 elts = SCM_VELTS (scm_continuation_stack);
b5c2579a 1040 bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
0f2d19dd
JB
1041 x = SCM_INUM (scm_continuation_stack_ptr);
1042 while (x < bound)
1043 {
1044 elts[x] = SCM_BOOL_F;
1045 ++x;
1046 }
1047 }
1048
801cb5e7
MD
1049 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1050
d6884e63
ML
1051 clear_mark_space ();
1052
42db06f0 1053#ifndef USE_THREADS
a00c95d9 1054
1b9be268 1055 /* Mark objects on the C stack. */
0f2d19dd
JB
1056 SCM_FLUSH_REGISTER_WINDOWS;
1057 /* This assumes that all registers are saved into the jmp_buf */
1058 setjmp (scm_save_regs_gc_mark);
1059 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
ce4a361d
JB
1060 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
1061 sizeof scm_save_regs_gc_mark)
1062 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
1063
1064 {
6ba93e5e 1065 scm_sizet stack_len = scm_stack_size (scm_stack_base);
0f2d19dd 1066#ifdef SCM_STACK_GROWS_UP
6ba93e5e 1067 scm_mark_locations (scm_stack_base, stack_len);
0f2d19dd 1068#else
6ba93e5e 1069 scm_mark_locations (scm_stack_base - stack_len, stack_len);
0f2d19dd
JB
1070#endif
1071 }
1072
42db06f0
MD
1073#else /* USE_THREADS */
1074
1075 /* Mark every thread's stack and registers */
945fec60 1076 scm_threads_mark_stacks ();
42db06f0
MD
1077
1078#endif /* USE_THREADS */
0f2d19dd 1079
0f2d19dd
JB
1080 j = SCM_NUM_PROTECTS;
1081 while (j--)
1082 scm_gc_mark (scm_sys_protects[j]);
1083
9de33deb
MD
1084 /* FIXME: we should have a means to register C functions to be run
1085 * in different phases of GC
a00c95d9 1086 */
9de33deb 1087 scm_mark_subr_table ();
a00c95d9 1088
42db06f0
MD
1089#ifndef USE_THREADS
1090 scm_gc_mark (scm_root->handle);
1091#endif
a00c95d9 1092
c9b0d4b0
ML
1093 t_before_sweep = scm_c_get_internal_run_time ();
1094 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1095
801cb5e7 1096 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
0493cd89 1097
0f2d19dd
JB
1098 scm_gc_sweep ();
1099
801cb5e7
MD
1100 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1101
0f2d19dd 1102 --scm_gc_heap_lock;
c9b0d4b0 1103 gc_end_stats ();
42db06f0
MD
1104
1105#ifdef USE_THREADS
1106 SCM_THREAD_CRITICAL_SECTION_END;
1107#endif
801cb5e7 1108 scm_c_hook_run (&scm_after_gc_c_hook, 0);
406c7d90 1109 --scm_gc_running_p;
0f2d19dd
JB
1110}
1111
1112\f
939794ce 1113
a00c95d9 1114/* {Mark/Sweep}
0f2d19dd
JB
1115 */
1116
56495472
ML
1117#define MARK scm_gc_mark
1118#define FNAME "scm_gc_mark"
0f2d19dd 1119
56495472 1120#endif /*!MARK_DEPENDENCIES*/
0f2d19dd
JB
1121
1122/* Mark an object precisely.
1123 */
a00c95d9 1124void
56495472
ML
1125MARK (SCM p)
1126#define FUNC_NAME FNAME
0f2d19dd
JB
1127{
1128 register long i;
1129 register SCM ptr;
61045190 1130 scm_bits_t cell_type;
0f2d19dd 1131
56495472
ML
1132#ifndef MARK_DEPENDENCIES
1133# define RECURSE scm_gc_mark
1134#else
1135 /* go through the usual marking, but not for self-cycles. */
1136# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1137#endif
0f2d19dd
JB
1138 ptr = p;
1139
56495472
ML
1140#ifdef MARK_DEPENDENCIES
1141 goto gc_mark_loop_first_time;
1142#endif
1143
0f2d19dd
JB
1144gc_mark_loop:
1145 if (SCM_IMP (ptr))
1146 return;
1147
1148gc_mark_nimp:
56495472
ML
1149
1150#ifdef MARK_DEPENDENCIES
0209177b 1151 if (SCM_EQ_P (ptr, p))
56495472
ML
1152 return;
1153
1154 scm_gc_mark (ptr);
0209177b 1155 return;
56495472
ML
1156
1157gc_mark_loop_first_time:
1158#endif
9a6976cd 1159
61045190 1160#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
9a6976cd 1161 /* We are in debug mode. Check the ptr exhaustively. */
61045190 1162 if (!scm_cellp (ptr))
db4b4ca6 1163 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
9a6976cd
DH
1164#else
1165 /* In non-debug mode, do at least some cheap testing. */
1166 if (!SCM_CELLP (ptr))
1167 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
d6884e63
ML
1168#endif
1169
56495472
ML
1170#ifndef MARK_DEPENDENCIES
1171
d6884e63
ML
1172 if (SCM_GCMARKP (ptr))
1173 return;
56495472 1174
d6884e63
ML
1175 SCM_SETGCMARK (ptr);
1176
56495472
ML
1177#endif
1178
61045190
DH
1179 cell_type = SCM_GC_CELL_TYPE (ptr);
1180 switch (SCM_ITAG7 (cell_type))
0f2d19dd
JB
1181 {
1182 case scm_tcs_cons_nimcar:
d6884e63 1183 if (SCM_IMP (SCM_CDR (ptr)))
0f2d19dd
JB
1184 {
1185 ptr = SCM_CAR (ptr);
1186 goto gc_mark_nimp;
1187 }
56495472 1188 RECURSE (SCM_CAR (ptr));
d6884e63 1189 ptr = SCM_CDR (ptr);
0f2d19dd
JB
1190 goto gc_mark_nimp;
1191 case scm_tcs_cons_imcar:
d6884e63 1192 ptr = SCM_CDR (ptr);
acb0a19c 1193 goto gc_mark_loop;
e641afaf 1194 case scm_tc7_pws:
22a52da1
DH
1195 RECURSE (SCM_SETTER (ptr));
1196 ptr = SCM_PROCEDURE (ptr);
0f2d19dd
JB
1197 goto gc_mark_loop;
1198 case scm_tcs_cons_gloc:
0f2d19dd 1199 {
c8045e8d
DH
1200 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1201 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1202 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1203 * pointer to a struct vtable data region. The fact that these are
1204 * accessed in the same way restricts the possibilites to change the
9d47a1e6 1205 * data layout of structs or heap cells.
c8045e8d
DH
1206 */
1207 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1208 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
7445e0e8 1209 if (vtable_data [scm_vtable_index_vcell] != 0)
0f2d19dd 1210 {
d6884e63
ML
1211 /* ptr is a gloc */
1212 SCM gloc_car = SCM_PACK (word0);
56495472 1213 RECURSE (gloc_car);
d6884e63
ML
1214 ptr = SCM_CDR (ptr);
1215 goto gc_mark_loop;
1216 }
1217 else
1218 {
1219 /* ptr is a struct */
1220 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
b5c2579a 1221 int len = SCM_SYMBOL_LENGTH (layout);
06ee04b2 1222 char * fields_desc = SCM_SYMBOL_CHARS (layout);
d6884e63 1223 scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);
7bb8eac7 1224
d6884e63
ML
1225 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1226 {
56495472
ML
1227 RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
1228 RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
d6884e63
ML
1229 }
1230 if (len)
1231 {
1232 int x;
7bb8eac7 1233
d6884e63
ML
1234 for (x = 0; x < len - 2; x += 2, ++struct_data)
1235 if (fields_desc[x] == 'p')
56495472 1236 RECURSE (SCM_PACK (*struct_data));
d6884e63
ML
1237 if (fields_desc[x] == 'p')
1238 {
1239 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
56495472
ML
1240 for (x = *struct_data++; x; --x, ++struct_data)
1241 RECURSE (SCM_PACK (*struct_data));
d6884e63 1242 else
56495472 1243 RECURSE (SCM_PACK (*struct_data));
d6884e63
ML
1244 }
1245 }
1246 /* mark vtable */
1247 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1248 goto gc_mark_loop;
0f2d19dd
JB
1249 }
1250 }
1251 break;
1252 case scm_tcs_closures:
22a52da1 1253 if (SCM_IMP (SCM_ENV (ptr)))
0f2d19dd
JB
1254 {
1255 ptr = SCM_CLOSCAR (ptr);
1256 goto gc_mark_nimp;
1257 }
56495472 1258 RECURSE (SCM_CLOSCAR (ptr));
22a52da1 1259 ptr = SCM_ENV (ptr);
0f2d19dd
JB
1260 goto gc_mark_nimp;
1261 case scm_tc7_vector:
b5c2579a
DH
1262 i = SCM_VECTOR_LENGTH (ptr);
1263 if (i == 0)
1264 break;
1265 while (--i > 0)
1266 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
56495472 1267 RECURSE (SCM_VELTS (ptr)[i]);
b5c2579a
DH
1268 ptr = SCM_VELTS (ptr)[0];
1269 goto gc_mark_loop;
0f2d19dd
JB
1270#ifdef CCLO
1271 case scm_tc7_cclo:
362306b9
DH
1272 {
1273 unsigned long int i = SCM_CCLO_LENGTH (ptr);
1274 unsigned long int j;
1275 for (j = 1; j != i; ++j)
1276 {
1277 SCM obj = SCM_CCLO_REF (ptr, j);
1278 if (!SCM_IMP (obj))
56495472 1279 RECURSE (obj);
362306b9
DH
1280 }
1281 ptr = SCM_CCLO_REF (ptr, 0);
1282 goto gc_mark_loop;
1283 }
b5c2579a 1284#endif
afe5177e 1285#ifdef HAVE_ARRAYS
0f2d19dd
JB
1286 case scm_tc7_bvect:
1287 case scm_tc7_byvect:
1288 case scm_tc7_ivect:
1289 case scm_tc7_uvect:
1290 case scm_tc7_fvect:
1291 case scm_tc7_dvect:
1292 case scm_tc7_cvect:
1293 case scm_tc7_svect:
5c11cc9d 1294#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1295 case scm_tc7_llvect:
1296#endif
afe5177e 1297#endif
0f2d19dd 1298 case scm_tc7_string:
0f2d19dd
JB
1299 break;
1300
1301 case scm_tc7_substring:
0f2d19dd
JB
1302 ptr = SCM_CDR (ptr);
1303 goto gc_mark_loop;
1304
1305 case scm_tc7_wvect:
ab4bef85
JB
1306 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1307 scm_weak_vectors = ptr;
0f2d19dd
JB
1308 if (SCM_IS_WHVEC_ANY (ptr))
1309 {
1310 int x;
1311 int len;
1312 int weak_keys;
1313 int weak_values;
1314
b5c2579a 1315 len = SCM_VECTOR_LENGTH (ptr);
0f2d19dd
JB
1316 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1317 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
a00c95d9 1318
0f2d19dd
JB
1319 for (x = 0; x < len; ++x)
1320 {
1321 SCM alist;
1322 alist = SCM_VELTS (ptr)[x];
46408039
JB
1323
1324 /* mark everything on the alist except the keys or
1325 * values, according to weak_values and weak_keys. */
0b5f3f34 1326 while ( SCM_CONSP (alist)
0f2d19dd 1327 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1328 && SCM_CONSP (SCM_CAR (alist)))
1329 {
1330 SCM kvpair;
1331 SCM next_alist;
1332
1333 kvpair = SCM_CAR (alist);
1334 next_alist = SCM_CDR (alist);
a00c95d9 1335 /*
0f2d19dd
JB
1336 * Do not do this:
1337 * SCM_SETGCMARK (alist);
1338 * SCM_SETGCMARK (kvpair);
1339 *
1340 * It may be that either the key or value is protected by
1341 * an escaped reference to part of the spine of this alist.
1342 * If we mark the spine here, and only mark one or neither of the
1343 * key and value, they may never be properly marked.
1344 * This leads to a horrible situation in which an alist containing
1345 * freelist cells is exported.
1346 *
1347 * So only mark the spines of these arrays last of all marking.
1348 * If somebody confuses us by constructing a weak vector
1349 * with a circular alist then we are hosed, but at least we
1350 * won't prematurely drop table entries.
1351 */
1352 if (!weak_keys)
56495472 1353 RECURSE (SCM_CAR (kvpair));
0f2d19dd 1354 if (!weak_values)
56495472 1355 RECURSE (SCM_CDR (kvpair));
0f2d19dd
JB
1356 alist = next_alist;
1357 }
1358 if (SCM_NIMP (alist))
56495472 1359 RECURSE (alist);
0f2d19dd
JB
1360 }
1361 }
1362 break;
1363
28b06554
DH
1364 case scm_tc7_symbol:
1365 ptr = SCM_PROP_SLOTS (ptr);
0f2d19dd 1366 goto gc_mark_loop;
0f2d19dd 1367 case scm_tcs_subrs:
9de33deb 1368 break;
0f2d19dd
JB
1369 case scm_tc7_port:
1370 i = SCM_PTOBNUM (ptr);
1371 if (!(i < scm_numptob))
1372 goto def;
ebf7394e 1373 if (SCM_PTAB_ENTRY(ptr))
56495472 1374 RECURSE (SCM_FILENAME (ptr));
dc53f026
JB
1375 if (scm_ptobs[i].mark)
1376 {
1377 ptr = (scm_ptobs[i].mark) (ptr);
1378 goto gc_mark_loop;
1379 }
1380 else
1381 return;
0f2d19dd
JB
1382 break;
1383 case scm_tc7_smob:
d6884e63 1384 switch (SCM_TYP16 (ptr))
0f2d19dd
JB
1385 { /* should be faster than going through scm_smobs */
1386 case scm_tc_free_cell:
1387 /* printf("found free_cell %X ", ptr); fflush(stdout); */
acb0a19c
MD
1388 case scm_tc16_big:
1389 case scm_tc16_real:
1390 case scm_tc16_complex:
0f2d19dd
JB
1391 break;
1392 default:
1393 i = SCM_SMOBNUM (ptr);
1394 if (!(i < scm_numsmob))
1395 goto def;
dc53f026
JB
1396 if (scm_smobs[i].mark)
1397 {
1398 ptr = (scm_smobs[i].mark) (ptr);
1399 goto gc_mark_loop;
1400 }
1401 else
1402 return;
0f2d19dd
JB
1403 }
1404 break;
1405 default:
acf4331f
DH
1406 def:
1407 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1408 }
0209177b 1409#undef RECURSE
0f2d19dd 1410}
acf4331f 1411#undef FUNC_NAME
0f2d19dd 1412
56495472
ML
1413#ifndef MARK_DEPENDENCIES
1414
1415#undef MARK
56495472
ML
1416#undef FNAME
1417
1418/* And here we define `scm_gc_mark_dependencies', by including this
1419 * same file in itself.
1420 */
1421#define MARK scm_gc_mark_dependencies
1422#define FNAME "scm_gc_mark_dependencies"
1423#define MARK_DEPENDENCIES
1424#include "gc.c"
1425#undef MARK_DEPENDENCIES
1426#undef MARK
56495472
ML
1427#undef FNAME
1428
0f2d19dd
JB
1429
1430/* Mark a Region Conservatively
1431 */
1432
a00c95d9 1433void
6e8d25a6 1434scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
0f2d19dd 1435{
c4da09e2 1436 unsigned long m;
0f2d19dd 1437
c4da09e2
DH
1438 for (m = 0; m < n; ++m)
1439 {
1440 SCM obj = * (SCM *) &x[m];
1441 if (SCM_CELLP (obj))
1442 {
1443 SCM_CELLPTR ptr = SCM2PTR (obj);
1444 int i = 0;
1445 int j = scm_n_heap_segs - 1;
1446 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1447 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1448 {
1449 while (i <= j)
1450 {
1451 int seg_id;
1452 seg_id = -1;
1453 if ((i == j)
1454 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1455 seg_id = i;
1456 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1457 seg_id = j;
1458 else
1459 {
1460 int k;
1461 k = (i + j) / 2;
1462 if (k == i)
1463 break;
1464 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1465 {
1466 j = k;
1467 ++i;
1468 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1469 continue;
1470 else
1471 break;
1472 }
1473 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1474 {
1475 i = k;
1476 --j;
1477 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1478 continue;
1479 else
1480 break;
1481 }
1482 }
7bb8eac7 1483
d6884e63
ML
1484 if (SCM_GC_IN_CARD_HEADERP (ptr))
1485 break;
7bb8eac7 1486
c4da09e2 1487 if (scm_heap_table[seg_id].span == 1
ecf470a2 1488 || DOUBLECELL_ALIGNED_P (obj))
3731149d
ML
1489 scm_gc_mark (obj);
1490
c4da09e2
DH
1491 break;
1492 }
1493 }
1494 }
1495 }
0f2d19dd
JB
1496}
1497
1498
1a548472
DH
1499/* The function scm_cellp determines whether an SCM value can be regarded as a
1500 * pointer to a cell on the heap. Binary search is used in order to determine
1501 * the heap segment that contains the cell.
1502 */
2e11a577 1503int
6e8d25a6 1504scm_cellp (SCM value)
2e11a577 1505{
1a548472
DH
1506 if (SCM_CELLP (value)) {
1507 scm_cell * ptr = SCM2PTR (value);
1508 unsigned int i = 0;
1509 unsigned int j = scm_n_heap_segs - 1;
1510
61045190
DH
1511 if (SCM_GC_IN_CARD_HEADERP (ptr))
1512 return 0;
1513
1a548472
DH
1514 while (i < j) {
1515 int k = (i + j) / 2;
1516 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1517 j = k;
1518 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1519 i = k + 1;
1520 }
1521 }
2e11a577 1522
9d47a1e6 1523 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1a548472 1524 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
ecf470a2 1525 && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
d6884e63
ML
1526 && !SCM_GC_IN_CARD_HEADERP (ptr)
1527 )
1a548472 1528 return 1;
d6884e63 1529 else
1a548472 1530 return 0;
d6884e63 1531 } else
1a548472 1532 return 0;
2e11a577
MD
1533}
1534
1535
4c48ba06
MD
1536static void
1537gc_sweep_freelist_start (scm_freelist_t *freelist)
1538{
1539 freelist->cells = SCM_EOL;
1540 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1541 freelist->clusters_allocated = 0;
4c48ba06
MD
1542 freelist->clusters = SCM_EOL;
1543 freelist->clustertail = &freelist->clusters;
1811ebce 1544 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1545 freelist->collected = 0;
1546}
1547
1548static void
1549gc_sweep_freelist_finish (scm_freelist_t *freelist)
1550{
1811ebce 1551 int collected;
4c48ba06 1552 *freelist->clustertail = freelist->cells;
3f5d82cd 1553 if (!SCM_NULLP (freelist->cells))
4c48ba06
MD
1554 {
1555 SCM c = freelist->cells;
22a52da1
DH
1556 SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
1557 SCM_SET_CELL_WORD_1 (c, SCM_EOL);
4c48ba06
MD
1558 freelist->collected +=
1559 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1560 }
b37fe1c5 1561 scm_gc_cells_collected += freelist->collected;
a00c95d9 1562
8fef55a8 1563 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1564 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1565 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1566 * size. This means that a too low yield is compensated by more
1567 * heap on the list which is currently doing most work, which is
1568 * just what we want.
1569 */
1811ebce 1570 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1571 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06 1572}
0f2d19dd 1573
d6884e63
ML
1574#define NEXT_DATA_CELL(ptr, span) \
1575 do { \
1576 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1577 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1578 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1579 : nxt__); \
1580 } while (0)
1581
a00c95d9 1582void
0f2d19dd 1583scm_gc_sweep ()
acf4331f 1584#define FUNC_NAME "scm_gc_sweep"
0f2d19dd
JB
1585{
1586 register SCM_CELLPTR ptr;
0f2d19dd 1587 register SCM nfreelist;
4c48ba06 1588 register scm_freelist_t *freelist;
0f2d19dd 1589 register long m;
0f2d19dd 1590 register int span;
15e9d186 1591 long i;
0f2d19dd
JB
1592 scm_sizet seg_size;
1593
0f2d19dd 1594 m = 0;
0f2d19dd 1595
4c48ba06
MD
1596 gc_sweep_freelist_start (&scm_master_freelist);
1597 gc_sweep_freelist_start (&scm_master_freelist2);
a00c95d9 1598
cf2d30f6 1599 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1600 {
4c48ba06 1601 register unsigned int left_to_collect;
4c48ba06 1602 register scm_sizet j;
15e9d186 1603
cf2d30f6
JB
1604 /* Unmarked cells go onto the front of the freelist this heap
1605 segment points to. Rather than updating the real freelist
1606 pointer as we go along, we accumulate the new head in
1607 nfreelist. Then, if it turns out that the entire segment is
1608 free, we free (i.e., malloc's free) the whole segment, and
1609 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1610 freelist = scm_heap_table[i].freelist;
1611 nfreelist = freelist->cells;
4c48ba06 1612 left_to_collect = freelist->left_to_collect;
945fec60 1613 span = scm_heap_table[i].span;
cf2d30f6 1614
a00c95d9
ML
1615 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1616 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
c9b0d4b0 1617
d6884e63
ML
1618 /* use only data cells in seg_size */
1619 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1620
c9b0d4b0
ML
1621 scm_gc_cells_swept += seg_size;
1622
0f2d19dd
JB
1623 for (j = seg_size + span; j -= span; ptr += span)
1624 {
d6884e63 1625 SCM scmptr;
96f6f4ae 1626
d6884e63 1627 if (SCM_GC_IN_CARD_HEADERP (ptr))
0f2d19dd 1628 {
d6884e63
ML
1629 SCM_CELLPTR nxt;
1630
1631 /* cheat here */
1632 nxt = ptr;
1633 NEXT_DATA_CELL (nxt, span);
1634 j += span;
1635
1636 ptr = nxt - span;
1637 continue;
1638 }
1639
1640 scmptr = PTR2SCM (ptr);
1641
1642 if (SCM_GCMARKP (scmptr))
1643 continue;
7bb8eac7 1644
d6884e63
ML
1645 switch SCM_TYP7 (scmptr)
1646 {
0f2d19dd 1647 case scm_tcs_cons_gloc:
0f2d19dd 1648 {
c8045e8d
DH
1649 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1650 * struct or a gloc. See the corresponding comment in
1651 * scm_gc_mark.
1652 */
7445e0e8
MD
1653 scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
1654 - scm_tc3_cons_gloc);
1655 /* access as struct */
1656 scm_bits_t * vtable_data = (scm_bits_t *) word0;
d6884e63 1657 if (vtable_data[scm_vtable_index_vcell] == 0)
0f2d19dd 1658 {
7445e0e8
MD
1659 /* Structs need to be freed in a special order.
1660 * This is handled by GC C hooks in struct.c.
1661 */
1662 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1663 scm_structs_to_free = scmptr;
7bb8eac7 1664 continue;
c8045e8d 1665 }
7445e0e8 1666 /* fall through so that scmptr gets collected */
0f2d19dd
JB
1667 }
1668 break;
1669 case scm_tcs_cons_imcar:
1670 case scm_tcs_cons_nimcar:
1671 case scm_tcs_closures:
e641afaf 1672 case scm_tc7_pws:
0f2d19dd
JB
1673 break;
1674 case scm_tc7_wvect:
b5c2579a 1675 m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
06ee04b2 1676 scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
d6884e63 1677 break;
0f2d19dd 1678 case scm_tc7_vector:
1b9be268
DH
1679 {
1680 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1681 if (length > 0)
1682 {
1683 m += length * sizeof (scm_bits_t);
1684 scm_must_free (SCM_VECTOR_BASE (scmptr));
1685 }
1686 break;
1687 }
0f2d19dd
JB
1688#ifdef CCLO
1689 case scm_tc7_cclo:
b5c2579a 1690 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
06ee04b2 1691 scm_must_free (SCM_CCLO_BASE (scmptr));
0f2d19dd 1692 break;
06ee04b2 1693#endif
afe5177e 1694#ifdef HAVE_ARRAYS
0f2d19dd 1695 case scm_tc7_bvect:
93778877
DH
1696 {
1697 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
1698 if (length > 0)
1699 {
1700 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1701 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1702 }
1703 }
06ee04b2 1704 break;
0f2d19dd 1705 case scm_tc7_byvect:
0f2d19dd
JB
1706 case scm_tc7_ivect:
1707 case scm_tc7_uvect:
0f2d19dd 1708 case scm_tc7_svect:
5c11cc9d 1709#ifdef HAVE_LONG_LONGS
0f2d19dd 1710 case scm_tc7_llvect:
0f2d19dd
JB
1711#endif
1712 case scm_tc7_fvect:
0f2d19dd 1713 case scm_tc7_dvect:
0f2d19dd 1714 case scm_tc7_cvect:
d1ca2c64 1715 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
06ee04b2
DH
1716 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1717 break;
afe5177e 1718#endif
0f2d19dd 1719 case scm_tc7_substring:
0f2d19dd
JB
1720 break;
1721 case scm_tc7_string:
b5c2579a 1722 m += SCM_STRING_LENGTH (scmptr) + 1;
f151f912
DH
1723 scm_must_free (SCM_STRING_CHARS (scmptr));
1724 break;
28b06554 1725 case scm_tc7_symbol:
b5c2579a 1726 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
f151f912 1727 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
0f2d19dd 1728 break;
0f2d19dd 1729 case scm_tcs_subrs:
d6884e63 1730 /* the various "subrs" (primitives) are never freed */
0f2d19dd
JB
1731 continue;
1732 case scm_tc7_port:
0f2d19dd
JB
1733 if SCM_OPENP (scmptr)
1734 {
1735 int k = SCM_PTOBNUM (scmptr);
1736 if (!(k < scm_numptob))
1737 goto sweeperr;
1738 /* Keep "revealed" ports alive. */
945fec60 1739 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1740 continue;
1741 /* Yes, I really do mean scm_ptobs[k].free */
1742 /* rather than ftobs[k].close. .close */
1743 /* is for explicit CLOSE-PORT by user */
84af0382 1744 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1745 SCM_SETSTREAM (scmptr, 0);
1746 scm_remove_from_port_table (scmptr);
1747 scm_gc_ports_collected++;
22a52da1 1748 SCM_CLR_PORT_OPEN_FLAG (scmptr);
0f2d19dd
JB
1749 }
1750 break;
1751 case scm_tc7_smob:
d6884e63 1752 switch SCM_TYP16 (scmptr)
0f2d19dd
JB
1753 {
1754 case scm_tc_free_cell:
acb0a19c 1755 case scm_tc16_real:
0f2d19dd
JB
1756 break;
1757#ifdef SCM_BIGDIG
acb0a19c 1758 case scm_tc16_big:
0f2d19dd 1759 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
06ee04b2
DH
1760 scm_must_free (SCM_BDIGITS (scmptr));
1761 break;
0f2d19dd 1762#endif /* def SCM_BIGDIG */
acb0a19c 1763 case scm_tc16_complex:
06ee04b2 1764 m += sizeof (scm_complex_t);
405aaef9 1765 scm_must_free (SCM_COMPLEX_MEM (scmptr));
06ee04b2 1766 break;
0f2d19dd 1767 default:
0f2d19dd
JB
1768 {
1769 int k;
1770 k = SCM_SMOBNUM (scmptr);
1771 if (!(k < scm_numsmob))
1772 goto sweeperr;
c8045e8d 1773 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1774 break;
1775 }
1776 }
1777 break;
1778 default:
acf4331f
DH
1779 sweeperr:
1780 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1781 }
7bb8eac7 1782
4c48ba06 1783 if (!--left_to_collect)
4a4c9785 1784 {
22a52da1 1785 SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
4c48ba06
MD
1786 *freelist->clustertail = scmptr;
1787 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1788
4a4c9785 1789 nfreelist = SCM_EOL;
4c48ba06
MD
1790 freelist->collected += span * freelist->cluster_size;
1791 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1792 }
1793 else
4a4c9785
MD
1794 {
1795 /* Stick the new cell on the front of nfreelist. It's
1796 critical that we mark this cell as freed; otherwise, the
1797 conservative collector might trace it as some other type
1798 of object. */
54778cd3 1799 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
3f5d82cd 1800 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
4a4c9785
MD
1801 nfreelist = scmptr;
1802 }
0f2d19dd 1803 }
d6884e63 1804
0f2d19dd
JB
1805#ifdef GC_FREE_SEGMENTS
1806 if (n == seg_size)
1807 {
15e9d186
JB
1808 register long j;
1809
4c48ba06 1810 freelist->heap_size -= seg_size;
cf2d30f6
JB
1811 free ((char *) scm_heap_table[i].bounds[0]);
1812 scm_heap_table[i].bounds[0] = 0;
1813 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1814 scm_heap_table[j - 1] = scm_heap_table[j];
1815 scm_n_heap_segs -= 1;
cf2d30f6 1816 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1817 }
1818 else
1819#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1820 {
1821 /* Update the real freelist pointer to point to the head of
1822 the list of free cells we've built for this segment. */
4c48ba06 1823 freelist->cells = nfreelist;
4c48ba06 1824 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1825 }
1826
fca7547b 1827#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
1828 scm_map_free_list ();
1829#endif
4a4c9785 1830 }
a00c95d9 1831
4c48ba06
MD
1832 gc_sweep_freelist_finish (&scm_master_freelist);
1833 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1834
8ded62a3
MD
1835 /* When we move to POSIX threads private freelists should probably
1836 be GC-protected instead. */
1837 scm_freelist = SCM_EOL;
1838 scm_freelist2 = SCM_EOL;
a00c95d9 1839
b37fe1c5 1840 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f 1841 scm_gc_yield -= scm_cells_allocated;
0f2d19dd
JB
1842 scm_mallocated -= m;
1843 scm_gc_malloc_collected = m;
1844}
acf4331f 1845#undef FUNC_NAME
0f2d19dd
JB
1846
1847
1848\f
0f2d19dd
JB
1849/* {Front end to malloc}
1850 *
9d47a1e6
ML
1851 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1852 * scm_done_free
0f2d19dd 1853 *
c6c79933
GH
1854 * These functions provide services comparable to malloc, realloc, and
1855 * free. They should be used when allocating memory that will be under
1856 * control of the garbage collector, i.e., if the memory may be freed
1857 * during garbage collection.
1858 */
bc9d9bb2 1859
0f2d19dd
JB
1860/* scm_must_malloc
1861 * Return newly malloced storage or throw an error.
1862 *
1863 * The parameter WHAT is a string for error reporting.
a00c95d9 1864 * If the threshold scm_mtrigger will be passed by this
0f2d19dd
JB
1865 * allocation, or if the first call to malloc fails,
1866 * garbage collect -- on the presumption that some objects
1867 * using malloced storage may be collected.
1868 *
1869 * The limit scm_mtrigger may be raised by this allocation.
1870 */
07806695 1871void *
e4ef2330 1872scm_must_malloc (scm_sizet size, const char *what)
0f2d19dd 1873{
07806695 1874 void *ptr;
15e9d186 1875 unsigned long nm = scm_mallocated + size;
e4ef2330
MD
1876
1877 if (nm <= scm_mtrigger)
0f2d19dd 1878 {
07806695 1879 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1880 if (NULL != ptr)
1881 {
1882 scm_mallocated = nm;
bc9d9bb2
MD
1883#ifdef GUILE_DEBUG_MALLOC
1884 scm_malloc_register (ptr, what);
1885#endif
0f2d19dd
JB
1886 return ptr;
1887 }
1888 }
6064dcc6 1889
0f2d19dd 1890 scm_igc (what);
e4ef2330 1891
0f2d19dd 1892 nm = scm_mallocated + size;
07806695 1893 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1894 if (NULL != ptr)
1895 {
1896 scm_mallocated = nm;
6064dcc6
MV
1897 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1898 if (nm > scm_mtrigger)
1899 scm_mtrigger = nm + nm / 2;
1900 else
1901 scm_mtrigger += scm_mtrigger / 2;
1902 }
bc9d9bb2
MD
1903#ifdef GUILE_DEBUG_MALLOC
1904 scm_malloc_register (ptr, what);
1905#endif
1906
0f2d19dd
JB
1907 return ptr;
1908 }
e4ef2330 1909
acf4331f 1910 scm_memory_error (what);
0f2d19dd
JB
1911}
1912
1913
1914/* scm_must_realloc
1915 * is similar to scm_must_malloc.
1916 */
07806695
JB
1917void *
1918scm_must_realloc (void *where,
e4ef2330
MD
1919 scm_sizet old_size,
1920 scm_sizet size,
3eeba8d4 1921 const char *what)
0f2d19dd 1922{
07806695 1923 void *ptr;
e4ef2330
MD
1924 scm_sizet nm = scm_mallocated + size - old_size;
1925
1926 if (nm <= scm_mtrigger)
0f2d19dd 1927 {
07806695 1928 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1929 if (NULL != ptr)
1930 {
1931 scm_mallocated = nm;
bc9d9bb2
MD
1932#ifdef GUILE_DEBUG_MALLOC
1933 scm_malloc_reregister (where, ptr, what);
1934#endif
0f2d19dd
JB
1935 return ptr;
1936 }
1937 }
e4ef2330 1938
0f2d19dd 1939 scm_igc (what);
e4ef2330
MD
1940
1941 nm = scm_mallocated + size - old_size;
07806695 1942 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1943 if (NULL != ptr)
1944 {
1945 scm_mallocated = nm;
6064dcc6
MV
1946 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1947 if (nm > scm_mtrigger)
1948 scm_mtrigger = nm + nm / 2;
1949 else
1950 scm_mtrigger += scm_mtrigger / 2;
1951 }
bc9d9bb2
MD
1952#ifdef GUILE_DEBUG_MALLOC
1953 scm_malloc_reregister (where, ptr, what);
1954#endif
0f2d19dd
JB
1955 return ptr;
1956 }
e4ef2330 1957
acf4331f 1958 scm_memory_error (what);
0f2d19dd
JB
1959}
1960
acf4331f 1961
a00c95d9 1962void
07806695 1963scm_must_free (void *obj)
acf4331f 1964#define FUNC_NAME "scm_must_free"
0f2d19dd 1965{
bc9d9bb2
MD
1966#ifdef GUILE_DEBUG_MALLOC
1967 scm_malloc_unregister (obj);
1968#endif
0f2d19dd
JB
1969 if (obj)
1970 free (obj);
1971 else
acf4331f 1972 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
0f2d19dd 1973}
acf4331f
DH
1974#undef FUNC_NAME
1975
0f2d19dd 1976
c68296f8
MV
1977/* Announce that there has been some malloc done that will be freed
1978 * during gc. A typical use is for a smob that uses some malloced
1979 * memory but can not get it from scm_must_malloc (for whatever
1980 * reason). When a new object of this smob is created you call
1981 * scm_done_malloc with the size of the object. When your smob free
1982 * function is called, be sure to include this size in the return
9d47a1e6
ML
1983 * value.
1984 *
1985 * If you can't actually free the memory in the smob free function,
1986 * for whatever reason (like reference counting), you still can (and
1987 * should) report the amount of memory freed when you actually free it.
1988 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1989 * eh? Or even better, call scm_done_free. */
0f2d19dd 1990
c68296f8 1991void
6e8d25a6 1992scm_done_malloc (long size)
c68296f8
MV
1993{
1994 scm_mallocated += size;
1995
1996 if (scm_mallocated > scm_mtrigger)
1997 {
1998 scm_igc ("foreign mallocs");
1999 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2000 {
2001 if (scm_mallocated > scm_mtrigger)
2002 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2003 else
2004 scm_mtrigger += scm_mtrigger / 2;
2005 }
2006 }
2007}
2008
9d47a1e6
ML
2009void
2010scm_done_free (long size)
2011{
2012 scm_mallocated -= size;
2013}
2014
c68296f8
MV
2015
2016\f
0f2d19dd
JB
2017/* {Heap Segments}
2018 *
2019 * Each heap segment is an array of objects of a particular size.
2020 * Every segment has an associated (possibly shared) freelist.
2021 * A table of segment records is kept that records the upper and
2022 * lower extents of the segment; this is used during the conservative
 2023 * phase of gc to identify probable gc roots (because they point
c68296f8 2024 * into valid segments at reasonable offsets). */
0f2d19dd
JB
2025
2026/* scm_expmem
2027 * is true if the first segment was smaller than INIT_HEAP_SEG.
2028 * If scm_expmem is set to one, subsequent segment allocations will
2029 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2030 */
2031int scm_expmem = 0;
2032
4c48ba06
MD
2033scm_sizet scm_max_segment_size;
2034
0f2d19dd
JB
2035/* scm_heap_org
2036 * is the lowest base address of any heap segment.
2037 */
2038SCM_CELLPTR scm_heap_org;
2039
a00c95d9 2040scm_heap_seg_data_t * scm_heap_table = 0;
b6efc951 2041static unsigned int heap_segment_table_size = 0;
0f2d19dd
JB
2042int scm_n_heap_segs = 0;
2043
0f2d19dd 2044/* init_heap_seg
d6884e63 2045 * initializes a new heap segment and returns the number of objects it contains.
0f2d19dd 2046 *
d6884e63
ML
2047 * The segment origin and segment size in bytes are input parameters.
2048 * The freelist is both input and output.
0f2d19dd 2049 *
d6884e63
ML
2050 * This function presumes that the scm_heap_table has already been expanded
2051 * to accomodate a new segment record and that the markbit space was reserved
2052 * for all the cards in this segment.
0f2d19dd
JB
2053 */
2054
d6884e63
ML
2055#define INIT_CARD(card, span) \
2056 do { \
322ec19d 2057 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
d6884e63
ML
2058 if ((span) == 2) \
2059 SCM_GC_SET_CARD_DOUBLECELL (card); \
2060 } while (0)
0f2d19dd 2061
a00c95d9 2062static scm_sizet
4c48ba06 2063init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
0f2d19dd
JB
2064{
2065 register SCM_CELLPTR ptr;
0f2d19dd 2066 SCM_CELLPTR seg_end;
15e9d186 2067 int new_seg_index;
acb0a19c 2068 int n_new_cells;
4c48ba06 2069 int span = freelist->span;
a00c95d9 2070
0f2d19dd
JB
2071 if (seg_org == NULL)
2072 return 0;
2073
d6884e63
ML
2074 /* Align the begin ptr up.
2075 */
2076 ptr = SCM_GC_CARD_UP (seg_org);
acb0a19c 2077
a00c95d9 2078 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 2079 */
d6884e63 2080 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
0f2d19dd 2081
a00c95d9 2082 /* Find the right place and insert the segment record.
0f2d19dd
JB
2083 *
2084 */
2085 for (new_seg_index = 0;
2086 ( (new_seg_index < scm_n_heap_segs)
2087 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2088 new_seg_index++)
2089 ;
2090
2091 {
2092 int i;
2093 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2094 scm_heap_table[i] = scm_heap_table[i - 1];
2095 }
a00c95d9 2096
0f2d19dd
JB
2097 ++scm_n_heap_segs;
2098
945fec60 2099 scm_heap_table[new_seg_index].span = span;
4c48ba06 2100 scm_heap_table[new_seg_index].freelist = freelist;
195e6201
DH
2101 scm_heap_table[new_seg_index].bounds[0] = ptr;
2102 scm_heap_table[new_seg_index].bounds[1] = seg_end;
0f2d19dd 2103
acb0a19c
MD
2104 /*n_new_cells*/
2105 n_new_cells = seg_end - ptr;
0f2d19dd 2106
4c48ba06 2107 freelist->heap_size += n_new_cells;
4a4c9785 2108
a00c95d9 2109 /* Partition objects in this segment into clusters */
4a4c9785
MD
2110 {
2111 SCM clusters;
2112 SCM *clusterp = &clusters;
4a4c9785 2113
d6884e63
ML
2114 NEXT_DATA_CELL (ptr, span);
2115 while (ptr < seg_end)
4a4c9785 2116 {
d6884e63
ML
2117 scm_cell *nxt = ptr;
2118 scm_cell *prv = NULL;
2119 scm_cell *last_card = NULL;
2120 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2121 NEXT_DATA_CELL(nxt, span);
4a4c9785 2122
4c48ba06
MD
2123 /* Allocate cluster spine
2124 */
4a4c9785 2125 *clusterp = PTR2SCM (ptr);
d6884e63 2126 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
4a4c9785 2127 clusterp = SCM_CDRLOC (*clusterp);
d6884e63 2128 ptr = nxt;
a00c95d9 2129
d6884e63 2130 while (n_data_cells--)
4a4c9785 2131 {
d6884e63 2132 scm_cell *card = SCM_GC_CELL_CARD (ptr);
96f6f4ae 2133 SCM scmptr = PTR2SCM (ptr);
d6884e63
ML
2134 nxt = ptr;
2135 NEXT_DATA_CELL (nxt, span);
2136 prv = ptr;
2137
2138 if (card != last_card)
2139 {
2140 INIT_CARD (card, span);
2141 last_card = card;
2142 }
96f6f4ae 2143
54778cd3 2144 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
22a52da1 2145 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
d6884e63
ML
2146
2147 ptr = nxt;
4a4c9785 2148 }
4c48ba06 2149
d6884e63 2150 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
4a4c9785 2151 }
a00c95d9 2152
d6884e63
ML
2153 /* sanity check */
2154 {
2155 scm_cell *ref = seg_end;
2156 NEXT_DATA_CELL (ref, span);
2157 if (ref != ptr)
2158 /* [cmm] looks like the segment size doesn't divide cleanly by
2159 cluster size. bad cmm! */
2160 abort();
2161 }
2162
4a4c9785
MD
2163 /* Patch up the last cluster pointer in the segment
2164 * to join it to the input freelist.
2165 */
4c48ba06
MD
2166 *clusterp = freelist->clusters;
2167 freelist->clusters = clusters;
4a4c9785
MD
2168 }
2169
4c48ba06
MD
2170#ifdef DEBUGINFO
2171 fprintf (stderr, "H");
2172#endif
0f2d19dd 2173 return size;
0f2d19dd
JB
2174}
2175
a00c95d9
ML
2176static scm_sizet
2177round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2178{
2179 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2180
2181 return
2182 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2183 + ALIGNMENT_SLACK (freelist);
2184}
2185
a00c95d9 2186static void
b6efc951 2187alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
acf4331f 2188#define FUNC_NAME "alloc_some_heap"
0f2d19dd 2189{
0f2d19dd 2190 SCM_CELLPTR ptr;
b37fe1c5 2191 long len;
a00c95d9 2192
9d47a1e6 2193 if (scm_gc_heap_lock)
b6efc951
DH
2194 {
2195 /* Critical code sections (such as the garbage collector) aren't
2196 * supposed to add heap segments.
2197 */
2198 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
2199 abort ();
2200 }
0f2d19dd 2201
9d47a1e6 2202 if (scm_n_heap_segs == heap_segment_table_size)
b6efc951
DH
2203 {
2204 /* We have to expand the heap segment table to have room for the new
2205 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2206 * init_heap_seg only if the allocation of the segment itself succeeds.
2207 */
2208 unsigned int new_table_size = scm_n_heap_segs + 1;
2209 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2210 scm_heap_seg_data_t * new_heap_table;
2211
2212 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2213 realloc ((char *)scm_heap_table, size)));
2214 if (!new_heap_table)
2215 {
2216 if (error_policy == abort_on_error)
2217 {
2218 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2219 abort ();
2220 }
2221 else
2222 {
2223 return;
2224 }
2225 }
2226 else
2227 {
2228 scm_heap_table = new_heap_table;
2229 heap_segment_table_size = new_table_size;
2230 }
2231 }
0f2d19dd 2232
0f2d19dd 2233 /* Pick a size for the new heap segment.
a00c95d9 2234 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
2235 * gc.h
2236 */
4c48ba06 2237 {
1811ebce
MD
2238 /* Assure that the new segment is predicted to be large enough.
2239 *
2240 * New yield should at least equal GC fraction of new heap size, i.e.
2241 *
2242 * y + dh > f * (h + dh)
2243 *
2244 * y : yield
8fef55a8 2245 * f : min yield fraction
1811ebce
MD
2246 * h : heap size
2247 * dh : size of new heap segment
2248 *
2249 * This gives dh > (f * h - y) / (1 - f)
bda1446c 2250 */
8fef55a8 2251 int f = freelist->min_yield_fraction;
1811ebce
MD
2252 long h = SCM_HEAP_SIZE;
2253 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2254 len = SCM_EXPHEAP (freelist->heap_size);
2255#ifdef DEBUGINFO
2256 fprintf (stderr, "(%d < %d)", len, min_cells);
2257#endif
2258 if (len < min_cells)
1811ebce 2259 len = min_cells + freelist->cluster_size;
4c48ba06 2260 len *= sizeof (scm_cell);
1811ebce
MD
2261 /* force new sampling */
2262 freelist->collected = LONG_MAX;
4c48ba06 2263 }
a00c95d9 2264
4c48ba06
MD
2265 if (len > scm_max_segment_size)
2266 len = scm_max_segment_size;
0f2d19dd
JB
2267
2268 {
2269 scm_sizet smallest;
2270
a00c95d9 2271 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9 2272
0f2d19dd 2273 if (len < smallest)
a00c95d9 2274 len = smallest;
0f2d19dd
JB
2275
2276 /* Allocate with decaying ambition. */
2277 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2278 && (len >= smallest))
2279 {
1811ebce 2280 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2281 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2282 if (ptr)
2283 {
a00c95d9 2284 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2285 return;
2286 }
2287 len /= 2;
2288 }
2289 }
2290
b6efc951
DH
2291 if (error_policy == abort_on_error)
2292 {
2293 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2294 abort ();
2295 }
0f2d19dd 2296}
acf4331f 2297#undef FUNC_NAME
0f2d19dd
JB
2298
2299
a00c95d9 2300SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
1bbd0b84 2301 (SCM name),
cf359417
MG
2302 "Flushes the glocs for @var{name}, or all glocs if @var{name}\n"
2303 "is @code{#t}.")
1bbd0b84 2304#define FUNC_NAME s_scm_unhash_name
0f2d19dd
JB
2305{
2306 int x;
2307 int bound;
3b3b36dd 2308 SCM_VALIDATE_SYMBOL (1,name);
0f2d19dd
JB
2309 SCM_DEFER_INTS;
2310 bound = scm_n_heap_segs;
2311 for (x = 0; x < bound; ++x)
2312 {
2313 SCM_CELLPTR p;
2314 SCM_CELLPTR pbound;
195e6201
DH
2315 p = scm_heap_table[x].bounds[0];
2316 pbound = scm_heap_table[x].bounds[1];
0f2d19dd
JB
2317 while (p < pbound)
2318 {
c8045e8d
DH
2319 SCM cell = PTR2SCM (p);
2320 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
0f2d19dd 2321 {
c8045e8d
DH
2322 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2323 * struct cell. See the corresponding comment in scm_gc_mark.
2324 */
2325 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2326 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2327 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
9a09deb1 2328 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
c8045e8d 2329 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
0f2d19dd 2330 {
c8045e8d 2331 SCM_SET_CELL_OBJECT_0 (cell, name);
0f2d19dd
JB
2332 }
2333 }
2334 ++p;
2335 }
2336 }
2337 SCM_ALLOW_INTS;
2338 return name;
2339}
1bbd0b84 2340#undef FUNC_NAME
0f2d19dd
JB
2341
2342
2343\f
2344/* {GC Protection Helper Functions}
2345 */
2346
2347
5d2b97cd
DH
2348/*
2349 * If within a function you need to protect one or more scheme objects from
2350 * garbage collection, pass them as parameters to one of the
2351 * scm_remember_upto_here* functions below. These functions don't do
2352 * anything, but since the compiler does not know that they are actually
2353 * no-ops, it will generate code that calls these functions with the given
2354 * parameters. Therefore, you can be sure that the compiler will keep those
2355 * scheme values alive (on the stack or in a register) up to the point where
2356 * scm_remember_upto_here* is called. In other words, place the call to
 2357 * scm_remember_upto_here* _behind_ the last code in your function that
2358 * depends on the scheme object to exist.
2359 *
 2360 * Example: We want to make sure that the string object str does not get
2361 * garbage collected during the execution of 'some_function', because
2362 * otherwise the characters belonging to str would be freed and
2363 * 'some_function' might access freed memory. To make sure that the compiler
2364 * keeps str alive on the stack or in a register such that it is visible to
2365 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2366 * call to 'some_function'. Note that this would not be necessary if str was
2367 * used anyway after the call to 'some_function'.
2368 * char *chars = SCM_STRING_CHARS (str);
2369 * some_function (chars);
2370 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2371 */
2372
2373void
2374scm_remember_upto_here_1 (SCM obj)
2375{
2376 /* Empty. Protects a single object from garbage collection. */
2377}
2378
2379void
2380scm_remember_upto_here_2 (SCM obj1, SCM obj2)
2381{
2382 /* Empty. Protects two objects from garbage collection. */
2383}
2384
2385void
2386scm_remember_upto_here (SCM obj, ...)
2387{
2388 /* Empty. Protects any number of objects from garbage collection. */
2389}
2390
2391
2392#if (SCM_DEBUG_DEPRECATED == 0)
2393
0f2d19dd 2394void
6e8d25a6 2395scm_remember (SCM *ptr)
b24b5e13
DH
2396{
2397 /* empty */
2398}
0f2d19dd 2399
5d2b97cd 2400#endif /* SCM_DEBUG_DEPRECATED == 0 */
1cc91f1b 2401
c209c88e 2402/*
41b0806d
GB
2403 These crazy functions prevent garbage collection
2404 of arguments after the first argument by
2405 ensuring they remain live throughout the
2406 function because they are used in the last
2407 line of the code block.
2408 It'd be better to have a nice compiler hint to
2409 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2410SCM
2411scm_return_first (SCM elt, ...)
0f2d19dd
JB
2412{
2413 return elt;
2414}
2415
41b0806d
GB
/* Integer flavor of scm_return_first: returns I, ignoring the rest. */
int
scm_return_first_int (int i, ...)
{
  return i;
}
2421
0f2d19dd 2422
0f2d19dd 2423SCM
6e8d25a6 2424scm_permanent_object (SCM obj)
0f2d19dd
JB
2425{
2426 SCM_REDEFER_INTS;
2427 scm_permobjs = scm_cons (obj, scm_permobjs);
2428 SCM_REALLOW_INTS;
2429 return obj;
2430}
2431
2432
7bd4fbe2
MD
2433/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2434 other references are dropped, until the object is unprotected by calling
2435 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2436 i. e. it is possible to protect the same object several times, but it is
2437 necessary to unprotect the object the same number of times to actually get
2438 the object unprotected. It is an error to unprotect an object more often
2439 than it has been protected before. The function scm_protect_object returns
2440 OBJ.
2441*/
2442
2443/* Implementation note: For every object X, there is a counter which
2444 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2445*/
686765af 2446
ef290276 2447SCM
6e8d25a6 2448scm_protect_object (SCM obj)
ef290276 2449{
686765af 2450 SCM handle;
9d47a1e6 2451
686765af 2452 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2453 SCM_REDEFER_INTS;
9d47a1e6 2454
0f0f0899
MD
2455 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2456 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
9d47a1e6 2457
2dd6a83a 2458 SCM_REALLOW_INTS;
9d47a1e6 2459
ef290276
JB
2460 return obj;
2461}
2462
2463
2464/* Remove any protection for OBJ established by a prior call to
dab7f566 2465 scm_protect_object. This function returns OBJ.
ef290276 2466
dab7f566 2467 See scm_protect_object for more information. */
ef290276 2468SCM
6e8d25a6 2469scm_unprotect_object (SCM obj)
ef290276 2470{
686765af 2471 SCM handle;
9d47a1e6 2472
686765af 2473 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2474 SCM_REDEFER_INTS;
9d47a1e6 2475
686765af 2476 handle = scm_hashq_get_handle (scm_protects, obj);
9d47a1e6 2477
22a52da1 2478 if (SCM_FALSEP (handle))
686765af 2479 {
0f0f0899
MD
2480 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2481 abort ();
686765af 2482 }
6a199940
DH
2483 else
2484 {
2485 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2486 if (count == 0)
2487 scm_hashq_remove_x (scm_protects, obj);
2488 else
2489 SCM_SETCDR (handle, SCM_MAKINUM (count));
2490 }
686765af 2491
2dd6a83a 2492 SCM_REALLOW_INTS;
ef290276
JB
2493
2494 return obj;
2495}
2496
c45acc34
JB
2497int terminating;
2498
2499/* called on process termination. */
e52ceaac
MD
2500#ifdef HAVE_ATEXIT
2501static void
2502cleanup (void)
2503#else
2504#ifdef HAVE_ON_EXIT
51157deb
MD
2505extern int on_exit (void (*procp) (), int arg);
2506
e52ceaac
MD
2507static void
2508cleanup (int status, void *arg)
2509#else
2510#error Dont know how to setup a cleanup handler on your system.
2511#endif
2512#endif
c45acc34
JB
2513{
2514 terminating = 1;
2515 scm_flush_all_ports ();
2516}
ef290276 2517
0f2d19dd 2518\f
acb0a19c 2519static int
4c48ba06 2520make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
acb0a19c 2521{
a00c95d9 2522 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
d6884e63 2523
a00c95d9
ML
2524 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2525 rounded_size,
4c48ba06 2526 freelist))
acb0a19c 2527 {
a00c95d9
ML
2528 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2529 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2530 rounded_size,
4c48ba06 2531 freelist))
acb0a19c
MD
2532 return 1;
2533 }
2534 else
2535 scm_expmem = 1;
2536
8fef55a8
MD
2537 if (freelist->min_yield_fraction)
2538 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
b37fe1c5 2539 / 100);
8fef55a8 2540 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
a00c95d9 2541
acb0a19c
MD
2542 return 0;
2543}
2544
2545\f
4c48ba06
MD
2546static void
2547init_freelist (scm_freelist_t *freelist,
2548 int span,
2549 int cluster_size,
8fef55a8 2550 int min_yield)
4c48ba06
MD
2551{
2552 freelist->clusters = SCM_EOL;
2553 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2554 freelist->left_to_collect = 0;
2555 freelist->clusters_allocated = 0;
8fef55a8
MD
2556 freelist->min_yield = 0;
2557 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2558 freelist->span = span;
2559 freelist->collected = 0;
1811ebce 2560 freelist->collected_1 = 0;
4c48ba06
MD
2561 freelist->heap_size = 0;
2562}
2563
85db4a2c
DH
2564
/* Read an integer setting from environment variable VAR.  Returns DEF
   when VAR is unset or when its value does not begin with a base-10
   number parsable by strtol; trailing garbage after the number is
   ignored. */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *tail;
  long parsed;
  char *str = getenv (var);

  if (str == NULL)
    return def;
  parsed = strtol (str, &tail, 10);
  if (tail == str)  /* no digits were consumed */
    return def;
  return parsed;
}
2578
2579
4a4c9785 2580int
85db4a2c 2581scm_init_storage ()
0f2d19dd 2582{
85db4a2c
DH
2583 scm_sizet gc_trigger_1;
2584 scm_sizet gc_trigger_2;
2585 scm_sizet init_heap_size_1;
2586 scm_sizet init_heap_size_2;
0f2d19dd
JB
2587 scm_sizet j;
2588
2589 j = SCM_NUM_PROTECTS;
2590 while (j)
2591 scm_sys_protects[--j] = SCM_BOOL_F;
2592 scm_block_gc = 1;
4a4c9785 2593
4a4c9785 2594 scm_freelist = SCM_EOL;
4c48ba06 2595 scm_freelist2 = SCM_EOL;
85db4a2c
DH
2596 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2597 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2598 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2599 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2600 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
4a4c9785 2601
0f2d19dd
JB
2602 scm_expmem = 0;
2603
2604 j = SCM_HEAP_SEG_SIZE;
2605 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
a00c95d9
ML
2606 scm_heap_table = ((scm_heap_seg_data_t *)
2607 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
b6efc951 2608 heap_segment_table_size = 2;
acb0a19c 2609
d6884e63
ML
2610 mark_space_ptr = &mark_space_head;
2611
85db4a2c
DH
2612 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2613 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
4c48ba06
MD
2614 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2615 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
4a4c9785 2616 return 1;
acb0a19c 2617
801cb5e7 2618 /* scm_hplims[0] can change. do not remove scm_heap_org */
a00c95d9 2619 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
acb0a19c 2620
801cb5e7
MD
2621 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2622 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2623 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2624 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2625 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
0f2d19dd
JB
2626
2627 /* Initialise the list of ports. */
840ae05d
JB
2628 scm_port_table = (scm_port **)
2629 malloc (sizeof (scm_port *) * scm_port_table_room);
0f2d19dd
JB
2630 if (!scm_port_table)
2631 return 1;
2632
a18bcd0e 2633#ifdef HAVE_ATEXIT
c45acc34 2634 atexit (cleanup);
e52ceaac
MD
2635#else
2636#ifdef HAVE_ON_EXIT
2637 on_exit (cleanup, 0);
2638#endif
a18bcd0e 2639#endif
0f2d19dd
JB
2640
2641 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
24e68a57 2642 SCM_SETCDR (scm_undefineds, scm_undefineds);
0f2d19dd
JB
2643
2644 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
be54b15d 2645 scm_nullstr = scm_allocate_string (0);
00ffa0e7 2646 scm_nullvect = scm_c_make_vector (0, SCM_UNDEFINED);
93d40df2
DH
2647
2648#define DEFAULT_SYMHASH_SIZE 277
00ffa0e7
KN
2649 scm_symhash = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
2650 scm_symhash_vars = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
93d40df2 2651
8960e0a0 2652 scm_stand_in_procs = SCM_EOL;
0f2d19dd 2653 scm_permobjs = SCM_EOL;
00ffa0e7 2654 scm_protects = scm_c_make_hash_table (31);
d6884e63 2655
0f2d19dd
JB
2656 return 0;
2657}
939794ce 2658
0f2d19dd
JB
2659\f
2660
939794ce
DH
2661SCM scm_after_gc_hook;
2662
2663#if (SCM_DEBUG_DEPRECATED == 0)
2664static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2665#endif /* SCM_DEBUG_DEPRECATED == 0 */
2666static SCM gc_async;
2667
2668
2669/* The function gc_async_thunk causes the execution of the after-gc-hook. It
2670 * is run after the gc, as soon as the asynchronous events are handled by the
2671 * evaluator.
2672 */
2673static SCM
2674gc_async_thunk (void)
2675{
2676 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2677
2678#if (SCM_DEBUG_DEPRECATED == 0)
2679
2680 /* The following code will be removed in Guile 1.5. */
2681 if (SCM_NFALSEP (scm_gc_vcell))
2682 {
2683 SCM proc = SCM_CDR (scm_gc_vcell);
2684
2685 if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
2686 scm_apply (proc, SCM_EOL, SCM_EOL);
2687 }
2688
2689#endif /* SCM_DEBUG_DEPRECATED == 0 */
2690
2691 return SCM_UNSPECIFIED;
2692}
2693
2694
2695/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2696 * the garbage collection. The only purpose of this function is to mark the
2697 * gc_async (which will eventually lead to the execution of the
2698 * gc_async_thunk).
2699 */
2700static void *
2701mark_gc_async (void * hook_data, void *func_data, void *data)
2702{
2703 scm_system_async_mark (gc_async);
2704 return NULL;
2705}
2706
2707
0f2d19dd
JB
2708void
2709scm_init_gc ()
0f2d19dd 2710{
939794ce
DH
2711 SCM after_gc_thunk;
2712
61045190
DH
2713#if (SCM_DEBUG_CELL_ACCESSES == 1)
2714 scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
2715#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
2716
801cb5e7 2717 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
939794ce
DH
2718
2719#if (SCM_DEBUG_DEPRECATED == 0)
2720 scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
2721#endif /* SCM_DEBUG_DEPRECATED == 0 */
78573619 2722 after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
23670993 2723 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
939794ce
DH
2724
2725 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2726
8dc9439f 2727#ifndef SCM_MAGIC_SNARFER
a0599745 2728#include "libguile/gc.x"
8dc9439f 2729#endif
0f2d19dd 2730}
89e00824 2731
56495472
ML
2732#endif /*MARK_DEPENDENCIES*/
2733
89e00824
ML
2734/*
2735 Local Variables:
2736 c-file-style: "gnu"
2737 End:
2738*/