* Fixed the changelog entry regarding re-introduction of struct member
[bpt/guile.git] / libguile / gc.c
CommitLineData
acb0a19c 1/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
a00c95d9 2 *
0f2d19dd
JB
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
a00c95d9 7 *
0f2d19dd
JB
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
a00c95d9 12 *
0f2d19dd
JB
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
82892bed
JB
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
0f2d19dd
JB
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
82892bed 40 * If you do not wish that, delete this exception notice. */
1bbd0b84
GB
41
42/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
37ddcaf6
MD
45/* #define DEBUGINFO */
46
56495472
ML
47/* SECTION: This code is compiled once.
48 */
49
50#ifndef MARK_DEPENDENCIES
51
0f2d19dd
JB
52\f
53#include <stdio.h>
a0599745 54#include "libguile/_scm.h"
0a7a7445 55#include "libguile/eval.h"
a0599745
MD
56#include "libguile/stime.h"
57#include "libguile/stackchk.h"
58#include "libguile/struct.h"
a0599745
MD
59#include "libguile/smob.h"
60#include "libguile/unif.h"
61#include "libguile/async.h"
62#include "libguile/ports.h"
63#include "libguile/root.h"
64#include "libguile/strings.h"
65#include "libguile/vectors.h"
801cb5e7 66#include "libguile/weaks.h"
686765af 67#include "libguile/hashtab.h"
a0599745
MD
68
69#include "libguile/validate.h"
70#include "libguile/gc.h"
fce59c93 71
bc9d9bb2 72#ifdef GUILE_DEBUG_MALLOC
a0599745 73#include "libguile/debug-malloc.h"
bc9d9bb2
MD
74#endif
75
0f2d19dd 76#ifdef HAVE_MALLOC_H
95b88819 77#include <malloc.h>
0f2d19dd
JB
78#endif
79
80#ifdef HAVE_UNISTD_H
95b88819 81#include <unistd.h>
0f2d19dd
JB
82#endif
83
1cc91f1b
JB
84#ifdef __STDC__
85#include <stdarg.h>
86#define var_start(x, y) va_start(x, y)
87#else
88#include <varargs.h>
89#define var_start(x, y) va_start(x)
90#endif
91
0f2d19dd 92\f
406c7d90
DH
93
94unsigned int scm_gc_running_p = 0;
95
96\f
97
98#if (SCM_DEBUG_CELL_ACCESSES == 1)
99
100unsigned int scm_debug_cell_accesses_p = 0;
101
102
/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves to determine whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment and whether the cell
 * pointed to is not a free cell.  Aborts the process on failure.
 */
void
scm_assert_cell_valid (SCM cell)
{
  if (scm_debug_cell_accesses_p)
    {
      scm_debug_cell_accesses_p = 0;  /* disable to avoid recursion */

      if (!scm_cellp (cell))
        {
          fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
          abort ();
        }
      else if (!scm_gc_running_p)
        {
          /* Dirk::FIXME:: During garbage collection there occur references to
             free cells.  This is allright during conservative marking, but
             should not happen otherwise (I think).  The case of free cells
             accessed during conservative marking is handled in function
             scm_mark_locations.  However, there still occur accesses to free
             cells during gc.  I don't understand why this happens.  If it is
             a bug and gets fixed, the following test should also work while
             gc is running.
           */
          if (SCM_FREE_CELL_P (cell))
            {
              fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
              abort ();
            }
        }
      scm_debug_cell_accesses_p = 1;  /* re-enable */
    }
}
140
141
/* Scheme-level switch for the cell-access checking performed by
 * scm_assert_cell_valid above.  */
SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #f, cell access checking is disabled.\n"
            "If FLAG is #t, cell access checking is enabled.\n"
            "This procedure only exists because the compile-time flag\n"
            "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (SCM_FALSEP (flag)) {
    scm_debug_cell_accesses_p = 0;
  } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
    scm_debug_cell_accesses_p = 1;
  } else {
    /* Anything other than #f / #t is a type error.  */
    SCM_WRONG_TYPE_ARG (1, flag);
  }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
160
161#endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
162
163\f
164
0f2d19dd 165/* {heap tuning parameters}
a00c95d9 166 *
0f2d19dd
JB
167 * These are parameters for controlling memory allocation. The heap
168 * is the area out of which scm_cons, and object headers are allocated.
169 *
170 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
171 * 64 bit machine. The units of the _SIZE parameters are bytes.
172 * Cons pairs and object headers occupy one heap cell.
173 *
174 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
175 * allocated initially the heap will grow by half its current size
176 * each subsequent time more heap is needed.
177 *
178 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
179 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
180 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
181 * is in scm_init_storage() and alloc_some_heap() in sys.c
a00c95d9 182 *
0f2d19dd
JB
183 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
184 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
185 *
186 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
187 * is needed.
188 *
189 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
a00c95d9 190 * trigger a GC.
6064dcc6
MV
191 *
192 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
193 * reclaimed by a GC triggered by must_malloc. If less than this is
194 * reclaimed, the trigger threshold is raised. [I don't know what a
195 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
a00c95d9 196 * work around a oscillation that caused almost constant GC.]
0f2d19dd
JB
197 */
198
8fef55a8
MD
199/*
200 * Heap size 45000 and 40% min yield gives quick startup and no extra
201 * heap allocation. Having higher values on min yield may lead to
202 * large heaps, especially if code behaviour is varying its
203 * maximum consumption between different freelists.
204 */
d6884e63
ML
205
206#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
207#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
208#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
209int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
210 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
aeacfc8f 211int scm_default_min_yield_1 = 40;
4c48ba06 212
d6884e63
ML
213#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
214int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
215 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
4c48ba06
MD
216/* The following value may seem large, but note that if we get to GC at
217 * all, this means that we have a numerically intensive application
218 */
aeacfc8f 219int scm_default_min_yield_2 = 40;
4c48ba06 220
aeacfc8f 221int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
4c48ba06 222
d6884e63 223#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
0f2d19dd
JB
224#ifdef _QC
225# define SCM_HEAP_SEG_SIZE 32768L
226#else
227# ifdef sequent
4c48ba06 228# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
0f2d19dd 229# else
4c48ba06 230# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
0f2d19dd
JB
231# endif
232#endif
4c48ba06 233/* Make heap grow with factor 1.5 */
4a4c9785 234#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
0f2d19dd 235#define SCM_INIT_MALLOC_LIMIT 100000
6064dcc6 236#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
0f2d19dd 237
d6884e63
ML
238/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
239 aligned inner bounds for allocated storage */
0f2d19dd
JB
240
241#ifdef PROT386
242/*in 386 protected mode we must only adjust the offset */
a00c95d9
ML
243# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
244# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
0f2d19dd
JB
245#else
246# ifdef _UNICOS
a00c95d9
ML
247# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
248# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
0f2d19dd 249# else
a00c95d9
ML
250# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
251# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
0f2d19dd
JB
252# endif /* UNICOS */
253#endif /* PROT386 */
254
d6884e63
ML
255#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
256#define CLUSTER_SIZE_IN_BYTES(freelist) \
257 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
0f2d19dd
JB
258
259\f
945fec60 260/* scm_freelists
0f2d19dd 261 */
945fec60 262
a00c95d9
ML
/* Per-freelist bookkeeping for the cell allocator.  One master freelist
 * exists per cell span (1-cell and 2-cell objects); see scm_master_freelist
 * and scm_master_freelist2 below.  */
typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;
297
4a4c9785
MD
298SCM scm_freelist = SCM_EOL;
299scm_freelist_t scm_master_freelist = {
b37fe1c5 300 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
4a4c9785
MD
301};
302SCM scm_freelist2 = SCM_EOL;
303scm_freelist_t scm_master_freelist2 = {
b37fe1c5 304 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
4a4c9785 305};
0f2d19dd
JB
306
307/* scm_mtrigger
308 * is the number of bytes of must_malloc allocation needed to trigger gc.
309 */
15e9d186 310unsigned long scm_mtrigger;
0f2d19dd 311
0f2d19dd
JB
312/* scm_gc_heap_lock
313 * If set, don't expand the heap. Set only during gc, during which no allocation
314 * is supposed to take place anyway.
315 */
316int scm_gc_heap_lock = 0;
317
318/* GC Blocking
319 * Don't pause for collection if this is set -- just
320 * expand the heap.
321 */
0f2d19dd
JB
322int scm_block_gc = 1;
323
0f2d19dd
JB
324/* During collection, this accumulates objects holding
325 * weak references.
326 */
ab4bef85 327SCM scm_weak_vectors;
0f2d19dd 328
7445e0e8
MD
329/* During collection, this accumulates structures which are to be freed.
330 */
331SCM scm_structs_to_free;
332
0f2d19dd
JB
333/* GC Statistics Keeping
334 */
335unsigned long scm_cells_allocated = 0;
a5c314c8 336long scm_mallocated = 0;
b37fe1c5 337unsigned long scm_gc_cells_collected;
8b0d194f 338unsigned long scm_gc_yield;
37ddcaf6 339static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
0f2d19dd
JB
340unsigned long scm_gc_malloc_collected;
341unsigned long scm_gc_ports_collected;
0f2d19dd 342unsigned long scm_gc_time_taken = 0;
c9b0d4b0
ML
343static unsigned long t_before_gc;
344static unsigned long t_before_sweep;
345unsigned long scm_gc_mark_time_taken = 0;
346unsigned long scm_gc_sweep_time_taken = 0;
347unsigned long scm_gc_times = 0;
348unsigned long scm_gc_cells_swept = 0;
349double scm_gc_cells_marked_acc = 0.;
350double scm_gc_cells_swept_acc = 0.;
0f2d19dd
JB
351
352SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
353SCM_SYMBOL (sym_heap_size, "cell-heap-size");
354SCM_SYMBOL (sym_mallocated, "bytes-malloced");
355SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
356SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
357SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
c9b0d4b0
ML
358SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
359SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
360SCM_SYMBOL (sym_times, "gc-times");
361SCM_SYMBOL (sym_cells_marked, "cells-marked");
362SCM_SYMBOL (sym_cells_swept, "cells-swept");
0f2d19dd 363
a00c95d9 364typedef struct scm_heap_seg_data_t
0f2d19dd 365{
cf2d30f6
JB
366 /* lower and upper bounds of the segment */
367 SCM_CELLPTR bounds[2];
368
369 /* address of the head-of-freelist pointer for this segment's cells.
370 All segments usually point to the same one, scm_freelist. */
4c48ba06 371 scm_freelist_t *freelist;
cf2d30f6 372
fe517a7d 373 /* number of cells per object in this segment */
945fec60 374 int span;
a00c95d9 375} scm_heap_seg_data_t;
0f2d19dd
JB
376
377
378
945fec60 379static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
b6efc951
DH
380
381typedef enum { return_on_error, abort_on_error } policy_on_error;
382static void alloc_some_heap (scm_freelist_t *, policy_on_error);
0f2d19dd
JB
383
384
d6884e63
ML
385#define SCM_HEAP_SIZE \
386 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
387#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
388
389#define BVEC_GROW_SIZE 256
390#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
391#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
392
393/* mark space allocation */
394
/* A chunk of mark-bit (bvec) storage; chunks form a singly linked list so
 * that clear_mark_space can wipe all of them between collections.  */
typedef struct scm_mark_space_t
{
  scm_c_bvec_limb_t *bvec_space;
  struct scm_mark_space_t *next;
} scm_mark_space_t;

/* Chunk currently being carved up by get_bvec (NULL forces allocation).  */
static scm_mark_space_t *current_mark_space;
/* Tail pointer of the chunk list, where the next chunk is appended.  */
static scm_mark_space_t **mark_space_ptr;
/* Offset (in limbs) of the next free bvec inside current_mark_space.  */
static int current_mark_space_offset;
/* Head of the chunk list, used for clearing.  */
static scm_mark_space_t *mark_space_head;
405
/* Return a pointer to a fresh card-sized bit vector for mark bits.
 * Grows the mark space in BVEC_GROW_SIZE_IN_BYTES chunks on demand; each
 * of the three "fix one missing piece, then retry" cases recurses (at most
 * twice in practice).  Aborts via scm_wta on allocation failure.  */
static scm_c_bvec_limb_t *
get_bvec ()
{
  scm_c_bvec_limb_t *res;

  /* No chunk descriptor yet: allocate one, link it in, retry.  */
  if (!current_mark_space)
    {
      SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
      if (!current_mark_space)
        scm_wta (SCM_UNDEFINED, "could not grow", "heap");

      current_mark_space->bvec_space = NULL;
      current_mark_space->next = NULL;

      *mark_space_ptr = current_mark_space;
      mark_space_ptr = &(current_mark_space->next);

      return get_bvec ();
    }

  /* Descriptor exists but has no storage: allocate zeroed storage, retry.  */
  if (!(current_mark_space->bvec_space))
    {
      SCM_SYSCALL (current_mark_space->bvec_space =
                   (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
      if (!(current_mark_space->bvec_space))
        scm_wta (SCM_UNDEFINED, "could not grow", "heap");

      current_mark_space_offset = 0;

      return get_bvec ();
    }

  /* Current chunk exhausted: drop it and start a new one on retry.  */
  if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
    {
      current_mark_space = NULL;

      return get_bvec ();
    }

  /* Normal case: hand out the next card-sized slice of the chunk.  */
  res = current_mark_space->bvec_space + current_mark_space_offset;
  current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;

  return res;
}
450
451static void
452clear_mark_space ()
453{
454 scm_mark_space_t *ms;
455
456 for (ms = mark_space_head; ms; ms = ms->next)
457 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
458}
459
460
0f2d19dd 461\f
cf2d30f6
JB
462/* Debugging functions. */
463
bb2c57fa 464#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6
JB
465
/* Return the number of the heap segment containing CELL.
 * Linear scan of scm_heap_table; aborts if CELL lies in no segment
 * (debug builds only).  */
static int
which_seg (SCM cell)
{
  int i;

  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
      return i;
  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
           SCM_UNPACK (cell));
  abort ();
}
480
481
8ded62a3
MD
/* Print to stderr, per heap segment, how many free cells of MASTER's span
 * the freelist FREELIST currently holds in that segment.  Assumes the
 * freelist's cells are grouped by segment (counts are flushed whenever the
 * segment changes).  */
static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          /* Entering a new segment: report the finished one, if any.  */
          if (last_seg != -1)
            fprintf (stderr, " %5d %d-cells in segment %d\n",
                     count, master->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  /* Report the final segment's count.  */
  if (last_seg != -1)
    fprintf (stderr, " %5d %d-cells in segment %d\n",
             count, master->span, last_seg);
}
cf2d30f6 506
a00c95d9 507SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c
MD
508 (),
509 "Print debugging information about the free-list.\n"
5384bc5b 510 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
acb0a19c
MD
511#define FUNC_NAME s_scm_map_free_list
512{
4c48ba06
MD
513 int i;
514 fprintf (stderr, "%d segments total (%d:%d",
515 scm_n_heap_segs,
516 scm_heap_table[0].span,
517 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
518 for (i = 1; i < scm_n_heap_segs; i++)
519 fprintf (stderr, ", %d:%d",
520 scm_heap_table[i].span,
521 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
522 fprintf (stderr, ")\n");
8ded62a3
MD
523 map_free_list (&scm_master_freelist, scm_freelist);
524 map_free_list (&scm_master_freelist2, scm_freelist2);
cf2d30f6
JB
525 fflush (stderr);
526
527 return SCM_UNSPECIFIED;
528}
1bbd0b84 529#undef FUNC_NAME
cf2d30f6 530
4c48ba06
MD
531static int last_cluster;
532static int last_size;
533
5384bc5b
MD
/* Walk FREELIST, verify every entry is a free cell (abort otherwise), and
 * return its length.  I is the cluster index being reported (-1 for the
 * thread-local free list).  Uses the file-static last_cluster/last_size to
 * coalesce runs of equal-sized clusters in the stderr output.  */
static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %d\n", title, n);
        abort ();
      }
  if (n != last_size)
    {
      /* Size changed: close out the previous run of equal-sized clusters.  */
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%d\n", last_size);
          else
            fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %d", title, i);
      else
        fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}
565
/* Print, under the heading TITLE, the length of FREELIST and of each
 * cluster hanging off MASTER, followed by the grand total of free objects.
 * Relies on free_list_length's last_cluster/last_size run-coalescing.  */
static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  /* Close out the last run of equal-sized clusters.  */
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}
586
a00c95d9 587SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b
MD
588 (),
589 "Print debugging information about the free-list.\n"
590 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
591#define FUNC_NAME s_scm_free_list_length
592{
b37fe1c5
MD
593 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
594 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 595 return SCM_UNSPECIFIED;
5384bc5b
MD
596}
597#undef FUNC_NAME
598
bb2c57fa
MD
599#endif
600
601#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
602
603/* Number of calls to SCM_NEWCELL since startup. */
604static unsigned long scm_newcell_count;
acb0a19c 605static unsigned long scm_newcell2_count;
cf2d30f6
JB
606
/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  Used by the GUILE_DEBUG_FREELIST
   allocation wrappers below to catch freelist corruption early.  */
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
    if (!SCM_FREE_CELL_P (f))
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        abort ();
      }
}
cf2d30f6 623
a00c95d9 624SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 625 (SCM flag),
da4a1dba
GB
626 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
627 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
628 "compile-time flag was selected.\n")
1bbd0b84 629#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 630{
d6884e63
ML
631 /* [cmm] I did a double-take when I read this code the first time.
632 well, FWIW. */
945fec60 633 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
634 return SCM_UNSPECIFIED;
635}
1bbd0b84 636#undef FUNC_NAME
25748c78
GB
637
638
4a4c9785
MD
/* Debug replacement for the SCM_NEWCELL macro: optionally checks the
 * freelist and forces a GC before every allocation.  The allocation path
 * itself must stay in lockstep with the macro (see comment below).  */
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
    }

  return new;
}
663
/* Same as scm_debug_newcell, but for 2-cell (double-cell) objects, drawing
 * from scm_freelist2 / scm_master_freelist2.  */
SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
    }

  return new;
}
688
fca7547b 689#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
690
691\f
0f2d19dd 692
b37fe1c5
MD
/* Estimate the number of cells handed out from MASTER's clusters, in cell
 * units (scaled by span).  Cells still waiting to be collected into the
 * current cluster are subtracted when no full clusters remain.  */
static unsigned long
master_cells_allocated (scm_freelist_t *master)
{
  /* the '- 1' below is to ignore the cluster spine cells. */
  int objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}
702
703static unsigned long
704freelist_length (SCM freelist)
705{
706 int n;
3f5d82cd 707 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
b37fe1c5
MD
708 ++n;
709 return n;
710}
711
/* Total live-cell estimate: cells charged to the mutator plus cells drawn
 * from both masters, minus cells still sitting unused on the two
 * thread-local freelists.  */
static unsigned long
compute_cells_allocated ()
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}
b37fe1c5 721
0f2d19dd
JB
722/* {Scheme Interface to GC}
723 */
724
a00c95d9 725SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 726 (),
b380b885 727 "Returns an association list of statistics about Guile's current use of storage. ")
1bbd0b84 728#define FUNC_NAME s_scm_gc_stats
0f2d19dd
JB
729{
730 int i;
731 int n;
732 SCM heap_segs;
c209c88e
GB
733 long int local_scm_mtrigger;
734 long int local_scm_mallocated;
735 long int local_scm_heap_size;
736 long int local_scm_cells_allocated;
737 long int local_scm_gc_time_taken;
c9b0d4b0
ML
738 long int local_scm_gc_times;
739 long int local_scm_gc_mark_time_taken;
740 long int local_scm_gc_sweep_time_taken;
741 double local_scm_gc_cells_swept;
742 double local_scm_gc_cells_marked;
0f2d19dd
JB
743 SCM answer;
744
745 SCM_DEFER_INTS;
939794ce
DH
746
747 ++scm_block_gc;
748
0f2d19dd
JB
749 retry:
750 heap_segs = SCM_EOL;
751 n = scm_n_heap_segs;
752 for (i = scm_n_heap_segs; i--; )
753 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
754 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
755 heap_segs);
756 if (scm_n_heap_segs != n)
757 goto retry;
939794ce
DH
758
759 --scm_block_gc;
0f2d19dd 760
7febb4a2
MD
761 /* Below, we cons to produce the resulting list. We want a snapshot of
762 * the heap situation before consing.
763 */
0f2d19dd
JB
764 local_scm_mtrigger = scm_mtrigger;
765 local_scm_mallocated = scm_mallocated;
b37fe1c5 766 local_scm_heap_size = SCM_HEAP_SIZE;
b37fe1c5 767 local_scm_cells_allocated = compute_cells_allocated ();
0f2d19dd 768 local_scm_gc_time_taken = scm_gc_time_taken;
c9b0d4b0
ML
769 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
770 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
771 local_scm_gc_times = scm_gc_times;
772 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
773 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
0f2d19dd
JB
774
775 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
776 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
777 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
778 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
779 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
c9b0d4b0
ML
780 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
781 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
782 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
783 scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
784 scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
0f2d19dd
JB
785 scm_cons (sym_heap_segments, heap_segs),
786 SCM_UNDEFINED);
787 SCM_ALLOW_INTS;
788 return answer;
789}
1bbd0b84 790#undef FUNC_NAME
0f2d19dd
JB
791
792
c9b0d4b0
ML
/* Reset the per-collection statistics counters at the start of a GC.
 * WHAT (the trigger reason) is currently unused here — presumably kept for
 * symmetry with scm_igc's argument; confirm before removing.  */
static void
gc_start_stats (const char *what)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  /* Remember the previous yield; adjust_min_yield uses the larger of the
   * last two.  */
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
806
939794ce 807
c9b0d4b0
ML
/* Accumulate timing and cell-count statistics at the end of a GC.
 * t_before_gc / t_before_sweep are set earlier in the collection cycle.  */
static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  /* Marked cells = cells swept minus cells reclaimed.  */
  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}
819
820
a00c95d9 821SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 822 (SCM obj),
b380b885
MD
823 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
824 "returned by this function for @var{obj}")
1bbd0b84 825#define FUNC_NAME s_scm_object_address
0f2d19dd 826{
54778cd3 827 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 828}
1bbd0b84 829#undef FUNC_NAME
0f2d19dd
JB
830
831
a00c95d9 832SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
1bbd0b84 833 (),
b380b885
MD
834 "Scans all of SCM objects and reclaims for further use those that are\n"
835 "no longer accessible.")
1bbd0b84 836#define FUNC_NAME s_scm_gc
0f2d19dd
JB
837{
838 SCM_DEFER_INTS;
839 scm_igc ("call");
840 SCM_ALLOW_INTS;
841 return SCM_UNSPECIFIED;
842}
1bbd0b84 843#undef FUNC_NAME
0f2d19dd
JB
844
845
846\f
847/* {C Interface For When GC is Triggered}
848 */
849
b37fe1c5 850static void
8fef55a8 851adjust_min_yield (scm_freelist_t *freelist)
b37fe1c5 852{
8fef55a8 853 /* min yield is adjusted upwards so that next predicted total yield
bda1446c 854 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
855 * `min_yield_fraction' of total heap size. Note, however, that
856 * the absolute value of min_yield will correspond to `collected'
bda1446c 857 * on one master (the one which currently is triggering GC).
b37fe1c5 858 *
bda1446c
MD
859 * The reason why we look at total yield instead of cells collected
860 * on one list is that we want to take other freelists into account.
861 * On this freelist, we know that (local) yield = collected cells,
862 * but that's probably not the case on the other lists.
b37fe1c5
MD
863 *
864 * (We might consider computing a better prediction, for example
865 * by computing an average over multiple GC:s.)
866 */
8fef55a8 867 if (freelist->min_yield_fraction)
b37fe1c5 868 {
37ddcaf6 869 /* Pick largest of last two yields. */
8fef55a8
MD
870 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
871 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5
MD
872#ifdef DEBUGINFO
873 fprintf (stderr, " after GC = %d, delta = %d\n",
874 scm_cells_allocated,
875 delta);
876#endif
877 if (delta > 0)
8fef55a8 878 freelist->min_yield += delta;
b37fe1c5
MD
879 }
880}
881
b6efc951 882
/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

/* Refill *FREELIST from MASTER and return the first cell of a fresh
 * cluster.  Tries, in order: growing the heap, garbage collecting, and
 * finally growing the heap with abort-on-failure.  Runs with interrupts
 * disabled (scm_ints_disabled).  */
SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              /* In order to reduce gc frequency, try to allocate a new heap
               * segment first, even if gc might find some free cells.  If we
               * can't obtain a new heap segment, we will try gc later.
               */
              master->grow_heap_p = 0;
              alloc_some_heap (master, return_on_error);
            }
          if (SCM_NULLP (master->clusters))
            {
              /* The heap was not grown, either because it wasn't scheduled to
               * grow, or because there was not enough memory available.  In
               * both cases we have to try gc to get some free cells.
               */
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %d, ",
                       scm_cells_allocated
                       + master_cells_allocated (&scm_master_freelist)
                       + master_cells_allocated (&scm_master_freelist2));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
              if (SCM_NULLP (master->clusters))
                {
                  /* gc could not free any cells.  Now, we _must_ allocate a
                   * new heap segment, because there is no other possibility
                   * to provide a new cell for the caller.
                   */
                  alloc_some_heap (master, abort_on_error);
                }
            }
        }
      /* Pop the next cluster; its spine cell may be empty, hence the loop.  */
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  /* Hand the rest of the cluster to the caller's freelist; return its head.  */
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}
943
b6efc951 944
4c48ba06
MD
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 *
 * Fix while disabled: the function was declared `void' yet executed
 * `return cell;', a constraint violation.  It is declared SCM here so
 * the reserved cluster head can actually be handed to the caller.
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
960
801cb5e7
MD
961
962scm_c_hook_t scm_before_gc_c_hook;
963scm_c_hook_t scm_before_mark_c_hook;
964scm_c_hook_t scm_before_sweep_c_hook;
965scm_c_hook_t scm_after_sweep_c_hook;
966scm_c_hook_t scm_after_gc_c_hook;
967
b6efc951 968
0f2d19dd 969void
1bbd0b84 970scm_igc (const char *what)
0f2d19dd
JB
971{
972 int j;
973
406c7d90 974 ++scm_gc_running_p;
801cb5e7 975 scm_c_hook_run (&scm_before_gc_c_hook, 0);
4c48ba06
MD
976#ifdef DEBUGINFO
977 fprintf (stderr,
978 SCM_NULLP (scm_freelist)
979 ? "*"
980 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
981#endif
42db06f0
MD
982#ifdef USE_THREADS
983 /* During the critical section, only the current thread may run. */
984 SCM_THREAD_CRITICAL_SECTION_START;
985#endif
986
e242dfd2 987 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 988
ab4bef85
JB
989 if (!scm_stack_base || scm_block_gc)
990 {
406c7d90 991 --scm_gc_running_p;
ab4bef85
JB
992 return;
993 }
994
c9b0d4b0
ML
995 gc_start_stats (what);
996
a5c314c8
JB
997 if (scm_mallocated < 0)
998 /* The byte count of allocated objects has underflowed. This is
999 probably because you forgot to report the sizes of objects you
1000 have allocated, by calling scm_done_malloc or some such. When
1001 the GC freed them, it subtracted their size from
1002 scm_mallocated, which underflowed. */
1003 abort ();
c45acc34 1004
ab4bef85
JB
1005 if (scm_gc_heap_lock)
1006 /* We've invoked the collector while a GC is already in progress.
1007 That should never happen. */
1008 abort ();
0f2d19dd
JB
1009
1010 ++scm_gc_heap_lock;
ab4bef85 1011
0f2d19dd
JB
1012 /* flush dead entries from the continuation stack */
1013 {
1014 int x;
1015 int bound;
1016 SCM * elts;
1017 elts = SCM_VELTS (scm_continuation_stack);
b5c2579a 1018 bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
0f2d19dd
JB
1019 x = SCM_INUM (scm_continuation_stack_ptr);
1020 while (x < bound)
1021 {
1022 elts[x] = SCM_BOOL_F;
1023 ++x;
1024 }
1025 }
1026
801cb5e7
MD
1027 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1028
d6884e63
ML
1029 clear_mark_space ();
1030
42db06f0 1031#ifndef USE_THREADS
a00c95d9 1032
1b9be268 1033 /* Mark objects on the C stack. */
0f2d19dd
JB
1034 SCM_FLUSH_REGISTER_WINDOWS;
1035 /* This assumes that all registers are saved into the jmp_buf */
1036 setjmp (scm_save_regs_gc_mark);
1037 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
ce4a361d
JB
1038 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
1039 sizeof scm_save_regs_gc_mark)
1040 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
1041
1042 {
6ba93e5e 1043 scm_sizet stack_len = scm_stack_size (scm_stack_base);
0f2d19dd 1044#ifdef SCM_STACK_GROWS_UP
6ba93e5e 1045 scm_mark_locations (scm_stack_base, stack_len);
0f2d19dd 1046#else
6ba93e5e 1047 scm_mark_locations (scm_stack_base - stack_len, stack_len);
0f2d19dd
JB
1048#endif
1049 }
1050
42db06f0
MD
1051#else /* USE_THREADS */
1052
1053 /* Mark every thread's stack and registers */
945fec60 1054 scm_threads_mark_stacks ();
42db06f0
MD
1055
1056#endif /* USE_THREADS */
0f2d19dd 1057
0f2d19dd
JB
1058 j = SCM_NUM_PROTECTS;
1059 while (j--)
1060 scm_gc_mark (scm_sys_protects[j]);
1061
9de33deb
MD
1062 /* FIXME: we should have a means to register C functions to be run
1063 * in different phases of GC
a00c95d9 1064 */
9de33deb 1065 scm_mark_subr_table ();
a00c95d9 1066
42db06f0
MD
1067#ifndef USE_THREADS
1068 scm_gc_mark (scm_root->handle);
1069#endif
a00c95d9 1070
c9b0d4b0
ML
1071 t_before_sweep = scm_c_get_internal_run_time ();
1072 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1073
801cb5e7 1074 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
0493cd89 1075
0f2d19dd
JB
1076 scm_gc_sweep ();
1077
801cb5e7
MD
1078 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1079
0f2d19dd 1080 --scm_gc_heap_lock;
c9b0d4b0 1081 gc_end_stats ();
42db06f0
MD
1082
1083#ifdef USE_THREADS
1084 SCM_THREAD_CRITICAL_SECTION_END;
1085#endif
801cb5e7 1086 scm_c_hook_run (&scm_after_gc_c_hook, 0);
406c7d90 1087 --scm_gc_running_p;
0f2d19dd
JB
1088}
1089
1090\f
939794ce 1091
a00c95d9 1092/* {Mark/Sweep}
0f2d19dd
JB
1093 */
1094
56495472
ML
1095#define MARK scm_gc_mark
1096#define FNAME "scm_gc_mark"
0f2d19dd 1097
56495472 1098#endif /*!MARK_DEPENDENCIES*/
0f2d19dd
JB
1099
1100/* Mark an object precisely.
1101 */
a00c95d9 1102void
56495472
ML
1103MARK (SCM p)
1104#define FUNC_NAME FNAME
0f2d19dd
JB
1105{
1106 register long i;
1107 register SCM ptr;
1108
56495472
ML
1109#ifndef MARK_DEPENDENCIES
1110# define RECURSE scm_gc_mark
1111#else
1112 /* go through the usual marking, but not for self-cycles. */
1113# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1114#endif
0f2d19dd
JB
1115 ptr = p;
1116
56495472
ML
1117#ifdef MARK_DEPENDENCIES
1118 goto gc_mark_loop_first_time;
1119#endif
1120
0f2d19dd
JB
1121gc_mark_loop:
1122 if (SCM_IMP (ptr))
1123 return;
1124
1125gc_mark_nimp:
56495472
ML
1126
1127#ifdef MARK_DEPENDENCIES
0209177b 1128 if (SCM_EQ_P (ptr, p))
56495472
ML
1129 return;
1130
1131 scm_gc_mark (ptr);
0209177b 1132 return;
56495472
ML
1133
1134gc_mark_loop_first_time:
1135#endif
1136
3f5d82cd 1137 if (!SCM_CELLP (ptr))
acf4331f 1138 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
0f2d19dd 1139
641d17a8 1140#if (defined (GUILE_DEBUG_FREELIST))
d6884e63
ML
1141
1142 if (SCM_GC_IN_CARD_HEADERP (SCM2PTR (ptr)))
1143 scm_wta (ptr, "rogue pointer in heap", NULL);
1144
1145#endif
1146
56495472
ML
1147#ifndef MARK_DEPENDENCIES
1148
d6884e63
ML
1149 if (SCM_GCMARKP (ptr))
1150 return;
56495472 1151
d6884e63
ML
1152 SCM_SETGCMARK (ptr);
1153
56495472
ML
1154#endif
1155
0f2d19dd
JB
1156 switch (SCM_TYP7 (ptr))
1157 {
1158 case scm_tcs_cons_nimcar:
d6884e63 1159 if (SCM_IMP (SCM_CDR (ptr)))
0f2d19dd
JB
1160 {
1161 ptr = SCM_CAR (ptr);
1162 goto gc_mark_nimp;
1163 }
56495472 1164 RECURSE (SCM_CAR (ptr));
d6884e63 1165 ptr = SCM_CDR (ptr);
0f2d19dd
JB
1166 goto gc_mark_nimp;
1167 case scm_tcs_cons_imcar:
d6884e63 1168 ptr = SCM_CDR (ptr);
acb0a19c 1169 goto gc_mark_loop;
e641afaf 1170 case scm_tc7_pws:
56495472 1171 RECURSE (SCM_CELL_OBJECT_2 (ptr));
d6884e63 1172 ptr = SCM_CDR (ptr);
0f2d19dd
JB
1173 goto gc_mark_loop;
1174 case scm_tcs_cons_gloc:
0f2d19dd 1175 {
c8045e8d
DH
1176 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1177 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1178 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1179 * pointer to a struct vtable data region. The fact that these are
1180 * accessed in the same way restricts the possibilites to change the
9d47a1e6 1181 * data layout of structs or heap cells.
c8045e8d
DH
1182 */
1183 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1184 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
7445e0e8 1185 if (vtable_data [scm_vtable_index_vcell] != 0)
0f2d19dd 1186 {
d6884e63
ML
1187 /* ptr is a gloc */
1188 SCM gloc_car = SCM_PACK (word0);
56495472 1189 RECURSE (gloc_car);
d6884e63
ML
1190 ptr = SCM_CDR (ptr);
1191 goto gc_mark_loop;
1192 }
1193 else
1194 {
1195 /* ptr is a struct */
1196 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
b5c2579a 1197 int len = SCM_SYMBOL_LENGTH (layout);
06ee04b2 1198 char * fields_desc = SCM_SYMBOL_CHARS (layout);
d6884e63 1199 scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);
7bb8eac7 1200
d6884e63
ML
1201 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1202 {
56495472
ML
1203 RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
1204 RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
d6884e63
ML
1205 }
1206 if (len)
1207 {
1208 int x;
7bb8eac7 1209
d6884e63
ML
1210 for (x = 0; x < len - 2; x += 2, ++struct_data)
1211 if (fields_desc[x] == 'p')
56495472 1212 RECURSE (SCM_PACK (*struct_data));
d6884e63
ML
1213 if (fields_desc[x] == 'p')
1214 {
1215 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
56495472
ML
1216 for (x = *struct_data++; x; --x, ++struct_data)
1217 RECURSE (SCM_PACK (*struct_data));
d6884e63 1218 else
56495472 1219 RECURSE (SCM_PACK (*struct_data));
d6884e63
ML
1220 }
1221 }
1222 /* mark vtable */
1223 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1224 goto gc_mark_loop;
0f2d19dd
JB
1225 }
1226 }
1227 break;
1228 case scm_tcs_closures:
0f2d19dd
JB
1229 if (SCM_IMP (SCM_CDR (ptr)))
1230 {
1231 ptr = SCM_CLOSCAR (ptr);
1232 goto gc_mark_nimp;
1233 }
56495472 1234 RECURSE (SCM_CLOSCAR (ptr));
d6884e63 1235 ptr = SCM_CDR (ptr);
0f2d19dd
JB
1236 goto gc_mark_nimp;
1237 case scm_tc7_vector:
b5c2579a
DH
1238 i = SCM_VECTOR_LENGTH (ptr);
1239 if (i == 0)
1240 break;
1241 while (--i > 0)
1242 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
56495472 1243 RECURSE (SCM_VELTS (ptr)[i]);
b5c2579a
DH
1244 ptr = SCM_VELTS (ptr)[0];
1245 goto gc_mark_loop;
0f2d19dd
JB
1246#ifdef CCLO
1247 case scm_tc7_cclo:
362306b9
DH
1248 {
1249 unsigned long int i = SCM_CCLO_LENGTH (ptr);
1250 unsigned long int j;
1251 for (j = 1; j != i; ++j)
1252 {
1253 SCM obj = SCM_CCLO_REF (ptr, j);
1254 if (!SCM_IMP (obj))
56495472 1255 RECURSE (obj);
362306b9
DH
1256 }
1257 ptr = SCM_CCLO_REF (ptr, 0);
1258 goto gc_mark_loop;
1259 }
b5c2579a 1260#endif
afe5177e 1261#ifdef HAVE_ARRAYS
0f2d19dd
JB
1262 case scm_tc7_bvect:
1263 case scm_tc7_byvect:
1264 case scm_tc7_ivect:
1265 case scm_tc7_uvect:
1266 case scm_tc7_fvect:
1267 case scm_tc7_dvect:
1268 case scm_tc7_cvect:
1269 case scm_tc7_svect:
5c11cc9d 1270#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1271 case scm_tc7_llvect:
1272#endif
afe5177e 1273#endif
0f2d19dd 1274 case scm_tc7_string:
0f2d19dd
JB
1275 break;
1276
1277 case scm_tc7_substring:
0f2d19dd
JB
1278 ptr = SCM_CDR (ptr);
1279 goto gc_mark_loop;
1280
1281 case scm_tc7_wvect:
ab4bef85
JB
1282 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1283 scm_weak_vectors = ptr;
0f2d19dd
JB
1284 if (SCM_IS_WHVEC_ANY (ptr))
1285 {
1286 int x;
1287 int len;
1288 int weak_keys;
1289 int weak_values;
1290
b5c2579a 1291 len = SCM_VECTOR_LENGTH (ptr);
0f2d19dd
JB
1292 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1293 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
a00c95d9 1294
0f2d19dd
JB
1295 for (x = 0; x < len; ++x)
1296 {
1297 SCM alist;
1298 alist = SCM_VELTS (ptr)[x];
46408039
JB
1299
1300 /* mark everything on the alist except the keys or
1301 * values, according to weak_values and weak_keys. */
0b5f3f34 1302 while ( SCM_CONSP (alist)
0f2d19dd 1303 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1304 && SCM_CONSP (SCM_CAR (alist)))
1305 {
1306 SCM kvpair;
1307 SCM next_alist;
1308
1309 kvpair = SCM_CAR (alist);
1310 next_alist = SCM_CDR (alist);
a00c95d9 1311 /*
0f2d19dd
JB
1312 * Do not do this:
1313 * SCM_SETGCMARK (alist);
1314 * SCM_SETGCMARK (kvpair);
1315 *
1316 * It may be that either the key or value is protected by
1317 * an escaped reference to part of the spine of this alist.
1318 * If we mark the spine here, and only mark one or neither of the
1319 * key and value, they may never be properly marked.
1320 * This leads to a horrible situation in which an alist containing
1321 * freelist cells is exported.
1322 *
1323 * So only mark the spines of these arrays last of all marking.
1324 * If somebody confuses us by constructing a weak vector
1325 * with a circular alist then we are hosed, but at least we
1326 * won't prematurely drop table entries.
1327 */
1328 if (!weak_keys)
56495472 1329 RECURSE (SCM_CAR (kvpair));
0f2d19dd 1330 if (!weak_values)
56495472 1331 RECURSE (SCM_CDR (kvpair));
0f2d19dd
JB
1332 alist = next_alist;
1333 }
1334 if (SCM_NIMP (alist))
56495472 1335 RECURSE (alist);
0f2d19dd
JB
1336 }
1337 }
1338 break;
1339
28b06554
DH
1340 case scm_tc7_symbol:
1341 ptr = SCM_PROP_SLOTS (ptr);
0f2d19dd 1342 goto gc_mark_loop;
0f2d19dd 1343 case scm_tcs_subrs:
9de33deb 1344 break;
0f2d19dd
JB
1345 case scm_tc7_port:
1346 i = SCM_PTOBNUM (ptr);
1347 if (!(i < scm_numptob))
1348 goto def;
ebf7394e 1349 if (SCM_PTAB_ENTRY(ptr))
56495472 1350 RECURSE (SCM_FILENAME (ptr));
dc53f026
JB
1351 if (scm_ptobs[i].mark)
1352 {
1353 ptr = (scm_ptobs[i].mark) (ptr);
1354 goto gc_mark_loop;
1355 }
1356 else
1357 return;
0f2d19dd
JB
1358 break;
1359 case scm_tc7_smob:
d6884e63 1360 switch (SCM_TYP16 (ptr))
0f2d19dd
JB
1361 { /* should be faster than going through scm_smobs */
1362 case scm_tc_free_cell:
1363 /* printf("found free_cell %X ", ptr); fflush(stdout); */
acb0a19c
MD
1364 case scm_tc16_big:
1365 case scm_tc16_real:
1366 case scm_tc16_complex:
0f2d19dd
JB
1367 break;
1368 default:
1369 i = SCM_SMOBNUM (ptr);
1370 if (!(i < scm_numsmob))
1371 goto def;
dc53f026
JB
1372 if (scm_smobs[i].mark)
1373 {
1374 ptr = (scm_smobs[i].mark) (ptr);
1375 goto gc_mark_loop;
1376 }
1377 else
1378 return;
0f2d19dd
JB
1379 }
1380 break;
1381 default:
acf4331f
DH
1382 def:
1383 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1384 }
0209177b 1385#undef RECURSE
0f2d19dd 1386}
acf4331f 1387#undef FUNC_NAME
0f2d19dd 1388
56495472
ML
1389#ifndef MARK_DEPENDENCIES
1390
1391#undef MARK
56495472
ML
1392#undef FNAME
1393
1394/* And here we define `scm_gc_mark_dependencies', by including this
1395 * same file in itself.
1396 */
1397#define MARK scm_gc_mark_dependencies
1398#define FNAME "scm_gc_mark_dependencies"
1399#define MARK_DEPENDENCIES
1400#include "gc.c"
1401#undef MARK_DEPENDENCIES
1402#undef MARK
56495472
ML
1403#undef FNAME
1404
0f2d19dd
JB
1405
1406/* Mark a Region Conservatively
1407 */
1408
a00c95d9 1409void
6e8d25a6 1410scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
0f2d19dd 1411{
c4da09e2 1412 unsigned long m;
0f2d19dd 1413
c4da09e2
DH
1414 for (m = 0; m < n; ++m)
1415 {
1416 SCM obj = * (SCM *) &x[m];
1417 if (SCM_CELLP (obj))
1418 {
1419 SCM_CELLPTR ptr = SCM2PTR (obj);
1420 int i = 0;
1421 int j = scm_n_heap_segs - 1;
1422 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1423 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1424 {
1425 while (i <= j)
1426 {
1427 int seg_id;
1428 seg_id = -1;
1429 if ((i == j)
1430 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1431 seg_id = i;
1432 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1433 seg_id = j;
1434 else
1435 {
1436 int k;
1437 k = (i + j) / 2;
1438 if (k == i)
1439 break;
1440 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1441 {
1442 j = k;
1443 ++i;
1444 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1445 continue;
1446 else
1447 break;
1448 }
1449 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1450 {
1451 i = k;
1452 --j;
1453 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1454 continue;
1455 else
1456 break;
1457 }
1458 }
7bb8eac7 1459
d6884e63
ML
1460 if (SCM_GC_IN_CARD_HEADERP (ptr))
1461 break;
7bb8eac7 1462
c4da09e2
DH
1463 if (scm_heap_table[seg_id].span == 1
1464 || SCM_DOUBLE_CELLP (obj))
3731149d
ML
1465 scm_gc_mark (obj);
1466
c4da09e2
DH
1467 break;
1468 }
1469 }
1470 }
1471 }
0f2d19dd
JB
1472}
1473
1474
1a548472
DH
1475/* The function scm_cellp determines whether an SCM value can be regarded as a
1476 * pointer to a cell on the heap. Binary search is used in order to determine
1477 * the heap segment that contains the cell.
1478 */
2e11a577 1479int
6e8d25a6 1480scm_cellp (SCM value)
2e11a577 1481{
1a548472
DH
1482 if (SCM_CELLP (value)) {
1483 scm_cell * ptr = SCM2PTR (value);
1484 unsigned int i = 0;
1485 unsigned int j = scm_n_heap_segs - 1;
1486
1487 while (i < j) {
1488 int k = (i + j) / 2;
1489 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1490 j = k;
1491 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1492 i = k + 1;
1493 }
1494 }
2e11a577 1495
9d47a1e6 1496 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1a548472 1497 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
d6884e63
ML
1498 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))
1499 && !SCM_GC_IN_CARD_HEADERP (ptr)
1500 )
1a548472 1501 return 1;
d6884e63 1502 else
1a548472 1503 return 0;
d6884e63 1504 } else
1a548472 1505 return 0;
2e11a577
MD
1506}
1507
1508
4c48ba06
MD
1509static void
1510gc_sweep_freelist_start (scm_freelist_t *freelist)
1511{
1512 freelist->cells = SCM_EOL;
1513 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1514 freelist->clusters_allocated = 0;
4c48ba06
MD
1515 freelist->clusters = SCM_EOL;
1516 freelist->clustertail = &freelist->clusters;
1811ebce 1517 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1518 freelist->collected = 0;
1519}
1520
1521static void
1522gc_sweep_freelist_finish (scm_freelist_t *freelist)
1523{
1811ebce 1524 int collected;
4c48ba06 1525 *freelist->clustertail = freelist->cells;
3f5d82cd 1526 if (!SCM_NULLP (freelist->cells))
4c48ba06
MD
1527 {
1528 SCM c = freelist->cells;
1529 SCM_SETCAR (c, SCM_CDR (c));
1530 SCM_SETCDR (c, SCM_EOL);
1531 freelist->collected +=
1532 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1533 }
b37fe1c5 1534 scm_gc_cells_collected += freelist->collected;
a00c95d9 1535
8fef55a8 1536 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1537 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1538 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1539 * size. This means that a too low yield is compensated by more
1540 * heap on the list which is currently doing most work, which is
1541 * just what we want.
1542 */
1811ebce 1543 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1544 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06 1545}
0f2d19dd 1546
d6884e63
ML
/* Advance PTR to the next data cell of the given SPAN, skipping card
 * headers: if the bumped pointer lands inside a card header, jump to
 * the first data cell of that card instead. */
#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *next__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (next__) ? \
             CELL_UP (SCM_GC_CELL_CARD (next__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
             : next__); \
  } while (0)
1554
a00c95d9 1555void
0f2d19dd 1556scm_gc_sweep ()
acf4331f 1557#define FUNC_NAME "scm_gc_sweep"
0f2d19dd
JB
1558{
1559 register SCM_CELLPTR ptr;
0f2d19dd 1560 register SCM nfreelist;
4c48ba06 1561 register scm_freelist_t *freelist;
0f2d19dd 1562 register long m;
0f2d19dd 1563 register int span;
15e9d186 1564 long i;
0f2d19dd
JB
1565 scm_sizet seg_size;
1566
0f2d19dd 1567 m = 0;
0f2d19dd 1568
4c48ba06
MD
1569 gc_sweep_freelist_start (&scm_master_freelist);
1570 gc_sweep_freelist_start (&scm_master_freelist2);
a00c95d9 1571
cf2d30f6 1572 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1573 {
4c48ba06 1574 register unsigned int left_to_collect;
4c48ba06 1575 register scm_sizet j;
15e9d186 1576
cf2d30f6
JB
1577 /* Unmarked cells go onto the front of the freelist this heap
1578 segment points to. Rather than updating the real freelist
1579 pointer as we go along, we accumulate the new head in
1580 nfreelist. Then, if it turns out that the entire segment is
1581 free, we free (i.e., malloc's free) the whole segment, and
1582 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1583 freelist = scm_heap_table[i].freelist;
1584 nfreelist = freelist->cells;
4c48ba06 1585 left_to_collect = freelist->left_to_collect;
945fec60 1586 span = scm_heap_table[i].span;
cf2d30f6 1587
a00c95d9
ML
1588 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1589 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
c9b0d4b0 1590
d6884e63
ML
1591 /* use only data cells in seg_size */
1592 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1593
c9b0d4b0
ML
1594 scm_gc_cells_swept += seg_size;
1595
0f2d19dd
JB
1596 for (j = seg_size + span; j -= span; ptr += span)
1597 {
d6884e63 1598 SCM scmptr;
96f6f4ae 1599
d6884e63 1600 if (SCM_GC_IN_CARD_HEADERP (ptr))
0f2d19dd 1601 {
d6884e63
ML
1602 SCM_CELLPTR nxt;
1603
1604 /* cheat here */
1605 nxt = ptr;
1606 NEXT_DATA_CELL (nxt, span);
1607 j += span;
1608
1609 ptr = nxt - span;
1610 continue;
1611 }
1612
1613 scmptr = PTR2SCM (ptr);
1614
1615 if (SCM_GCMARKP (scmptr))
1616 continue;
7bb8eac7 1617
d6884e63
ML
1618 switch SCM_TYP7 (scmptr)
1619 {
0f2d19dd 1620 case scm_tcs_cons_gloc:
0f2d19dd 1621 {
c8045e8d
DH
1622 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1623 * struct or a gloc. See the corresponding comment in
1624 * scm_gc_mark.
1625 */
7445e0e8
MD
1626 scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
1627 - scm_tc3_cons_gloc);
1628 /* access as struct */
1629 scm_bits_t * vtable_data = (scm_bits_t *) word0;
d6884e63 1630 if (vtable_data[scm_vtable_index_vcell] == 0)
0f2d19dd 1631 {
7445e0e8
MD
1632 /* Structs need to be freed in a special order.
1633 * This is handled by GC C hooks in struct.c.
1634 */
1635 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1636 scm_structs_to_free = scmptr;
7bb8eac7 1637 continue;
c8045e8d 1638 }
7445e0e8 1639 /* fall through so that scmptr gets collected */
0f2d19dd
JB
1640 }
1641 break;
1642 case scm_tcs_cons_imcar:
1643 case scm_tcs_cons_nimcar:
1644 case scm_tcs_closures:
e641afaf 1645 case scm_tc7_pws:
0f2d19dd
JB
1646 break;
1647 case scm_tc7_wvect:
b5c2579a 1648 m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
06ee04b2 1649 scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
d6884e63 1650 break;
0f2d19dd 1651 case scm_tc7_vector:
1b9be268
DH
1652 {
1653 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1654 if (length > 0)
1655 {
1656 m += length * sizeof (scm_bits_t);
1657 scm_must_free (SCM_VECTOR_BASE (scmptr));
1658 }
1659 break;
1660 }
0f2d19dd
JB
1661#ifdef CCLO
1662 case scm_tc7_cclo:
b5c2579a 1663 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
06ee04b2 1664 scm_must_free (SCM_CCLO_BASE (scmptr));
0f2d19dd 1665 break;
06ee04b2 1666#endif
afe5177e 1667#ifdef HAVE_ARRAYS
0f2d19dd 1668 case scm_tc7_bvect:
93778877
DH
1669 {
1670 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
1671 if (length > 0)
1672 {
1673 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1674 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1675 }
1676 }
06ee04b2 1677 break;
0f2d19dd 1678 case scm_tc7_byvect:
0f2d19dd
JB
1679 case scm_tc7_ivect:
1680 case scm_tc7_uvect:
0f2d19dd 1681 case scm_tc7_svect:
5c11cc9d 1682#ifdef HAVE_LONG_LONGS
0f2d19dd 1683 case scm_tc7_llvect:
0f2d19dd
JB
1684#endif
1685 case scm_tc7_fvect:
0f2d19dd 1686 case scm_tc7_dvect:
0f2d19dd 1687 case scm_tc7_cvect:
d1ca2c64 1688 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
06ee04b2
DH
1689 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1690 break;
afe5177e 1691#endif
0f2d19dd 1692 case scm_tc7_substring:
0f2d19dd
JB
1693 break;
1694 case scm_tc7_string:
b5c2579a 1695 m += SCM_STRING_LENGTH (scmptr) + 1;
f151f912
DH
1696 scm_must_free (SCM_STRING_CHARS (scmptr));
1697 break;
28b06554 1698 case scm_tc7_symbol:
b5c2579a 1699 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
f151f912 1700 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
0f2d19dd 1701 break;
0f2d19dd 1702 case scm_tcs_subrs:
d6884e63 1703 /* the various "subrs" (primitives) are never freed */
0f2d19dd
JB
1704 continue;
1705 case scm_tc7_port:
0f2d19dd
JB
1706 if SCM_OPENP (scmptr)
1707 {
1708 int k = SCM_PTOBNUM (scmptr);
1709 if (!(k < scm_numptob))
1710 goto sweeperr;
1711 /* Keep "revealed" ports alive. */
945fec60 1712 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1713 continue;
1714 /* Yes, I really do mean scm_ptobs[k].free */
1715 /* rather than ftobs[k].close. .close */
1716 /* is for explicit CLOSE-PORT by user */
84af0382 1717 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1718 SCM_SETSTREAM (scmptr, 0);
1719 scm_remove_from_port_table (scmptr);
1720 scm_gc_ports_collected++;
24e68a57 1721 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
0f2d19dd
JB
1722 }
1723 break;
1724 case scm_tc7_smob:
d6884e63 1725 switch SCM_TYP16 (scmptr)
0f2d19dd
JB
1726 {
1727 case scm_tc_free_cell:
acb0a19c 1728 case scm_tc16_real:
0f2d19dd
JB
1729 break;
1730#ifdef SCM_BIGDIG
acb0a19c 1731 case scm_tc16_big:
0f2d19dd 1732 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
06ee04b2
DH
1733 scm_must_free (SCM_BDIGITS (scmptr));
1734 break;
0f2d19dd 1735#endif /* def SCM_BIGDIG */
acb0a19c 1736 case scm_tc16_complex:
06ee04b2 1737 m += sizeof (scm_complex_t);
405aaef9 1738 scm_must_free (SCM_COMPLEX_MEM (scmptr));
06ee04b2 1739 break;
0f2d19dd 1740 default:
0f2d19dd
JB
1741 {
1742 int k;
1743 k = SCM_SMOBNUM (scmptr);
1744 if (!(k < scm_numsmob))
1745 goto sweeperr;
c8045e8d 1746 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1747 break;
1748 }
1749 }
1750 break;
1751 default:
acf4331f
DH
1752 sweeperr:
1753 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1754 }
7bb8eac7 1755
4c48ba06 1756 if (!--left_to_collect)
4a4c9785
MD
1757 {
1758 SCM_SETCAR (scmptr, nfreelist);
4c48ba06
MD
1759 *freelist->clustertail = scmptr;
1760 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1761
4a4c9785 1762 nfreelist = SCM_EOL;
4c48ba06
MD
1763 freelist->collected += span * freelist->cluster_size;
1764 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1765 }
1766 else
4a4c9785
MD
1767 {
1768 /* Stick the new cell on the front of nfreelist. It's
1769 critical that we mark this cell as freed; otherwise, the
1770 conservative collector might trace it as some other type
1771 of object. */
54778cd3 1772 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
3f5d82cd 1773 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
4a4c9785
MD
1774 nfreelist = scmptr;
1775 }
0f2d19dd 1776 }
d6884e63 1777
0f2d19dd
JB
1778#ifdef GC_FREE_SEGMENTS
1779 if (n == seg_size)
1780 {
15e9d186
JB
1781 register long j;
1782
4c48ba06 1783 freelist->heap_size -= seg_size;
cf2d30f6
JB
1784 free ((char *) scm_heap_table[i].bounds[0]);
1785 scm_heap_table[i].bounds[0] = 0;
1786 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1787 scm_heap_table[j - 1] = scm_heap_table[j];
1788 scm_n_heap_segs -= 1;
cf2d30f6 1789 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1790 }
1791 else
1792#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1793 {
1794 /* Update the real freelist pointer to point to the head of
1795 the list of free cells we've built for this segment. */
4c48ba06 1796 freelist->cells = nfreelist;
4c48ba06 1797 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1798 }
1799
fca7547b 1800#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
1801 scm_map_free_list ();
1802#endif
4a4c9785 1803 }
a00c95d9 1804
4c48ba06
MD
1805 gc_sweep_freelist_finish (&scm_master_freelist);
1806 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1807
8ded62a3
MD
1808 /* When we move to POSIX threads private freelists should probably
1809 be GC-protected instead. */
1810 scm_freelist = SCM_EOL;
1811 scm_freelist2 = SCM_EOL;
a00c95d9 1812
b37fe1c5 1813 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f 1814 scm_gc_yield -= scm_cells_allocated;
0f2d19dd
JB
1815 scm_mallocated -= m;
1816 scm_gc_malloc_collected = m;
1817}
acf4331f 1818#undef FUNC_NAME
0f2d19dd
JB
1819
1820
1821\f
0f2d19dd
JB
1822/* {Front end to malloc}
1823 *
9d47a1e6
ML
1824 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1825 * scm_done_free
0f2d19dd
JB
1826 *
1827 * These functions provide services comperable to malloc, realloc, and
1828 * free. They are for allocating malloced parts of scheme objects.
9d47a1e6 1829 * The primary purpose of the front end is to impose calls to gc. */
0f2d19dd 1830
bc9d9bb2 1831
0f2d19dd
JB
1832/* scm_must_malloc
1833 * Return newly malloced storage or throw an error.
1834 *
1835 * The parameter WHAT is a string for error reporting.
a00c95d9 1836 * If the threshold scm_mtrigger will be passed by this
0f2d19dd
JB
1837 * allocation, or if the first call to malloc fails,
1838 * garbage collect -- on the presumption that some objects
1839 * using malloced storage may be collected.
1840 *
1841 * The limit scm_mtrigger may be raised by this allocation.
1842 */
07806695 1843void *
e4ef2330 1844scm_must_malloc (scm_sizet size, const char *what)
0f2d19dd 1845{
07806695 1846 void *ptr;
15e9d186 1847 unsigned long nm = scm_mallocated + size;
e4ef2330
MD
1848
1849 if (nm <= scm_mtrigger)
0f2d19dd 1850 {
07806695 1851 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1852 if (NULL != ptr)
1853 {
1854 scm_mallocated = nm;
bc9d9bb2
MD
1855#ifdef GUILE_DEBUG_MALLOC
1856 scm_malloc_register (ptr, what);
1857#endif
0f2d19dd
JB
1858 return ptr;
1859 }
1860 }
6064dcc6 1861
0f2d19dd 1862 scm_igc (what);
e4ef2330 1863
0f2d19dd 1864 nm = scm_mallocated + size;
07806695 1865 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1866 if (NULL != ptr)
1867 {
1868 scm_mallocated = nm;
6064dcc6
MV
1869 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1870 if (nm > scm_mtrigger)
1871 scm_mtrigger = nm + nm / 2;
1872 else
1873 scm_mtrigger += scm_mtrigger / 2;
1874 }
bc9d9bb2
MD
1875#ifdef GUILE_DEBUG_MALLOC
1876 scm_malloc_register (ptr, what);
1877#endif
1878
0f2d19dd
JB
1879 return ptr;
1880 }
e4ef2330 1881
acf4331f 1882 scm_memory_error (what);
0f2d19dd
JB
1883}
1884
1885
1886/* scm_must_realloc
1887 * is similar to scm_must_malloc.
1888 */
07806695
JB
1889void *
1890scm_must_realloc (void *where,
e4ef2330
MD
1891 scm_sizet old_size,
1892 scm_sizet size,
3eeba8d4 1893 const char *what)
0f2d19dd 1894{
07806695 1895 void *ptr;
e4ef2330
MD
1896 scm_sizet nm = scm_mallocated + size - old_size;
1897
1898 if (nm <= scm_mtrigger)
0f2d19dd 1899 {
07806695 1900 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1901 if (NULL != ptr)
1902 {
1903 scm_mallocated = nm;
bc9d9bb2
MD
1904#ifdef GUILE_DEBUG_MALLOC
1905 scm_malloc_reregister (where, ptr, what);
1906#endif
0f2d19dd
JB
1907 return ptr;
1908 }
1909 }
e4ef2330 1910
0f2d19dd 1911 scm_igc (what);
e4ef2330
MD
1912
1913 nm = scm_mallocated + size - old_size;
07806695 1914 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1915 if (NULL != ptr)
1916 {
1917 scm_mallocated = nm;
6064dcc6
MV
1918 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1919 if (nm > scm_mtrigger)
1920 scm_mtrigger = nm + nm / 2;
1921 else
1922 scm_mtrigger += scm_mtrigger / 2;
1923 }
bc9d9bb2
MD
1924#ifdef GUILE_DEBUG_MALLOC
1925 scm_malloc_reregister (where, ptr, what);
1926#endif
0f2d19dd
JB
1927 return ptr;
1928 }
e4ef2330 1929
acf4331f 1930 scm_memory_error (what);
0f2d19dd
JB
1931}
1932
acf4331f 1933
a00c95d9 1934void
07806695 1935scm_must_free (void *obj)
acf4331f 1936#define FUNC_NAME "scm_must_free"
0f2d19dd 1937{
bc9d9bb2
MD
1938#ifdef GUILE_DEBUG_MALLOC
1939 scm_malloc_unregister (obj);
1940#endif
0f2d19dd
JB
1941 if (obj)
1942 free (obj);
1943 else
acf4331f 1944 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
0f2d19dd 1945}
acf4331f
DH
1946#undef FUNC_NAME
1947
0f2d19dd 1948
c68296f8
MV
1949/* Announce that there has been some malloc done that will be freed
1950 * during gc. A typical use is for a smob that uses some malloced
1951 * memory but can not get it from scm_must_malloc (for whatever
1952 * reason). When a new object of this smob is created you call
1953 * scm_done_malloc with the size of the object. When your smob free
1954 * function is called, be sure to include this size in the return
9d47a1e6
ML
1955 * value.
1956 *
1957 * If you can't actually free the memory in the smob free function,
1958 * for whatever reason (like reference counting), you still can (and
1959 * should) report the amount of memory freed when you actually free it.
1960 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1961 * eh? Or even better, call scm_done_free. */
0f2d19dd 1962
c68296f8 1963void
6e8d25a6 1964scm_done_malloc (long size)
c68296f8
MV
1965{
1966 scm_mallocated += size;
1967
1968 if (scm_mallocated > scm_mtrigger)
1969 {
1970 scm_igc ("foreign mallocs");
1971 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1972 {
1973 if (scm_mallocated > scm_mtrigger)
1974 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1975 else
1976 scm_mtrigger += scm_mtrigger / 2;
1977 }
1978 }
1979}
1980
9d47a1e6
ML
1981void
1982scm_done_free (long size)
1983{
1984 scm_mallocated -= size;
1985}
1986
c68296f8
MV
1987
1988\f
0f2d19dd
JB
1989/* {Heap Segments}
1990 *
1991 * Each heap segment is an array of objects of a particular size.
1992 * Every segment has an associated (possibly shared) freelist.
1993 * A table of segment records is kept that records the upper and
1994 * lower extents of the segment; this is used during the conservative
1995 * phase of gc to identify probably gc roots (because they point
c68296f8 1996 * into valid segments at reasonable offsets). */
0f2d19dd
JB
1997
1998/* scm_expmem
1999 * is true if the first segment was smaller than INIT_HEAP_SEG.
2000 * If scm_expmem is set to one, subsequent segment allocations will
2001 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2002 */
2003int scm_expmem = 0;
2004
4c48ba06
MD
2005scm_sizet scm_max_segment_size;
2006
0f2d19dd
JB
2007/* scm_heap_org
2008 * is the lowest base address of any heap segment.
2009 */
2010SCM_CELLPTR scm_heap_org;
2011
a00c95d9 2012scm_heap_seg_data_t * scm_heap_table = 0;
b6efc951 2013static unsigned int heap_segment_table_size = 0;
0f2d19dd
JB
2014int scm_n_heap_segs = 0;
2015
0f2d19dd 2016/* init_heap_seg
d6884e63 2017 * initializes a new heap segment and returns the number of objects it contains.
0f2d19dd 2018 *
d6884e63
ML
2019 * The segment origin and segment size in bytes are input parameters.
2020 * The freelist is both input and output.
0f2d19dd 2021 *
d6884e63
ML
2022 * This function presumes that the scm_heap_table has already been expanded
2023 * to accomodate a new segment record and that the markbit space was reserved
2024 * for all the cards in this segment.
0f2d19dd
JB
2025 */
2026
d6884e63
ML
2027#define INIT_CARD(card, span) \
2028 do { \
2029 SCM_GC_CARD_BVEC (card) = get_bvec (); \
2030 if ((span) == 2) \
2031 SCM_GC_SET_CARD_DOUBLECELL (card); \
2032 } while (0)
0f2d19dd 2033
a00c95d9 2034static scm_sizet
4c48ba06 2035init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
0f2d19dd
JB
2036{
2037 register SCM_CELLPTR ptr;
0f2d19dd 2038 SCM_CELLPTR seg_end;
15e9d186 2039 int new_seg_index;
acb0a19c 2040 int n_new_cells;
4c48ba06 2041 int span = freelist->span;
a00c95d9 2042
0f2d19dd
JB
2043 if (seg_org == NULL)
2044 return 0;
2045
d6884e63
ML
2046 /* Align the begin ptr up.
2047 */
2048 ptr = SCM_GC_CARD_UP (seg_org);
acb0a19c 2049
a00c95d9 2050 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 2051 */
d6884e63 2052 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
0f2d19dd 2053
a00c95d9 2054 /* Find the right place and insert the segment record.
0f2d19dd
JB
2055 *
2056 */
2057 for (new_seg_index = 0;
2058 ( (new_seg_index < scm_n_heap_segs)
2059 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2060 new_seg_index++)
2061 ;
2062
2063 {
2064 int i;
2065 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2066 scm_heap_table[i] = scm_heap_table[i - 1];
2067 }
a00c95d9 2068
0f2d19dd
JB
2069 ++scm_n_heap_segs;
2070
945fec60 2071 scm_heap_table[new_seg_index].span = span;
4c48ba06 2072 scm_heap_table[new_seg_index].freelist = freelist;
195e6201
DH
2073 scm_heap_table[new_seg_index].bounds[0] = ptr;
2074 scm_heap_table[new_seg_index].bounds[1] = seg_end;
0f2d19dd 2075
acb0a19c
MD
2076 /*n_new_cells*/
2077 n_new_cells = seg_end - ptr;
0f2d19dd 2078
4c48ba06 2079 freelist->heap_size += n_new_cells;
4a4c9785 2080
a00c95d9 2081 /* Partition objects in this segment into clusters */
4a4c9785
MD
2082 {
2083 SCM clusters;
2084 SCM *clusterp = &clusters;
4a4c9785 2085
d6884e63
ML
2086 NEXT_DATA_CELL (ptr, span);
2087 while (ptr < seg_end)
4a4c9785 2088 {
d6884e63
ML
2089 scm_cell *nxt = ptr;
2090 scm_cell *prv = NULL;
2091 scm_cell *last_card = NULL;
2092 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2093 NEXT_DATA_CELL(nxt, span);
4a4c9785 2094
4c48ba06
MD
2095 /* Allocate cluster spine
2096 */
4a4c9785 2097 *clusterp = PTR2SCM (ptr);
d6884e63 2098 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
4a4c9785 2099 clusterp = SCM_CDRLOC (*clusterp);
d6884e63 2100 ptr = nxt;
a00c95d9 2101
d6884e63 2102 while (n_data_cells--)
4a4c9785 2103 {
d6884e63 2104 scm_cell *card = SCM_GC_CELL_CARD (ptr);
96f6f4ae 2105 SCM scmptr = PTR2SCM (ptr);
d6884e63
ML
2106 nxt = ptr;
2107 NEXT_DATA_CELL (nxt, span);
2108 prv = ptr;
2109
2110 if (card != last_card)
2111 {
2112 INIT_CARD (card, span);
2113 last_card = card;
2114 }
96f6f4ae 2115
54778cd3 2116 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
d6884e63
ML
2117 SCM_SETCDR (scmptr, PTR2SCM (nxt));
2118
2119 ptr = nxt;
4a4c9785 2120 }
4c48ba06 2121
d6884e63 2122 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
4a4c9785 2123 }
a00c95d9 2124
d6884e63
ML
2125 /* sanity check */
2126 {
2127 scm_cell *ref = seg_end;
2128 NEXT_DATA_CELL (ref, span);
2129 if (ref != ptr)
2130 /* [cmm] looks like the segment size doesn't divide cleanly by
2131 cluster size. bad cmm! */
2132 abort();
2133 }
2134
4a4c9785
MD
2135 /* Patch up the last cluster pointer in the segment
2136 * to join it to the input freelist.
2137 */
4c48ba06
MD
2138 *clusterp = freelist->clusters;
2139 freelist->clusters = clusters;
4a4c9785
MD
2140 }
2141
4c48ba06
MD
2142#ifdef DEBUGINFO
2143 fprintf (stderr, "H");
2144#endif
0f2d19dd 2145 return size;
0f2d19dd
JB
2146}
2147
a00c95d9
ML
2148static scm_sizet
2149round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2150{
2151 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2152
2153 return
2154 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2155 + ALIGNMENT_SLACK (freelist);
2156}
2157
a00c95d9 2158static void
b6efc951 2159alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
acf4331f 2160#define FUNC_NAME "alloc_some_heap"
0f2d19dd 2161{
0f2d19dd 2162 SCM_CELLPTR ptr;
b37fe1c5 2163 long len;
a00c95d9 2164
9d47a1e6 2165 if (scm_gc_heap_lock)
b6efc951
DH
2166 {
2167 /* Critical code sections (such as the garbage collector) aren't
2168 * supposed to add heap segments.
2169 */
2170 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
2171 abort ();
2172 }
0f2d19dd 2173
9d47a1e6 2174 if (scm_n_heap_segs == heap_segment_table_size)
b6efc951
DH
2175 {
2176 /* We have to expand the heap segment table to have room for the new
2177 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2178 * init_heap_seg only if the allocation of the segment itself succeeds.
2179 */
2180 unsigned int new_table_size = scm_n_heap_segs + 1;
2181 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2182 scm_heap_seg_data_t * new_heap_table;
2183
2184 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2185 realloc ((char *)scm_heap_table, size)));
2186 if (!new_heap_table)
2187 {
2188 if (error_policy == abort_on_error)
2189 {
2190 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2191 abort ();
2192 }
2193 else
2194 {
2195 return;
2196 }
2197 }
2198 else
2199 {
2200 scm_heap_table = new_heap_table;
2201 heap_segment_table_size = new_table_size;
2202 }
2203 }
0f2d19dd 2204
0f2d19dd 2205 /* Pick a size for the new heap segment.
a00c95d9 2206 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
2207 * gc.h
2208 */
4c48ba06 2209 {
1811ebce
MD
2210 /* Assure that the new segment is predicted to be large enough.
2211 *
2212 * New yield should at least equal GC fraction of new heap size, i.e.
2213 *
2214 * y + dh > f * (h + dh)
2215 *
2216 * y : yield
8fef55a8 2217 * f : min yield fraction
1811ebce
MD
2218 * h : heap size
2219 * dh : size of new heap segment
2220 *
2221 * This gives dh > (f * h - y) / (1 - f)
bda1446c 2222 */
8fef55a8 2223 int f = freelist->min_yield_fraction;
1811ebce
MD
2224 long h = SCM_HEAP_SIZE;
2225 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2226 len = SCM_EXPHEAP (freelist->heap_size);
2227#ifdef DEBUGINFO
2228 fprintf (stderr, "(%d < %d)", len, min_cells);
2229#endif
2230 if (len < min_cells)
1811ebce 2231 len = min_cells + freelist->cluster_size;
4c48ba06 2232 len *= sizeof (scm_cell);
1811ebce
MD
2233 /* force new sampling */
2234 freelist->collected = LONG_MAX;
4c48ba06 2235 }
a00c95d9 2236
4c48ba06
MD
2237 if (len > scm_max_segment_size)
2238 len = scm_max_segment_size;
0f2d19dd
JB
2239
2240 {
2241 scm_sizet smallest;
2242
a00c95d9 2243 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9 2244
0f2d19dd 2245 if (len < smallest)
a00c95d9 2246 len = smallest;
0f2d19dd
JB
2247
2248 /* Allocate with decaying ambition. */
2249 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2250 && (len >= smallest))
2251 {
1811ebce 2252 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2253 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2254 if (ptr)
2255 {
a00c95d9 2256 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2257 return;
2258 }
2259 len /= 2;
2260 }
2261 }
2262
b6efc951
DH
2263 if (error_policy == abort_on_error)
2264 {
2265 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2266 abort ();
2267 }
0f2d19dd 2268}
acf4331f 2269#undef FUNC_NAME
0f2d19dd
JB
2270
2271
a00c95d9 2272SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
1bbd0b84 2273 (SCM name),
b380b885 2274 "")
1bbd0b84 2275#define FUNC_NAME s_scm_unhash_name
0f2d19dd
JB
2276{
2277 int x;
2278 int bound;
3b3b36dd 2279 SCM_VALIDATE_SYMBOL (1,name);
0f2d19dd
JB
2280 SCM_DEFER_INTS;
2281 bound = scm_n_heap_segs;
2282 for (x = 0; x < bound; ++x)
2283 {
2284 SCM_CELLPTR p;
2285 SCM_CELLPTR pbound;
195e6201
DH
2286 p = scm_heap_table[x].bounds[0];
2287 pbound = scm_heap_table[x].bounds[1];
0f2d19dd
JB
2288 while (p < pbound)
2289 {
c8045e8d
DH
2290 SCM cell = PTR2SCM (p);
2291 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
0f2d19dd 2292 {
c8045e8d
DH
2293 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2294 * struct cell. See the corresponding comment in scm_gc_mark.
2295 */
2296 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2297 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2298 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
9a09deb1 2299 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
c8045e8d 2300 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
0f2d19dd 2301 {
c8045e8d 2302 SCM_SET_CELL_OBJECT_0 (cell, name);
0f2d19dd
JB
2303 }
2304 }
2305 ++p;
2306 }
2307 }
2308 SCM_ALLOW_INTS;
2309 return name;
2310}
1bbd0b84 2311#undef FUNC_NAME
0f2d19dd
JB
2312
2313
2314\f
2315/* {GC Protection Helper Functions}
2316 */
2317
2318
5d2b97cd
DH
2319/*
2320 * If within a function you need to protect one or more scheme objects from
2321 * garbage collection, pass them as parameters to one of the
2322 * scm_remember_upto_here* functions below. These functions don't do
2323 * anything, but since the compiler does not know that they are actually
2324 * no-ops, it will generate code that calls these functions with the given
2325 * parameters. Therefore, you can be sure that the compiler will keep those
2326 * scheme values alive (on the stack or in a register) up to the point where
2327 * scm_remember_upto_here* is called. In other words, place the call to
2328 * scm_remember_upt_here* _behind_ the last code in your function, that
2329 * depends on the scheme object to exist.
2330 *
2331 * Example: We want to make sure, that the string object str does not get
2332 * garbage collected during the execution of 'some_function', because
2333 * otherwise the characters belonging to str would be freed and
2334 * 'some_function' might access freed memory. To make sure that the compiler
2335 * keeps str alive on the stack or in a register such that it is visible to
2336 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2337 * call to 'some_function'. Note that this would not be necessary if str was
2338 * used anyway after the call to 'some_function'.
2339 * char *chars = SCM_STRING_CHARS (str);
2340 * some_function (chars);
2341 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2342 */
2343
2344void
2345scm_remember_upto_here_1 (SCM obj)
2346{
2347 /* Empty. Protects a single object from garbage collection. */
2348}
2349
2350void
2351scm_remember_upto_here_2 (SCM obj1, SCM obj2)
2352{
2353 /* Empty. Protects two objects from garbage collection. */
2354}
2355
2356void
2357scm_remember_upto_here (SCM obj, ...)
2358{
2359 /* Empty. Protects any number of objects from garbage collection. */
2360}
2361
2362
2363#if (SCM_DEBUG_DEPRECATED == 0)
2364
0f2d19dd 2365void
6e8d25a6 2366scm_remember (SCM *ptr)
b24b5e13
DH
2367{
2368 /* empty */
2369}
0f2d19dd 2370
5d2b97cd 2371#endif /* SCM_DEBUG_DEPRECATED == 0 */
1cc91f1b 2372
c209c88e 2373/*
41b0806d
GB
2374 These crazy functions prevent garbage collection
2375 of arguments after the first argument by
2376 ensuring they remain live throughout the
2377 function because they are used in the last
2378 line of the code block.
2379 It'd be better to have a nice compiler hint to
2380 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2381SCM
2382scm_return_first (SCM elt, ...)
0f2d19dd
JB
2383{
2384 return elt;
2385}
2386
41b0806d
GB
/* Integer variant of scm_return_first: returns I, keeping any further
 * arguments live across the call for the conservative GC.  */
int
scm_return_first_int (int i, ...)
{
  return i;
}
2392
0f2d19dd 2393
0f2d19dd 2394SCM
6e8d25a6 2395scm_permanent_object (SCM obj)
0f2d19dd
JB
2396{
2397 SCM_REDEFER_INTS;
2398 scm_permobjs = scm_cons (obj, scm_permobjs);
2399 SCM_REALLOW_INTS;
2400 return obj;
2401}
2402
2403
7bd4fbe2
MD
2404/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2405 other references are dropped, until the object is unprotected by calling
2406 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2407 i. e. it is possible to protect the same object several times, but it is
2408 necessary to unprotect the object the same number of times to actually get
2409 the object unprotected. It is an error to unprotect an object more often
2410 than it has been protected before. The function scm_protect_object returns
2411 OBJ.
2412*/
2413
2414/* Implementation note: For every object X, there is a counter which
2415 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2416*/
686765af 2417
ef290276 2418SCM
6e8d25a6 2419scm_protect_object (SCM obj)
ef290276 2420{
686765af 2421 SCM handle;
9d47a1e6 2422
686765af 2423 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2424 SCM_REDEFER_INTS;
9d47a1e6 2425
0f0f0899
MD
2426 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2427 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
9d47a1e6 2428
2dd6a83a 2429 SCM_REALLOW_INTS;
9d47a1e6 2430
ef290276
JB
2431 return obj;
2432}
2433
2434
2435/* Remove any protection for OBJ established by a prior call to
dab7f566 2436 scm_protect_object. This function returns OBJ.
ef290276 2437
dab7f566 2438 See scm_protect_object for more information. */
ef290276 2439SCM
6e8d25a6 2440scm_unprotect_object (SCM obj)
ef290276 2441{
686765af 2442 SCM handle;
9d47a1e6 2443
686765af 2444 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2445 SCM_REDEFER_INTS;
9d47a1e6 2446
686765af 2447 handle = scm_hashq_get_handle (scm_protects, obj);
9d47a1e6 2448
0f0f0899 2449 if (SCM_IMP (handle))
686765af 2450 {
0f0f0899
MD
2451 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2452 abort ();
686765af 2453 }
6a199940
DH
2454 else
2455 {
2456 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2457 if (count == 0)
2458 scm_hashq_remove_x (scm_protects, obj);
2459 else
2460 SCM_SETCDR (handle, SCM_MAKINUM (count));
2461 }
686765af 2462
2dd6a83a 2463 SCM_REALLOW_INTS;
ef290276
JB
2464
2465 return obj;
2466}
2467
c45acc34
JB
2468int terminating;
2469
2470/* called on process termination. */
e52ceaac
MD
2471#ifdef HAVE_ATEXIT
2472static void
2473cleanup (void)
2474#else
2475#ifdef HAVE_ON_EXIT
51157deb
MD
2476extern int on_exit (void (*procp) (), int arg);
2477
e52ceaac
MD
2478static void
2479cleanup (int status, void *arg)
2480#else
2481#error Dont know how to setup a cleanup handler on your system.
2482#endif
2483#endif
c45acc34
JB
2484{
2485 terminating = 1;
2486 scm_flush_all_ports ();
2487}
ef290276 2488
0f2d19dd 2489\f
acb0a19c 2490static int
4c48ba06 2491make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
acb0a19c 2492{
a00c95d9 2493 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
d6884e63 2494
a00c95d9
ML
2495 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2496 rounded_size,
4c48ba06 2497 freelist))
acb0a19c 2498 {
a00c95d9
ML
2499 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2500 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2501 rounded_size,
4c48ba06 2502 freelist))
acb0a19c
MD
2503 return 1;
2504 }
2505 else
2506 scm_expmem = 1;
2507
8fef55a8
MD
2508 if (freelist->min_yield_fraction)
2509 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
b37fe1c5 2510 / 100);
8fef55a8 2511 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
a00c95d9 2512
acb0a19c
MD
2513 return 0;
2514}
2515
2516\f
4c48ba06
MD
2517static void
2518init_freelist (scm_freelist_t *freelist,
2519 int span,
2520 int cluster_size,
8fef55a8 2521 int min_yield)
4c48ba06
MD
2522{
2523 freelist->clusters = SCM_EOL;
2524 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2525 freelist->left_to_collect = 0;
2526 freelist->clusters_allocated = 0;
8fef55a8
MD
2527 freelist->min_yield = 0;
2528 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2529 freelist->span = span;
2530 freelist->collected = 0;
1811ebce 2531 freelist->collected_1 = 0;
4c48ba06
MD
2532 freelist->heap_size = 0;
2533}
2534
85db4a2c
DH
2535
/* Read an integer from environment variable VAR, returning DEF when
 * VAR is unset or does not begin with a decimal number.  */
static int
scm_i_getenv_int (const char *var, int def)
{
  const char *val = getenv (var);
  char *tail;
  long parsed;

  if (val == NULL)
    return def;
  parsed = strtol (val, &tail, 10);
  return (tail == val) ? def : (int) parsed;
}
2549
2550
4a4c9785 2551int
85db4a2c 2552scm_init_storage ()
0f2d19dd 2553{
85db4a2c
DH
2554 scm_sizet gc_trigger_1;
2555 scm_sizet gc_trigger_2;
2556 scm_sizet init_heap_size_1;
2557 scm_sizet init_heap_size_2;
0f2d19dd
JB
2558 scm_sizet j;
2559
2560 j = SCM_NUM_PROTECTS;
2561 while (j)
2562 scm_sys_protects[--j] = SCM_BOOL_F;
2563 scm_block_gc = 1;
4a4c9785 2564
4a4c9785 2565 scm_freelist = SCM_EOL;
4c48ba06 2566 scm_freelist2 = SCM_EOL;
85db4a2c
DH
2567 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2568 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2569 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2570 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2571 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
4a4c9785 2572
0f2d19dd
JB
2573 scm_expmem = 0;
2574
2575 j = SCM_HEAP_SEG_SIZE;
2576 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
a00c95d9
ML
2577 scm_heap_table = ((scm_heap_seg_data_t *)
2578 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
b6efc951 2579 heap_segment_table_size = 2;
acb0a19c 2580
d6884e63
ML
2581 mark_space_ptr = &mark_space_head;
2582
85db4a2c
DH
2583 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2584 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
4c48ba06
MD
2585 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2586 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
4a4c9785 2587 return 1;
acb0a19c 2588
801cb5e7 2589 /* scm_hplims[0] can change. do not remove scm_heap_org */
a00c95d9 2590 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
acb0a19c 2591
801cb5e7
MD
2592 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2593 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2594 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2595 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2596 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
0f2d19dd
JB
2597
2598 /* Initialise the list of ports. */
840ae05d
JB
2599 scm_port_table = (scm_port **)
2600 malloc (sizeof (scm_port *) * scm_port_table_room);
0f2d19dd
JB
2601 if (!scm_port_table)
2602 return 1;
2603
a18bcd0e 2604#ifdef HAVE_ATEXIT
c45acc34 2605 atexit (cleanup);
e52ceaac
MD
2606#else
2607#ifdef HAVE_ON_EXIT
2608 on_exit (cleanup, 0);
2609#endif
a18bcd0e 2610#endif
0f2d19dd
JB
2611
2612 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
24e68a57 2613 SCM_SETCDR (scm_undefineds, scm_undefineds);
0f2d19dd
JB
2614
2615 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2616 scm_nullstr = scm_makstr (0L, 0);
a8741caa 2617 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
93d40df2
DH
2618
2619#define DEFAULT_SYMHASH_SIZE 277
2620 scm_symhash = scm_make_vector (SCM_MAKINUM (DEFAULT_SYMHASH_SIZE), SCM_EOL);
93d40df2
DH
2621 scm_symhash_vars = scm_make_vector (SCM_MAKINUM (DEFAULT_SYMHASH_SIZE), SCM_EOL);
2622
8960e0a0 2623 scm_stand_in_procs = SCM_EOL;
0f2d19dd 2624 scm_permobjs = SCM_EOL;
686765af 2625 scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
d6884e63 2626
0f2d19dd
JB
2627 return 0;
2628}
939794ce 2629
0f2d19dd
JB
2630\f
2631
939794ce
DH
2632SCM scm_after_gc_hook;
2633
2634#if (SCM_DEBUG_DEPRECATED == 0)
2635static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2636#endif /* SCM_DEBUG_DEPRECATED == 0 */
2637static SCM gc_async;
2638
2639
2640/* The function gc_async_thunk causes the execution of the after-gc-hook. It
2641 * is run after the gc, as soon as the asynchronous events are handled by the
2642 * evaluator.
2643 */
2644static SCM
2645gc_async_thunk (void)
2646{
2647 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2648
2649#if (SCM_DEBUG_DEPRECATED == 0)
2650
2651 /* The following code will be removed in Guile 1.5. */
2652 if (SCM_NFALSEP (scm_gc_vcell))
2653 {
2654 SCM proc = SCM_CDR (scm_gc_vcell);
2655
2656 if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
2657 scm_apply (proc, SCM_EOL, SCM_EOL);
2658 }
2659
2660#endif /* SCM_DEBUG_DEPRECATED == 0 */
2661
2662 return SCM_UNSPECIFIED;
2663}
2664
2665
2666/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2667 * the garbage collection. The only purpose of this function is to mark the
2668 * gc_async (which will eventually lead to the execution of the
2669 * gc_async_thunk).
2670 */
2671static void *
2672mark_gc_async (void * hook_data, void *func_data, void *data)
2673{
2674 scm_system_async_mark (gc_async);
2675 return NULL;
2676}
2677
2678
0f2d19dd
JB
2679void
2680scm_init_gc ()
0f2d19dd 2681{
939794ce
DH
2682 SCM after_gc_thunk;
2683
801cb5e7 2684 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
939794ce
DH
2685
2686#if (SCM_DEBUG_DEPRECATED == 0)
2687 scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
2688#endif /* SCM_DEBUG_DEPRECATED == 0 */
78573619 2689 after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
23670993 2690 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
939794ce
DH
2691
2692 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2693
8dc9439f 2694#ifndef SCM_MAGIC_SNARFER
a0599745 2695#include "libguile/gc.x"
8dc9439f 2696#endif
0f2d19dd 2697}
89e00824 2698
56495472
ML
2699#endif /*MARK_DEPENDENCIES*/
2700
89e00824
ML
2701/*
2702 Local Variables:
2703 c-file-style: "gnu"
2704 End:
2705*/