/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice. */

/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
   gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */

/* #define DEBUGINFO */

\f
#include <stdio.h>
#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"

#include "libguile/validate.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f
/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which scm_cons pairs and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of heap.  If this much heap is
 * allocated initially, the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type scm_sizet.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more heap
 * is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * Heap size 45000 and 40% min yield gives quick startup and no extra
 * heap allocation.  Having higher values on min yield may lead to
 * large heaps, especially if code behaviour is varying its
 * maximum consumption between different freelists.
 */
int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell));
int scm_default_min_yield_1 = 40;
#define SCM_CLUSTER_SIZE_1 2000L

int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell));
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application
 */
int scm_default_min_yield_2 = 40;
#define SCM_CLUSTER_SIZE_2 1000L

int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
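
/* To make the tuning above concrete (assuming a 32 bit machine, where a
 * cell is 8 bytes): the first freelist starts out with 45000 cells,
 * i.e. 360000 bytes of heap.  Once the initial heap could be allocated,
 * each growth step adds SCM_EXPHEAP (scm_heap_size), i.e. half of the
 * current size, so the heap grows 45000 -> 67500 -> 101250 -> ... cells,
 * a factor of 1.5 per step.  On the malloc side, the first GC is
 * triggered once must_malloc usage passes SCM_INIT_MALLOC_LIMIT
 * (100000 bytes), and a collection has to win back at least
 * SCM_MTRIGGER_HYSTERESIS (10000 bytes) or the trigger is raised.
 */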

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
   bounds for allocated storage */

#ifdef PROT386
/* in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
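
/* Example (for the common, non-PROT386/_UNICOS definitions, with 8-byte
 * single cells): CELL_UP (p, 1) rounds the address p up and
 * CELL_DN (p, 1) rounds it down to a multiple of 8, so a segment
 * starting at address 0x1003 is clipped to the aligned inner bound
 * 0x1008 at the bottom and to a multiple of 8 at the top.  Only whole,
 * properly aligned cells of the segment are ever used; for span == 2
 * the rounding is to 16-byte boundaries.
 */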
#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))


\f
/* scm_freelists
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;

SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
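
/* The positional initializers above correspond, in order, to the fields
 * cells, left_to_collect, clusters_allocated, clusters, clustertail,
 * cluster_size, grow_heap_p, min_yield, min_yield_fraction, span,
 * collected and collected_1; heap_size is left implicitly zero.  Note
 * that span is 1 for the single-cell master list and 2 for the
 * double-cell one.
 */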

/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;


/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_rt;
unsigned long scm_gc_time_taken = 0;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");

typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;



static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);

typedef enum { return_on_error, abort_on_error } policy_on_error;
static void alloc_some_heap (scm_freelist_t *, policy_on_error);


\f
cf2d30f6
JB
291/* Debugging functions. */
292
bb2c57fa 293#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6
JB
294
295/* Return the number of the heap segment containing CELL. */
296static int
297which_seg (SCM cell)
298{
299 int i;
300
301 for (i = 0; i < scm_n_heap_segs; i++)
195e6201
DH
302 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
303 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
cf2d30f6
JB
304 return i;
305 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
945fec60 306 SCM_UNPACK (cell));
cf2d30f6
JB
307 abort ();
308}
309
310
8ded62a3
MD
311static void
312map_free_list (scm_freelist_t *master, SCM freelist)
313{
314 int last_seg = -1, count = 0;
315 SCM f;
a00c95d9 316
8ded62a3
MD
317 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
318 {
319 int this_seg = which_seg (f);
320
321 if (this_seg != last_seg)
322 {
323 if (last_seg != -1)
324 fprintf (stderr, " %5d %d-cells in segment %d\n",
325 count, master->span, last_seg);
326 last_seg = this_seg;
327 count = 0;
328 }
329 count++;
330 }
331 if (last_seg != -1)
332 fprintf (stderr, " %5d %d-cells in segment %d\n",
333 count, master->span, last_seg);
334}
cf2d30f6 335
a00c95d9 336SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c
MD
337 (),
338 "Print debugging information about the free-list.\n"
5384bc5b 339 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
acb0a19c
MD
340#define FUNC_NAME s_scm_map_free_list
341{
4c48ba06
MD
342 int i;
343 fprintf (stderr, "%d segments total (%d:%d",
344 scm_n_heap_segs,
345 scm_heap_table[0].span,
346 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
347 for (i = 1; i < scm_n_heap_segs; i++)
348 fprintf (stderr, ", %d:%d",
349 scm_heap_table[i].span,
350 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
351 fprintf (stderr, ")\n");
8ded62a3
MD
352 map_free_list (&scm_master_freelist, scm_freelist);
353 map_free_list (&scm_master_freelist2, scm_freelist2);
cf2d30f6
JB
354 fflush (stderr);
355
356 return SCM_UNSPECIFIED;
357}
1bbd0b84 358#undef FUNC_NAME
cf2d30f6 359
4c48ba06
MD
360static int last_cluster;
361static int last_size;
362
5384bc5b
MD
363static int
364free_list_length (char *title, int i, SCM freelist)
365{
366 SCM ls;
367 int n = 0;
368 for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
206d3de3 369 if (SCM_CELL_TYPE (ls) == scm_tc_free_cell)
5384bc5b
MD
370 ++n;
371 else
372 {
373 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
374 abort ();
375 }
4c48ba06
MD
376 if (n != last_size)
377 {
378 if (i > 0)
379 {
380 if (last_cluster == i - 1)
381 fprintf (stderr, "\t%d\n", last_size);
382 else
383 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
384 }
385 if (i >= 0)
386 fprintf (stderr, "%s %d", title, i);
387 else
388 fprintf (stderr, "%s\t%d\n", title, n);
389 last_cluster = i;
390 last_size = n;
391 }
5384bc5b
MD
392 return n;
393}
394
395static void
396free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
397{
398 SCM clusters;
4c48ba06 399 int i = 0, len, n = 0;
5384bc5b
MD
400 fprintf (stderr, "%s\n\n", title);
401 n += free_list_length ("free list", -1, freelist);
402 for (clusters = master->clusters;
403 SCM_NNULLP (clusters);
404 clusters = SCM_CDR (clusters))
4c48ba06
MD
405 {
406 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
407 n += len;
408 }
409 if (last_cluster == i - 1)
410 fprintf (stderr, "\t%d\n", last_size);
411 else
412 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
413 fprintf (stderr, "\ntotal %d objects\n\n", n);
5384bc5b
MD
414}
415
a00c95d9 416SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b
MD
417 (),
418 "Print debugging information about the free-list.\n"
419 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
420#define FUNC_NAME s_scm_free_list_length
421{
b37fe1c5
MD
422 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
423 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 424 return SCM_UNSPECIFIED;
5384bc5b
MD
425}
426#undef FUNC_NAME
427
bb2c57fa
MD
428#endif
429
430#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
431
432/* Number of calls to SCM_NEWCELL since startup. */
433static unsigned long scm_newcell_count;
acb0a19c 434static unsigned long scm_newcell2_count;
cf2d30f6
JB
435
436/* Search freelist for anything that isn't marked as a free cell.
437 Abort if we find something. */
8ded62a3
MD
438static void
439scm_check_freelist (SCM freelist)
440{
441 SCM f;
442 int i = 0;
443
444 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
445 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
446 {
447 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
448 scm_newcell_count, i);
8ded62a3
MD
449 abort ();
450 }
451}
cf2d30f6
JB
452
453static int scm_debug_check_freelist = 0;
25748c78 454
a00c95d9 455SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 456 (SCM flag),
da4a1dba
GB
457 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
458 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
459 "compile-time flag was selected.\n")
1bbd0b84 460#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 461{
945fec60 462 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
463 return SCM_UNSPECIFIED;
464}
1bbd0b84 465#undef FUNC_NAME
25748c78
GB
466
467
4a4c9785
MD
468SCM
469scm_debug_newcell (void)
470{
471 SCM new;
472
473 scm_newcell_count++;
474 if (scm_debug_check_freelist)
475 {
8ded62a3 476 scm_check_freelist (scm_freelist);
4a4c9785
MD
477 scm_gc();
478 }
479
480 /* The rest of this is supposed to be identical to the SCM_NEWCELL
481 macro. */
482 if (SCM_IMP (scm_freelist))
483 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
484 else
485 {
486 new = scm_freelist;
487 scm_freelist = SCM_CDR (scm_freelist);
488 SCM_SETCAR (new, scm_tc16_allocated);
489 }
490
491 return new;
492}
493
494SCM
495scm_debug_newcell2 (void)
496{
497 SCM new;
498
499 scm_newcell2_count++;
500 if (scm_debug_check_freelist)
501 {
8ded62a3 502 scm_check_freelist (scm_freelist2);
4a4c9785
MD
503 scm_gc ();
504 }
505
506 /* The rest of this is supposed to be identical to the SCM_NEWCELL
507 macro. */
508 if (SCM_IMP (scm_freelist2))
509 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
510 else
511 {
512 new = scm_freelist2;
513 scm_freelist2 = SCM_CDR (scm_freelist2);
514 SCM_SETCAR (new, scm_tc16_allocated);
515 }
516
517 return new;
518}
519
fca7547b 520#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
521
522\f
0f2d19dd 523
b37fe1c5
MD
524static unsigned long
525master_cells_allocated (scm_freelist_t *master)
526{
527 int objects = master->clusters_allocated * (master->cluster_size - 1);
528 if (SCM_NULLP (master->clusters))
529 objects -= master->left_to_collect;
530 return master->span * objects;
531}
532
533static unsigned long
534freelist_length (SCM freelist)
535{
536 int n;
537 for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
538 ++n;
539 return n;
540}
541
542static unsigned long
543compute_cells_allocated ()
544{
545 return (scm_cells_allocated
546 + master_cells_allocated (&scm_master_freelist)
547 + master_cells_allocated (&scm_master_freelist2)
548 - scm_master_freelist.span * freelist_length (scm_freelist)
549 - scm_master_freelist2.span * freelist_length (scm_freelist2));
550}
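
/* A note on the arithmetic above: scm_cells_allocated itself is only
 * brought up to date by the sweep phase, so the figure reported to
 * users adds to it the cells accounted to the clusters handed out from
 * both master lists since then (master_cells_allocated), and subtracts
 * the cells still sitting unused on the local freelists scm_freelist
 * and scm_freelist2 (span * freelist_length).
 */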
b37fe1c5 551
0f2d19dd
JB
552/* {Scheme Interface to GC}
553 */
554
a00c95d9 555SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 556 (),
b380b885 557 "Returns an association list of statistics about Guile's current use of storage. ")
1bbd0b84 558#define FUNC_NAME s_scm_gc_stats
0f2d19dd
JB
559{
560 int i;
561 int n;
562 SCM heap_segs;
c209c88e
GB
563 long int local_scm_mtrigger;
564 long int local_scm_mallocated;
565 long int local_scm_heap_size;
566 long int local_scm_cells_allocated;
567 long int local_scm_gc_time_taken;
0f2d19dd
JB
568 SCM answer;
569
570 SCM_DEFER_INTS;
939794ce
DH
571
572 ++scm_block_gc;
573
0f2d19dd
JB
574 retry:
575 heap_segs = SCM_EOL;
576 n = scm_n_heap_segs;
577 for (i = scm_n_heap_segs; i--; )
578 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
579 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
580 heap_segs);
581 if (scm_n_heap_segs != n)
582 goto retry;
939794ce
DH
583
584 --scm_block_gc;
0f2d19dd 585
7febb4a2
MD
586 /* Below, we cons to produce the resulting list. We want a snapshot of
587 * the heap situation before consing.
588 */
0f2d19dd
JB
589 local_scm_mtrigger = scm_mtrigger;
590 local_scm_mallocated = scm_mallocated;
b37fe1c5 591 local_scm_heap_size = SCM_HEAP_SIZE;
b37fe1c5 592 local_scm_cells_allocated = compute_cells_allocated ();
0f2d19dd
JB
593 local_scm_gc_time_taken = scm_gc_time_taken;
594
595 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
596 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
597 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
598 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
599 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
600 scm_cons (sym_heap_segments, heap_segs),
601 SCM_UNDEFINED);
602 SCM_ALLOW_INTS;
603 return answer;
604}
1bbd0b84 605#undef FUNC_NAME
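
/* For illustration, the association list returned by (gc-stats) has the
 * shape below (the numbers here are made up):
 *
 *   ((gc-time-taken . 34)
 *    (cells-allocated . 152960)
 *    (cell-heap-size . 360000)
 *    (bytes-malloced . 142578)
 *    (gc-malloc-threshold . 100000)
 *    (cell-heap-segments (1075286016 . 1074606080) ...))
 *
 * where each entry under cell-heap-segments is a pair of the upper and
 * lower address of one heap segment.
 */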
0f2d19dd
JB
606
607
a00c95d9 608void
6e8d25a6 609scm_gc_start (const char *what)
0f2d19dd
JB
610{
611 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
b37fe1c5 612 scm_gc_cells_collected = 0;
37ddcaf6 613 scm_gc_yield_1 = scm_gc_yield;
8b0d194f
MD
614 scm_gc_yield = (scm_cells_allocated
615 + master_cells_allocated (&scm_master_freelist)
616 + master_cells_allocated (&scm_master_freelist2));
0f2d19dd
JB
617 scm_gc_malloc_collected = 0;
618 scm_gc_ports_collected = 0;
619}
620
939794ce 621
a00c95d9 622void
0f2d19dd 623scm_gc_end ()
0f2d19dd
JB
624{
625 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
c209c88e 626 scm_gc_time_taken += scm_gc_rt;
0f2d19dd
JB
627}
628
629
a00c95d9 630SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 631 (SCM obj),
b380b885
MD
632 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
633 "returned by this function for @var{obj}")
1bbd0b84 634#define FUNC_NAME s_scm_object_address
0f2d19dd 635{
54778cd3 636 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 637}
1bbd0b84 638#undef FUNC_NAME
0f2d19dd
JB
639
640
a00c95d9 641SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
1bbd0b84 642 (),
b380b885
MD
643 "Scans all of SCM objects and reclaims for further use those that are\n"
644 "no longer accessible.")
1bbd0b84 645#define FUNC_NAME s_scm_gc
0f2d19dd
JB
646{
647 SCM_DEFER_INTS;
648 scm_igc ("call");
649 SCM_ALLOW_INTS;
650 return SCM_UNSPECIFIED;
651}
1bbd0b84 652#undef FUNC_NAME
0f2d19dd
JB
653
654
655\f
656/* {C Interface For When GC is Triggered}
657 */
658
b37fe1c5 659static void
8fef55a8 660adjust_min_yield (scm_freelist_t *freelist)
b37fe1c5 661{
8fef55a8 662 /* min yield is adjusted upwards so that next predicted total yield
bda1446c 663 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
664 * `min_yield_fraction' of total heap size. Note, however, that
665 * the absolute value of min_yield will correspond to `collected'
bda1446c 666 * on one master (the one which currently is triggering GC).
b37fe1c5 667 *
bda1446c
MD
668 * The reason why we look at total yield instead of cells collected
669 * on one list is that we want to take other freelists into account.
670 * On this freelist, we know that (local) yield = collected cells,
671 * but that's probably not the case on the other lists.
b37fe1c5
MD
672 *
673 * (We might consider computing a better prediction, for example
674 * by computing an average over multiple GC:s.)
675 */
8fef55a8 676 if (freelist->min_yield_fraction)
b37fe1c5 677 {
37ddcaf6 678 /* Pick largest of last two yields. */
8fef55a8
MD
679 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
680 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5
MD
681#ifdef DEBUGINFO
682 fprintf (stderr, " after GC = %d, delta = %d\n",
683 scm_cells_allocated,
684 delta);
685#endif
686 if (delta > 0)
8fef55a8 687 freelist->min_yield += delta;
b37fe1c5
MD
688 }
689}
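
/* Worked example of the adjustment above: with min_yield_fraction == 40
 * and a total heap of 100000 cells, the target yield is 40000 cells.
 * If the larger of the last two total yields was only 30000 cells,
 * delta is 10000 and min_yield is raised by that amount, making it more
 * likely that the next shortage grows the heap instead of triggering
 * yet another collection.
 */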
690
b6efc951 691
4a4c9785 692/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
693 * common while the freelist will be individual for each thread.
694 */
4a4c9785
MD
695
696SCM
697scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
698{
699 SCM cell;
700 ++scm_ints_disabled;
4c48ba06
MD
701 do
702 {
c7387918 703 if (SCM_NULLP (master->clusters))
4c48ba06 704 {
150c200b 705 if (master->grow_heap_p || scm_block_gc)
4c48ba06 706 {
b6efc951
DH
707 /* In order to reduce gc frequency, try to allocate a new heap
708 * segment first, even if gc might find some free cells. If we
709 * can't obtain a new heap segment, we will try gc later.
710 */
4c48ba06 711 master->grow_heap_p = 0;
b6efc951 712 alloc_some_heap (master, return_on_error);
4c48ba06 713 }
b6efc951 714 if (SCM_NULLP (master->clusters))
b37fe1c5 715 {
b6efc951
DH
716 /* The heap was not grown, either because it wasn't scheduled to
717 * grow, or because there was not enough memory available. In
718 * both cases we have to try gc to get some free cells.
719 */
37ddcaf6
MD
720#ifdef DEBUGINFO
721 fprintf (stderr, "allocated = %d, ",
722 scm_cells_allocated
723 + master_cells_allocated (&scm_master_freelist)
724 + master_cells_allocated (&scm_master_freelist2));
725#endif
b37fe1c5 726 scm_igc ("cells");
8fef55a8 727 adjust_min_yield (master);
c7387918
DH
728 if (SCM_NULLP (master->clusters))
729 {
b6efc951
DH
730 /* gc could not free any cells. Now, we _must_ allocate a
731 * new heap segment, because there is no other possibility
732 * to provide a new cell for the caller.
733 */
734 alloc_some_heap (master, abort_on_error);
c7387918 735 }
b37fe1c5 736 }
4c48ba06
MD
737 }
738 cell = SCM_CAR (master->clusters);
739 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 740 ++master->clusters_allocated;
4c48ba06
MD
741 }
742 while (SCM_NULLP (cell));
4a4c9785 743 --scm_ints_disabled;
4a4c9785 744 *freelist = SCM_CDR (cell);
54778cd3 745 SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
4a4c9785
MD
746 return cell;
747}
748
b6efc951 749
4c48ba06
MD
750#if 0
751/* This is a support routine which can be used to reserve a cluster
752 * for some special use, such as debugging. It won't be useful until
753 * free cells are preserved between garbage collections.
754 */
755
756void
757scm_alloc_cluster (scm_freelist_t *master)
758{
759 SCM freelist, cell;
760 cell = scm_gc_for_newcell (master, &freelist);
761 SCM_SETCDR (cell, freelist);
762 return cell;
763}
764#endif
765
801cb5e7
MD
766
767scm_c_hook_t scm_before_gc_c_hook;
768scm_c_hook_t scm_before_mark_c_hook;
769scm_c_hook_t scm_before_sweep_c_hook;
770scm_c_hook_t scm_after_sweep_c_hook;
771scm_c_hook_t scm_after_gc_c_hook;
772
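/* These hooks give C code a way to run at fixed points of a collection:
 * before GC, before mark, before sweep, after sweep and after GC (see
 * the scm_c_hook_run calls in scm_igc below).  A rough sketch of
 * attaching a client function, assuming the scm_c_hook_add interface
 * from hooks.h; the logger function is purely illustrative and not part
 * of libguile:
 */
#if 0
static void *
my_after_gc_report (void *hook_data, void *func_data, void *data)
{
  fprintf (stderr, "GC finished; cells allocated: %lu\n",
           scm_cells_allocated);
  return 0;
}

/* ... somewhere in the client's initialization code ... */
scm_c_hook_add (&scm_after_gc_c_hook, my_after_gc_report, 0, 0);
#endif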
b6efc951 773
0f2d19dd 774void
1bbd0b84 775scm_igc (const char *what)
0f2d19dd
JB
776{
777 int j;
778
801cb5e7 779 scm_c_hook_run (&scm_before_gc_c_hook, 0);
4c48ba06
MD
780#ifdef DEBUGINFO
781 fprintf (stderr,
782 SCM_NULLP (scm_freelist)
783 ? "*"
784 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
785#endif
42db06f0
MD
786#ifdef USE_THREADS
787 /* During the critical section, only the current thread may run. */
788 SCM_THREAD_CRITICAL_SECTION_START;
789#endif
790
e242dfd2 791 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 792
ab4bef85
JB
793 scm_gc_start (what);
794
795 if (!scm_stack_base || scm_block_gc)
796 {
797 scm_gc_end ();
798 return;
799 }
800
a5c314c8
JB
801 if (scm_mallocated < 0)
802 /* The byte count of allocated objects has underflowed. This is
803 probably because you forgot to report the sizes of objects you
804 have allocated, by calling scm_done_malloc or some such. When
805 the GC freed them, it subtracted their size from
806 scm_mallocated, which underflowed. */
807 abort ();
c45acc34 808
ab4bef85
JB
809 if (scm_gc_heap_lock)
810 /* We've invoked the collector while a GC is already in progress.
811 That should never happen. */
812 abort ();
0f2d19dd
JB
813
814 ++scm_gc_heap_lock;
ab4bef85 815
0f2d19dd
JB
816 /* flush dead entries from the continuation stack */
817 {
818 int x;
819 int bound;
820 SCM * elts;
821 elts = SCM_VELTS (scm_continuation_stack);
822 bound = SCM_LENGTH (scm_continuation_stack);
823 x = SCM_INUM (scm_continuation_stack_ptr);
824 while (x < bound)
825 {
826 elts[x] = SCM_BOOL_F;
827 ++x;
828 }
829 }
830
801cb5e7
MD
831 scm_c_hook_run (&scm_before_mark_c_hook, 0);
832
42db06f0 833#ifndef USE_THREADS
a00c95d9 834
0f2d19dd
JB
835 /* Protect from the C stack. This must be the first marking
836 * done because it provides information about what objects
837 * are "in-use" by the C code. "in-use" objects are those
838 * for which the values from SCM_LENGTH and SCM_CHARS must remain
839 * usable. This requirement is stricter than a liveness
840 * requirement -- in particular, it constrains the implementation
841 * of scm_vector_set_length_x.
842 */
843 SCM_FLUSH_REGISTER_WINDOWS;
844 /* This assumes that all registers are saved into the jmp_buf */
845 setjmp (scm_save_regs_gc_mark);
846 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
ce4a361d
JB
847 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
848 sizeof scm_save_regs_gc_mark)
849 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
850
851 {
6ba93e5e 852 scm_sizet stack_len = scm_stack_size (scm_stack_base);
0f2d19dd 853#ifdef SCM_STACK_GROWS_UP
6ba93e5e 854 scm_mark_locations (scm_stack_base, stack_len);
0f2d19dd 855#else
6ba93e5e 856 scm_mark_locations (scm_stack_base - stack_len, stack_len);
0f2d19dd
JB
857#endif
858 }
859
42db06f0
MD
860#else /* USE_THREADS */
861
862 /* Mark every thread's stack and registers */
945fec60 863 scm_threads_mark_stacks ();
42db06f0
MD
864
865#endif /* USE_THREADS */
0f2d19dd
JB
866
867 /* FIXME: insert a phase to un-protect string-data preserved
868 * in scm_vector_set_length_x.
869 */
870
871 j = SCM_NUM_PROTECTS;
872 while (j--)
873 scm_gc_mark (scm_sys_protects[j]);
874
9de33deb
MD
875 /* FIXME: we should have a means to register C functions to be run
876 * in different phases of GC
a00c95d9 877 */
9de33deb 878 scm_mark_subr_table ();
a00c95d9 879
42db06f0
MD
880#ifndef USE_THREADS
881 scm_gc_mark (scm_root->handle);
882#endif
a00c95d9 883
801cb5e7 884 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
0493cd89 885
0f2d19dd
JB
886 scm_gc_sweep ();
887
801cb5e7
MD
888 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
889
0f2d19dd
JB
890 --scm_gc_heap_lock;
891 scm_gc_end ();
42db06f0
MD
892
893#ifdef USE_THREADS
894 SCM_THREAD_CRITICAL_SECTION_END;
895#endif
801cb5e7 896 scm_c_hook_run (&scm_after_gc_c_hook, 0);
0f2d19dd
JB
897}
898
899\f
939794ce 900
a00c95d9 901/* {Mark/Sweep}
0f2d19dd
JB
902 */
903
904
905
906/* Mark an object precisely.
907 */
a00c95d9 908void
1bbd0b84 909scm_gc_mark (SCM p)
acf4331f 910#define FUNC_NAME "scm_gc_mark"
0f2d19dd
JB
911{
912 register long i;
913 register SCM ptr;
914
915 ptr = p;
916
917gc_mark_loop:
918 if (SCM_IMP (ptr))
919 return;
920
921gc_mark_nimp:
922 if (SCM_NCELLP (ptr))
acf4331f 923 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
0f2d19dd
JB
924
925 switch (SCM_TYP7 (ptr))
926 {
927 case scm_tcs_cons_nimcar:
928 if (SCM_GCMARKP (ptr))
929 break;
930 SCM_SETGCMARK (ptr);
931 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
932 {
933 ptr = SCM_CAR (ptr);
934 goto gc_mark_nimp;
935 }
936 scm_gc_mark (SCM_CAR (ptr));
937 ptr = SCM_GCCDR (ptr);
938 goto gc_mark_nimp;
939 case scm_tcs_cons_imcar:
acb0a19c
MD
940 if (SCM_GCMARKP (ptr))
941 break;
942 SCM_SETGCMARK (ptr);
943 ptr = SCM_GCCDR (ptr);
944 goto gc_mark_loop;
e641afaf 945 case scm_tc7_pws:
0f2d19dd
JB
946 if (SCM_GCMARKP (ptr))
947 break;
948 SCM_SETGCMARK (ptr);
54778cd3 949 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
0f2d19dd
JB
950 ptr = SCM_GCCDR (ptr);
951 goto gc_mark_loop;
952 case scm_tcs_cons_gloc:
953 if (SCM_GCMARKP (ptr))
954 break;
955 SCM_SETGCMARK (ptr);
956 {
c8045e8d
DH
957 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
958 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
959 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
960 * pointer to a struct vtable data region. The fact that these are
961 * accessed in the same way restricts the possibilities to change the
962 * data layout of structs or heap cells.
963 */
964 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
965 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
966 switch (vtable_data [scm_vtable_index_vcell])
0f2d19dd
JB
967 {
968 default:
c8045e8d
DH
969 {
970 /* ptr is a gloc */
971 SCM gloc_car = SCM_PACK (word0);
972 scm_gc_mark (gloc_car);
973 ptr = SCM_GCCDR (ptr);
974 goto gc_mark_loop;
975 }
0f2d19dd
JB
976 case 1: /* ! */
977 case 0: /* ! */
978 {
c8045e8d
DH
979 /* ptr is a struct */
980 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
981 int len = SCM_LENGTH (layout);
982 char * fields_desc = SCM_CHARS (layout);
14d1400f
JB
983 /* We're using SCM_GCCDR here like STRUCT_DATA, except
984 that it removes the mark */
c8045e8d 985 scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));
a00c95d9 986
c8045e8d 987 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
aa0761ec 988 {
c8045e8d
DH
989 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
990 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
aa0761ec 991 }
ad75306c
MD
992 if (len)
993 {
c8045e8d
DH
994 int x;
995
996 for (x = 0; x < len - 2; x += 2, ++struct_data)
ad75306c 997 if (fields_desc[x] == 'p')
c8045e8d 998 scm_gc_mark (SCM_PACK (*struct_data));
ad75306c
MD
999 if (fields_desc[x] == 'p')
1000 {
1001 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
c8045e8d
DH
1002 for (x = *struct_data; x; --x)
1003 scm_gc_mark (SCM_PACK (*++struct_data));
ad75306c 1004 else
c8045e8d 1005 scm_gc_mark (SCM_PACK (*struct_data));
ad75306c
MD
1006 }
1007 }
c8045e8d 1008 if (vtable_data [scm_vtable_index_vcell] == 0)
0f2d19dd 1009 {
c8045e8d
DH
1010 vtable_data [scm_vtable_index_vcell] = 1;
1011 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
0f2d19dd
JB
1012 goto gc_mark_loop;
1013 }
1014 }
1015 }
1016 }
1017 break;
1018 case scm_tcs_closures:
1019 if (SCM_GCMARKP (ptr))
1020 break;
1021 SCM_SETGCMARK (ptr);
1022 if (SCM_IMP (SCM_CDR (ptr)))
1023 {
1024 ptr = SCM_CLOSCAR (ptr);
1025 goto gc_mark_nimp;
1026 }
1027 scm_gc_mark (SCM_CLOSCAR (ptr));
1028 ptr = SCM_GCCDR (ptr);
1029 goto gc_mark_nimp;
1030 case scm_tc7_vector:
1031 case scm_tc7_lvector:
1032#ifdef CCLO
1033 case scm_tc7_cclo:
1034#endif
1035 if (SCM_GC8MARKP (ptr))
1036 break;
1037 SCM_SETGC8MARK (ptr);
1038 i = SCM_LENGTH (ptr);
1039 if (i == 0)
1040 break;
1041 while (--i > 0)
1042 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1043 scm_gc_mark (SCM_VELTS (ptr)[i]);
1044 ptr = SCM_VELTS (ptr)[0];
1045 goto gc_mark_loop;
1046 case scm_tc7_contin:
1047 if SCM_GC8MARKP
1048 (ptr) break;
1049 SCM_SETGC8MARK (ptr);
c68296f8 1050 if (SCM_VELTS (ptr))
41b0806d 1051 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
c68296f8
MV
1052 (scm_sizet)
1053 (SCM_LENGTH (ptr) +
1054 (sizeof (SCM_STACKITEM) + -1 +
1055 sizeof (scm_contregs)) /
1056 sizeof (SCM_STACKITEM)));
0f2d19dd 1057 break;
afe5177e 1058#ifdef HAVE_ARRAYS
0f2d19dd
JB
1059 case scm_tc7_bvect:
1060 case scm_tc7_byvect:
1061 case scm_tc7_ivect:
1062 case scm_tc7_uvect:
1063 case scm_tc7_fvect:
1064 case scm_tc7_dvect:
1065 case scm_tc7_cvect:
1066 case scm_tc7_svect:
5c11cc9d 1067#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1068 case scm_tc7_llvect:
1069#endif
afe5177e 1070#endif
0f2d19dd 1071 case scm_tc7_string:
0f2d19dd
JB
1072 SCM_SETGC8MARK (ptr);
1073 break;
1074
1075 case scm_tc7_substring:
0f2d19dd
JB
1076 if (SCM_GC8MARKP(ptr))
1077 break;
1078 SCM_SETGC8MARK (ptr);
1079 ptr = SCM_CDR (ptr);
1080 goto gc_mark_loop;
1081
1082 case scm_tc7_wvect:
1083 if (SCM_GC8MARKP(ptr))
1084 break;
ab4bef85
JB
1085 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1086 scm_weak_vectors = ptr;
0f2d19dd
JB
1087 SCM_SETGC8MARK (ptr);
1088 if (SCM_IS_WHVEC_ANY (ptr))
1089 {
1090 int x;
1091 int len;
1092 int weak_keys;
1093 int weak_values;
1094
1095 len = SCM_LENGTH (ptr);
1096 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1097 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
a00c95d9 1098
0f2d19dd
JB
1099 for (x = 0; x < len; ++x)
1100 {
1101 SCM alist;
1102 alist = SCM_VELTS (ptr)[x];
46408039
JB
1103
1104 /* mark everything on the alist except the keys or
1105 * values, according to weak_values and weak_keys. */
0b5f3f34 1106 while ( SCM_CONSP (alist)
0f2d19dd 1107 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1108 && SCM_CONSP (SCM_CAR (alist)))
1109 {
1110 SCM kvpair;
1111 SCM next_alist;
1112
1113 kvpair = SCM_CAR (alist);
1114 next_alist = SCM_CDR (alist);
a00c95d9 1115 /*
0f2d19dd
JB
1116 * Do not do this:
1117 * SCM_SETGCMARK (alist);
1118 * SCM_SETGCMARK (kvpair);
1119 *
1120 * It may be that either the key or value is protected by
1121 * an escaped reference to part of the spine of this alist.
1122 * If we mark the spine here, and only mark one or neither of the
1123 * key and value, they may never be properly marked.
1124 * This leads to a horrible situation in which an alist containing
1125 * freelist cells is exported.
1126 *
1127 * So only mark the spines of these arrays last of all marking.
1128 * If somebody confuses us by constructing a weak vector
1129 * with a circular alist then we are hosed, but at least we
1130 * won't prematurely drop table entries.
1131 */
1132 if (!weak_keys)
1133 scm_gc_mark (SCM_CAR (kvpair));
1134 if (!weak_values)
1135 scm_gc_mark (SCM_GCCDR (kvpair));
1136 alist = next_alist;
1137 }
1138 if (SCM_NIMP (alist))
1139 scm_gc_mark (alist);
1140 }
1141 }
1142 break;
1143
1144 case scm_tc7_msymbol:
1145 if (SCM_GC8MARKP(ptr))
1146 break;
1147 SCM_SETGC8MARK (ptr);
1148 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1149 ptr = SCM_SYMBOL_PROPS (ptr);
1150 goto gc_mark_loop;
1151 case scm_tc7_ssymbol:
1152 if (SCM_GC8MARKP(ptr))
1153 break;
1154 SCM_SETGC8MARK (ptr);
1155 break;
1156 case scm_tcs_subrs:
9de33deb 1157 break;
0f2d19dd
JB
1158 case scm_tc7_port:
1159 i = SCM_PTOBNUM (ptr);
1160 if (!(i < scm_numptob))
1161 goto def;
1162 if (SCM_GC8MARKP (ptr))
1163 break;
dc53f026 1164 SCM_SETGC8MARK (ptr);
ebf7394e
GH
1165 if (SCM_PTAB_ENTRY(ptr))
1166 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
dc53f026
JB
1167 if (scm_ptobs[i].mark)
1168 {
1169 ptr = (scm_ptobs[i].mark) (ptr);
1170 goto gc_mark_loop;
1171 }
1172 else
1173 return;
0f2d19dd
JB
1174 break;
1175 case scm_tc7_smob:
1176 if (SCM_GC8MARKP (ptr))
1177 break;
dc53f026 1178 SCM_SETGC8MARK (ptr);
acb0a19c 1179 switch (SCM_GCTYP16 (ptr))
0f2d19dd
JB
1180 { /* should be faster than going through scm_smobs */
1181 case scm_tc_free_cell:
1182 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1bbd0b84 1183 case scm_tc16_allocated:
acb0a19c
MD
1184 case scm_tc16_big:
1185 case scm_tc16_real:
1186 case scm_tc16_complex:
0f2d19dd
JB
1187 break;
1188 default:
1189 i = SCM_SMOBNUM (ptr);
1190 if (!(i < scm_numsmob))
1191 goto def;
dc53f026
JB
1192 if (scm_smobs[i].mark)
1193 {
1194 ptr = (scm_smobs[i].mark) (ptr);
1195 goto gc_mark_loop;
1196 }
1197 else
1198 return;
0f2d19dd
JB
1199 }
1200 break;
1201 default:
acf4331f
DH
1202 def:
1203 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd
JB
1204 }
1205}
acf4331f 1206#undef FUNC_NAME
0f2d19dd
JB
1207
1208
1209/* Mark a Region Conservatively
1210 */
1211
a00c95d9 1212void
6e8d25a6 1213scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
0f2d19dd
JB
1214{
1215 register long m = n;
1216 register int i, j;
1217 register SCM_CELLPTR ptr;
1218
1219 while (0 <= --m)
c67baafd 1220 if (SCM_CELLP (* (SCM *) &x[m]))
0f2d19dd 1221 {
195e6201 1222 ptr = SCM2PTR (* (SCM *) &x[m]);
0f2d19dd
JB
1223 i = 0;
1224 j = scm_n_heap_segs - 1;
1225 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1226 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1227 {
1228 while (i <= j)
1229 {
1230 int seg_id;
1231 seg_id = -1;
1232 if ( (i == j)
1233 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1234 seg_id = i;
1235 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1236 seg_id = j;
1237 else
1238 {
1239 int k;
1240 k = (i + j) / 2;
1241 if (k == i)
1242 break;
1243 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1244 {
1245 j = k;
1246 ++i;
1247 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1248 continue;
1249 else
1250 break;
1251 }
1252 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1253 {
1254 i = k;
1255 --j;
1256 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1257 continue;
1258 else
1259 break;
1260 }
1261 }
47457e8a
DH
1262 if (scm_heap_table[seg_id].span == 1
1263 || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
1264 scm_gc_mark (* (SCM *) &x[m]);
0f2d19dd
JB
1265 break;
1266 }
1267
1268 }
1269 }
1270}
1271
1272
1a548472
DH
1273/* The function scm_cellp determines whether an SCM value can be regarded as a
1274 * pointer to a cell on the heap. Binary search is used in order to determine
1275 * the heap segment that contains the cell.
1276 */
2e11a577 1277int
6e8d25a6 1278scm_cellp (SCM value)
2e11a577 1279{
1a548472
DH
1280 if (SCM_CELLP (value)) {
1281 scm_cell * ptr = SCM2PTR (value);
1282 unsigned int i = 0;
1283 unsigned int j = scm_n_heap_segs - 1;
1284
1285 while (i < j) {
1286 int k = (i + j) / 2;
1287 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1288 j = k;
1289 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1290 i = k + 1;
1291 }
1292 }
2e11a577 1293
1a548472
DH
1294 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1295 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1a548472
DH
1296 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
1297 return 1;
1298 } else {
1299 return 0;
2e11a577 1300 }
1a548472
DH
1301 } else {
1302 return 0;
1303 }
2e11a577
MD
1304}
1305
1306
4c48ba06
MD
1307static void
1308gc_sweep_freelist_start (scm_freelist_t *freelist)
1309{
1310 freelist->cells = SCM_EOL;
1311 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1312 freelist->clusters_allocated = 0;
4c48ba06
MD
1313 freelist->clusters = SCM_EOL;
1314 freelist->clustertail = &freelist->clusters;
1811ebce 1315 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1316 freelist->collected = 0;
1317}
1318
1319static void
1320gc_sweep_freelist_finish (scm_freelist_t *freelist)
1321{
1811ebce 1322 int collected;
4c48ba06
MD
1323 *freelist->clustertail = freelist->cells;
1324 if (SCM_NNULLP (freelist->cells))
1325 {
1326 SCM c = freelist->cells;
1327 SCM_SETCAR (c, SCM_CDR (c));
1328 SCM_SETCDR (c, SCM_EOL);
1329 freelist->collected +=
1330 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1331 }
b37fe1c5 1332 scm_gc_cells_collected += freelist->collected;
a00c95d9 1333
8fef55a8 1334 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1335 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1336 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1337 * size. This means that a too low yield is compensated by more
1338 * heap on the list which is currently doing most work, which is
1339 * just what we want.
1340 */
1811ebce 1341 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1342 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06 1343}
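
/* Example of the decision above: if this freelist's min_yield is 40000
 * cells and the better of its last two sweeps (collected_1 vs.
 * collected) reclaimed only 30000, grow_heap_p is set, so the next time
 * scm_gc_for_newcell runs out of clusters it will try to allocate a new
 * heap segment before resorting to another collection.
 */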
0f2d19dd 1344
a00c95d9 1345void
0f2d19dd 1346scm_gc_sweep ()
acf4331f 1347#define FUNC_NAME "scm_gc_sweep"
0f2d19dd
JB
1348{
1349 register SCM_CELLPTR ptr;
0f2d19dd 1350 register SCM nfreelist;
4c48ba06 1351 register scm_freelist_t *freelist;
0f2d19dd 1352 register long m;
0f2d19dd 1353 register int span;
15e9d186 1354 long i;
0f2d19dd
JB
1355 scm_sizet seg_size;
1356
0f2d19dd 1357 m = 0;
0f2d19dd 1358
4c48ba06
MD
1359 gc_sweep_freelist_start (&scm_master_freelist);
1360 gc_sweep_freelist_start (&scm_master_freelist2);
a00c95d9 1361
cf2d30f6 1362 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1363 {
4c48ba06 1364 register unsigned int left_to_collect;
4c48ba06 1365 register scm_sizet j;
15e9d186 1366
cf2d30f6
JB
1367 /* Unmarked cells go onto the front of the freelist this heap
1368 segment points to. Rather than updating the real freelist
1369 pointer as we go along, we accumulate the new head in
1370 nfreelist. Then, if it turns out that the entire segment is
1371 free, we free (i.e., malloc's free) the whole segment, and
1372 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1373 freelist = scm_heap_table[i].freelist;
1374 nfreelist = freelist->cells;
4c48ba06 1375 left_to_collect = freelist->left_to_collect;
945fec60 1376 span = scm_heap_table[i].span;
cf2d30f6 1377
a00c95d9
ML
1378 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1379 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
0f2d19dd
JB
1380 for (j = seg_size + span; j -= span; ptr += span)
1381 {
96f6f4ae
DH
1382 SCM scmptr = PTR2SCM (ptr);
1383
0f2d19dd
JB
1384 switch SCM_TYP7 (scmptr)
1385 {
1386 case scm_tcs_cons_gloc:
0f2d19dd 1387 {
c8045e8d
DH
1388 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1389 * struct or a gloc. See the corresponding comment in
1390 * scm_gc_mark.
1391 */
1392 scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
1393 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1394 if (SCM_GCMARKP (scmptr))
0f2d19dd 1395 {
c8045e8d
DH
1396 if (vtable_data [scm_vtable_index_vcell] == 1)
1397 vtable_data [scm_vtable_index_vcell] = 0;
1398 goto cmrkcontinue;
1399 }
1400 else
1401 {
1402 if (vtable_data [scm_vtable_index_vcell] == 0
1403 || vtable_data [scm_vtable_index_vcell] == 1)
1404 {
1405 scm_struct_free_t free
1406 = (scm_struct_free_t) vtable_data[scm_struct_i_free];
1407 m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
1408 }
0f2d19dd
JB
1409 }
1410 }
1411 break;
1412 case scm_tcs_cons_imcar:
1413 case scm_tcs_cons_nimcar:
1414 case scm_tcs_closures:
e641afaf 1415 case scm_tc7_pws:
0f2d19dd
JB
1416 if (SCM_GCMARKP (scmptr))
1417 goto cmrkcontinue;
1418 break;
1419 case scm_tc7_wvect:
1420 if (SCM_GC8MARKP (scmptr))
1421 {
1422 goto c8mrkcontinue;
1423 }
1424 else
1425 {
ab4bef85
JB
1426 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1427 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
0f2d19dd
JB
1428 break;
1429 }
1430
1431 case scm_tc7_vector:
1432 case scm_tc7_lvector:
1433#ifdef CCLO
1434 case scm_tc7_cclo:
1435#endif
1436 if (SCM_GC8MARKP (scmptr))
1437 goto c8mrkcontinue;
1438
1439 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1440 freechars:
1441 scm_must_free (SCM_CHARS (scmptr));
1442 /* SCM_SETCHARS(scmptr, 0);*/
1443 break;
afe5177e 1444#ifdef HAVE_ARRAYS
0f2d19dd
JB
1445 case scm_tc7_bvect:
1446 if SCM_GC8MARKP (scmptr)
1447 goto c8mrkcontinue;
1448 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1449 goto freechars;
1450 case scm_tc7_byvect:
1451 if SCM_GC8MARKP (scmptr)
1452 goto c8mrkcontinue;
1453 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1454 goto freechars;
1455 case scm_tc7_ivect:
1456 case scm_tc7_uvect:
1457 if SCM_GC8MARKP (scmptr)
1458 goto c8mrkcontinue;
1459 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1460 goto freechars;
1461 case scm_tc7_svect:
1462 if SCM_GC8MARKP (scmptr)
1463 goto c8mrkcontinue;
1464 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1465 goto freechars;
5c11cc9d 1466#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1467 case scm_tc7_llvect:
1468 if SCM_GC8MARKP (scmptr)
1469 goto c8mrkcontinue;
1470 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1471 goto freechars;
1472#endif
1473 case scm_tc7_fvect:
1474 if SCM_GC8MARKP (scmptr)
1475 goto c8mrkcontinue;
1476 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1477 goto freechars;
1478 case scm_tc7_dvect:
1479 if SCM_GC8MARKP (scmptr)
1480 goto c8mrkcontinue;
1481 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1482 goto freechars;
1483 case scm_tc7_cvect:
1484 if SCM_GC8MARKP (scmptr)
1485 goto c8mrkcontinue;
1486 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1487 goto freechars;
afe5177e 1488#endif
0f2d19dd 1489 case scm_tc7_substring:
0f2d19dd
JB
1490 if (SCM_GC8MARKP (scmptr))
1491 goto c8mrkcontinue;
1492 break;
1493 case scm_tc7_string:
0f2d19dd
JB
1494 if (SCM_GC8MARKP (scmptr))
1495 goto c8mrkcontinue;
1496 m += SCM_HUGE_LENGTH (scmptr) + 1;
1497 goto freechars;
1498 case scm_tc7_msymbol:
1499 if (SCM_GC8MARKP (scmptr))
1500 goto c8mrkcontinue;
cf551a2b
DH
1501 m += (SCM_LENGTH (scmptr) + 1
1502 + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
0f2d19dd
JB
1503 scm_must_free ((char *)SCM_SLOTS (scmptr));
1504 break;
1505 case scm_tc7_contin:
1506 if SCM_GC8MARKP (scmptr)
1507 goto c8mrkcontinue;
0db18cf4 1508 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
c68296f8
MV
1509 if (SCM_VELTS (scmptr))
1510 goto freechars;
0f2d19dd
JB
1511 case scm_tc7_ssymbol:
1512 if SCM_GC8MARKP(scmptr)
1513 goto c8mrkcontinue;
1514 break;
1515 case scm_tcs_subrs:
1516 continue;
1517 case scm_tc7_port:
1518 if SCM_GC8MARKP (scmptr)
1519 goto c8mrkcontinue;
1520 if SCM_OPENP (scmptr)
1521 {
1522 int k = SCM_PTOBNUM (scmptr);
1523 if (!(k < scm_numptob))
1524 goto sweeperr;
1525 /* Keep "revealed" ports alive. */
945fec60 1526 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1527 continue;
1528 /* Yes, I really do mean scm_ptobs[k].free */
1529 /* rather than ftobs[k].close. .close */
1530 /* is for explicit CLOSE-PORT by user */
84af0382 1531 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1532 SCM_SETSTREAM (scmptr, 0);
1533 scm_remove_from_port_table (scmptr);
1534 scm_gc_ports_collected++;
24e68a57 1535 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
0f2d19dd
JB
1536 }
1537 break;
1538 case scm_tc7_smob:
1539 switch SCM_GCTYP16 (scmptr)
1540 {
1541 case scm_tc_free_cell:
acb0a19c 1542 case scm_tc16_real:
0f2d19dd
JB
1543 if SCM_GC8MARKP (scmptr)
1544 goto c8mrkcontinue;
1545 break;
1546#ifdef SCM_BIGDIG
acb0a19c 1547 case scm_tc16_big:
0f2d19dd
JB
1548 if SCM_GC8MARKP (scmptr)
1549 goto c8mrkcontinue;
1550 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1551 goto freechars;
1552#endif /* def SCM_BIGDIG */
acb0a19c 1553 case scm_tc16_complex:
0f2d19dd
JB
1554 if SCM_GC8MARKP (scmptr)
1555 goto c8mrkcontinue;
acb0a19c
MD
1556 m += 2 * sizeof (double);
1557 goto freechars;
0f2d19dd
JB
1558 default:
1559 if SCM_GC8MARKP (scmptr)
1560 goto c8mrkcontinue;
1561
1562 {
1563 int k;
1564 k = SCM_SMOBNUM (scmptr);
1565 if (!(k < scm_numsmob))
1566 goto sweeperr;
c8045e8d 1567 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1568 break;
1569 }
1570 }
1571 break;
1572 default:
acf4331f
DH
1573 sweeperr:
1574 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1575 }
0f2d19dd
JB
1576#if 0
1577 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1578 exit (2);
1579#endif
4c48ba06 1580 if (!--left_to_collect)
4a4c9785
MD
1581 {
1582 SCM_SETCAR (scmptr, nfreelist);
4c48ba06
MD
1583 *freelist->clustertail = scmptr;
1584 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1585
4a4c9785 1586 nfreelist = SCM_EOL;
4c48ba06
MD
1587 freelist->collected += span * freelist->cluster_size;
1588 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1589 }
1590 else
4a4c9785
MD
1591 {
1592 /* Stick the new cell on the front of nfreelist. It's
1593 critical that we mark this cell as freed; otherwise, the
1594 conservative collector might trace it as some other type
1595 of object. */
54778cd3 1596 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
1597 SCM_SETCDR (scmptr, nfreelist);
1598 nfreelist = scmptr;
1599 }
a00c95d9 1600
0f2d19dd
JB
1601 continue;
1602 c8mrkcontinue:
1603 SCM_CLRGC8MARK (scmptr);
1604 continue;
1605 cmrkcontinue:
1606 SCM_CLRGCMARK (scmptr);
1607 }
1608#ifdef GC_FREE_SEGMENTS
1609 if (n == seg_size)
1610 {
15e9d186
JB
1611 register long j;
1612
4c48ba06 1613 freelist->heap_size -= seg_size;
cf2d30f6
JB
1614 free ((char *) scm_heap_table[i].bounds[0]);
1615 scm_heap_table[i].bounds[0] = 0;
1616 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1617 scm_heap_table[j - 1] = scm_heap_table[j];
1618 scm_n_heap_segs -= 1;
cf2d30f6 1619 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1620 }
1621 else
1622#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1623 {
1624 /* Update the real freelist pointer to point to the head of
1625 the list of free cells we've built for this segment. */
4c48ba06 1626 freelist->cells = nfreelist;
4c48ba06 1627 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1628 }
1629
fca7547b 1630#ifdef GUILE_DEBUG_FREELIST
4c48ba06 1631 scm_check_freelist (freelist == &scm_master_freelist
8ded62a3
MD
1632 ? scm_freelist
1633 : scm_freelist2);
cf2d30f6
JB
1634 scm_map_free_list ();
1635#endif
4a4c9785 1636 }
a00c95d9 1637
4c48ba06
MD
1638 gc_sweep_freelist_finish (&scm_master_freelist);
1639 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1640
8ded62a3
MD
1641 /* When we move to POSIX threads private freelists should probably
1642 be GC-protected instead. */
1643 scm_freelist = SCM_EOL;
1644 scm_freelist2 = SCM_EOL;
a00c95d9 1645
b37fe1c5 1646 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f 1647 scm_gc_yield -= scm_cells_allocated;
0f2d19dd
JB
1648 scm_mallocated -= m;
1649 scm_gc_malloc_collected = m;
1650}
acf4331f 1651#undef FUNC_NAME
0f2d19dd
JB
1652
1653
1654\f
1655
1656/* {Front end to malloc}
1657 *
c68296f8 1658 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
0f2d19dd
JB
1659 *
1660 * These functions provide services comparable to malloc, realloc, and
1661 * free. They are for allocating malloced parts of scheme objects.
1662 * The primary purpose of the front end is to impose calls to gc.
1663 */
1664
bc9d9bb2 1665
0f2d19dd
JB
1666/* scm_must_malloc
1667 * Return newly malloced storage or throw an error.
1668 *
1669 * The parameter WHAT is a string for error reporting.
a00c95d9 1670 * If the threshold scm_mtrigger will be passed by this
0f2d19dd
JB
1671 * allocation, or if the first call to malloc fails,
1672 * garbage collect -- on the presumption that some objects
1673 * using malloced storage may be collected.
1674 *
1675 * The limit scm_mtrigger may be raised by this allocation.
1676 */
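/* For example, suppose scm_mtrigger stands at SCM_INIT_MALLOC_LIMIT
 * (100000 bytes).  A request that would raise scm_mallocated to 120000
 * bytes exceeds the trigger, so a GC is run first; if the total after
 * GC still comes to 120000, the trigger is raised to
 * 120000 + 60000 = 180000.  If instead the post-GC total merely lands
 * within SCM_MTRIGGER_HYSTERESIS (10000 bytes) below the trigger, the
 * trigger is only raised by half of its current value.
 */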
07806695 1677void *
e4ef2330 1678scm_must_malloc (scm_sizet size, const char *what)
0f2d19dd 1679{
07806695 1680 void *ptr;
15e9d186 1681 unsigned long nm = scm_mallocated + size;
e4ef2330
MD
1682
1683 if (nm <= scm_mtrigger)
0f2d19dd 1684 {
07806695 1685 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1686 if (NULL != ptr)
1687 {
1688 scm_mallocated = nm;
bc9d9bb2
MD
1689#ifdef GUILE_DEBUG_MALLOC
1690 scm_malloc_register (ptr, what);
1691#endif
0f2d19dd
JB
1692 return ptr;
1693 }
1694 }
6064dcc6 1695
0f2d19dd 1696 scm_igc (what);
e4ef2330 1697
0f2d19dd 1698 nm = scm_mallocated + size;
07806695 1699 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1700 if (NULL != ptr)
1701 {
1702 scm_mallocated = nm;
6064dcc6
MV
1703 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1704 if (nm > scm_mtrigger)
1705 scm_mtrigger = nm + nm / 2;
1706 else
1707 scm_mtrigger += scm_mtrigger / 2;
1708 }
bc9d9bb2
MD
1709#ifdef GUILE_DEBUG_MALLOC
1710 scm_malloc_register (ptr, what);
1711#endif
1712
0f2d19dd
JB
1713 return ptr;
1714 }
e4ef2330 1715
acf4331f 1716 scm_memory_error (what);
0f2d19dd
JB
1717}
1718
1719
1720/* scm_must_realloc
1721 * is similar to scm_must_malloc.
1722 */
07806695
JB
1723void *
1724scm_must_realloc (void *where,
e4ef2330
MD
1725 scm_sizet old_size,
1726 scm_sizet size,
3eeba8d4 1727 const char *what)
0f2d19dd 1728{
07806695 1729 void *ptr;
e4ef2330
MD
1730 scm_sizet nm = scm_mallocated + size - old_size;
1731
1732 if (nm <= scm_mtrigger)
0f2d19dd 1733 {
07806695 1734 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1735 if (NULL != ptr)
1736 {
1737 scm_mallocated = nm;
bc9d9bb2
MD
1738#ifdef GUILE_DEBUG_MALLOC
1739 scm_malloc_reregister (where, ptr, what);
1740#endif
0f2d19dd
JB
1741 return ptr;
1742 }
1743 }
e4ef2330 1744
0f2d19dd 1745 scm_igc (what);
e4ef2330
MD
1746
1747 nm = scm_mallocated + size - old_size;
07806695 1748 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1749 if (NULL != ptr)
1750 {
1751 scm_mallocated = nm;
6064dcc6
MV
1752 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1753 if (nm > scm_mtrigger)
1754 scm_mtrigger = nm + nm / 2;
1755 else
1756 scm_mtrigger += scm_mtrigger / 2;
1757 }
bc9d9bb2
MD
1758#ifdef GUILE_DEBUG_MALLOC
1759 scm_malloc_reregister (where, ptr, what);
1760#endif
0f2d19dd
JB
1761 return ptr;
1762 }
e4ef2330 1763
acf4331f 1764 scm_memory_error (what);
0f2d19dd
JB
1765}
1766
acf4331f 1767
a00c95d9 1768void
07806695 1769scm_must_free (void *obj)
acf4331f 1770#define FUNC_NAME "scm_must_free"
0f2d19dd 1771{
bc9d9bb2
MD
1772#ifdef GUILE_DEBUG_MALLOC
1773 scm_malloc_unregister (obj);
1774#endif
0f2d19dd
JB
1775 if (obj)
1776 free (obj);
1777 else
acf4331f 1778 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
0f2d19dd 1779}
acf4331f
DH
1780#undef FUNC_NAME
1781
0f2d19dd 1782
c68296f8
MV
 1783/* Announce that some malloc'd memory has been obtained that will be
 1784 * freed during gc. A typical use is for a smob that uses some malloc'd
 1785 * memory but cannot get it from scm_must_malloc (for whatever
 1786 * reason). When a new object of this smob type is created, call
 1787 * scm_done_malloc with the size of the object. When your smob free
 1788 * function is called, be sure to include this size in its return
 1789 * value. */
0f2d19dd 1790
c68296f8 1791void
6e8d25a6 1792scm_done_malloc (long size)
c68296f8
MV
1793{
1794 scm_mallocated += size;
1795
1796 if (scm_mallocated > scm_mtrigger)
1797 {
1798 scm_igc ("foreign mallocs");
1799 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1800 {
1801 if (scm_mallocated > scm_mtrigger)
1802 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1803 else
1804 scm_mtrigger += scm_mtrigger / 2;
1805 }
1806 }
1807}
1808
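/* An illustrative sketch of scm_done_malloc in a smob whose memory
 * comes from a foreign library rather than from scm_must_malloc.
 * The names make_widget, lib_widget_new, lib_widget_free and
 * wrap_widget are made up, and the widget pointer is assumed to be
 * stored in the smob's cdr:
 *
 *   static SCM
 *   make_widget (void)
 *   {
 *     struct widget *w = lib_widget_new ();
 *     scm_done_malloc ((long) sizeof (struct widget));
 *     return wrap_widget (w);
 *   }
 *
 *   static scm_sizet
 *   free_widget (SCM widget_smob)
 *   {
 *     lib_widget_free ((struct widget *) SCM_CDR (widget_smob));
 *     return sizeof (struct widget);
 *   }
 *
 * The size returned by free_widget matches the size announced with
 * scm_done_malloc, keeping scm_mallocated balanced.
 */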
1809
1810\f
0f2d19dd
JB
1811
1812/* {Heap Segments}
1813 *
1814 * Each heap segment is an array of objects of a particular size.
1815 * Every segment has an associated (possibly shared) freelist.
1816 * A table of segment records is kept that records the upper and
1817 * lower extents of the segment; this is used during the conservative
 1818 * phase of gc to identify probable gc roots (because they point
c68296f8 1819 * into valid segments at reasonable offsets). */
0f2d19dd
JB
1820
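/* An illustrative sketch (not part of the original file) of the kind
 * of test the conservative scan performs against this table: a word
 * found on the stack is considered a possible root only if it falls
 * within the bounds of some recorded segment.  The helper name is
 * made up, and the real test (in scm_mark_locations) is more careful
 * about spans and offsets:
 *
 *   static int
 *   points_into_heap (SCM_CELLPTR ptr)
 *   {
 *     int i;
 *     for (i = 0; i < scm_n_heap_segs; i++)
 *       if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
 *           && !SCM_PTR_LE (scm_heap_table[i].bounds[1], ptr))
 *         return 1;
 *     return 0;
 *   }
 */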
1821/* scm_expmem
1822 * is true if the first segment was smaller than INIT_HEAP_SEG.
1823 * If scm_expmem is set to one, subsequent segment allocations will
1824 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1825 */
1826int scm_expmem = 0;
1827
4c48ba06
MD
1828scm_sizet scm_max_segment_size;
1829
0f2d19dd
JB
1830/* scm_heap_org
1831 * is the lowest base address of any heap segment.
1832 */
1833SCM_CELLPTR scm_heap_org;
1834
a00c95d9 1835scm_heap_seg_data_t * scm_heap_table = 0;
b6efc951 1836static unsigned int heap_segment_table_size = 0;
0f2d19dd
JB
1837int scm_n_heap_segs = 0;
1838
0f2d19dd
JB
1839/* init_heap_seg
 1840 * initializes a new heap segment and returns its size in bytes.
1841 *
1842 * The segment origin, segment size in bytes, and the span of objects
1843 * in cells are input parameters. The freelist is both input and output.
1844 *
 1845 * This function presumes that the scm_heap_table has already been expanded
 1846 * to accommodate a new segment record.
1847 */
1848
1849
a00c95d9 1850static scm_sizet
4c48ba06 1851init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
0f2d19dd
JB
1852{
1853 register SCM_CELLPTR ptr;
0f2d19dd 1854 SCM_CELLPTR seg_end;
15e9d186 1855 int new_seg_index;
acb0a19c 1856 int n_new_cells;
4c48ba06 1857 int span = freelist->span;
a00c95d9 1858
0f2d19dd
JB
1859 if (seg_org == NULL)
1860 return 0;
1861
a00c95d9 1862 ptr = CELL_UP (seg_org, span);
acb0a19c 1863
a00c95d9 1864 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 1865 */
a00c95d9 1866 seg_end = CELL_DN ((char *) seg_org + size, span);
0f2d19dd 1867
a00c95d9 1868 /* Find the right place and insert the segment record.
0f2d19dd
JB
1869 *
1870 */
1871 for (new_seg_index = 0;
1872 ( (new_seg_index < scm_n_heap_segs)
1873 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1874 new_seg_index++)
1875 ;
1876
1877 {
1878 int i;
1879 for (i = scm_n_heap_segs; i > new_seg_index; --i)
1880 scm_heap_table[i] = scm_heap_table[i - 1];
1881 }
a00c95d9 1882
0f2d19dd
JB
1883 ++scm_n_heap_segs;
1884
945fec60 1885 scm_heap_table[new_seg_index].span = span;
4c48ba06 1886 scm_heap_table[new_seg_index].freelist = freelist;
195e6201
DH
1887 scm_heap_table[new_seg_index].bounds[0] = ptr;
1888 scm_heap_table[new_seg_index].bounds[1] = seg_end;
0f2d19dd
JB
1889
1890
a00c95d9 1891 /* Compute the least valid object pointer w/in this segment
0f2d19dd 1892 */
a00c95d9 1893 ptr = CELL_UP (ptr, span);
0f2d19dd
JB
1894
1895
acb0a19c
MD
1896 /*n_new_cells*/
1897 n_new_cells = seg_end - ptr;
0f2d19dd 1898
4c48ba06 1899 freelist->heap_size += n_new_cells;
4a4c9785 1900
a00c95d9 1901 /* Partition objects in this segment into clusters */
4a4c9785
MD
1902 {
1903 SCM clusters;
1904 SCM *clusterp = &clusters;
4c48ba06 1905 int n_cluster_cells = span * freelist->cluster_size;
4a4c9785 1906
4c48ba06 1907 while (n_new_cells > span) /* at least one spine + one freecell */
4a4c9785 1908 {
4c48ba06
MD
1909 /* Determine end of cluster
1910 */
1911 if (n_new_cells >= n_cluster_cells)
1912 {
1913 seg_end = ptr + n_cluster_cells;
1914 n_new_cells -= n_cluster_cells;
1915 }
4a4c9785 1916 else
a00c95d9
ML
1917 /* [cmm] looks like the segment size doesn't divide cleanly by
1918 cluster size. bad cmm! */
1919 abort();
4a4c9785 1920
4c48ba06
MD
1921 /* Allocate cluster spine
1922 */
4a4c9785
MD
1923 *clusterp = PTR2SCM (ptr);
1924 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
1925 clusterp = SCM_CDRLOC (*clusterp);
4a4c9785 1926 ptr += span;
a00c95d9 1927
4a4c9785
MD
1928 while (ptr < seg_end)
1929 {
96f6f4ae
DH
1930 SCM scmptr = PTR2SCM (ptr);
1931
54778cd3 1932 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
1933 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
1934 ptr += span;
1935 }
4c48ba06 1936
4a4c9785
MD
1937 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
1938 }
a00c95d9 1939
4a4c9785
MD
1940 /* Patch up the last cluster pointer in the segment
1941 * to join it to the input freelist.
1942 */
4c48ba06
MD
1943 *clusterp = freelist->clusters;
1944 freelist->clusters = clusters;
4a4c9785
MD
1945 }
1946
4c48ba06
MD
1947#ifdef DEBUGINFO
1948 fprintf (stderr, "H");
1949#endif
0f2d19dd 1950 return size;
0f2d19dd
JB
1951}
1952
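/* An illustrative sketch (not part of the original file) of the list
 * structure init_heap_seg leaves behind: freelist->clusters is a
 * chain of spine cells linked through their cdrs, and each spine
 * cell's car points to a chain of free cells, also linked through
 * their cdrs and terminated by SCM_EOL.  Immediately after
 * init_heap_seg has run, the free cells could be counted like this
 * (the helper name is made up):
 *
 *   static long
 *   count_free_cells (scm_freelist_t *freelist)
 *   {
 *     long n = 0;
 *     SCM cluster, cell;
 *     for (cluster = freelist->clusters;
 *          !SCM_NULLP (cluster);
 *          cluster = SCM_CDR (cluster))
 *       for (cell = SCM_CAR (cluster);
 *            !SCM_NULLP (cell);
 *            cell = SCM_CDR (cell))
 *         n++;
 *     return n;
 *   }
 */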
a00c95d9
ML
1953static scm_sizet
1954round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
1955{
1956 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
1957
1958 return
1959 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
1960 + ALIGNMENT_SLACK (freelist);
1961}
1962
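/* For example (with illustrative numbers): if the cluster size works
 * out to 8192 bytes, a request of 20000 bytes is rounded up to
 * (20000 + 8191) / 8192 * 8192 = 24576 bytes, plus the alignment
 * slack, so that init_heap_seg can carve the segment into a whole
 * number of clusters.
 */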
a00c95d9 1963static void
b6efc951 1964alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
acf4331f 1965#define FUNC_NAME "alloc_some_heap"
0f2d19dd 1966{
0f2d19dd 1967 SCM_CELLPTR ptr;
b37fe1c5 1968 long len;
a00c95d9 1969
b6efc951
DH
1970 if (scm_gc_heap_lock)
1971 {
1972 /* Critical code sections (such as the garbage collector) aren't
1973 * supposed to add heap segments.
1974 */
1975 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
1976 abort ();
1977 }
0f2d19dd 1978
b6efc951
DH
1979 if (scm_n_heap_segs == heap_segment_table_size)
1980 {
1981 /* We have to expand the heap segment table to have room for the new
1982 * segment. Do not yet increment scm_n_heap_segs -- that is done by
1983 * init_heap_seg only if the allocation of the segment itself succeeds.
1984 */
1985 unsigned int new_table_size = scm_n_heap_segs + 1;
1986 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
1987 scm_heap_seg_data_t * new_heap_table;
1988
1989 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
1990 realloc ((char *)scm_heap_table, size)));
1991 if (!new_heap_table)
1992 {
1993 if (error_policy == abort_on_error)
1994 {
1995 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
1996 abort ();
1997 }
1998 else
1999 {
2000 return;
2001 }
2002 }
2003 else
2004 {
2005 scm_heap_table = new_heap_table;
2006 heap_segment_table_size = new_table_size;
2007 }
2008 }
0f2d19dd
JB
2009
2010
2011 /* Pick a size for the new heap segment.
a00c95d9 2012 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
2013 * gc.h
2014 */
4c48ba06 2015 {
1811ebce
MD
 2016 /* Ensure that the new segment is predicted to be large enough.
2017 *
2018 * New yield should at least equal GC fraction of new heap size, i.e.
2019 *
2020 * y + dh > f * (h + dh)
2021 *
2022 * y : yield
8fef55a8 2023 * f : min yield fraction
1811ebce
MD
2024 * h : heap size
2025 * dh : size of new heap segment
2026 *
2027 * This gives dh > (f * h - y) / (1 - f)
bda1446c 2028 */
8fef55a8 2029 int f = freelist->min_yield_fraction;
1811ebce
MD
2030 long h = SCM_HEAP_SIZE;
2031 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2032 len = SCM_EXPHEAP (freelist->heap_size);
2033#ifdef DEBUGINFO
2034 fprintf (stderr, "(%d < %d)", len, min_cells);
2035#endif
2036 if (len < min_cells)
1811ebce 2037 len = min_cells + freelist->cluster_size;
4c48ba06 2038 len *= sizeof (scm_cell);
1811ebce
MD
2039 /* force new sampling */
2040 freelist->collected = LONG_MAX;
4c48ba06 2041 }
a00c95d9 2042
4c48ba06
MD
2043 if (len > scm_max_segment_size)
2044 len = scm_max_segment_size;
0f2d19dd
JB
2045
2046 {
2047 scm_sizet smallest;
2048
a00c95d9 2049 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9 2050
0f2d19dd 2051 if (len < smallest)
a00c95d9 2052 len = smallest;
0f2d19dd
JB
2053
2054 /* Allocate with decaying ambition. */
2055 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2056 && (len >= smallest))
2057 {
1811ebce 2058 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2059 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2060 if (ptr)
2061 {
a00c95d9 2062 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2063 return;
2064 }
2065 len /= 2;
2066 }
2067 }
2068
b6efc951
DH
2069 if (error_policy == abort_on_error)
2070 {
2071 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2072 abort ();
2073 }
0f2d19dd 2074}
acf4331f 2075#undef FUNC_NAME
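/* A worked example of the sizing rule used above (the numbers are
 * illustrative): with a min yield fraction f = 40 (i.e. 40%), a heap
 * of h = 100000 cells and a gc yield of y = 20000 cells, the formula
 * dh > (f*h - 100*y) / (100 - f) asks for more than
 * (40*100000 - 100*20000) / 60 = 33333 new cells.  The code divides
 * by 99 - f rather than 100 - f, which errs slightly on the large
 * side, and if SCM_EXPHEAP suggests a smaller segment than this
 * minimum, min_cells plus one cluster is used instead.
 */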
0f2d19dd
JB
2076
2077
a00c95d9 2078SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
1bbd0b84 2079 (SCM name),
b380b885 2080 "")
1bbd0b84 2081#define FUNC_NAME s_scm_unhash_name
0f2d19dd
JB
2082{
2083 int x;
2084 int bound;
3b3b36dd 2085 SCM_VALIDATE_SYMBOL (1,name);
0f2d19dd
JB
2086 SCM_DEFER_INTS;
2087 bound = scm_n_heap_segs;
2088 for (x = 0; x < bound; ++x)
2089 {
2090 SCM_CELLPTR p;
2091 SCM_CELLPTR pbound;
195e6201
DH
2092 p = scm_heap_table[x].bounds[0];
2093 pbound = scm_heap_table[x].bounds[1];
0f2d19dd
JB
2094 while (p < pbound)
2095 {
c8045e8d
DH
2096 SCM cell = PTR2SCM (p);
2097 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
0f2d19dd 2098 {
c8045e8d
DH
2099 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2100 * struct cell. See the corresponding comment in scm_gc_mark.
2101 */
2102 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2103 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2104 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
9a09deb1 2105 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
c8045e8d 2106 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
0f2d19dd 2107 {
c8045e8d 2108 SCM_SET_CELL_OBJECT_0 (cell, name);
0f2d19dd
JB
2109 }
2110 }
2111 ++p;
2112 }
2113 }
2114 SCM_ALLOW_INTS;
2115 return name;
2116}
1bbd0b84 2117#undef FUNC_NAME
0f2d19dd
JB
2118
2119
2120\f
2121/* {GC Protection Helper Functions}
2122 */
2123
2124
0f2d19dd 2125void
6e8d25a6
GB
2126scm_remember (SCM *ptr)
2127{ /* empty */ }
0f2d19dd 2128
1cc91f1b 2129
c209c88e 2130/*
41b0806d
GB
 2131 These crazy functions prevent garbage collection
 2132 of arguments after the first argument: because those
 2133 arguments are still used in the last line of the caller's
 2134 code block, they remain live throughout the function,
 2135 where the conservative stack scan can find them.
2136 It'd be better to have a nice compiler hint to
2137 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2138SCM
2139scm_return_first (SCM elt, ...)
0f2d19dd
JB
2140{
2141 return elt;
2142}
2143
41b0806d
GB
2144int
2145scm_return_first_int (int i, ...)
2146{
2147 return i;
2148}
2149
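/* An illustrative sketch (not part of the original file): a caller
 * that has extracted a raw C pointer from a Scheme object can keep
 * that object alive across a possibly allocating call by returning
 * through scm_return_first.  The function my_primitive is made up;
 * SCM_CHARS and scm_makfrom0str are assumed to be the usual string
 * accessor and constructor:
 *
 *   SCM
 *   my_primitive (SCM str)
 *   {
 *     char *c_str = SCM_CHARS (str);
 *     SCM result = scm_makfrom0str (c_str);
 *     return scm_return_first (result, str);
 *   }
 */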
0f2d19dd 2150
0f2d19dd 2151SCM
6e8d25a6 2152scm_permanent_object (SCM obj)
0f2d19dd
JB
2153{
2154 SCM_REDEFER_INTS;
2155 scm_permobjs = scm_cons (obj, scm_permobjs);
2156 SCM_REALLOW_INTS;
2157 return obj;
2158}
2159
2160
7bd4fbe2
MD
2161/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2162 other references are dropped, until the object is unprotected by calling
2163 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
 2164 i.e., it is possible to protect the same object several times, but it is
2165 necessary to unprotect the object the same number of times to actually get
2166 the object unprotected. It is an error to unprotect an object more often
2167 than it has been protected before. The function scm_protect_object returns
2168 OBJ.
2169*/
2170
2171/* Implementation note: For every object X, there is a counter which
2172 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2173*/
686765af 2174
ef290276 2175SCM
6e8d25a6 2176scm_protect_object (SCM obj)
ef290276 2177{
686765af
ML
2178 SCM handle;
2179
2180 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2181 SCM_REDEFER_INTS;
686765af 2182
0f0f0899
MD
2183 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2184 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
686765af 2185
2dd6a83a 2186 SCM_REALLOW_INTS;
686765af 2187
ef290276
JB
2188 return obj;
2189}
2190
2191
2192/* Remove any protection for OBJ established by a prior call to
dab7f566 2193 scm_protect_object. This function returns OBJ.
ef290276 2194
dab7f566 2195 See scm_protect_object for more information. */
ef290276 2196SCM
6e8d25a6 2197scm_unprotect_object (SCM obj)
ef290276 2198{
686765af
ML
2199 SCM handle;
2200
2201 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2202 SCM_REDEFER_INTS;
686765af
ML
2203
2204 handle = scm_hashq_get_handle (scm_protects, obj);
0f0f0899
MD
2205
2206 if (SCM_IMP (handle))
686765af 2207 {
0f0f0899
MD
2208 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2209 abort ();
686765af 2210 }
6a199940
DH
2211 else
2212 {
2213 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2214 if (count == 0)
2215 scm_hashq_remove_x (scm_protects, obj);
2216 else
2217 SCM_SETCDR (handle, SCM_MAKINUM (count));
2218 }
686765af 2219
2dd6a83a 2220 SCM_REALLOW_INTS;
ef290276
JB
2221
2222 return obj;
2223}
2224
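/* An illustrative sketch (not part of the original file) of the
 * nesting behaviour: the count kept in scm_protects goes 1, 2, 1, 0,
 * and the pair becomes collectable again only after the final
 * unprotect:
 *
 *   SCM obj = scm_cons (SCM_MAKINUM (1), SCM_EOL);
 *   scm_protect_object (obj);
 *   scm_protect_object (obj);
 *   scm_unprotect_object (obj);
 *   scm_unprotect_object (obj);
 */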
c45acc34
JB
2225int terminating;
2226
2227/* called on process termination. */
e52ceaac
MD
2228#ifdef HAVE_ATEXIT
2229static void
2230cleanup (void)
2231#else
2232#ifdef HAVE_ON_EXIT
51157deb
MD
2233extern int on_exit (void (*procp) (), int arg);
2234
e52ceaac
MD
2235static void
2236cleanup (int status, void *arg)
2237#else
 2238#error Dont know how to set up a cleanup handler on your system.
2239#endif
2240#endif
c45acc34
JB
2241{
2242 terminating = 1;
2243 scm_flush_all_ports ();
2244}
ef290276 2245
0f2d19dd 2246\f
acb0a19c 2247static int
4c48ba06 2248make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
acb0a19c 2249{
a00c95d9
ML
2250 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2251 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2252 rounded_size,
4c48ba06 2253 freelist))
acb0a19c 2254 {
a00c95d9
ML
2255 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2256 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2257 rounded_size,
4c48ba06 2258 freelist))
acb0a19c
MD
2259 return 1;
2260 }
2261 else
2262 scm_expmem = 1;
2263
8fef55a8
MD
2264 if (freelist->min_yield_fraction)
2265 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
b37fe1c5 2266 / 100);
8fef55a8 2267 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
a00c95d9 2268
acb0a19c
MD
2269 return 0;
2270}
2271
2272\f
4c48ba06
MD
2273static void
2274init_freelist (scm_freelist_t *freelist,
2275 int span,
2276 int cluster_size,
8fef55a8 2277 int min_yield)
4c48ba06
MD
2278{
2279 freelist->clusters = SCM_EOL;
2280 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2281 freelist->left_to_collect = 0;
2282 freelist->clusters_allocated = 0;
8fef55a8
MD
2283 freelist->min_yield = 0;
2284 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2285 freelist->span = span;
2286 freelist->collected = 0;
1811ebce 2287 freelist->collected_1 = 0;
4c48ba06
MD
2288 freelist->heap_size = 0;
2289}
2290
4a4c9785 2291int
4c48ba06
MD
2292scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2293 scm_sizet init_heap_size_2, int gc_trigger_2,
2294 scm_sizet max_segment_size)
0f2d19dd
JB
2295{
2296 scm_sizet j;
2297
4c48ba06 2298 if (!init_heap_size_1)
aeacfc8f 2299 init_heap_size_1 = scm_default_init_heap_size_1;
4c48ba06 2300 if (!init_heap_size_2)
aeacfc8f 2301 init_heap_size_2 = scm_default_init_heap_size_2;
4c48ba06 2302
0f2d19dd
JB
2303 j = SCM_NUM_PROTECTS;
2304 while (j)
2305 scm_sys_protects[--j] = SCM_BOOL_F;
2306 scm_block_gc = 1;
4a4c9785 2307
4a4c9785 2308 scm_freelist = SCM_EOL;
4c48ba06
MD
2309 scm_freelist2 = SCM_EOL;
2310 init_freelist (&scm_master_freelist,
2311 1, SCM_CLUSTER_SIZE_1,
aeacfc8f 2312 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
4c48ba06
MD
2313 init_freelist (&scm_master_freelist2,
2314 2, SCM_CLUSTER_SIZE_2,
aeacfc8f 2315 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
4c48ba06 2316 scm_max_segment_size
aeacfc8f 2317 = max_segment_size ? max_segment_size : scm_default_max_segment_size;
4a4c9785 2318
0f2d19dd
JB
2319 scm_expmem = 0;
2320
2321 j = SCM_HEAP_SEG_SIZE;
2322 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
a00c95d9
ML
2323 scm_heap_table = ((scm_heap_seg_data_t *)
2324 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
b6efc951 2325 heap_segment_table_size = 2;
acb0a19c 2326
4c48ba06
MD
2327 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2328 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
4a4c9785 2329 return 1;
acb0a19c 2330
801cb5e7 2331 /* scm_hplims[0] can change. do not remove scm_heap_org */
a00c95d9 2332 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
acb0a19c 2333
801cb5e7
MD
2334 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2335 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2336 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2337 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2338 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
0f2d19dd
JB
2339
2340 /* Initialise the list of ports. */
840ae05d
JB
2341 scm_port_table = (scm_port **)
2342 malloc (sizeof (scm_port *) * scm_port_table_room);
0f2d19dd
JB
2343 if (!scm_port_table)
2344 return 1;
2345
a18bcd0e 2346#ifdef HAVE_ATEXIT
c45acc34 2347 atexit (cleanup);
e52ceaac
MD
2348#else
2349#ifdef HAVE_ON_EXIT
2350 on_exit (cleanup, 0);
2351#endif
a18bcd0e 2352#endif
0f2d19dd
JB
2353
2354 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
24e68a57 2355 SCM_SETCDR (scm_undefineds, scm_undefineds);
0f2d19dd
JB
2356
2357 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2358 scm_nullstr = scm_makstr (0L, 0);
a8741caa 2359 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
54778cd3
DH
2360 scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2361 scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
2362 scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
8960e0a0 2363 scm_stand_in_procs = SCM_EOL;
0f2d19dd 2364 scm_permobjs = SCM_EOL;
686765af 2365 scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
54778cd3
DH
2366 scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2367 scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
0f2d19dd
JB
2368#ifdef SCM_BIGDIG
2369 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2370#endif
2371 return 0;
2372}
939794ce 2373
0f2d19dd
JB
2374\f
2375
939794ce
DH
2376SCM scm_after_gc_hook;
2377
2378#if (SCM_DEBUG_DEPRECATED == 0)
2379static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2380#endif /* SCM_DEBUG_DEPRECATED == 0 */
2381static SCM gc_async;
2382
2383
2384/* The function gc_async_thunk causes the execution of the after-gc-hook. It
2385 * is run after the gc, as soon as the asynchronous events are handled by the
2386 * evaluator.
2387 */
2388static SCM
2389gc_async_thunk (void)
2390{
2391 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2392
2393#if (SCM_DEBUG_DEPRECATED == 0)
2394
2395 /* The following code will be removed in Guile 1.5. */
2396 if (SCM_NFALSEP (scm_gc_vcell))
2397 {
2398 SCM proc = SCM_CDR (scm_gc_vcell);
2399
2400 if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
2401 scm_apply (proc, SCM_EOL, SCM_EOL);
2402 }
2403
2404#endif /* SCM_DEBUG_DEPRECATED == 0 */
2405
2406 return SCM_UNSPECIFIED;
2407}
2408
2409
2410/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2411 * the garbage collection. The only purpose of this function is to mark the
2412 * gc_async (which will eventually lead to the execution of the
2413 * gc_async_thunk).
2414 */
2415static void *
2416mark_gc_async (void * hook_data, void *func_data, void *data)
2417{
2418 scm_system_async_mark (gc_async);
2419 return NULL;
2420}
2421
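/* An illustrative sketch (not part of the original file): other C
 * code can attach its own function to the same hook in the same way,
 * for instance to log collections.  The function note_gc and the
 * message are made up:
 *
 *   static void *
 *   note_gc (void *hook_data, void *func_data, void *data)
 *   {
 *     fprintf (stderr, "gc finished\n");
 *     return NULL;
 *   }
 *
 *   scm_c_hook_add (&scm_after_gc_c_hook, note_gc, NULL, 0);
 */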
2422
0f2d19dd
JB
2423void
2424scm_init_gc ()
0f2d19dd 2425{
939794ce
DH
2426 SCM after_gc_thunk;
2427
801cb5e7 2428 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
939794ce
DH
2429
2430#if (SCM_DEBUG_DEPRECATED == 0)
2431 scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
2432#endif /* SCM_DEBUG_DEPRECATED == 0 */
2433 /* Dirk:FIXME:: We don't really want a binding here. */
2434 after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
2435 gc_async = scm_system_async (after_gc_thunk);
2436
2437 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2438
a0599745 2439#include "libguile/gc.x"
0f2d19dd 2440}
89e00824
ML
2441
2442/*
2443 Local Variables:
2444 c-file-style: "gnu"
2445 End:
2446*/