* Removed commented code from gc.c.
libguile/gc.c
/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING. If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE. If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way. To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice. */

/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
   gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */

/* #define DEBUGINFO */

\f
#include <stdio.h>
#include "libguile/_scm.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"

#include "libguile/validate.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f
/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation. The heap
 * is the area out of which scm_cons, and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine. The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
 * allocated initially the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more heap
 * is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc. If less than this is
 * reclaimed, the trigger threshold is raised. [I don't know what a
 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * Heap size 45000 and 40% min yield gives quick startup and no extra
 * heap allocation. Having higher values on min yield may lead to
 * large heaps, especially if code behaviour is varying its
 * maximum consumption between different freelists.
 */
int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell));
int scm_default_min_yield_1 = 40;
#define SCM_CLUSTER_SIZE_1 2000L

int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell));
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application
 */
int scm_default_min_yield_2 = 40;
#define SCM_CLUSTER_SIZE_2 1000L

int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
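
/* For illustration: scm_must_malloc below raises the trigger as follows.
 * With SCM_INIT_MALLOC_LIMIT = 100000 the hysteresis is 10000 bytes.
 * Suppose scm_mtrigger is 100000 and, after the GC that scm_must_malloc
 * runs, the new total nm of malloced bytes is 95000.  Then nm exceeds
 * scm_mtrigger - SCM_MTRIGGER_HYSTERESIS (90000) but not scm_mtrigger
 * itself, so the trigger is raised to scm_mtrigger + scm_mtrigger / 2 =
 * 150000; had nm exceeded scm_mtrigger, it would instead become
 * nm + nm / 2. */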

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
   bounds for allocated storage */

#ifdef PROT386
/*in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */

#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))


\f
/* scm_freelists
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;

SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};

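/* For illustration: with SCM_CLUSTER_SIZE_1 == 2000 and span 1, each
 * cluster of the first master list holds 2000 cells, one of which is the
 * spine cell that links clusters together, so 1999 cells are available
 * for allocation; master_cells_allocated() below accordingly counts
 * clusters_allocated * (cluster_size - 1) cells per master list. */
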
/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;


/* scm_gc_heap_lock
 * If set, don't expand the heap. Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */

int scm_block_gc = 1;

/* If fewer than MIN_GC_YIELD cells are recovered during a garbage
 * collection (GC) more space is allocated for the heap.
 */
#define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_rt;
unsigned long scm_gc_time_taken = 0;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");

typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist. */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;



static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
static void alloc_some_heap (scm_freelist_t *);


\f
cf2d30f6
JB
294/* Debugging functions. */
295
bb2c57fa 296#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6
JB
297
298/* Return the number of the heap segment containing CELL. */
299static int
300which_seg (SCM cell)
301{
302 int i;
303
304 for (i = 0; i < scm_n_heap_segs; i++)
195e6201
DH
305 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
306 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
cf2d30f6
JB
307 return i;
308 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
945fec60 309 SCM_UNPACK (cell));
cf2d30f6
JB
310 abort ();
311}
312
313
8ded62a3
MD
314static void
315map_free_list (scm_freelist_t *master, SCM freelist)
316{
317 int last_seg = -1, count = 0;
318 SCM f;
a00c95d9 319
8ded62a3
MD
320 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
321 {
322 int this_seg = which_seg (f);
323
324 if (this_seg != last_seg)
325 {
326 if (last_seg != -1)
327 fprintf (stderr, " %5d %d-cells in segment %d\n",
328 count, master->span, last_seg);
329 last_seg = this_seg;
330 count = 0;
331 }
332 count++;
333 }
334 if (last_seg != -1)
335 fprintf (stderr, " %5d %d-cells in segment %d\n",
336 count, master->span, last_seg);
337}
cf2d30f6 338
a00c95d9 339SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c
MD
340 (),
341 "Print debugging information about the free-list.\n"
5384bc5b 342 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
acb0a19c
MD
343#define FUNC_NAME s_scm_map_free_list
344{
4c48ba06
MD
345 int i;
346 fprintf (stderr, "%d segments total (%d:%d",
347 scm_n_heap_segs,
348 scm_heap_table[0].span,
349 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
350 for (i = 1; i < scm_n_heap_segs; i++)
351 fprintf (stderr, ", %d:%d",
352 scm_heap_table[i].span,
353 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
354 fprintf (stderr, ")\n");
8ded62a3
MD
355 map_free_list (&scm_master_freelist, scm_freelist);
356 map_free_list (&scm_master_freelist2, scm_freelist2);
cf2d30f6
JB
357 fflush (stderr);
358
359 return SCM_UNSPECIFIED;
360}
1bbd0b84 361#undef FUNC_NAME
cf2d30f6 362
4c48ba06
MD
363static int last_cluster;
364static int last_size;
365
5384bc5b
MD
366static int
367free_list_length (char *title, int i, SCM freelist)
368{
369 SCM ls;
370 int n = 0;
371 for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
206d3de3 372 if (SCM_CELL_TYPE (ls) == scm_tc_free_cell)
5384bc5b
MD
373 ++n;
374 else
375 {
376 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
377 abort ();
378 }
4c48ba06
MD
379 if (n != last_size)
380 {
381 if (i > 0)
382 {
383 if (last_cluster == i - 1)
384 fprintf (stderr, "\t%d\n", last_size);
385 else
386 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
387 }
388 if (i >= 0)
389 fprintf (stderr, "%s %d", title, i);
390 else
391 fprintf (stderr, "%s\t%d\n", title, n);
392 last_cluster = i;
393 last_size = n;
394 }
5384bc5b
MD
395 return n;
396}
397
398static void
399free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
400{
401 SCM clusters;
4c48ba06 402 int i = 0, len, n = 0;
5384bc5b
MD
403 fprintf (stderr, "%s\n\n", title);
404 n += free_list_length ("free list", -1, freelist);
405 for (clusters = master->clusters;
406 SCM_NNULLP (clusters);
407 clusters = SCM_CDR (clusters))
4c48ba06
MD
408 {
409 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
410 n += len;
411 }
412 if (last_cluster == i - 1)
413 fprintf (stderr, "\t%d\n", last_size);
414 else
415 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
416 fprintf (stderr, "\ntotal %d objects\n\n", n);
5384bc5b
MD
417}
418
a00c95d9 419SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b
MD
420 (),
421 "Print debugging information about the free-list.\n"
422 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
423#define FUNC_NAME s_scm_free_list_length
424{
b37fe1c5
MD
425 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
426 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 427 return SCM_UNSPECIFIED;
5384bc5b
MD
428}
429#undef FUNC_NAME
430
bb2c57fa
MD
431#endif
432
433#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
434
435/* Number of calls to SCM_NEWCELL since startup. */
436static unsigned long scm_newcell_count;
acb0a19c 437static unsigned long scm_newcell2_count;
cf2d30f6
JB
438
439/* Search freelist for anything that isn't marked as a free cell.
440 Abort if we find something. */
8ded62a3
MD
441static void
442scm_check_freelist (SCM freelist)
443{
444 SCM f;
445 int i = 0;
446
447 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
448 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
449 {
450 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
451 scm_newcell_count, i);
452 fflush (stderr);
453 abort ();
454 }
455}
cf2d30f6
JB
456
457static int scm_debug_check_freelist = 0;
25748c78 458
a00c95d9 459SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 460 (SCM flag),
da4a1dba
GB
461 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
462 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
463 "compile-time flag was selected.\n")
1bbd0b84 464#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 465{
945fec60 466 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
467 return SCM_UNSPECIFIED;
468}
1bbd0b84 469#undef FUNC_NAME
25748c78
GB
470
471
4a4c9785
MD
472SCM
473scm_debug_newcell (void)
474{
475 SCM new;
476
477 scm_newcell_count++;
478 if (scm_debug_check_freelist)
479 {
8ded62a3 480 scm_check_freelist (scm_freelist);
4a4c9785
MD
481 scm_gc();
482 }
483
484 /* The rest of this is supposed to be identical to the SCM_NEWCELL
485 macro. */
486 if (SCM_IMP (scm_freelist))
487 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
488 else
489 {
490 new = scm_freelist;
491 scm_freelist = SCM_CDR (scm_freelist);
492 SCM_SETCAR (new, scm_tc16_allocated);
493 }
494
495 return new;
496}
497
498SCM
499scm_debug_newcell2 (void)
500{
501 SCM new;
502
503 scm_newcell2_count++;
504 if (scm_debug_check_freelist)
505 {
8ded62a3 506 scm_check_freelist (scm_freelist2);
4a4c9785
MD
507 scm_gc ();
508 }
509
510 /* The rest of this is supposed to be identical to the SCM_NEWCELL
511 macro. */
512 if (SCM_IMP (scm_freelist2))
513 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
514 else
515 {
516 new = scm_freelist2;
517 scm_freelist2 = SCM_CDR (scm_freelist2);
518 SCM_SETCAR (new, scm_tc16_allocated);
519 }
520
521 return new;
522}
523
fca7547b 524#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
525
526\f
0f2d19dd 527
b37fe1c5
MD
528static unsigned long
529master_cells_allocated (scm_freelist_t *master)
530{
531 int objects = master->clusters_allocated * (master->cluster_size - 1);
532 if (SCM_NULLP (master->clusters))
533 objects -= master->left_to_collect;
534 return master->span * objects;
535}
536
537static unsigned long
538freelist_length (SCM freelist)
539{
540 int n;
541 for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
542 ++n;
543 return n;
544}
545
546static unsigned long
547compute_cells_allocated ()
548{
549 return (scm_cells_allocated
550 + master_cells_allocated (&scm_master_freelist)
551 + master_cells_allocated (&scm_master_freelist2)
552 - scm_master_freelist.span * freelist_length (scm_freelist)
553 - scm_master_freelist2.span * freelist_length (scm_freelist2));
554}
b37fe1c5 555
0f2d19dd
JB
556/* {Scheme Interface to GC}
557 */
558
a00c95d9 559SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 560 (),
b380b885 561 "Returns an association list of statistics about Guile's current use of storage. ")
1bbd0b84 562#define FUNC_NAME s_scm_gc_stats
0f2d19dd
JB
563{
564 int i;
565 int n;
566 SCM heap_segs;
c209c88e
GB
567 long int local_scm_mtrigger;
568 long int local_scm_mallocated;
569 long int local_scm_heap_size;
570 long int local_scm_cells_allocated;
571 long int local_scm_gc_time_taken;
0f2d19dd
JB
572 SCM answer;
573
574 SCM_DEFER_INTS;
575 scm_block_gc = 1;
576 retry:
577 heap_segs = SCM_EOL;
578 n = scm_n_heap_segs;
579 for (i = scm_n_heap_segs; i--; )
580 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
581 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
582 heap_segs);
583 if (scm_n_heap_segs != n)
584 goto retry;
585 scm_block_gc = 0;
586
7febb4a2
MD
587 /* Below, we cons to produce the resulting list. We want a snapshot of
588 * the heap situation before consing.
589 */
0f2d19dd
JB
590 local_scm_mtrigger = scm_mtrigger;
591 local_scm_mallocated = scm_mallocated;
b37fe1c5 592 local_scm_heap_size = SCM_HEAP_SIZE;
b37fe1c5 593 local_scm_cells_allocated = compute_cells_allocated ();
0f2d19dd
JB
594 local_scm_gc_time_taken = scm_gc_time_taken;
595
596 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
597 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
598 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
599 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
600 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
601 scm_cons (sym_heap_segments, heap_segs),
602 SCM_UNDEFINED);
603 SCM_ALLOW_INTS;
604 return answer;
605}
1bbd0b84 606#undef FUNC_NAME
0f2d19dd
JB
607
608
a00c95d9 609void
6e8d25a6 610scm_gc_start (const char *what)
0f2d19dd
JB
611{
612 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
b37fe1c5 613 scm_gc_cells_collected = 0;
37ddcaf6 614 scm_gc_yield_1 = scm_gc_yield;
8b0d194f
MD
615 scm_gc_yield = (scm_cells_allocated
616 + master_cells_allocated (&scm_master_freelist)
617 + master_cells_allocated (&scm_master_freelist2));
0f2d19dd
JB
618 scm_gc_malloc_collected = 0;
619 scm_gc_ports_collected = 0;
620}
621
a00c95d9 622void
0f2d19dd 623scm_gc_end ()
0f2d19dd
JB
624{
625 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
c209c88e 626 scm_gc_time_taken += scm_gc_rt;
9ea54cc6 627 scm_system_async_mark (scm_gc_async);
0f2d19dd
JB
628}
629
630
a00c95d9 631SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 632 (SCM obj),
b380b885
MD
633 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
634 "returned by this function for @var{obj}")
1bbd0b84 635#define FUNC_NAME s_scm_object_address
0f2d19dd 636{
54778cd3 637 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 638}
1bbd0b84 639#undef FUNC_NAME
0f2d19dd
JB
640
641
a00c95d9 642SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
1bbd0b84 643 (),
b380b885
MD
644 "Scans all of SCM objects and reclaims for further use those that are\n"
645 "no longer accessible.")
1bbd0b84 646#define FUNC_NAME s_scm_gc
0f2d19dd
JB
647{
648 SCM_DEFER_INTS;
649 scm_igc ("call");
650 SCM_ALLOW_INTS;
651 return SCM_UNSPECIFIED;
652}
1bbd0b84 653#undef FUNC_NAME
0f2d19dd
JB
654
655
656\f
/* {C Interface For When GC is Triggered}
 */

static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size. Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GC:s.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
               scm_cells_allocated,
               delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}
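
/* For illustration: if SCM_HEAP_SIZE is 500000 cells and
 * min_yield_fraction is 40, the target total yield is 200000 cells.
 * Should the larger of the last two total yields be only 150000 cells,
 * delta is 50000 and min_yield rises by that amount.  A freelist whose
 * collected count then falls below its min_yield sets grow_heap_p in
 * gc_sweep_freelist_finish, and the next scm_gc_for_newcell grows the
 * heap via alloc_some_heap instead of collecting again. */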
691
4a4c9785 692/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
693 * common while the freelist will be individual for each thread.
694 */
4a4c9785
MD
695
696SCM
697scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
698{
699 SCM cell;
700 ++scm_ints_disabled;
4c48ba06
MD
701 do
702 {
703 if (SCM_NULLP (master->clusters))
704 {
705 if (master->grow_heap_p)
706 {
707 master->grow_heap_p = 0;
708 alloc_some_heap (master);
709 }
710 else
b37fe1c5 711 {
37ddcaf6
MD
712#ifdef DEBUGINFO
713 fprintf (stderr, "allocated = %d, ",
714 scm_cells_allocated
715 + master_cells_allocated (&scm_master_freelist)
716 + master_cells_allocated (&scm_master_freelist2));
717#endif
b37fe1c5 718 scm_igc ("cells");
8fef55a8 719 adjust_min_yield (master);
b37fe1c5 720 }
4c48ba06
MD
721 }
722 cell = SCM_CAR (master->clusters);
723 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 724 ++master->clusters_allocated;
4c48ba06
MD
725 }
726 while (SCM_NULLP (cell));
4a4c9785 727 --scm_ints_disabled;
4a4c9785 728 *freelist = SCM_CDR (cell);
54778cd3 729 SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
4a4c9785
MD
730 return cell;
731}
732
4c48ba06
MD
733#if 0
734/* This is a support routine which can be used to reserve a cluster
735 * for some special use, such as debugging. It won't be useful until
736 * free cells are preserved between garbage collections.
737 */
738
739void
740scm_alloc_cluster (scm_freelist_t *master)
741{
742 SCM freelist, cell;
743 cell = scm_gc_for_newcell (master, &freelist);
744 SCM_SETCDR (cell, freelist);
745 return cell;
746}
747#endif
748
801cb5e7
MD
749SCM scm_after_gc_hook;
750
751scm_c_hook_t scm_before_gc_c_hook;
752scm_c_hook_t scm_before_mark_c_hook;
753scm_c_hook_t scm_before_sweep_c_hook;
754scm_c_hook_t scm_after_sweep_c_hook;
755scm_c_hook_t scm_after_gc_c_hook;
756
0f2d19dd 757void
1bbd0b84 758scm_igc (const char *what)
0f2d19dd
JB
759{
760 int j;
761
801cb5e7 762 scm_c_hook_run (&scm_before_gc_c_hook, 0);
4c48ba06
MD
763#ifdef DEBUGINFO
764 fprintf (stderr,
765 SCM_NULLP (scm_freelist)
766 ? "*"
767 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
768#endif
42db06f0
MD
769#ifdef USE_THREADS
770 /* During the critical section, only the current thread may run. */
771 SCM_THREAD_CRITICAL_SECTION_START;
772#endif
773
e242dfd2 774 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 775
ab4bef85
JB
776 scm_gc_start (what);
777
778 if (!scm_stack_base || scm_block_gc)
779 {
780 scm_gc_end ();
781 return;
782 }
783
a5c314c8
JB
784 if (scm_mallocated < 0)
785 /* The byte count of allocated objects has underflowed. This is
786 probably because you forgot to report the sizes of objects you
787 have allocated, by calling scm_done_malloc or some such. When
788 the GC freed them, it subtracted their size from
789 scm_mallocated, which underflowed. */
790 abort ();
c45acc34 791
ab4bef85
JB
792 if (scm_gc_heap_lock)
793 /* We've invoked the collector while a GC is already in progress.
794 That should never happen. */
795 abort ();
0f2d19dd
JB
796
797 ++scm_gc_heap_lock;
ab4bef85 798
0f2d19dd
JB
799 /* flush dead entries from the continuation stack */
800 {
801 int x;
802 int bound;
803 SCM * elts;
804 elts = SCM_VELTS (scm_continuation_stack);
805 bound = SCM_LENGTH (scm_continuation_stack);
806 x = SCM_INUM (scm_continuation_stack_ptr);
807 while (x < bound)
808 {
809 elts[x] = SCM_BOOL_F;
810 ++x;
811 }
812 }
813
801cb5e7
MD
814 scm_c_hook_run (&scm_before_mark_c_hook, 0);
815
42db06f0 816#ifndef USE_THREADS
a00c95d9 817
0f2d19dd
JB
818 /* Protect from the C stack. This must be the first marking
819 * done because it provides information about what objects
820 * are "in-use" by the C code. "in-use" objects are those
821 * for which the values from SCM_LENGTH and SCM_CHARS must remain
822 * usable. This requirement is stricter than a liveness
823 * requirement -- in particular, it constrains the implementation
824 * of scm_vector_set_length_x.
825 */
826 SCM_FLUSH_REGISTER_WINDOWS;
827 /* This assumes that all registers are saved into the jmp_buf */
828 setjmp (scm_save_regs_gc_mark);
829 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
ce4a361d
JB
830 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
831 sizeof scm_save_regs_gc_mark)
832 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
833
834 {
6ba93e5e 835 scm_sizet stack_len = scm_stack_size (scm_stack_base);
0f2d19dd 836#ifdef SCM_STACK_GROWS_UP
6ba93e5e 837 scm_mark_locations (scm_stack_base, stack_len);
0f2d19dd 838#else
6ba93e5e 839 scm_mark_locations (scm_stack_base - stack_len, stack_len);
0f2d19dd
JB
840#endif
841 }
842
42db06f0
MD
843#else /* USE_THREADS */
844
845 /* Mark every thread's stack and registers */
945fec60 846 scm_threads_mark_stacks ();
42db06f0
MD
847
848#endif /* USE_THREADS */
0f2d19dd
JB
849
850 /* FIXME: insert a phase to un-protect string-data preserved
851 * in scm_vector_set_length_x.
852 */
853
854 j = SCM_NUM_PROTECTS;
855 while (j--)
856 scm_gc_mark (scm_sys_protects[j]);
857
9de33deb
MD
858 /* FIXME: we should have a means to register C functions to be run
859 * in different phases of GC
a00c95d9 860 */
9de33deb 861 scm_mark_subr_table ();
a00c95d9 862
42db06f0
MD
863#ifndef USE_THREADS
864 scm_gc_mark (scm_root->handle);
865#endif
a00c95d9 866
801cb5e7 867 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
0493cd89 868
0f2d19dd
JB
869 scm_gc_sweep ();
870
801cb5e7
MD
871 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
872
0f2d19dd
JB
873 --scm_gc_heap_lock;
874 scm_gc_end ();
42db06f0
MD
875
876#ifdef USE_THREADS
877 SCM_THREAD_CRITICAL_SECTION_END;
878#endif
801cb5e7 879 scm_c_hook_run (&scm_after_gc_c_hook, 0);
0f2d19dd
JB
880}
881
882\f
a00c95d9 883/* {Mark/Sweep}
0f2d19dd
JB
884 */
885
886
887
888/* Mark an object precisely.
889 */
a00c95d9 890void
1bbd0b84 891scm_gc_mark (SCM p)
0f2d19dd
JB
892{
893 register long i;
894 register SCM ptr;
895
896 ptr = p;
897
898gc_mark_loop:
899 if (SCM_IMP (ptr))
900 return;
901
902gc_mark_nimp:
903 if (SCM_NCELLP (ptr))
f8392303 904 scm_wta (ptr, "rogue pointer in heap", NULL);
0f2d19dd
JB
905
906 switch (SCM_TYP7 (ptr))
907 {
908 case scm_tcs_cons_nimcar:
909 if (SCM_GCMARKP (ptr))
910 break;
911 SCM_SETGCMARK (ptr);
912 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
913 {
914 ptr = SCM_CAR (ptr);
915 goto gc_mark_nimp;
916 }
917 scm_gc_mark (SCM_CAR (ptr));
918 ptr = SCM_GCCDR (ptr);
919 goto gc_mark_nimp;
920 case scm_tcs_cons_imcar:
acb0a19c
MD
921 if (SCM_GCMARKP (ptr))
922 break;
923 SCM_SETGCMARK (ptr);
924 ptr = SCM_GCCDR (ptr);
925 goto gc_mark_loop;
e641afaf 926 case scm_tc7_pws:
0f2d19dd
JB
927 if (SCM_GCMARKP (ptr))
928 break;
929 SCM_SETGCMARK (ptr);
54778cd3 930 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
0f2d19dd
JB
931 ptr = SCM_GCCDR (ptr);
932 goto gc_mark_loop;
933 case scm_tcs_cons_gloc:
934 if (SCM_GCMARKP (ptr))
935 break;
936 SCM_SETGCMARK (ptr);
937 {
c8045e8d
DH
938 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
939 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
940 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
941 * pointer to a struct vtable data region. The fact that these are
 * accessed in the same way restricts the possibilities to change the
943 * data layout of structs or heap cells.
944 */
945 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
946 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
947 switch (vtable_data [scm_vtable_index_vcell])
0f2d19dd
JB
948 {
949 default:
c8045e8d
DH
950 {
951 /* ptr is a gloc */
952 SCM gloc_car = SCM_PACK (word0);
953 scm_gc_mark (gloc_car);
954 ptr = SCM_GCCDR (ptr);
955 goto gc_mark_loop;
956 }
0f2d19dd
JB
957 case 1: /* ! */
958 case 0: /* ! */
959 {
c8045e8d
DH
960 /* ptr is a struct */
961 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
962 int len = SCM_LENGTH (layout);
963 char * fields_desc = SCM_CHARS (layout);
14d1400f
JB
964 /* We're using SCM_GCCDR here like STRUCT_DATA, except
965 that it removes the mark */
c8045e8d 966 scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));
a00c95d9 967
c8045e8d 968 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
aa0761ec 969 {
c8045e8d
DH
970 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
971 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
aa0761ec 972 }
ad75306c
MD
973 if (len)
974 {
c8045e8d
DH
975 int x;
976
977 for (x = 0; x < len - 2; x += 2, ++struct_data)
ad75306c 978 if (fields_desc[x] == 'p')
c8045e8d 979 scm_gc_mark (SCM_PACK (*struct_data));
ad75306c
MD
980 if (fields_desc[x] == 'p')
981 {
982 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
c8045e8d
DH
983 for (x = *struct_data; x; --x)
984 scm_gc_mark (SCM_PACK (*++struct_data));
ad75306c 985 else
c8045e8d 986 scm_gc_mark (SCM_PACK (*struct_data));
ad75306c
MD
987 }
988 }
c8045e8d 989 if (vtable_data [scm_vtable_index_vcell] == 0)
0f2d19dd 990 {
c8045e8d
DH
991 vtable_data [scm_vtable_index_vcell] = 1;
992 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
0f2d19dd
JB
993 goto gc_mark_loop;
994 }
995 }
996 }
997 }
998 break;
999 case scm_tcs_closures:
1000 if (SCM_GCMARKP (ptr))
1001 break;
1002 SCM_SETGCMARK (ptr);
1003 if (SCM_IMP (SCM_CDR (ptr)))
1004 {
1005 ptr = SCM_CLOSCAR (ptr);
1006 goto gc_mark_nimp;
1007 }
1008 scm_gc_mark (SCM_CLOSCAR (ptr));
1009 ptr = SCM_GCCDR (ptr);
1010 goto gc_mark_nimp;
1011 case scm_tc7_vector:
1012 case scm_tc7_lvector:
1013#ifdef CCLO
1014 case scm_tc7_cclo:
1015#endif
1016 if (SCM_GC8MARKP (ptr))
1017 break;
1018 SCM_SETGC8MARK (ptr);
1019 i = SCM_LENGTH (ptr);
1020 if (i == 0)
1021 break;
1022 while (--i > 0)
1023 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1024 scm_gc_mark (SCM_VELTS (ptr)[i]);
1025 ptr = SCM_VELTS (ptr)[0];
1026 goto gc_mark_loop;
1027 case scm_tc7_contin:
1028 if SCM_GC8MARKP
1029 (ptr) break;
1030 SCM_SETGC8MARK (ptr);
c68296f8 1031 if (SCM_VELTS (ptr))
41b0806d 1032 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
c68296f8
MV
1033 (scm_sizet)
1034 (SCM_LENGTH (ptr) +
1035 (sizeof (SCM_STACKITEM) + -1 +
1036 sizeof (scm_contregs)) /
1037 sizeof (SCM_STACKITEM)));
0f2d19dd 1038 break;
afe5177e 1039#ifdef HAVE_ARRAYS
0f2d19dd
JB
1040 case scm_tc7_bvect:
1041 case scm_tc7_byvect:
1042 case scm_tc7_ivect:
1043 case scm_tc7_uvect:
1044 case scm_tc7_fvect:
1045 case scm_tc7_dvect:
1046 case scm_tc7_cvect:
1047 case scm_tc7_svect:
5c11cc9d 1048#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1049 case scm_tc7_llvect:
1050#endif
afe5177e 1051#endif
0f2d19dd 1052 case scm_tc7_string:
0f2d19dd
JB
1053 SCM_SETGC8MARK (ptr);
1054 break;
1055
1056 case scm_tc7_substring:
0f2d19dd
JB
1057 if (SCM_GC8MARKP(ptr))
1058 break;
1059 SCM_SETGC8MARK (ptr);
1060 ptr = SCM_CDR (ptr);
1061 goto gc_mark_loop;
1062
1063 case scm_tc7_wvect:
1064 if (SCM_GC8MARKP(ptr))
1065 break;
ab4bef85
JB
1066 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1067 scm_weak_vectors = ptr;
0f2d19dd
JB
1068 SCM_SETGC8MARK (ptr);
1069 if (SCM_IS_WHVEC_ANY (ptr))
1070 {
1071 int x;
1072 int len;
1073 int weak_keys;
1074 int weak_values;
1075
1076 len = SCM_LENGTH (ptr);
1077 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1078 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
a00c95d9 1079
0f2d19dd
JB
1080 for (x = 0; x < len; ++x)
1081 {
1082 SCM alist;
1083 alist = SCM_VELTS (ptr)[x];
46408039
JB
1084
1085 /* mark everything on the alist except the keys or
1086 * values, according to weak_values and weak_keys. */
0b5f3f34 1087 while ( SCM_CONSP (alist)
0f2d19dd 1088 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1089 && SCM_CONSP (SCM_CAR (alist)))
1090 {
1091 SCM kvpair;
1092 SCM next_alist;
1093
1094 kvpair = SCM_CAR (alist);
1095 next_alist = SCM_CDR (alist);
a00c95d9 1096 /*
0f2d19dd
JB
1097 * Do not do this:
1098 * SCM_SETGCMARK (alist);
1099 * SCM_SETGCMARK (kvpair);
1100 *
1101 * It may be that either the key or value is protected by
1102 * an escaped reference to part of the spine of this alist.
1103 * If we mark the spine here, and only mark one or neither of the
1104 * key and value, they may never be properly marked.
1105 * This leads to a horrible situation in which an alist containing
1106 * freelist cells is exported.
1107 *
1108 * So only mark the spines of these arrays last of all marking.
1109 * If somebody confuses us by constructing a weak vector
1110 * with a circular alist then we are hosed, but at least we
1111 * won't prematurely drop table entries.
1112 */
1113 if (!weak_keys)
1114 scm_gc_mark (SCM_CAR (kvpair));
1115 if (!weak_values)
1116 scm_gc_mark (SCM_GCCDR (kvpair));
1117 alist = next_alist;
1118 }
1119 if (SCM_NIMP (alist))
1120 scm_gc_mark (alist);
1121 }
1122 }
1123 break;
1124
1125 case scm_tc7_msymbol:
1126 if (SCM_GC8MARKP(ptr))
1127 break;
1128 SCM_SETGC8MARK (ptr);
1129 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1130 ptr = SCM_SYMBOL_PROPS (ptr);
1131 goto gc_mark_loop;
1132 case scm_tc7_ssymbol:
1133 if (SCM_GC8MARKP(ptr))
1134 break;
1135 SCM_SETGC8MARK (ptr);
1136 break;
1137 case scm_tcs_subrs:
9de33deb 1138 break;
0f2d19dd
JB
1139 case scm_tc7_port:
1140 i = SCM_PTOBNUM (ptr);
1141 if (!(i < scm_numptob))
1142 goto def;
1143 if (SCM_GC8MARKP (ptr))
1144 break;
dc53f026 1145 SCM_SETGC8MARK (ptr);
ebf7394e
GH
1146 if (SCM_PTAB_ENTRY(ptr))
1147 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
dc53f026
JB
1148 if (scm_ptobs[i].mark)
1149 {
1150 ptr = (scm_ptobs[i].mark) (ptr);
1151 goto gc_mark_loop;
1152 }
1153 else
1154 return;
0f2d19dd
JB
1155 break;
1156 case scm_tc7_smob:
1157 if (SCM_GC8MARKP (ptr))
1158 break;
dc53f026 1159 SCM_SETGC8MARK (ptr);
acb0a19c 1160 switch (SCM_GCTYP16 (ptr))
0f2d19dd
JB
1161 { /* should be faster than going through scm_smobs */
1162 case scm_tc_free_cell:
1163 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1bbd0b84 1164 case scm_tc16_allocated:
acb0a19c
MD
1165 case scm_tc16_big:
1166 case scm_tc16_real:
1167 case scm_tc16_complex:
0f2d19dd
JB
1168 break;
1169 default:
1170 i = SCM_SMOBNUM (ptr);
1171 if (!(i < scm_numsmob))
1172 goto def;
dc53f026
JB
1173 if (scm_smobs[i].mark)
1174 {
1175 ptr = (scm_smobs[i].mark) (ptr);
1176 goto gc_mark_loop;
1177 }
1178 else
1179 return;
0f2d19dd
JB
1180 }
1181 break;
1182 default:
1183 def:scm_wta (ptr, "unknown type in ", "gc_mark");
1184 }
1185}
1186
1187
1188/* Mark a Region Conservatively
1189 */
1190
a00c95d9 1191void
6e8d25a6 1192scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
0f2d19dd
JB
1193{
1194 register long m = n;
1195 register int i, j;
1196 register SCM_CELLPTR ptr;
1197
1198 while (0 <= --m)
c67baafd 1199 if (SCM_CELLP (* (SCM *) &x[m]))
0f2d19dd 1200 {
195e6201 1201 ptr = SCM2PTR (* (SCM *) &x[m]);
0f2d19dd
JB
1202 i = 0;
1203 j = scm_n_heap_segs - 1;
1204 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1205 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1206 {
1207 while (i <= j)
1208 {
1209 int seg_id;
1210 seg_id = -1;
1211 if ( (i == j)
1212 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1213 seg_id = i;
1214 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1215 seg_id = j;
1216 else
1217 {
1218 int k;
1219 k = (i + j) / 2;
1220 if (k == i)
1221 break;
1222 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1223 {
1224 j = k;
1225 ++i;
1226 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1227 continue;
1228 else
1229 break;
1230 }
1231 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1232 {
1233 i = k;
1234 --j;
1235 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1236 continue;
1237 else
1238 break;
1239 }
1240 }
47457e8a
DH
1241 if (scm_heap_table[seg_id].span == 1
1242 || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
1243 scm_gc_mark (* (SCM *) &x[m]);
0f2d19dd
JB
1244 break;
1245 }
1246
1247 }
1248 }
1249}
1250
1251
1a548472
DH
1252/* The function scm_cellp determines whether an SCM value can be regarded as a
1253 * pointer to a cell on the heap. Binary search is used in order to determine
1254 * the heap segment that contains the cell.
1255 */
2e11a577 1256int
6e8d25a6 1257scm_cellp (SCM value)
2e11a577 1258{
1a548472
DH
1259 if (SCM_CELLP (value)) {
1260 scm_cell * ptr = SCM2PTR (value);
1261 unsigned int i = 0;
1262 unsigned int j = scm_n_heap_segs - 1;
1263
1264 while (i < j) {
1265 int k = (i + j) / 2;
1266 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1267 j = k;
1268 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1269 i = k + 1;
1270 }
1271 }
2e11a577 1272
1a548472
DH
1273 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1274 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1a548472
DH
1275 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
1276 return 1;
1277 } else {
1278 return 0;
2e11a577 1279 }
1a548472
DH
1280 } else {
1281 return 0;
1282 }
2e11a577
MD
1283}
1284
1285
4c48ba06
MD
1286static void
1287gc_sweep_freelist_start (scm_freelist_t *freelist)
1288{
1289 freelist->cells = SCM_EOL;
1290 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1291 freelist->clusters_allocated = 0;
4c48ba06
MD
1292 freelist->clusters = SCM_EOL;
1293 freelist->clustertail = &freelist->clusters;
1811ebce 1294 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1295 freelist->collected = 0;
1296}
1297
1298static void
1299gc_sweep_freelist_finish (scm_freelist_t *freelist)
1300{
1811ebce 1301 int collected;
4c48ba06
MD
1302 *freelist->clustertail = freelist->cells;
1303 if (SCM_NNULLP (freelist->cells))
1304 {
1305 SCM c = freelist->cells;
1306 SCM_SETCAR (c, SCM_CDR (c));
1307 SCM_SETCDR (c, SCM_EOL);
1308 freelist->collected +=
1309 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1310 }
b37fe1c5 1311 scm_gc_cells_collected += freelist->collected;
a00c95d9 1312
8fef55a8 1313 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1314 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1315 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1316 * size. This means that a too low yield is compensated by more
1317 * heap on the list which is currently doing most work, which is
1318 * just what we want.
1319 */
1811ebce 1320 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1321 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06 1322}
0f2d19dd 1323
a00c95d9 1324void
0f2d19dd 1325scm_gc_sweep ()
0f2d19dd
JB
1326{
1327 register SCM_CELLPTR ptr;
0f2d19dd 1328 register SCM nfreelist;
4c48ba06 1329 register scm_freelist_t *freelist;
0f2d19dd 1330 register long m;
0f2d19dd 1331 register int span;
15e9d186 1332 long i;
0f2d19dd
JB
1333 scm_sizet seg_size;
1334
0f2d19dd 1335 m = 0;
0f2d19dd 1336
4c48ba06
MD
1337 gc_sweep_freelist_start (&scm_master_freelist);
1338 gc_sweep_freelist_start (&scm_master_freelist2);
a00c95d9 1339
cf2d30f6 1340 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1341 {
4c48ba06 1342 register unsigned int left_to_collect;
4c48ba06 1343 register scm_sizet j;
15e9d186 1344
cf2d30f6
JB
1345 /* Unmarked cells go onto the front of the freelist this heap
1346 segment points to. Rather than updating the real freelist
1347 pointer as we go along, we accumulate the new head in
1348 nfreelist. Then, if it turns out that the entire segment is
1349 free, we free (i.e., malloc's free) the whole segment, and
1350 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1351 freelist = scm_heap_table[i].freelist;
1352 nfreelist = freelist->cells;
4c48ba06 1353 left_to_collect = freelist->left_to_collect;
945fec60 1354 span = scm_heap_table[i].span;
cf2d30f6 1355
a00c95d9
ML
1356 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1357 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
0f2d19dd
JB
1358 for (j = seg_size + span; j -= span; ptr += span)
1359 {
96f6f4ae
DH
1360 SCM scmptr = PTR2SCM (ptr);
1361
0f2d19dd
JB
1362 switch SCM_TYP7 (scmptr)
1363 {
1364 case scm_tcs_cons_gloc:
0f2d19dd 1365 {
c8045e8d
DH
1366 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1367 * struct or a gloc. See the corresponding comment in
1368 * scm_gc_mark.
1369 */
1370 scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
1371 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1372 if (SCM_GCMARKP (scmptr))
0f2d19dd 1373 {
c8045e8d
DH
1374 if (vtable_data [scm_vtable_index_vcell] == 1)
1375 vtable_data [scm_vtable_index_vcell] = 0;
1376 goto cmrkcontinue;
1377 }
1378 else
1379 {
1380 if (vtable_data [scm_vtable_index_vcell] == 0
1381 || vtable_data [scm_vtable_index_vcell] == 1)
1382 {
1383 scm_struct_free_t free
1384 = (scm_struct_free_t) vtable_data[scm_struct_i_free];
1385 m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
1386 }
0f2d19dd
JB
1387 }
1388 }
1389 break;
1390 case scm_tcs_cons_imcar:
1391 case scm_tcs_cons_nimcar:
1392 case scm_tcs_closures:
e641afaf 1393 case scm_tc7_pws:
0f2d19dd
JB
1394 if (SCM_GCMARKP (scmptr))
1395 goto cmrkcontinue;
1396 break;
1397 case scm_tc7_wvect:
1398 if (SCM_GC8MARKP (scmptr))
1399 {
1400 goto c8mrkcontinue;
1401 }
1402 else
1403 {
ab4bef85
JB
1404 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1405 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
0f2d19dd
JB
1406 break;
1407 }
1408
1409 case scm_tc7_vector:
1410 case scm_tc7_lvector:
1411#ifdef CCLO
1412 case scm_tc7_cclo:
1413#endif
1414 if (SCM_GC8MARKP (scmptr))
1415 goto c8mrkcontinue;
1416
1417 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1418 freechars:
1419 scm_must_free (SCM_CHARS (scmptr));
1420 /* SCM_SETCHARS(scmptr, 0);*/
1421 break;
afe5177e 1422#ifdef HAVE_ARRAYS
0f2d19dd
JB
1423 case scm_tc7_bvect:
1424 if SCM_GC8MARKP (scmptr)
1425 goto c8mrkcontinue;
1426 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1427 goto freechars;
1428 case scm_tc7_byvect:
1429 if SCM_GC8MARKP (scmptr)
1430 goto c8mrkcontinue;
1431 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1432 goto freechars;
1433 case scm_tc7_ivect:
1434 case scm_tc7_uvect:
1435 if SCM_GC8MARKP (scmptr)
1436 goto c8mrkcontinue;
1437 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1438 goto freechars;
1439 case scm_tc7_svect:
1440 if SCM_GC8MARKP (scmptr)
1441 goto c8mrkcontinue;
1442 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1443 goto freechars;
5c11cc9d 1444#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1445 case scm_tc7_llvect:
1446 if SCM_GC8MARKP (scmptr)
1447 goto c8mrkcontinue;
1448 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1449 goto freechars;
1450#endif
1451 case scm_tc7_fvect:
1452 if SCM_GC8MARKP (scmptr)
1453 goto c8mrkcontinue;
1454 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1455 goto freechars;
1456 case scm_tc7_dvect:
1457 if SCM_GC8MARKP (scmptr)
1458 goto c8mrkcontinue;
1459 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1460 goto freechars;
1461 case scm_tc7_cvect:
1462 if SCM_GC8MARKP (scmptr)
1463 goto c8mrkcontinue;
1464 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1465 goto freechars;
afe5177e 1466#endif
0f2d19dd 1467 case scm_tc7_substring:
0f2d19dd
JB
1468 if (SCM_GC8MARKP (scmptr))
1469 goto c8mrkcontinue;
1470 break;
1471 case scm_tc7_string:
0f2d19dd
JB
1472 if (SCM_GC8MARKP (scmptr))
1473 goto c8mrkcontinue;
1474 m += SCM_HUGE_LENGTH (scmptr) + 1;
1475 goto freechars;
1476 case scm_tc7_msymbol:
1477 if (SCM_GC8MARKP (scmptr))
1478 goto c8mrkcontinue;
cf551a2b
DH
1479 m += (SCM_LENGTH (scmptr) + 1
1480 + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
0f2d19dd
JB
1481 scm_must_free ((char *)SCM_SLOTS (scmptr));
1482 break;
1483 case scm_tc7_contin:
1484 if SCM_GC8MARKP (scmptr)
1485 goto c8mrkcontinue;
0db18cf4 1486 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
c68296f8
MV
1487 if (SCM_VELTS (scmptr))
1488 goto freechars;
0f2d19dd
JB
1489 case scm_tc7_ssymbol:
1490 if SCM_GC8MARKP(scmptr)
1491 goto c8mrkcontinue;
1492 break;
1493 case scm_tcs_subrs:
1494 continue;
1495 case scm_tc7_port:
1496 if SCM_GC8MARKP (scmptr)
1497 goto c8mrkcontinue;
1498 if SCM_OPENP (scmptr)
1499 {
1500 int k = SCM_PTOBNUM (scmptr);
1501 if (!(k < scm_numptob))
1502 goto sweeperr;
1503 /* Keep "revealed" ports alive. */
945fec60 1504 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1505 continue;
1506 /* Yes, I really do mean scm_ptobs[k].free */
1507 /* rather than ftobs[k].close. .close */
1508 /* is for explicit CLOSE-PORT by user */
84af0382 1509 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1510 SCM_SETSTREAM (scmptr, 0);
1511 scm_remove_from_port_table (scmptr);
1512 scm_gc_ports_collected++;
24e68a57 1513 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
0f2d19dd
JB
1514 }
1515 break;
1516 case scm_tc7_smob:
1517 switch SCM_GCTYP16 (scmptr)
1518 {
1519 case scm_tc_free_cell:
acb0a19c 1520 case scm_tc16_real:
0f2d19dd
JB
1521 if SCM_GC8MARKP (scmptr)
1522 goto c8mrkcontinue;
1523 break;
1524#ifdef SCM_BIGDIG
acb0a19c 1525 case scm_tc16_big:
0f2d19dd
JB
1526 if SCM_GC8MARKP (scmptr)
1527 goto c8mrkcontinue;
1528 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1529 goto freechars;
1530#endif /* def SCM_BIGDIG */
acb0a19c 1531 case scm_tc16_complex:
0f2d19dd
JB
1532 if SCM_GC8MARKP (scmptr)
1533 goto c8mrkcontinue;
acb0a19c
MD
1534 m += 2 * sizeof (double);
1535 goto freechars;
0f2d19dd
JB
1536 default:
1537 if SCM_GC8MARKP (scmptr)
1538 goto c8mrkcontinue;
1539
1540 {
1541 int k;
1542 k = SCM_SMOBNUM (scmptr);
1543 if (!(k < scm_numsmob))
1544 goto sweeperr;
c8045e8d 1545 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1546 break;
1547 }
1548 }
1549 break;
1550 default:
1551 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1552 }
0f2d19dd
JB
1553#if 0
1554 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1555 exit (2);
1556#endif
4c48ba06 1557 if (!--left_to_collect)
4a4c9785
MD
1558 {
1559 SCM_SETCAR (scmptr, nfreelist);
4c48ba06
MD
1560 *freelist->clustertail = scmptr;
1561 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1562
4a4c9785 1563 nfreelist = SCM_EOL;
4c48ba06
MD
1564 freelist->collected += span * freelist->cluster_size;
1565 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1566 }
1567 else
4a4c9785
MD
1568 {
1569 /* Stick the new cell on the front of nfreelist. It's
1570 critical that we mark this cell as freed; otherwise, the
1571 conservative collector might trace it as some other type
1572 of object. */
54778cd3 1573 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
1574 SCM_SETCDR (scmptr, nfreelist);
1575 nfreelist = scmptr;
1576 }
a00c95d9 1577
0f2d19dd
JB
1578 continue;
1579 c8mrkcontinue:
1580 SCM_CLRGC8MARK (scmptr);
1581 continue;
1582 cmrkcontinue:
1583 SCM_CLRGCMARK (scmptr);
1584 }
1585#ifdef GC_FREE_SEGMENTS
1586 if (n == seg_size)
1587 {
15e9d186
JB
1588 register long j;
1589
4c48ba06 1590 freelist->heap_size -= seg_size;
cf2d30f6
JB
1591 free ((char *) scm_heap_table[i].bounds[0]);
1592 scm_heap_table[i].bounds[0] = 0;
1593 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1594 scm_heap_table[j - 1] = scm_heap_table[j];
1595 scm_n_heap_segs -= 1;
cf2d30f6 1596 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1597 }
1598 else
1599#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1600 {
1601 /* Update the real freelist pointer to point to the head of
1602 the list of free cells we've built for this segment. */
4c48ba06 1603 freelist->cells = nfreelist;
4c48ba06 1604 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1605 }
1606
fca7547b 1607#ifdef GUILE_DEBUG_FREELIST
4c48ba06 1608 scm_check_freelist (freelist == &scm_master_freelist
8ded62a3
MD
1609 ? scm_freelist
1610 : scm_freelist2);
cf2d30f6
JB
1611 scm_map_free_list ();
1612#endif
4a4c9785 1613 }
a00c95d9 1614
4c48ba06
MD
1615 gc_sweep_freelist_finish (&scm_master_freelist);
1616 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1617
8ded62a3
MD
1618 /* When we move to POSIX threads private freelists should probably
1619 be GC-protected instead. */
1620 scm_freelist = SCM_EOL;
1621 scm_freelist2 = SCM_EOL;
a00c95d9 1622
b37fe1c5 1623 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f 1624 scm_gc_yield -= scm_cells_allocated;
0f2d19dd
JB
1625 scm_mallocated -= m;
1626 scm_gc_malloc_collected = m;
1627}
1628
1629
1630\f
1631
1632/* {Front end to malloc}
1633 *
c68296f8 1634 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
0f2d19dd
JB
1635 *
 * These functions provide services comparable to malloc, realloc, and
1637 * free. They are for allocating malloced parts of scheme objects.
1638 * The primary purpose of the front end is to impose calls to gc.
1639 */
1640
bc9d9bb2 1641
0f2d19dd
JB
1642/* scm_must_malloc
1643 * Return newly malloced storage or throw an error.
1644 *
1645 * The parameter WHAT is a string for error reporting.
a00c95d9 1646 * If the threshold scm_mtrigger will be passed by this
0f2d19dd
JB
1647 * allocation, or if the first call to malloc fails,
1648 * garbage collect -- on the presumption that some objects
1649 * using malloced storage may be collected.
1650 *
1651 * The limit scm_mtrigger may be raised by this allocation.
1652 */
07806695 1653void *
e4ef2330 1654scm_must_malloc (scm_sizet size, const char *what)
0f2d19dd 1655{
07806695 1656 void *ptr;
15e9d186 1657 unsigned long nm = scm_mallocated + size;
e4ef2330
MD
1658
1659 if (nm <= scm_mtrigger)
0f2d19dd 1660 {
07806695 1661 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1662 if (NULL != ptr)
1663 {
1664 scm_mallocated = nm;
bc9d9bb2
MD
1665#ifdef GUILE_DEBUG_MALLOC
1666 scm_malloc_register (ptr, what);
1667#endif
0f2d19dd
JB
1668 return ptr;
1669 }
1670 }
6064dcc6 1671
0f2d19dd 1672 scm_igc (what);
e4ef2330 1673
0f2d19dd 1674 nm = scm_mallocated + size;
07806695 1675 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1676 if (NULL != ptr)
1677 {
1678 scm_mallocated = nm;
6064dcc6
MV
1679 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1680 if (nm > scm_mtrigger)
1681 scm_mtrigger = nm + nm / 2;
1682 else
1683 scm_mtrigger += scm_mtrigger / 2;
1684 }
bc9d9bb2
MD
1685#ifdef GUILE_DEBUG_MALLOC
1686 scm_malloc_register (ptr, what);
1687#endif
1688
0f2d19dd
JB
1689 return ptr;
1690 }
e4ef2330
MD
1691
1692 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1693 return 0; /* never reached */
0f2d19dd
JB
1694}
1695
1696
1697/* scm_must_realloc
1698 * is similar to scm_must_malloc.
1699 */
07806695
JB
1700void *
1701scm_must_realloc (void *where,
e4ef2330
MD
1702 scm_sizet old_size,
1703 scm_sizet size,
3eeba8d4 1704 const char *what)
0f2d19dd 1705{
07806695 1706 void *ptr;
e4ef2330
MD
1707 scm_sizet nm = scm_mallocated + size - old_size;
1708
1709 if (nm <= scm_mtrigger)
0f2d19dd 1710 {
07806695 1711 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1712 if (NULL != ptr)
1713 {
1714 scm_mallocated = nm;
bc9d9bb2
MD
1715#ifdef GUILE_DEBUG_MALLOC
1716 scm_malloc_reregister (where, ptr, what);
1717#endif
0f2d19dd
JB
1718 return ptr;
1719 }
1720 }
e4ef2330 1721
0f2d19dd 1722 scm_igc (what);
e4ef2330
MD
1723
1724 nm = scm_mallocated + size - old_size;
07806695 1725 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1726 if (NULL != ptr)
1727 {
1728 scm_mallocated = nm;
6064dcc6
MV
1729 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1730 if (nm > scm_mtrigger)
1731 scm_mtrigger = nm + nm / 2;
1732 else
1733 scm_mtrigger += scm_mtrigger / 2;
1734 }
bc9d9bb2
MD
1735#ifdef GUILE_DEBUG_MALLOC
1736 scm_malloc_reregister (where, ptr, what);
1737#endif
0f2d19dd
JB
1738 return ptr;
1739 }
e4ef2330
MD
1740
1741 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1742 return 0; /* never reached */
0f2d19dd
JB
1743}
1744
a00c95d9 1745void
07806695 1746scm_must_free (void *obj)
0f2d19dd 1747{
bc9d9bb2
MD
1748#ifdef GUILE_DEBUG_MALLOC
1749 scm_malloc_unregister (obj);
1750#endif
0f2d19dd
JB
1751 if (obj)
1752 free (obj);
1753 else
1754 scm_wta (SCM_INUM0, "already free", "");
1755}
0f2d19dd 1756
c68296f8
MV
1757/* Announce that there has been some malloc done that will be freed
1758 * during gc. A typical use is for a smob that uses some malloced
1759 * memory but can not get it from scm_must_malloc (for whatever
1760 * reason). When a new object of this smob is created you call
1761 * scm_done_malloc with the size of the object. When your smob free
1762 * function is called, be sure to include this size in the return
1763 * value. */
0f2d19dd 1764
c68296f8 1765void
6e8d25a6 1766scm_done_malloc (long size)
c68296f8
MV
1767{
1768 scm_mallocated += size;
1769
1770 if (scm_mallocated > scm_mtrigger)
1771 {
1772 scm_igc ("foreign mallocs");
1773 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1774 {
1775 if (scm_mallocated > scm_mtrigger)
1776 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1777 else
1778 scm_mtrigger += scm_mtrigger / 2;
1779 }
1780 }
1781}
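/* Example (illustrative sketch only): the pattern described in the comment
 * above, for a hypothetical "image" smob whose pixel data is allocated
 * with plain malloc.  The smob type and the SCM_NEWSMOB / SCM_SMOB_DATA
 * usage are assumptions for the illustration and are not defined here.
 */
#if 0 /* not compiled; for illustration only */
struct image { size_t n_bytes; char *pixels; };
static long image_tag;           /* created elsewhere with scm_make_smob_type */

static SCM
make_image (size_t n_bytes)
{
  struct image *im = (struct image *) malloc (sizeof (*im));
  SCM obj;
  im->n_bytes = n_bytes;
  im->pixels = (char *) malloc (n_bytes);    /* not obtained via scm_must_malloc */
  scm_done_malloc ((long) (sizeof (*im) + n_bytes));   /* announce it to the GC */
  SCM_NEWSMOB (obj, image_tag, im);
  return obj;
}

static scm_sizet
free_image (SCM obj)
{
  struct image *im = (struct image *) SCM_SMOB_DATA (obj);
  scm_sizet freed = sizeof (*im) + im->n_bytes;
  free (im->pixels);
  free (im);
  return freed;       /* include the announced size in the return value */
}
#endif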
1782
1783
1784\f
0f2d19dd
JB
1785
1786/* {Heap Segments}
1787 *
1788 * Each heap segment is an array of objects of a particular size.
1789 * Every segment has an associated (possibly shared) freelist.
1790 * A table of segment records keeps the lower and upper extent of each
1791 * segment; this table is used during the conservative phase of gc to
1792 * identify probable gc roots (because they point
c68296f8 1793 * into valid segments at reasonable offsets). */
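/* Sketch (hypothetical helper, not the actual marking code): how the
 * segment table can be used to decide whether a word found on the stack
 * is a probable gc root.  The real collector uses the SCM_PTR_* comparison
 * macros and also checks the cell span; plain pointer comparisons are used
 * here only to keep the illustration short.
 */
#if 0 /* not compiled; for illustration only */
static int
points_into_heap (SCM_CELLPTR ptr)
{
  int i;
  for (i = 0; i < scm_n_heap_segs; ++i)
    if (scm_heap_table[i].bounds[0] <= ptr
        && ptr < scm_heap_table[i].bounds[1])
      return 1;         /* within some segment's [lower, upper) extent */
  return 0;
}
#endif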
0f2d19dd
JB
1794
1795/* scm_expmem
1796 * is true if the first segment was smaller than INIT_HEAP_SEG.
1797 * If scm_expmem is set to one, subsequent segment allocations will
1798 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1799 */
1800int scm_expmem = 0;
1801
4c48ba06
MD
1802scm_sizet scm_max_segment_size;
1803
0f2d19dd
JB
1804/* scm_heap_org
1805 * is the lowest base address of any heap segment.
1806 */
1807SCM_CELLPTR scm_heap_org;
1808
a00c95d9 1809scm_heap_seg_data_t * scm_heap_table = 0;
0f2d19dd
JB
1810int scm_n_heap_segs = 0;
1811
0f2d19dd
JB
1812/* init_heap_seg
1813 * initializes a new heap segment and returns its size in bytes (or 0 if seg_org is NULL).
1814 *
1815 * The segment origin, segment size in bytes, and the span of objects
1816 * in cells are input parameters. The freelist is both input and output.
1817 *
1818 * This function presumes that the scm_heap_table has already been expanded
1819 * to accommodate a new segment record.
1820 */
1821
1822
a00c95d9 1823static scm_sizet
4c48ba06 1824init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
0f2d19dd
JB
1825{
1826 register SCM_CELLPTR ptr;
0f2d19dd 1827 SCM_CELLPTR seg_end;
15e9d186 1828 int new_seg_index;
acb0a19c 1829 int n_new_cells;
4c48ba06 1830 int span = freelist->span;
a00c95d9 1831
0f2d19dd
JB
1832 if (seg_org == NULL)
1833 return 0;
1834
a00c95d9 1835 ptr = CELL_UP (seg_org, span);
acb0a19c 1836
a00c95d9 1837 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 1838 */
a00c95d9 1839 seg_end = CELL_DN ((char *) seg_org + size, span);
0f2d19dd 1840
a00c95d9 1841 /* Find the right place and insert the segment record.
0f2d19dd
JB
1842 *
1843 */
1844 for (new_seg_index = 0;
1845 ( (new_seg_index < scm_n_heap_segs)
1846 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1847 new_seg_index++)
1848 ;
1849
1850 {
1851 int i;
1852 for (i = scm_n_heap_segs; i > new_seg_index; --i)
1853 scm_heap_table[i] = scm_heap_table[i - 1];
1854 }
a00c95d9 1855
0f2d19dd
JB
1856 ++scm_n_heap_segs;
1857
945fec60 1858 scm_heap_table[new_seg_index].span = span;
4c48ba06 1859 scm_heap_table[new_seg_index].freelist = freelist;
195e6201
DH
1860 scm_heap_table[new_seg_index].bounds[0] = ptr;
1861 scm_heap_table[new_seg_index].bounds[1] = seg_end;
0f2d19dd
JB
1862
1863
a00c95d9 1864 /* Compute the least valid object pointer w/in this segment
0f2d19dd 1865 */
a00c95d9 1866 ptr = CELL_UP (ptr, span);
0f2d19dd
JB
1867
1868
acb0a19c
MD
1869 /* Number of cells that fit in this segment, after alignment. */
1870 n_new_cells = seg_end - ptr;
0f2d19dd 1871
4c48ba06 1872 freelist->heap_size += n_new_cells;
4a4c9785 1873
a00c95d9 1874 /* Partition objects in this segment into clusters */
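  /* Layout built by the block below: each cluster is one spine cell
   * followed by (freelist->cluster_size - 1) free cells.  The spine's CAR
   * points at the cluster's first free cell and its CDR links to the next
   * cluster's spine; the free cells are chained through their CDRs, with
   * the last one ending in SCM_EOL.
   */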
4a4c9785
MD
1875 {
1876 SCM clusters;
1877 SCM *clusterp = &clusters;
4c48ba06 1878 int n_cluster_cells = span * freelist->cluster_size;
4a4c9785 1879
4c48ba06 1880 while (n_new_cells > span) /* at least one spine + one freecell */
4a4c9785 1881 {
4c48ba06
MD
1882 /* Determine end of cluster
1883 */
1884 if (n_new_cells >= n_cluster_cells)
1885 {
1886 seg_end = ptr + n_cluster_cells;
1887 n_new_cells -= n_cluster_cells;
1888 }
4a4c9785 1889 else
a00c95d9
ML
1890 /* [cmm] looks like the segment size doesn't divide cleanly by
1891 cluster size. bad cmm! */
1892 abort();
4a4c9785 1893
4c48ba06
MD
1894 /* Allocate cluster spine
1895 */
4a4c9785
MD
1896 *clusterp = PTR2SCM (ptr);
1897 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
1898 clusterp = SCM_CDRLOC (*clusterp);
4a4c9785 1899 ptr += span;
a00c95d9 1900
4a4c9785
MD
1901 while (ptr < seg_end)
1902 {
96f6f4ae
DH
1903 SCM scmptr = PTR2SCM (ptr);
1904
54778cd3 1905 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
1906 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
1907 ptr += span;
1908 }
4c48ba06 1909
4a4c9785
MD
1910 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
1911 }
a00c95d9 1912
4a4c9785
MD
1913 /* Patch up the last cluster pointer in the segment
1914 * to join it to the input freelist.
1915 */
4c48ba06
MD
1916 *clusterp = freelist->clusters;
1917 freelist->clusters = clusters;
4a4c9785
MD
1918 }
1919
4c48ba06
MD
1920#ifdef DEBUGINFO
1921 fprintf (stderr, "H");
1922#endif
0f2d19dd 1923 return size;
0f2d19dd
JB
1924}
1925
a00c95d9
ML
1926static scm_sizet
1927round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
1928{
1929 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
1930
1931 return
1932 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
1933 + ALIGNMENT_SLACK (freelist);
1934}
1935
a00c95d9 1936static void
4c48ba06 1937alloc_some_heap (scm_freelist_t *freelist)
0f2d19dd 1938{
a00c95d9 1939 scm_heap_seg_data_t * tmptable;
0f2d19dd 1940 SCM_CELLPTR ptr;
b37fe1c5 1941 long len;
a00c95d9 1942
0f2d19dd
JB
1943 /* Critical code sections (such as the garbage collector)
1944 * aren't supposed to add heap segments.
1945 */
1946 if (scm_gc_heap_lock)
1947 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
1948
1949 /* Expand the heap tables to have room for the new segment.
1950 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1951 * only if the allocation of the segment itself succeeds.
1952 */
a00c95d9 1953 len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
0f2d19dd 1954
a00c95d9 1955 SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
0f2d19dd
JB
1956 realloc ((char *)scm_heap_table, len)));
1957 if (!tmptable)
1958 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
1959 else
1960 scm_heap_table = tmptable;
1961
1962
1963 /* Pick a size for the new heap segment.
a00c95d9 1964 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
1965 * gc.h
1966 */
4c48ba06 1967 {
1811ebce
MD
1968 /* Ensure that the new segment is predicted to be large enough.
1969 *
1970 * New yield should at least equal GC fraction of new heap size, i.e.
1971 *
1972 * y + dh > f * (h + dh)
1973 *
1974 * y : yield
8fef55a8 1975 * f : min yield fraction
1811ebce
MD
1976 * h : heap size
1977 * dh : size of new heap segment
1978 *
1979 * This gives dh > (f * h - y) / (1 - f)
bda1446c 1980 */
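    /* Worked example (illustrative numbers only): with f = 40%,
     * h = 100000 cells and a yield y = 20000 cells, the bound above is
     * dh > (0.40 * 100000 - 20000) / (1 - 0.40), i.e. about 33333 cells.
     * The computation below expresses the same bound with f given as a
     * percentage; dividing by (99 - f) rather than (100 - f) makes the
     * result slightly larger (about 33898 cells here).
     */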
8fef55a8 1981 int f = freelist->min_yield_fraction;
1811ebce
MD
1982 long h = SCM_HEAP_SIZE;
1983 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
1984 len = SCM_EXPHEAP (freelist->heap_size);
1985#ifdef DEBUGINFO
1986 fprintf (stderr, "(%d < %d)", len, min_cells);
1987#endif
1988 if (len < min_cells)
1811ebce 1989 len = min_cells + freelist->cluster_size;
4c48ba06 1990 len *= sizeof (scm_cell);
1811ebce
MD
1991 /* force new sampling */
1992 freelist->collected = LONG_MAX;
4c48ba06 1993 }
a00c95d9 1994
4c48ba06
MD
1995 if (len > scm_max_segment_size)
1996 len = scm_max_segment_size;
0f2d19dd
JB
1997
1998 {
1999 scm_sizet smallest;
2000
a00c95d9 2001 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9 2002
0f2d19dd 2003 if (len < smallest)
a00c95d9 2004 len = smallest;
0f2d19dd
JB
2005
2006 /* Allocate with decaying ambition. */
2007 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2008 && (len >= smallest))
2009 {
1811ebce 2010 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2011 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2012 if (ptr)
2013 {
a00c95d9 2014 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2015 return;
2016 }
2017 len /= 2;
2018 }
2019 }
2020
2021 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
2022}
2023
2024
a00c95d9 2025SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
1bbd0b84 2026 (SCM name),
b380b885 2027 "")
1bbd0b84 2028#define FUNC_NAME s_scm_unhash_name
0f2d19dd
JB
2029{
2030 int x;
2031 int bound;
3b3b36dd 2032 SCM_VALIDATE_SYMBOL (1,name);
0f2d19dd
JB
2033 SCM_DEFER_INTS;
2034 bound = scm_n_heap_segs;
2035 for (x = 0; x < bound; ++x)
2036 {
2037 SCM_CELLPTR p;
2038 SCM_CELLPTR pbound;
195e6201
DH
2039 p = scm_heap_table[x].bounds[0];
2040 pbound = scm_heap_table[x].bounds[1];
0f2d19dd
JB
2041 while (p < pbound)
2042 {
c8045e8d
DH
2043 SCM cell = PTR2SCM (p);
2044 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
0f2d19dd 2045 {
c8045e8d
DH
2046 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2047 * struct cell. See the corresponding comment in scm_gc_mark.
2048 */
2049 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2050 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2051 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
9a09deb1 2052 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
c8045e8d 2053 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
0f2d19dd 2054 {
c8045e8d 2055 SCM_SET_CELL_OBJECT_0 (cell, name);
0f2d19dd
JB
2056 }
2057 }
2058 ++p;
2059 }
2060 }
2061 SCM_ALLOW_INTS;
2062 return name;
2063}
1bbd0b84 2064#undef FUNC_NAME
0f2d19dd
JB
2065
2066
2067\f
2068/* {GC Protection Helper Functions}
2069 */
2070
2071
0f2d19dd 2072void
6e8d25a6
GB
2073scm_remember (SCM *ptr)
2074{ /* empty */ }
0f2d19dd 2075
1cc91f1b 2076
c209c88e 2077/*
41b0806d
GB
2078 These crazy functions prevent garbage collection
2079 of the arguments after the first one: because those
2080 arguments appear in the caller's final call, they stay
2081 live (and thus visible to the conservative stack scan)
2082 for the whole of the caller's body.
2083 It'd be better to have a nice compiler hint to
2084 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2085SCM
2086scm_return_first (SCM elt, ...)
0f2d19dd
JB
2087{
2088 return elt;
2089}
2090
41b0806d
GB
2091int
2092scm_return_first_int (int i, ...)
2093{
2094 return i;
2095}
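/* Example (illustrative sketch only): how a caller can use
 * scm_return_first.  SCM_CHARS and the helper itself are assumptions made
 * for the illustration.
 */
#if 0 /* not compiled; for illustration only */
static SCM
first_char_code (SCM str)
{
  char *p = SCM_CHARS (str);          /* raw pointer into STR's storage */
  SCM result = SCM_MAKINUM ((long) p[0]);
  /* Passing STR to the last call keeps it -- and therefore the storage
     P points into -- live until RESULT has been computed.  */
  return scm_return_first (result, str);
}
#endif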
2096
0f2d19dd 2097
0f2d19dd 2098SCM
6e8d25a6 2099scm_permanent_object (SCM obj)
0f2d19dd
JB
2100{
2101 SCM_REDEFER_INTS;
2102 scm_permobjs = scm_cons (obj, scm_permobjs);
2103 SCM_REALLOW_INTS;
2104 return obj;
2105}
2106
2107
7bd4fbe2
MD
2108/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2109 other references are dropped, until the object is unprotected by calling
2110 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2111 i. e. it is possible to protect the same object several times, but it is
2112 necessary to unprotect the object the same number of times to actually get
2113 the object unprotected. It is an error to unprotect an object more often
2114 than it has been protected before. The function scm_protect_object returns
2115 OBJ.
2116*/
2117
2118/* Implementation note: For every object X, there is a counter which
2119 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2120*/
686765af 2121
ef290276 2122SCM
6e8d25a6 2123scm_protect_object (SCM obj)
ef290276 2124{
686765af
ML
2125 SCM handle;
2126
2127 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2128 SCM_REDEFER_INTS;
686765af 2129
0f0f0899
MD
2130 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2131 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
686765af 2132
2dd6a83a 2133 SCM_REALLOW_INTS;
686765af 2134
ef290276
JB
2135 return obj;
2136}
2137
2138
2139/* Remove any protection for OBJ established by a prior call to
dab7f566 2140 scm_protect_object. This function returns OBJ.
ef290276 2141
dab7f566 2142 See scm_protect_object for more information. */
ef290276 2143SCM
6e8d25a6 2144scm_unprotect_object (SCM obj)
ef290276 2145{
686765af
ML
2146 SCM handle;
2147
2148 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2149 SCM_REDEFER_INTS;
686765af
ML
2150
2151 handle = scm_hashq_get_handle (scm_protects, obj);
0f0f0899
MD
2152
2153 if (SCM_IMP (handle))
686765af 2154 {
0f0f0899
MD
2155 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2156 abort ();
686765af 2157 }
6a199940
DH
2158 else
2159 {
2160 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2161 if (count == 0)
2162 scm_hashq_remove_x (scm_protects, obj);
2163 else
2164 SCM_SETCDR (handle, SCM_MAKINUM (count));
2165 }
686765af 2166
2dd6a83a 2167 SCM_REALLOW_INTS;
ef290276
JB
2168
2169 return obj;
2170}
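/* Example (illustrative sketch only): the nesting behaviour of the
 * protection count maintained in scm_protects.
 */
#if 0 /* not compiled; for illustration only */
static void
protection_example (SCM obj)
{
  scm_protect_object (obj);      /* count for OBJ becomes 1 */
  scm_protect_object (obj);      /* count becomes 2; still protected */

  scm_unprotect_object (obj);    /* count drops to 1; OBJ is still protected */
  scm_unprotect_object (obj);    /* count reaches 0; OBJ may be collected
                                    once no other references remain */
}
#endif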
2171
c45acc34
JB
2172int terminating;
2173
2174/* called on process termination. */
e52ceaac
MD
2175#ifdef HAVE_ATEXIT
2176static void
2177cleanup (void)
2178#else
2179#ifdef HAVE_ON_EXIT
51157deb
MD
2180extern int on_exit (void (*procp) (), int arg);
2181
e52ceaac
MD
2182static void
2183cleanup (int status, void *arg)
2184#else
2185#error Do not know how to set up a cleanup handler on your system.
2186#endif
2187#endif
c45acc34
JB
2188{
2189 terminating = 1;
2190 scm_flush_all_ports ();
2191}
ef290276 2192
0f2d19dd 2193\f
acb0a19c 2194static int
4c48ba06 2195make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
acb0a19c 2196{
a00c95d9
ML
2197 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2198 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2199 rounded_size,
4c48ba06 2200 freelist))
acb0a19c 2201 {
a00c95d9
ML
2202 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2203 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2204 rounded_size,
4c48ba06 2205 freelist))
acb0a19c
MD
2206 return 1;
2207 }
2208 else
2209 scm_expmem = 1;
2210
8fef55a8
MD
2211 if (freelist->min_yield_fraction)
2212 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
b37fe1c5 2213 / 100);
8fef55a8 2214 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
a00c95d9 2215
acb0a19c
MD
2216 return 0;
2217}
2218
2219\f
4c48ba06
MD
2220static void
2221init_freelist (scm_freelist_t *freelist,
2222 int span,
2223 int cluster_size,
8fef55a8 2224 int min_yield)
4c48ba06
MD
2225{
2226 freelist->clusters = SCM_EOL;
2227 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2228 freelist->left_to_collect = 0;
2229 freelist->clusters_allocated = 0;
8fef55a8
MD
2230 freelist->min_yield = 0;
2231 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2232 freelist->span = span;
2233 freelist->collected = 0;
1811ebce 2234 freelist->collected_1 = 0;
4c48ba06
MD
2235 freelist->heap_size = 0;
2236}
2237
4a4c9785 2238int
4c48ba06
MD
2239scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2240 scm_sizet init_heap_size_2, int gc_trigger_2,
2241 scm_sizet max_segment_size)
0f2d19dd
JB
2242{
2243 scm_sizet j;
2244
4c48ba06 2245 if (!init_heap_size_1)
aeacfc8f 2246 init_heap_size_1 = scm_default_init_heap_size_1;
4c48ba06 2247 if (!init_heap_size_2)
aeacfc8f 2248 init_heap_size_2 = scm_default_init_heap_size_2;
4c48ba06 2249
0f2d19dd
JB
2250 j = SCM_NUM_PROTECTS;
2251 while (j)
2252 scm_sys_protects[--j] = SCM_BOOL_F;
2253 scm_block_gc = 1;
4a4c9785 2254
4a4c9785 2255 scm_freelist = SCM_EOL;
4c48ba06
MD
2256 scm_freelist2 = SCM_EOL;
2257 init_freelist (&scm_master_freelist,
2258 1, SCM_CLUSTER_SIZE_1,
aeacfc8f 2259 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
4c48ba06
MD
2260 init_freelist (&scm_master_freelist2,
2261 2, SCM_CLUSTER_SIZE_2,
aeacfc8f 2262 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
4c48ba06 2263 scm_max_segment_size
aeacfc8f 2264 = max_segment_size ? max_segment_size : scm_default_max_segment_size;
4a4c9785 2265
0f2d19dd
JB
2266 scm_expmem = 0;
2267
2268 j = SCM_HEAP_SEG_SIZE;
2269 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
a00c95d9
ML
2270 scm_heap_table = ((scm_heap_seg_data_t *)
2271 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
acb0a19c 2272
4c48ba06
MD
2273 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2274 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
4a4c9785 2275 return 1;
acb0a19c 2276
801cb5e7 2277 /* scm_hplims[0] can change. do not remove scm_heap_org */
a00c95d9 2278 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
acb0a19c 2279
801cb5e7
MD
2280 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2281 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2282 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2283 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2284 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
0f2d19dd
JB
2285
2286 /* Initialise the list of ports. */
840ae05d
JB
2287 scm_port_table = (scm_port **)
2288 malloc (sizeof (scm_port *) * scm_port_table_room);
0f2d19dd
JB
2289 if (!scm_port_table)
2290 return 1;
2291
a18bcd0e 2292#ifdef HAVE_ATEXIT
c45acc34 2293 atexit (cleanup);
e52ceaac
MD
2294#else
2295#ifdef HAVE_ON_EXIT
2296 on_exit (cleanup, 0);
2297#endif
a18bcd0e 2298#endif
0f2d19dd
JB
2299
2300 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
24e68a57 2301 SCM_SETCDR (scm_undefineds, scm_undefineds);
0f2d19dd
JB
2302
2303 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2304 scm_nullstr = scm_makstr (0L, 0);
a8741caa 2305 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
54778cd3
DH
2306 scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2307 scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
2308 scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
8960e0a0 2309 scm_stand_in_procs = SCM_EOL;
0f2d19dd 2310 scm_permobjs = SCM_EOL;
686765af 2311 scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
3b2b8760 2312 scm_asyncs = SCM_EOL;
54778cd3
DH
2313 scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2314 scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
0f2d19dd
JB
2315#ifdef SCM_BIGDIG
2316 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2317#endif
2318 return 0;
2319}
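/* Example (illustrative sketch only): how the storage subsystem might be
 * initialised with the built-in defaults by passing zeros.  In practice
 * this is done by Guile's own start-up code, not by user programs.
 */
#if 0 /* not compiled; for illustration only */
static void
init_storage_with_defaults (void)
{
  if (scm_init_storage (0, 0,    /* 1-cell heap: default size and min yield */
                        0, 0,    /* 2-cell heap: default size and min yield */
                        0))      /* default maximum segment size */
    {
      fprintf (stderr, "scm_init_storage failed\n");
      abort ();
    }
}
#endif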
2320\f
2321
0f2d19dd
JB
2322void
2323scm_init_gc ()
0f2d19dd 2324{
801cb5e7 2325 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
a0599745 2326#include "libguile/gc.x"
0f2d19dd 2327}
89e00824
ML
2328
2329/*
2330 Local Variables:
2331 c-file-style: "gnu"
2332 End:
2333*/