* Don't include <stdio.h> in gh.h. Thanks to Han-Wen Nienhuys.
[bpt/guile.git] / libguile / gc.c
CommitLineData
acb0a19c 1/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
a00c95d9 2 *
0f2d19dd
JB
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
a00c95d9 7 *
0f2d19dd
JB
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
a00c95d9 12 *
0f2d19dd
JB
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
82892bed
JB
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
0f2d19dd
JB
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
82892bed 40 * If you do not wish that, delete this exception notice. */
1bbd0b84
GB
41
42/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
37ddcaf6
MD
45/* #define DEBUGINFO */
46
0f2d19dd
JB
47\f
48#include <stdio.h>
a0599745 49#include "libguile/_scm.h"
0a7a7445 50#include "libguile/eval.h"
a0599745
MD
51#include "libguile/stime.h"
52#include "libguile/stackchk.h"
53#include "libguile/struct.h"
a0599745
MD
54#include "libguile/smob.h"
55#include "libguile/unif.h"
56#include "libguile/async.h"
57#include "libguile/ports.h"
58#include "libguile/root.h"
59#include "libguile/strings.h"
60#include "libguile/vectors.h"
801cb5e7 61#include "libguile/weaks.h"
686765af 62#include "libguile/hashtab.h"
a0599745
MD
63
64#include "libguile/validate.h"
65#include "libguile/gc.h"
fce59c93 66
bc9d9bb2 67#ifdef GUILE_DEBUG_MALLOC
a0599745 68#include "libguile/debug-malloc.h"
bc9d9bb2
MD
69#endif
70
0f2d19dd 71#ifdef HAVE_MALLOC_H
95b88819 72#include <malloc.h>
0f2d19dd
JB
73#endif
74
75#ifdef HAVE_UNISTD_H
95b88819 76#include <unistd.h>
0f2d19dd
JB
77#endif
78
1cc91f1b
JB
79#ifdef __STDC__
80#include <stdarg.h>
81#define var_start(x, y) va_start(x, y)
82#else
83#include <varargs.h>
84#define var_start(x, y) va_start(x)
85#endif
86
0f2d19dd
JB
87\f
88/* {heap tuning parameters}
a00c95d9 89 *
0f2d19dd
JB
90 * These are parameters for controlling memory allocation. The heap
91 * is the area out of which scm_cons, and object headers are allocated.
92 *
93 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
94 * 64 bit machine. The units of the _SIZE parameters are bytes.
95 * Cons pairs and object headers occupy one heap cell.
96 *
97 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
98 * allocated initially the heap will grow by half its current size
99 * each subsequent time more heap is needed.
100 *
101 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
102 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
103 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
104 * is in scm_init_storage() and alloc_some_heap() in sys.c
a00c95d9 105 *
0f2d19dd
JB
106 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
107 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
108 *
109 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
110 * is needed.
111 *
112 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
a00c95d9 113 * trigger a GC.
6064dcc6
MV
114 *
115 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
116 * reclaimed by a GC triggered by must_malloc. If less than this is
117 * reclaimed, the trigger threshold is raised. [I don't know what a
118 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
a00c95d9 119 * work around a oscillation that caused almost constant GC.]
0f2d19dd
JB
120 */
121
8fef55a8
MD
122/*
123 * Heap size 45000 and 40% min yield gives quick startup and no extra
124 * heap allocation. Having higher values on min yield may lead to
125 * large heaps, especially if code behaviour is varying its
126 * maximum consumption between different freelists.
127 */
aeacfc8f
MD
128int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell));
129int scm_default_min_yield_1 = 40;
4c48ba06 130#define SCM_CLUSTER_SIZE_1 2000L
4c48ba06 131
aeacfc8f 132int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell));
4c48ba06
MD
133/* The following value may seem large, but note that if we get to GC at
134 * all, this means that we have a numerically intensive application
135 */
aeacfc8f
MD
136int scm_default_min_yield_2 = 40;
137#define SCM_CLUSTER_SIZE_2 1000L
4c48ba06 138
aeacfc8f 139int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
4c48ba06 140
945fec60 141#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
0f2d19dd
JB
142#ifdef _QC
143# define SCM_HEAP_SEG_SIZE 32768L
144#else
145# ifdef sequent
4c48ba06 146# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
0f2d19dd 147# else
4c48ba06 148# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
0f2d19dd
JB
149# endif
150#endif
4c48ba06 151/* Make heap grow with factor 1.5 */
4a4c9785 152#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
0f2d19dd 153#define SCM_INIT_MALLOC_LIMIT 100000
6064dcc6 154#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
0f2d19dd
JB
155
156/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
157 bounds for allocated storage */
158
159#ifdef PROT386
160/*in 386 protected mode we must only adjust the offset */
a00c95d9
ML
161# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
162# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
0f2d19dd
JB
163#else
164# ifdef _UNICOS
a00c95d9
ML
165# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
166# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
0f2d19dd 167# else
a00c95d9
ML
168# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
169# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
0f2d19dd
JB
170# endif /* UNICOS */
171#endif /* PROT386 */
a00c95d9
ML
172#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
173#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
b37fe1c5
MD
174#define SCM_HEAP_SIZE \
175 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
1811ebce 176#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
0f2d19dd
JB
177
178
179\f
945fec60 180/* scm_freelists
0f2d19dd 181 */
945fec60 182
a00c95d9
ML
183typedef struct scm_freelist_t {
184 /* collected cells */
185 SCM cells;
a00c95d9
ML
186 /* number of cells left to collect before cluster is full */
187 unsigned int left_to_collect;
b37fe1c5
MD
188 /* number of clusters which have been allocated */
189 unsigned int clusters_allocated;
8fef55a8
MD
190 /* a list of freelists, each of size cluster_size,
191 * except the last one which may be shorter
192 */
a00c95d9
ML
193 SCM clusters;
194 SCM *clustertail;
b37fe1c5 195 /* this is the number of objects in each cluster, including the spine cell */
a00c95d9 196 int cluster_size;
8fef55a8 197 /* indicates that we should grow heap instead of GC:ing
a00c95d9
ML
198 */
199 int grow_heap_p;
8fef55a8 200 /* minimum yield on this list in order not to grow the heap
a00c95d9 201 */
8fef55a8
MD
202 long min_yield;
203 /* defines min_yield as percent of total heap size
a00c95d9 204 */
8fef55a8 205 int min_yield_fraction;
a00c95d9
ML
206 /* number of cells per object on this list */
207 int span;
208 /* number of collected cells during last GC */
1811ebce
MD
209 long collected;
210 /* number of collected cells during penultimate GC */
211 long collected_1;
a00c95d9
ML
212 /* total number of cells in heap segments
213 * belonging to this list.
214 */
1811ebce 215 long heap_size;
a00c95d9
ML
216} scm_freelist_t;
217
4a4c9785
MD
218SCM scm_freelist = SCM_EOL;
219scm_freelist_t scm_master_freelist = {
b37fe1c5 220 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
4a4c9785
MD
221};
222SCM scm_freelist2 = SCM_EOL;
223scm_freelist_t scm_master_freelist2 = {
b37fe1c5 224 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
4a4c9785 225};
0f2d19dd
JB
226
227/* scm_mtrigger
228 * is the number of bytes of must_malloc allocation needed to trigger gc.
229 */
15e9d186 230unsigned long scm_mtrigger;
0f2d19dd
JB
231
232
233/* scm_gc_heap_lock
234 * If set, don't expand the heap. Set only during gc, during which no allocation
235 * is supposed to take place anyway.
236 */
237int scm_gc_heap_lock = 0;
238
239/* GC Blocking
240 * Don't pause for collection if this is set -- just
241 * expand the heap.
242 */
0f2d19dd
JB
243int scm_block_gc = 1;
244
0f2d19dd
JB
245/* During collection, this accumulates objects holding
246 * weak references.
247 */
ab4bef85 248SCM scm_weak_vectors;
0f2d19dd
JB
249
250/* GC Statistics Keeping
251 */
252unsigned long scm_cells_allocated = 0;
a5c314c8 253long scm_mallocated = 0;
b37fe1c5 254unsigned long scm_gc_cells_collected;
8b0d194f 255unsigned long scm_gc_yield;
37ddcaf6 256static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
0f2d19dd
JB
257unsigned long scm_gc_malloc_collected;
258unsigned long scm_gc_ports_collected;
259unsigned long scm_gc_rt;
260unsigned long scm_gc_time_taken = 0;
261
262SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
263SCM_SYMBOL (sym_heap_size, "cell-heap-size");
264SCM_SYMBOL (sym_mallocated, "bytes-malloced");
265SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
266SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
267SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
268
a00c95d9 269typedef struct scm_heap_seg_data_t
0f2d19dd 270{
cf2d30f6
JB
271 /* lower and upper bounds of the segment */
272 SCM_CELLPTR bounds[2];
273
274 /* address of the head-of-freelist pointer for this segment's cells.
275 All segments usually point to the same one, scm_freelist. */
4c48ba06 276 scm_freelist_t *freelist;
cf2d30f6 277
fe517a7d 278 /* number of cells per object in this segment */
945fec60 279 int span;
a00c95d9 280} scm_heap_seg_data_t;
0f2d19dd
JB
281
282
283
945fec60
MD
284static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
285static void alloc_some_heap (scm_freelist_t *);
0f2d19dd
JB
286
287
288\f
cf2d30f6
JB
289/* Debugging functions. */
290
bb2c57fa 291#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6
JB
292
293/* Return the number of the heap segment containing CELL. */
294static int
295which_seg (SCM cell)
296{
297 int i;
298
299 for (i = 0; i < scm_n_heap_segs; i++)
195e6201
DH
300 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
301 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
cf2d30f6
JB
302 return i;
303 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
945fec60 304 SCM_UNPACK (cell));
cf2d30f6
JB
305 abort ();
306}
307
308
8ded62a3
MD
309static void
310map_free_list (scm_freelist_t *master, SCM freelist)
311{
312 int last_seg = -1, count = 0;
313 SCM f;
a00c95d9 314
8ded62a3
MD
315 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
316 {
317 int this_seg = which_seg (f);
318
319 if (this_seg != last_seg)
320 {
321 if (last_seg != -1)
322 fprintf (stderr, " %5d %d-cells in segment %d\n",
323 count, master->span, last_seg);
324 last_seg = this_seg;
325 count = 0;
326 }
327 count++;
328 }
329 if (last_seg != -1)
330 fprintf (stderr, " %5d %d-cells in segment %d\n",
331 count, master->span, last_seg);
332}
cf2d30f6 333
a00c95d9 334SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c
MD
335 (),
336 "Print debugging information about the free-list.\n"
5384bc5b 337 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
acb0a19c
MD
338#define FUNC_NAME s_scm_map_free_list
339{
4c48ba06
MD
340 int i;
341 fprintf (stderr, "%d segments total (%d:%d",
342 scm_n_heap_segs,
343 scm_heap_table[0].span,
344 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
345 for (i = 1; i < scm_n_heap_segs; i++)
346 fprintf (stderr, ", %d:%d",
347 scm_heap_table[i].span,
348 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
349 fprintf (stderr, ")\n");
8ded62a3
MD
350 map_free_list (&scm_master_freelist, scm_freelist);
351 map_free_list (&scm_master_freelist2, scm_freelist2);
cf2d30f6
JB
352 fflush (stderr);
353
354 return SCM_UNSPECIFIED;
355}
1bbd0b84 356#undef FUNC_NAME
cf2d30f6 357
4c48ba06
MD
358static int last_cluster;
359static int last_size;
360
5384bc5b
MD
361static int
362free_list_length (char *title, int i, SCM freelist)
363{
364 SCM ls;
365 int n = 0;
366 for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
206d3de3 367 if (SCM_CELL_TYPE (ls) == scm_tc_free_cell)
5384bc5b
MD
368 ++n;
369 else
370 {
371 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
372 abort ();
373 }
4c48ba06
MD
374 if (n != last_size)
375 {
376 if (i > 0)
377 {
378 if (last_cluster == i - 1)
379 fprintf (stderr, "\t%d\n", last_size);
380 else
381 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
382 }
383 if (i >= 0)
384 fprintf (stderr, "%s %d", title, i);
385 else
386 fprintf (stderr, "%s\t%d\n", title, n);
387 last_cluster = i;
388 last_size = n;
389 }
5384bc5b
MD
390 return n;
391}
392
393static void
394free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
395{
396 SCM clusters;
4c48ba06 397 int i = 0, len, n = 0;
5384bc5b
MD
398 fprintf (stderr, "%s\n\n", title);
399 n += free_list_length ("free list", -1, freelist);
400 for (clusters = master->clusters;
401 SCM_NNULLP (clusters);
402 clusters = SCM_CDR (clusters))
4c48ba06
MD
403 {
404 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
405 n += len;
406 }
407 if (last_cluster == i - 1)
408 fprintf (stderr, "\t%d\n", last_size);
409 else
410 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
411 fprintf (stderr, "\ntotal %d objects\n\n", n);
5384bc5b
MD
412}
413
a00c95d9 414SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b
MD
415 (),
416 "Print debugging information about the free-list.\n"
417 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
418#define FUNC_NAME s_scm_free_list_length
419{
b37fe1c5
MD
420 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
421 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 422 return SCM_UNSPECIFIED;
5384bc5b
MD
423}
424#undef FUNC_NAME
425
bb2c57fa
MD
426#endif
427
428#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
429
430/* Number of calls to SCM_NEWCELL since startup. */
431static unsigned long scm_newcell_count;
acb0a19c 432static unsigned long scm_newcell2_count;
cf2d30f6
JB
433
434/* Search freelist for anything that isn't marked as a free cell.
435 Abort if we find something. */
8ded62a3
MD
436static void
437scm_check_freelist (SCM freelist)
438{
439 SCM f;
440 int i = 0;
441
442 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
443 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
444 {
445 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
446 scm_newcell_count, i);
447 fflush (stderr);
448 abort ();
449 }
450}
cf2d30f6
JB
451
452static int scm_debug_check_freelist = 0;
25748c78 453
a00c95d9 454SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 455 (SCM flag),
da4a1dba
GB
456 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
457 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
458 "compile-time flag was selected.\n")
1bbd0b84 459#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 460{
945fec60 461 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
462 return SCM_UNSPECIFIED;
463}
1bbd0b84 464#undef FUNC_NAME
25748c78
GB
465
466
4a4c9785
MD
467SCM
468scm_debug_newcell (void)
469{
470 SCM new;
471
472 scm_newcell_count++;
473 if (scm_debug_check_freelist)
474 {
8ded62a3 475 scm_check_freelist (scm_freelist);
4a4c9785
MD
476 scm_gc();
477 }
478
479 /* The rest of this is supposed to be identical to the SCM_NEWCELL
480 macro. */
481 if (SCM_IMP (scm_freelist))
482 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
483 else
484 {
485 new = scm_freelist;
486 scm_freelist = SCM_CDR (scm_freelist);
487 SCM_SETCAR (new, scm_tc16_allocated);
488 }
489
490 return new;
491}
492
493SCM
494scm_debug_newcell2 (void)
495{
496 SCM new;
497
498 scm_newcell2_count++;
499 if (scm_debug_check_freelist)
500 {
8ded62a3 501 scm_check_freelist (scm_freelist2);
4a4c9785
MD
502 scm_gc ();
503 }
504
505 /* The rest of this is supposed to be identical to the SCM_NEWCELL
506 macro. */
507 if (SCM_IMP (scm_freelist2))
508 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
509 else
510 {
511 new = scm_freelist2;
512 scm_freelist2 = SCM_CDR (scm_freelist2);
513 SCM_SETCAR (new, scm_tc16_allocated);
514 }
515
516 return new;
517}
518
fca7547b 519#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
520
521\f
0f2d19dd 522
b37fe1c5
MD
523static unsigned long
524master_cells_allocated (scm_freelist_t *master)
525{
526 int objects = master->clusters_allocated * (master->cluster_size - 1);
527 if (SCM_NULLP (master->clusters))
528 objects -= master->left_to_collect;
529 return master->span * objects;
530}
531
532static unsigned long
533freelist_length (SCM freelist)
534{
535 int n;
536 for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
537 ++n;
538 return n;
539}
540
541static unsigned long
542compute_cells_allocated ()
543{
544 return (scm_cells_allocated
545 + master_cells_allocated (&scm_master_freelist)
546 + master_cells_allocated (&scm_master_freelist2)
547 - scm_master_freelist.span * freelist_length (scm_freelist)
548 - scm_master_freelist2.span * freelist_length (scm_freelist2));
549}
b37fe1c5 550
0f2d19dd
JB
551/* {Scheme Interface to GC}
552 */
553
a00c95d9 554SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 555 (),
b380b885 556 "Returns an association list of statistics about Guile's current use of storage. ")
1bbd0b84 557#define FUNC_NAME s_scm_gc_stats
0f2d19dd
JB
558{
559 int i;
560 int n;
561 SCM heap_segs;
c209c88e
GB
562 long int local_scm_mtrigger;
563 long int local_scm_mallocated;
564 long int local_scm_heap_size;
565 long int local_scm_cells_allocated;
566 long int local_scm_gc_time_taken;
0f2d19dd
JB
567 SCM answer;
568
569 SCM_DEFER_INTS;
939794ce
DH
570
571 ++scm_block_gc;
572
0f2d19dd
JB
573 retry:
574 heap_segs = SCM_EOL;
575 n = scm_n_heap_segs;
576 for (i = scm_n_heap_segs; i--; )
577 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
578 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
579 heap_segs);
580 if (scm_n_heap_segs != n)
581 goto retry;
939794ce
DH
582
583 --scm_block_gc;
0f2d19dd 584
7febb4a2
MD
585 /* Below, we cons to produce the resulting list. We want a snapshot of
586 * the heap situation before consing.
587 */
0f2d19dd
JB
588 local_scm_mtrigger = scm_mtrigger;
589 local_scm_mallocated = scm_mallocated;
b37fe1c5 590 local_scm_heap_size = SCM_HEAP_SIZE;
b37fe1c5 591 local_scm_cells_allocated = compute_cells_allocated ();
0f2d19dd
JB
592 local_scm_gc_time_taken = scm_gc_time_taken;
593
594 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
595 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
596 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
597 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
598 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
599 scm_cons (sym_heap_segments, heap_segs),
600 SCM_UNDEFINED);
601 SCM_ALLOW_INTS;
602 return answer;
603}
1bbd0b84 604#undef FUNC_NAME
0f2d19dd
JB
605
606
a00c95d9 607void
6e8d25a6 608scm_gc_start (const char *what)
0f2d19dd
JB
609{
610 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
b37fe1c5 611 scm_gc_cells_collected = 0;
37ddcaf6 612 scm_gc_yield_1 = scm_gc_yield;
8b0d194f
MD
613 scm_gc_yield = (scm_cells_allocated
614 + master_cells_allocated (&scm_master_freelist)
615 + master_cells_allocated (&scm_master_freelist2));
0f2d19dd
JB
616 scm_gc_malloc_collected = 0;
617 scm_gc_ports_collected = 0;
618}
619
939794ce 620
a00c95d9 621void
0f2d19dd 622scm_gc_end ()
0f2d19dd
JB
623{
624 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
c209c88e 625 scm_gc_time_taken += scm_gc_rt;
0f2d19dd
JB
626}
627
628
a00c95d9 629SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 630 (SCM obj),
b380b885
MD
631 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
632 "returned by this function for @var{obj}")
1bbd0b84 633#define FUNC_NAME s_scm_object_address
0f2d19dd 634{
54778cd3 635 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 636}
1bbd0b84 637#undef FUNC_NAME
0f2d19dd
JB
638
639
a00c95d9 640SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
1bbd0b84 641 (),
b380b885
MD
642 "Scans all of SCM objects and reclaims for further use those that are\n"
643 "no longer accessible.")
1bbd0b84 644#define FUNC_NAME s_scm_gc
0f2d19dd
JB
645{
646 SCM_DEFER_INTS;
647 scm_igc ("call");
648 SCM_ALLOW_INTS;
649 return SCM_UNSPECIFIED;
650}
1bbd0b84 651#undef FUNC_NAME
0f2d19dd
JB
652
653
654\f
655/* {C Interface For When GC is Triggered}
656 */
657
b37fe1c5 658static void
8fef55a8 659adjust_min_yield (scm_freelist_t *freelist)
b37fe1c5 660{
8fef55a8 661 /* min yield is adjusted upwards so that next predicted total yield
bda1446c 662 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
663 * `min_yield_fraction' of total heap size. Note, however, that
664 * the absolute value of min_yield will correspond to `collected'
bda1446c 665 * on one master (the one which currently is triggering GC).
b37fe1c5 666 *
bda1446c
MD
667 * The reason why we look at total yield instead of cells collected
668 * on one list is that we want to take other freelists into account.
669 * On this freelist, we know that (local) yield = collected cells,
670 * but that's probably not the case on the other lists.
b37fe1c5
MD
671 *
672 * (We might consider computing a better prediction, for example
673 * by computing an average over multiple GC:s.)
674 */
8fef55a8 675 if (freelist->min_yield_fraction)
b37fe1c5 676 {
37ddcaf6 677 /* Pick largest of last two yields. */
8fef55a8
MD
678 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
679 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5
MD
680#ifdef DEBUGINFO
681 fprintf (stderr, " after GC = %d, delta = %d\n",
682 scm_cells_allocated,
683 delta);
684#endif
685 if (delta > 0)
8fef55a8 686 freelist->min_yield += delta;
b37fe1c5
MD
687 }
688}
689
4a4c9785 690/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
691 * common while the freelist will be individual for each thread.
692 */
4a4c9785
MD
693
694SCM
695scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
696{
697 SCM cell;
698 ++scm_ints_disabled;
4c48ba06
MD
699 do
700 {
c7387918 701 if (SCM_NULLP (master->clusters))
4c48ba06 702 {
150c200b 703 if (master->grow_heap_p || scm_block_gc)
4c48ba06
MD
704 {
705 master->grow_heap_p = 0;
706 alloc_some_heap (master);
707 }
708 else
b37fe1c5 709 {
37ddcaf6
MD
710#ifdef DEBUGINFO
711 fprintf (stderr, "allocated = %d, ",
712 scm_cells_allocated
713 + master_cells_allocated (&scm_master_freelist)
714 + master_cells_allocated (&scm_master_freelist2));
715#endif
b37fe1c5 716 scm_igc ("cells");
8fef55a8 717 adjust_min_yield (master);
c7387918
DH
718 if (SCM_NULLP (master->clusters))
719 {
720 /* gc could not free any cells */
721 master->grow_heap_p = 0;
722 alloc_some_heap (master);
723 }
b37fe1c5 724 }
4c48ba06
MD
725 }
726 cell = SCM_CAR (master->clusters);
727 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 728 ++master->clusters_allocated;
4c48ba06
MD
729 }
730 while (SCM_NULLP (cell));
4a4c9785 731 --scm_ints_disabled;
4a4c9785 732 *freelist = SCM_CDR (cell);
54778cd3 733 SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
4a4c9785
MD
734 return cell;
735}
736
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 *
 * NOTE(review): the return type was `void' although the body executes
 * `return cell;' -- a constraint violation that would reject this code
 * the moment the #if 0 is removed.  Corrected to SCM.
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
752
801cb5e7
MD
753
754scm_c_hook_t scm_before_gc_c_hook;
755scm_c_hook_t scm_before_mark_c_hook;
756scm_c_hook_t scm_before_sweep_c_hook;
757scm_c_hook_t scm_after_sweep_c_hook;
758scm_c_hook_t scm_after_gc_c_hook;
759
0f2d19dd 760void
1bbd0b84 761scm_igc (const char *what)
0f2d19dd
JB
762{
763 int j;
764
801cb5e7 765 scm_c_hook_run (&scm_before_gc_c_hook, 0);
4c48ba06
MD
766#ifdef DEBUGINFO
767 fprintf (stderr,
768 SCM_NULLP (scm_freelist)
769 ? "*"
770 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
771#endif
42db06f0
MD
772#ifdef USE_THREADS
773 /* During the critical section, only the current thread may run. */
774 SCM_THREAD_CRITICAL_SECTION_START;
775#endif
776
e242dfd2 777 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 778
ab4bef85
JB
779 scm_gc_start (what);
780
781 if (!scm_stack_base || scm_block_gc)
782 {
783 scm_gc_end ();
784 return;
785 }
786
a5c314c8
JB
787 if (scm_mallocated < 0)
788 /* The byte count of allocated objects has underflowed. This is
789 probably because you forgot to report the sizes of objects you
790 have allocated, by calling scm_done_malloc or some such. When
791 the GC freed them, it subtracted their size from
792 scm_mallocated, which underflowed. */
793 abort ();
c45acc34 794
ab4bef85
JB
795 if (scm_gc_heap_lock)
796 /* We've invoked the collector while a GC is already in progress.
797 That should never happen. */
798 abort ();
0f2d19dd
JB
799
800 ++scm_gc_heap_lock;
ab4bef85 801
0f2d19dd
JB
802 /* flush dead entries from the continuation stack */
803 {
804 int x;
805 int bound;
806 SCM * elts;
807 elts = SCM_VELTS (scm_continuation_stack);
808 bound = SCM_LENGTH (scm_continuation_stack);
809 x = SCM_INUM (scm_continuation_stack_ptr);
810 while (x < bound)
811 {
812 elts[x] = SCM_BOOL_F;
813 ++x;
814 }
815 }
816
801cb5e7
MD
817 scm_c_hook_run (&scm_before_mark_c_hook, 0);
818
42db06f0 819#ifndef USE_THREADS
a00c95d9 820
0f2d19dd
JB
821 /* Protect from the C stack. This must be the first marking
822 * done because it provides information about what objects
823 * are "in-use" by the C code. "in-use" objects are those
824 * for which the values from SCM_LENGTH and SCM_CHARS must remain
825 * usable. This requirement is stricter than a liveness
826 * requirement -- in particular, it constrains the implementation
827 * of scm_vector_set_length_x.
828 */
829 SCM_FLUSH_REGISTER_WINDOWS;
830 /* This assumes that all registers are saved into the jmp_buf */
831 setjmp (scm_save_regs_gc_mark);
832 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
ce4a361d
JB
833 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
834 sizeof scm_save_regs_gc_mark)
835 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
836
837 {
6ba93e5e 838 scm_sizet stack_len = scm_stack_size (scm_stack_base);
0f2d19dd 839#ifdef SCM_STACK_GROWS_UP
6ba93e5e 840 scm_mark_locations (scm_stack_base, stack_len);
0f2d19dd 841#else
6ba93e5e 842 scm_mark_locations (scm_stack_base - stack_len, stack_len);
0f2d19dd
JB
843#endif
844 }
845
42db06f0
MD
846#else /* USE_THREADS */
847
848 /* Mark every thread's stack and registers */
945fec60 849 scm_threads_mark_stacks ();
42db06f0
MD
850
851#endif /* USE_THREADS */
0f2d19dd
JB
852
853 /* FIXME: insert a phase to un-protect string-data preserved
854 * in scm_vector_set_length_x.
855 */
856
857 j = SCM_NUM_PROTECTS;
858 while (j--)
859 scm_gc_mark (scm_sys_protects[j]);
860
9de33deb
MD
861 /* FIXME: we should have a means to register C functions to be run
862 * in different phases of GC
a00c95d9 863 */
9de33deb 864 scm_mark_subr_table ();
a00c95d9 865
42db06f0
MD
866#ifndef USE_THREADS
867 scm_gc_mark (scm_root->handle);
868#endif
a00c95d9 869
801cb5e7 870 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
0493cd89 871
0f2d19dd
JB
872 scm_gc_sweep ();
873
801cb5e7
MD
874 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
875
0f2d19dd
JB
876 --scm_gc_heap_lock;
877 scm_gc_end ();
42db06f0
MD
878
879#ifdef USE_THREADS
880 SCM_THREAD_CRITICAL_SECTION_END;
881#endif
801cb5e7 882 scm_c_hook_run (&scm_after_gc_c_hook, 0);
0f2d19dd
JB
883}
884
885\f
939794ce 886
a00c95d9 887/* {Mark/Sweep}
0f2d19dd
JB
888 */
889
890
891
892/* Mark an object precisely.
893 */
/* Mark the object P and everything reachable from it.
 *
 * This is the precise-marking half of the collector: it dispatches on
 * the 7-bit type tag (SCM_TYP7) of each cell and marks the children of
 * every container type.  To limit C stack depth, the last child of a
 * node is followed iteratively via `goto gc_mark_loop` /
 * `gc_mark_nimp`, while earlier children are marked by recursion.
 *
 * Weak vectors are chained onto scm_weak_vectors here; their contents
 * are only partially marked (see the long comment in the wvect case),
 * with the spines processed after the main mark phase.
 *
 * Raises a misc error if PTR points outside the heap ("rogue pointer")
 * or carries an unknown type tag.
 */
void
scm_gc_mark (SCM p)
#define FUNC_NAME "scm_gc_mark"
{
  register long i;
  register SCM ptr;

  ptr = p;

gc_mark_loop:
  /* Immediates carry no heap storage and need no marking.  */
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:
  /* A non-immediate that is not a cell pointer indicates heap
     corruption.  */
  if (SCM_NCELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
        {
          /* cdr is immediate: only the car needs following.  */
          ptr = SCM_CAR (ptr);
          goto gc_mark_nimp;
        }
      /* Recurse on car, iterate on cdr (the usual list spine case).  */
      scm_gc_mark (SCM_CAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      /* Procedure-with-setter: mark the setter (cell word 2), then
         iterate on the cdr.  */
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      {
        /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
         * or a gloc.  If it is a gloc, the cell word #0 of ptr is a pointer
         * to a heap cell.  If it is a struct, the cell word #0 of ptr is a
         * pointer to a struct vtable data region.  The fact that these are
         * accessed in the same way restricts the possibilites to change the
         * data layout of structs or heap cells.
         */
        scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
        switch (vtable_data [scm_vtable_index_vcell])
          {
          default:
            {
              /* ptr is a gloc */
              SCM gloc_car = SCM_PACK (word0);
              scm_gc_mark (gloc_car);
              ptr = SCM_GCCDR (ptr);
              goto gc_mark_loop;
            }
          case 1: /* ! */
          case 0: /* ! */
            {
              /* ptr is a struct */
              SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
              int len = SCM_LENGTH (layout);
              char * fields_desc = SCM_CHARS (layout);
              /* We're using SCM_GCCDR here like STRUCT_DATA, except
                 that it removes the mark */
              scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));

              if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
                {
                  /* Entity structs carry a procedure and a setter slot
                     that must be traced explicitly.  */
                  scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
                  scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
                }
              if (len)
                {
                  int x;

                  /* Walk the layout descriptor two chars per field;
                     'p' marks a protected (traceable) slot.  */
                  for (x = 0; x < len - 2; x += 2, ++struct_data)
                    if (fields_desc[x] == 'p')
                      scm_gc_mark (SCM_PACK (*struct_data));
                  if (fields_desc[x] == 'p')
                    {
                      if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                        /* Tail array: first word holds the element
                           count, followed by that many slots.  */
                        for (x = *struct_data; x; --x)
                          scm_gc_mark (SCM_PACK (*++struct_data));
                      else
                        scm_gc_mark (SCM_PACK (*struct_data));
                    }
                }
              if (vtable_data [scm_vtable_index_vcell] == 0)
                {
                  /* Flag the vtable as marked (sweep resets this) and
                     iterate on it.  */
                  vtable_data [scm_vtable_index_vcell] = 1;
                  ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
                  goto gc_mark_loop;
                }
            }
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CLOSCAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
    case scm_tc7_lvector:
#ifdef CCLO
    case scm_tc7_cclo:
#endif
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      i = SCM_LENGTH (ptr);
      if (i == 0)
        break;
      /* Recurse on elements 1..len-1, iterate on element 0.  */
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          scm_gc_mark (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
    case scm_tc7_contin:
      if SCM_GC8MARKP
        (ptr) break;
      SCM_SETGC8MARK (ptr);
      /* Conservatively scan the saved stack copy of the
         continuation.  */
      if (SCM_VELTS (ptr))
        scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
                            (scm_sizet)
                            (SCM_LENGTH (ptr) +
                             (sizeof (SCM_STACKITEM) + -1 +
                              sizeof (scm_contregs)) /
                             sizeof (SCM_STACKITEM)));
      break;
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      /* Uniform vectors and strings hold no SCM children; just set the
         mark bit.  */
      SCM_SETGC8MARK (ptr);
      break;

    case scm_tc7_substring:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      /* Keep the parent string alive.  */
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      if (SCM_GC8MARKP(ptr))
        break;
      /* Chain this weak vector for post-mark processing.  */
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      SCM_SETGC8MARK (ptr);
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    scm_gc_mark (SCM_CAR (kvpair));
                  if (!weak_values)
                    scm_gc_mark (SCM_GCCDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                scm_gc_mark (alist);
            }
        }
      break;

    case scm_tc7_msymbol:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      /* Mark the function slot; iterate on the property list.  */
      scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
      ptr = SCM_SYMBOL_PROPS (ptr);
      goto gc_mark_loop;
    case scm_tc7_ssymbol:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      break;
    case scm_tcs_subrs:
      /* Subrs are statically allocated; nothing to mark.  */
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
        goto def;
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      if (SCM_PTAB_ENTRY(ptr))
        scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
      /* Delegate to the port type's own mark function, if any.  */
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      switch (SCM_GCTYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_allocated:
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          /* These built-in smobs contain no SCM children.  */
          break;
        default:
          i = SCM_SMOBNUM (ptr);
          if (!(i < scm_numsmob))
            goto def;
          /* Delegate to the smob type's own mark function, if any.  */
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
    def:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
}
#undef FUNC_NAME
0f2d19dd
JB
1193
1194
1195/* Mark a Region Conservatively
1196 */
1197
/* Conservatively mark a region of N stack items starting at X.
 *
 * Every word in the region that looks like a cell pointer (SCM_CELLP)
 * and that falls inside some heap segment is treated as a root and
 * marked with scm_gc_mark.  Segment lookup is a hand-rolled binary
 * search over scm_heap_table, which is kept sorted by address.
 *
 * For segments with span > 1 (double cells), a candidate is only
 * accepted if it is a properly aligned double cell (SCM_DOUBLE_CELLP),
 * so interior pointers do not cause spurious marking.
 */
void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  register long m = n;
  register int i, j;
  register SCM_CELLPTR ptr;

  while (0 <= --m)
    if (SCM_CELLP (* (SCM *) &x[m]))
      {
        ptr = SCM2PTR (* (SCM *) &x[m]);
        i = 0;
        j = scm_n_heap_segs - 1;
        /* Quick reject: outside the overall heap address range.  */
        if (   SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
            && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
          {
            /* Binary search for the segment containing ptr.  All paths
               either set seg_id before falling through to the mark
               test, or break/continue without reaching it.  */
            while (i <= j)
              {
                int seg_id;
                seg_id = -1;
                if (   (i == j)
                    || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                  seg_id = i;
                else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                  seg_id = j;
                else
                  {
                    int k;
                    k = (i + j) / 2;
                    if (k == i)
                      break;
                    if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                      {
                        j = k;
                        ++i;
                        if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                          continue;
                        else
                          break;
                      }
                    else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                      {
                        i = k;
                        --j;
                        if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                          continue;
                        else
                          break;
                      }
                  }
                /* Found the segment: mark unless it is a multi-cell
                   segment and the pointer is misaligned.  */
                if (scm_heap_table[seg_id].span == 1
                    || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
                  scm_gc_mark (* (SCM *) &x[m]);
                break;
              }

          }
      }
}
1257
1258
1a548472
DH
1259/* The function scm_cellp determines whether an SCM value can be regarded as a
1260 * pointer to a cell on the heap. Binary search is used in order to determine
1261 * the heap segment that contains the cell.
1262 */
2e11a577 1263int
6e8d25a6 1264scm_cellp (SCM value)
2e11a577 1265{
1a548472
DH
1266 if (SCM_CELLP (value)) {
1267 scm_cell * ptr = SCM2PTR (value);
1268 unsigned int i = 0;
1269 unsigned int j = scm_n_heap_segs - 1;
1270
1271 while (i < j) {
1272 int k = (i + j) / 2;
1273 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1274 j = k;
1275 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1276 i = k + 1;
1277 }
1278 }
2e11a577 1279
1a548472
DH
1280 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1281 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1a548472
DH
1282 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
1283 return 1;
1284 } else {
1285 return 0;
2e11a577 1286 }
1a548472
DH
1287 } else {
1288 return 0;
1289 }
2e11a577
MD
1290}
1291
1292
4c48ba06
MD
1293static void
1294gc_sweep_freelist_start (scm_freelist_t *freelist)
1295{
1296 freelist->cells = SCM_EOL;
1297 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1298 freelist->clusters_allocated = 0;
4c48ba06
MD
1299 freelist->clusters = SCM_EOL;
1300 freelist->clustertail = &freelist->clusters;
1811ebce 1301 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1302 freelist->collected = 0;
1303}
1304
1305static void
1306gc_sweep_freelist_finish (scm_freelist_t *freelist)
1307{
1811ebce 1308 int collected;
4c48ba06
MD
1309 *freelist->clustertail = freelist->cells;
1310 if (SCM_NNULLP (freelist->cells))
1311 {
1312 SCM c = freelist->cells;
1313 SCM_SETCAR (c, SCM_CDR (c));
1314 SCM_SETCDR (c, SCM_EOL);
1315 freelist->collected +=
1316 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1317 }
b37fe1c5 1318 scm_gc_cells_collected += freelist->collected;
a00c95d9 1319
8fef55a8 1320 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1321 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1322 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1323 * size. This means that a too low yield is compensated by more
1324 * heap on the list which is currently doing most work, which is
1325 * just what we want.
1326 */
1811ebce 1327 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1328 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06 1329}
0f2d19dd 1330
/* Sweep the entire heap: walk every segment cell by cell, free the
 * malloc'ed storage of unmarked objects, thread unmarked cells into
 * per-freelist clusters, and clear the mark bits of surviving objects.
 *
 * The local accumulator `m` totals the bytes of malloc'ed storage
 * reclaimed; it is subtracted from scm_mallocated at the end.
 * Control flow within the big switch uses three labels:
 *   freechars     - free the object's SCM_CHARS block, shared by many
 *                   cases after adding the right size to `m`;
 *   c8mrkcontinue - survivor with an 8-bit mark: clear it, next cell;
 *   cmrkcontinue  - survivor with a 1-bit (cons) mark: clear it.
 */
void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist. */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr = PTR2SCM (ptr);

          switch SCM_TYP7 (scmptr)
            {
            case scm_tcs_cons_gloc:
              {
                /* Dirk:FIXME:: Again, super ugly code:  scmptr may be a
                 * struct or a gloc.  See the corresponding comment in
                 * scm_gc_mark.
                 */
                scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
                scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
                if (SCM_GCMARKP (scmptr))
                  {
                    /* Survivor: reset the vtable's "marked" flag that
                       scm_gc_mark set to 1.  */
                    if (vtable_data [scm_vtable_index_vcell] == 1)
                      vtable_data [scm_vtable_index_vcell] = 0;
                    goto cmrkcontinue;
                  }
                else
                  {
                    /* Dead struct (vcell 0 or 1): call its free
                       function and count the reclaimed bytes.  */
                    if (vtable_data [scm_vtable_index_vcell] == 0
                        || vtable_data [scm_vtable_index_vcell] == 1)
                      {
                        scm_struct_free_t free
                          = (scm_struct_free_t) vtable_data[scm_struct_i_free];
                        m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
                      }
                  }
              }
              break;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              /* Pure cell types: no auxiliary storage to free.  */
              if (SCM_GCMARKP (scmptr))
                goto cmrkcontinue;
              break;
            case scm_tc7_wvect:
              if (SCM_GC8MARKP (scmptr))
                {
                  goto c8mrkcontinue;
                }
              else
                {
                  /* Weak vector data sits two words past its malloc
                     base (hence VELTS - 2).  */
                  m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
                  scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
                  break;
                }

            case scm_tc7_vector:
            case scm_tc7_lvector:
#ifdef CCLO
            case scm_tc7_cclo:
#endif
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;

              m += (SCM_LENGTH (scmptr) * sizeof (SCM));
            freechars:
              /* Shared exit: release the object's character/element
                 block.  Callers have already added its size to m.  */
              scm_must_free (SCM_CHARS (scmptr));
              /*	SCM_SETCHARS(scmptr, 0);*/
              break;
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
              goto freechars;
            case scm_tc7_byvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
              goto freechars;
            case scm_tc7_ivect:
            case scm_tc7_uvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
              goto freechars;
            case scm_tc7_svect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
              goto freechars;
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
              goto freechars;
#endif
            case scm_tc7_fvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
              goto freechars;
            case scm_tc7_dvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
              goto freechars;
            case scm_tc7_cvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
              goto freechars;
#endif
            case scm_tc7_substring:
              /* Substrings share their parent's storage: nothing to
                 free.  */
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;
              break;
            case scm_tc7_string:
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) + 1;
              goto freechars;
            case scm_tc7_msymbol:
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;
              m += (SCM_LENGTH (scmptr) + 1
                    + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
              scm_must_free ((char *)SCM_SLOTS (scmptr));
              break;
            case scm_tc7_contin:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
              if (SCM_VELTS (scmptr))
                goto freechars;
              /* fallthrough: no stack copy - treated like ssymbol,
                 whose mark test cannot fire again here.  */
            case scm_tc7_ssymbol:
              if SCM_GC8MARKP(scmptr)
                goto c8mrkcontinue;
              break;
            case scm_tcs_subrs:
              /* Subrs are static; never swept, never freed.  */
              continue;
            case scm_tc7_port:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              if SCM_OPENP (scmptr)
                {
                  int k = SCM_PTOBNUM (scmptr);
                  if (!(k < scm_numptob))
                    goto sweeperr;
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free */
                  /* rather than ftobs[k].close.  .close */
                  /* is for explicit CLOSE-PORT by user */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_SETAND_CAR (scmptr, ~SCM_OPN);
                }
              break;
            case scm_tc7_smob:
              switch SCM_GCTYP16 (scmptr)
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  goto freechars;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  m += 2 * sizeof (double);
                  goto freechars;
                default:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;

                  {
                    /* User-defined smob: its registered free function
                       reports how many bytes it released.  */
                    int k;
                    k = SCM_SMOBNUM (scmptr);
                    if (!(k < scm_numsmob))
                      goto sweeperr;
                    m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
            default:
            sweeperr:
              SCM_MISC_ERROR ("unknown type", SCM_EOL);
            }
#if 0
          if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
            exit (2);
#endif
          if (!--left_to_collect)
            {
              /* Cluster full: close it off and start a new one.  */
              SCM_SETCAR (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object.  */
              SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
              SCM_SETCDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }

          continue;
        c8mrkcontinue:
          SCM_CLRGC8MARK (scmptr);
          continue;
        cmrkcontinue:
          SCM_CLRGCMARK (scmptr);
        }
#ifdef GC_FREE_SEGMENTS
      /* NOTE(review): `n` is not declared anywhere in this function,
       * so this conditionally compiled branch cannot build as written
       * (presumably a count of free cells was intended) -- confirm
       * before ever enabling GC_FREE_SEGMENTS.  */
      if (n == seg_size)
        {
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--; /* We need to scan the segment just moved.  */
        }
      else
#endif /* ifdef GC_FREE_SEGMENTS */
        {
          /* Update the real freelist pointer to point to the head of
             the list of free cells we've built for this segment. */
          freelist->cells = nfreelist;
          freelist->left_to_collect = left_to_collect;
        }

#ifdef GUILE_DEBUG_FREELIST
      scm_check_freelist (freelist == &scm_master_freelist
                          ? scm_freelist
                          : scm_freelist2);
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME
0f2d19dd
JB
1638
1639
1640\f
1641
1642/* {Front end to malloc}
1643 *
c68296f8 1644 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
0f2d19dd
JB
1645 *
1646 * These functions provide services comperable to malloc, realloc, and
1647 * free. They are for allocating malloced parts of scheme objects.
1648 * The primary purpose of the front end is to impose calls to gc.
1649 */
1650
bc9d9bb2 1651
0f2d19dd
JB
1652/* scm_must_malloc
1653 * Return newly malloced storage or throw an error.
1654 *
1655 * The parameter WHAT is a string for error reporting.
a00c95d9 1656 * If the threshold scm_mtrigger will be passed by this
0f2d19dd
JB
1657 * allocation, or if the first call to malloc fails,
1658 * garbage collect -- on the presumption that some objects
1659 * using malloced storage may be collected.
1660 *
1661 * The limit scm_mtrigger may be raised by this allocation.
1662 */
07806695 1663void *
e4ef2330 1664scm_must_malloc (scm_sizet size, const char *what)
0f2d19dd 1665{
07806695 1666 void *ptr;
15e9d186 1667 unsigned long nm = scm_mallocated + size;
e4ef2330
MD
1668
1669 if (nm <= scm_mtrigger)
0f2d19dd 1670 {
07806695 1671 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1672 if (NULL != ptr)
1673 {
1674 scm_mallocated = nm;
bc9d9bb2
MD
1675#ifdef GUILE_DEBUG_MALLOC
1676 scm_malloc_register (ptr, what);
1677#endif
0f2d19dd
JB
1678 return ptr;
1679 }
1680 }
6064dcc6 1681
0f2d19dd 1682 scm_igc (what);
e4ef2330 1683
0f2d19dd 1684 nm = scm_mallocated + size;
07806695 1685 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1686 if (NULL != ptr)
1687 {
1688 scm_mallocated = nm;
6064dcc6
MV
1689 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1690 if (nm > scm_mtrigger)
1691 scm_mtrigger = nm + nm / 2;
1692 else
1693 scm_mtrigger += scm_mtrigger / 2;
1694 }
bc9d9bb2
MD
1695#ifdef GUILE_DEBUG_MALLOC
1696 scm_malloc_register (ptr, what);
1697#endif
1698
0f2d19dd
JB
1699 return ptr;
1700 }
e4ef2330 1701
acf4331f 1702 scm_memory_error (what);
0f2d19dd
JB
1703}
1704
1705
1706/* scm_must_realloc
1707 * is similar to scm_must_malloc.
1708 */
07806695
JB
1709void *
1710scm_must_realloc (void *where,
e4ef2330
MD
1711 scm_sizet old_size,
1712 scm_sizet size,
3eeba8d4 1713 const char *what)
0f2d19dd 1714{
07806695 1715 void *ptr;
e4ef2330
MD
1716 scm_sizet nm = scm_mallocated + size - old_size;
1717
1718 if (nm <= scm_mtrigger)
0f2d19dd 1719 {
07806695 1720 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1721 if (NULL != ptr)
1722 {
1723 scm_mallocated = nm;
bc9d9bb2
MD
1724#ifdef GUILE_DEBUG_MALLOC
1725 scm_malloc_reregister (where, ptr, what);
1726#endif
0f2d19dd
JB
1727 return ptr;
1728 }
1729 }
e4ef2330 1730
0f2d19dd 1731 scm_igc (what);
e4ef2330
MD
1732
1733 nm = scm_mallocated + size - old_size;
07806695 1734 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1735 if (NULL != ptr)
1736 {
1737 scm_mallocated = nm;
6064dcc6
MV
1738 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1739 if (nm > scm_mtrigger)
1740 scm_mtrigger = nm + nm / 2;
1741 else
1742 scm_mtrigger += scm_mtrigger / 2;
1743 }
bc9d9bb2
MD
1744#ifdef GUILE_DEBUG_MALLOC
1745 scm_malloc_reregister (where, ptr, what);
1746#endif
0f2d19dd
JB
1747 return ptr;
1748 }
e4ef2330 1749
acf4331f 1750 scm_memory_error (what);
0f2d19dd
JB
1751}
1752
acf4331f 1753
a00c95d9 1754void
07806695 1755scm_must_free (void *obj)
acf4331f 1756#define FUNC_NAME "scm_must_free"
0f2d19dd 1757{
bc9d9bb2
MD
1758#ifdef GUILE_DEBUG_MALLOC
1759 scm_malloc_unregister (obj);
1760#endif
0f2d19dd
JB
1761 if (obj)
1762 free (obj);
1763 else
acf4331f 1764 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
0f2d19dd 1765}
acf4331f
DH
1766#undef FUNC_NAME
1767
0f2d19dd 1768
c68296f8
MV
1769/* Announce that there has been some malloc done that will be freed
1770 * during gc. A typical use is for a smob that uses some malloced
1771 * memory but can not get it from scm_must_malloc (for whatever
1772 * reason). When a new object of this smob is created you call
1773 * scm_done_malloc with the size of the object. When your smob free
1774 * function is called, be sure to include this size in the return
1775 * value. */
0f2d19dd 1776
c68296f8 1777void
6e8d25a6 1778scm_done_malloc (long size)
c68296f8
MV
1779{
1780 scm_mallocated += size;
1781
1782 if (scm_mallocated > scm_mtrigger)
1783 {
1784 scm_igc ("foreign mallocs");
1785 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1786 {
1787 if (scm_mallocated > scm_mtrigger)
1788 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1789 else
1790 scm_mtrigger += scm_mtrigger / 2;
1791 }
1792 }
1793}
1794
1795
1796\f
0f2d19dd
JB
1797
1798/* {Heap Segments}
1799 *
1800 * Each heap segment is an array of objects of a particular size.
1801 * Every segment has an associated (possibly shared) freelist.
1802 * A table of segment records is kept that records the upper and
1803 * lower extents of the segment; this is used during the conservative
1804 * phase of gc to identify probably gc roots (because they point
c68296f8 1805 * into valid segments at reasonable offsets). */
0f2d19dd
JB
1806
1807/* scm_expmem
1808 * is true if the first segment was smaller than INIT_HEAP_SEG.
1809 * If scm_expmem is set to one, subsequent segment allocations will
1810 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1811 */
1812int scm_expmem = 0;
1813
4c48ba06
MD
1814scm_sizet scm_max_segment_size;
1815
0f2d19dd
JB
1816/* scm_heap_org
1817 * is the lowest base address of any heap segment.
1818 */
1819SCM_CELLPTR scm_heap_org;
1820
a00c95d9 1821scm_heap_seg_data_t * scm_heap_table = 0;
0f2d19dd
JB
1822int scm_n_heap_segs = 0;
1823
0f2d19dd
JB
1824/* init_heap_seg
1825 * initializes a new heap segment and return the number of objects it contains.
1826 *
1827 * The segment origin, segment size in bytes, and the span of objects
1828 * in cells are input parameters. The freelist is both input and output.
1829 *
1830 * This function presume that the scm_heap_table has already been expanded
1831 * to accomodate a new segment record.
1832 */
1833
1834
/* init_heap_seg
 * Initialize a new heap segment and return the number of bytes it
 * occupies (0 if SEG_ORG is NULL).
 *
 * SEG_ORG is the segment origin, SIZE its length in bytes; the cell
 * span and cluster size come from FREELIST, which is both input and
 * output: the segment's cells are partitioned into clusters that are
 * prepended to freelist->clusters.
 *
 * This function presumes that scm_heap_table has already been
 * expanded to accommodate a new segment record (see alloc_some_heap).
 */
static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Round the origin up to the first span-aligned cell.  */
  ptr = CELL_UP (seg_org, span);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = CELL_DN ((char *) seg_org + size, span);

  /* Find the right place and insert the segment record.
   * (The table is kept sorted by segment base address.)
   */
  for (new_seg_index = 0;
       ( (new_seg_index < scm_n_heap_segs)
	 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    /* Shift later records up by one to open a slot.  */
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;


  /* Compute the least valid object pointer w/in this segment
   */
  ptr = CELL_UP (ptr, span);


  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;
    int n_cluster_cells = span * freelist->cluster_size;

    while (n_new_cells > span) /* at least one spine + one freecell */
      {
	/* Determine end of cluster
	 */
	if (n_new_cells >= n_cluster_cells)
	  {
	    seg_end = ptr + n_cluster_cells;
	    n_new_cells -= n_cluster_cells;
	  }
	else
	  /* [cmm] looks like the segment size doesn't divide cleanly by
	     cluster size.  bad cmm! */
	  abort();

	/* Allocate cluster spine
	 */
	*clusterp = PTR2SCM (ptr);
	SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
	clusterp = SCM_CDRLOC (*clusterp);
	ptr += span;

	/* Thread the cluster body into a list of free cells, each
	   tagged scm_tc_free_cell so the conservative collector never
	   mistakes it for live data.  */
	while (ptr < seg_end)
	  {
	    SCM scmptr = PTR2SCM (ptr);

	    SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	    SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
	    ptr += span;
	  }

	SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
      }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
1937
a00c95d9
ML
1938static scm_sizet
1939round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
1940{
1941 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
1942
1943 return
1944 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
1945 + ALIGNMENT_SLACK (freelist);
1946}
1947
a00c95d9 1948static void
4c48ba06 1949alloc_some_heap (scm_freelist_t *freelist)
acf4331f 1950#define FUNC_NAME "alloc_some_heap"
0f2d19dd 1951{
a00c95d9 1952 scm_heap_seg_data_t * tmptable;
0f2d19dd 1953 SCM_CELLPTR ptr;
b37fe1c5 1954 long len;
a00c95d9 1955
0f2d19dd
JB
1956 /* Critical code sections (such as the garbage collector)
1957 * aren't supposed to add heap segments.
1958 */
1959 if (scm_gc_heap_lock)
acf4331f 1960 SCM_MISC_ERROR ("can not grow heap while locked", SCM_EOL);
0f2d19dd
JB
1961
1962 /* Expand the heap tables to have room for the new segment.
1963 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1964 * only if the allocation of the segment itself succeeds.
1965 */
a00c95d9 1966 len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
0f2d19dd 1967
a00c95d9 1968 SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
0f2d19dd
JB
1969 realloc ((char *)scm_heap_table, len)));
1970 if (!tmptable)
c7387918
DH
1971 /* Dirk:FIXME:: scm_memory_error needs an additional message parameter.
1972 * Here: "could not grow heap segment table".
1973 */
1974 scm_memory_error (FUNC_NAME);
0f2d19dd
JB
1975 else
1976 scm_heap_table = tmptable;
1977
1978
1979 /* Pick a size for the new heap segment.
a00c95d9 1980 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
1981 * gc.h
1982 */
4c48ba06 1983 {
1811ebce
MD
1984 /* Assure that the new segment is predicted to be large enough.
1985 *
1986 * New yield should at least equal GC fraction of new heap size, i.e.
1987 *
1988 * y + dh > f * (h + dh)
1989 *
1990 * y : yield
8fef55a8 1991 * f : min yield fraction
1811ebce
MD
1992 * h : heap size
1993 * dh : size of new heap segment
1994 *
1995 * This gives dh > (f * h - y) / (1 - f)
bda1446c 1996 */
8fef55a8 1997 int f = freelist->min_yield_fraction;
1811ebce
MD
1998 long h = SCM_HEAP_SIZE;
1999 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2000 len = SCM_EXPHEAP (freelist->heap_size);
2001#ifdef DEBUGINFO
2002 fprintf (stderr, "(%d < %d)", len, min_cells);
2003#endif
2004 if (len < min_cells)
1811ebce 2005 len = min_cells + freelist->cluster_size;
4c48ba06 2006 len *= sizeof (scm_cell);
1811ebce
MD
2007 /* force new sampling */
2008 freelist->collected = LONG_MAX;
4c48ba06 2009 }
a00c95d9 2010
4c48ba06
MD
2011 if (len > scm_max_segment_size)
2012 len = scm_max_segment_size;
0f2d19dd
JB
2013
2014 {
2015 scm_sizet smallest;
2016
a00c95d9 2017 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9 2018
0f2d19dd 2019 if (len < smallest)
a00c95d9 2020 len = smallest;
0f2d19dd
JB
2021
2022 /* Allocate with decaying ambition. */
2023 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2024 && (len >= smallest))
2025 {
1811ebce 2026 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2027 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2028 if (ptr)
2029 {
a00c95d9 2030 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2031 return;
2032 }
2033 len /= 2;
2034 }
2035 }
2036
c7387918
DH
2037 /* Dirk:FIXME:: scm_memory_error needs an additional message parameter.
2038 * Here: "could not grow heap".
2039 */
2040 scm_memory_error (FUNC_NAME);
0f2d19dd 2041}
acf4331f 2042#undef FUNC_NAME
0f2d19dd
JB
2043
2044
/* Scheme primitive `unhash-name'.  Walks every cell in every heap
 * segment looking for gloc cells.  For each gloc whose symbol matches
 * NAME (or for every gloc, when NAME is #t) and whose variable cell
 * holds a real binding (not the 0/1 sentinel values), the gloc's first
 * word is rewritten to NAME itself -- presumably reverting the memoized
 * global lookup back to a plain symbol reference; TODO confirm against
 * the memoization code.  Returns NAME.
 */
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
	    "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  /* Defer interrupts: the scan must see a stable heap table. */
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM cell = PTR2SCM (p);
	  if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
	    {
	      /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
	       * struct cell.  See the corresponding comment in scm_gc_mark.
	       */
	      scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
	      SCM gloc_car = SCM_PACK (word0); /* access as gloc */
	      SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
	      if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
		  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
		{
		  SCM_SET_CELL_OBJECT_0 (cell, name);
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
0f2d19dd
JB
2085
2086
2087\f
2088/* {GC Protection Helper Functions}
2089 */
2090
2091
/* Deliberately empty.  Calling scm_remember (&obj) forces the address
 * of OBJ to be taken, which keeps the object on the caller's stack
 * where the conservative GC can see it -- presumably its only purpose;
 * the call itself does nothing. */
void
scm_remember (SCM *ptr)
{ /* empty */ }
0f2d19dd 2095
1cc91f1b 2096
c209c88e 2097/*
41b0806d
GB
2098 These crazy functions prevent garbage collection
2099 of arguments after the first argument by
2100 ensuring they remain live throughout the
2101 function because they are used in the last
2102 line of the code block.
2103 It'd be better to have a nice compiler hint to
2104 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2105SCM
2106scm_return_first (SCM elt, ...)
0f2d19dd
JB
2107{
2108 return elt;
2109}
2110
41b0806d
GB
/* Integer twin of scm_return_first: returns its first argument while
   keeping any trailing arguments live on the stack for the GC. */
int
scm_return_first_int (int i, ...)
{
  int first = i;
  return first;
}
2116
0f2d19dd 2117
/* Permanently protect OBJ from garbage collection by consing it onto
 * the global scm_permobjs list (a GC root).  Unlike
 * scm_protect_object, there is no way to undo this.  Returns OBJ. */
SCM
scm_permanent_object (SCM obj)
{
  /* Defer interrupts around the list update so it is not observed
     half-done. */
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2126
2127
/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_unprotect_object (OBJ).  Calls to scm_protect/unprotect_object nest,
   i. e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more often
   than it has been protected before.  The function scm_protect_object returns
   OBJ.
*/

/* Implementation note: For every object X, there is a counter which
   scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
*/

SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  /* Look up OBJ in the scm_protects hash table, creating an entry with
     count 0 if absent, then bump the count stored in the handle's cdr. */
  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}
2157
2158
/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    {
      /* No table entry: the object was never protected (or was already
         fully unprotected).  By the documented contract this is a fatal
         usage error, so abort rather than continue. */
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* Decrement the nesting count; remove the table entry once it
         reaches zero so OBJ becomes collectable again. */
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}
2191
/* Set while the process is shutting down; other code can test this to
   skip work that is pointless (or unsafe) during termination. */
int terminating;

/* called on process termination. */
#ifdef HAVE_ATEXIT
/* atexit handlers take no arguments. */
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
/* on_exit (SunOS-style) passes the exit status and a user argument. */
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  /* Mark shutdown, then flush buffered port output so nothing written
     by the program is lost on exit. */
  terminating = 1;
  scm_flush_all_ports ();
}
ef290276 2212
0f2d19dd 2213\f
/* Allocate and register the initial heap segment for FREELIST,
 * rounding the requested size up to a whole number of clusters.  If
 * the requested size cannot be allocated, falls back to the default
 * SCM_HEAP_SEG_SIZE.  Also derives the freelist's min_yield and
 * grow_heap_p fields from its configured yield fraction.  Returns 0 on
 * success, 1 if even the fallback allocation failed.
 *
 * NOTE(review): the malloc result is handed to init_heap_seg
 * unchecked -- presumably init_heap_seg treats a NULL base as failure
 * and returns 0; confirm in its definition. */
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      /* First attempt failed: retry with the default segment size. */
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    scm_expmem = 1;

  /* Translate the configured yield percentage into an absolute cell
     count, and record whether the heap is already below it. */
  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
			   / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
2238
2239\f
4c48ba06
MD
2240static void
2241init_freelist (scm_freelist_t *freelist,
2242 int span,
2243 int cluster_size,
8fef55a8 2244 int min_yield)
4c48ba06
MD
2245{
2246 freelist->clusters = SCM_EOL;
2247 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2248 freelist->left_to_collect = 0;
2249 freelist->clusters_allocated = 0;
8fef55a8
MD
2250 freelist->min_yield = 0;
2251 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2252 freelist->span = span;
2253 freelist->collected = 0;
1811ebce 2254 freelist->collected_1 = 0;
4c48ba06
MD
2255 freelist->heap_size = 0;
2256}
2257
/* Bootstrap the storage subsystem: initialize both master freelists
 * (1-cell and 2-cell spans), allocate the heap segment table and the
 * two initial heap segments, set up the GC C-level hooks, the port
 * table, an exit-time cleanup handler, and the fundamental protected
 * objects (symbol hashes, scm_protects, etc.).  Zero arguments select
 * the compiled-in defaults.  Returns 0 on success, 1 on allocation
 * failure. */
int
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
		  scm_sizet init_heap_size_2, int gc_trigger_2,
		  scm_sizet max_segment_size)
{
  scm_sizet j;

  if (!init_heap_size_1)
    init_heap_size_1 = scm_default_init_heap_size_1;
  if (!init_heap_size_2)
    init_heap_size_2 = scm_default_init_heap_size_2;

  /* Clear the table of fundamental GC roots, and keep GC blocked until
     initialization is complete. */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  init_freelist (&scm_master_freelist,
		 1, SCM_CLUSTER_SIZE_1,
		 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
  init_freelist (&scm_master_freelist2,
		 2, SCM_CLUSTER_SIZE_2,
		 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
  scm_max_segment_size
    = max_segment_size ? max_segment_size : scm_default_max_segment_size;

  scm_expmem = 0;

  /* NOTE(review): this store to j appears dead -- j is not read again
     below; likely a leftover from an older version. */
  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Room for the two initial segments; alloc_some_heap grows it later. */
  scm_heap_table = ((scm_heap_seg_data_t *)
		    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));

  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  /* C-level hooks run at the phases of a collection. */
  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports. */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  /* A circular list whose sole element is SCM_UNDEFINED. */
  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  /* Hash table backing scm_protect_object/scm_unprotect_object. */
  scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
  scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif
  return 0;
}
939794ce 2339
0f2d19dd
JB
2340\f
2341
939794ce
DH
2342SCM scm_after_gc_hook;
2343
2344#if (SCM_DEBUG_DEPRECATED == 0)
2345static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2346#endif /* SCM_DEBUG_DEPRECATED == 0 */
2347static SCM gc_async;
2348
2349
/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);

#if (SCM_DEBUG_DEPRECATED == 0)

  /* The following code will be removed in Guile 1.5. */
  if (SCM_NFALSEP (scm_gc_vcell))
    {
      SCM proc = SCM_CDR (scm_gc_vcell);

      /* Call the deprecated gc-thunk binding, if one has been set. */
      if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
	scm_apply (proc, SCM_EOL, SCM_EOL);
    }

#endif /* SCM_DEBUG_DEPRECATED == 0 */

  return SCM_UNSPECIFIED;
}
2374
2375
/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void * hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}
2387
2388
0f2d19dd
JB
/* Initialize the GC's Scheme-visible machinery: create the
 * after-gc-hook, wire up the async that runs it after each collection,
 * and register the primitives defined in this file (via gc.x). */
void
scm_init_gc ()
{
  SCM after_gc_thunk;

  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

#if (SCM_DEBUG_DEPRECATED == 0)
  scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
#endif /* SCM_DEBUG_DEPRECATED == 0 */
  /* Dirk:FIXME:: We don't really want a binding here. */
  after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk);

  /* Have the end of every collection mark gc_async, so gc_async_thunk
     runs as soon as the evaluator processes pending asyncs. */
  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}
89e00824
ML
2407
2408/*
2409 Local Variables:
2410 c-file-style: "gnu"
2411 End:
2412*/