* Cleaned up initialization of asyncs.
[bpt/guile.git] / libguile / gc.c
CommitLineData
acb0a19c 1/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
a00c95d9 2 *
0f2d19dd
JB
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
a00c95d9 7 *
0f2d19dd
JB
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
a00c95d9 12 *
0f2d19dd
JB
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
82892bed
JB
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
0f2d19dd
JB
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
82892bed 40 * If you do not wish that, delete this exception notice. */
1bbd0b84
GB
41
42/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
37ddcaf6
MD
45/* #define DEBUGINFO */
46
0f2d19dd
JB
47\f
48#include <stdio.h>
a0599745
MD
49#include "libguile/_scm.h"
50#include "libguile/stime.h"
51#include "libguile/stackchk.h"
52#include "libguile/struct.h"
a0599745
MD
53#include "libguile/smob.h"
54#include "libguile/unif.h"
55#include "libguile/async.h"
56#include "libguile/ports.h"
57#include "libguile/root.h"
58#include "libguile/strings.h"
59#include "libguile/vectors.h"
801cb5e7 60#include "libguile/weaks.h"
686765af 61#include "libguile/hashtab.h"
a0599745
MD
62
63#include "libguile/validate.h"
64#include "libguile/gc.h"
fce59c93 65
bc9d9bb2 66#ifdef GUILE_DEBUG_MALLOC
a0599745 67#include "libguile/debug-malloc.h"
bc9d9bb2
MD
68#endif
69
0f2d19dd 70#ifdef HAVE_MALLOC_H
95b88819 71#include <malloc.h>
0f2d19dd
JB
72#endif
73
74#ifdef HAVE_UNISTD_H
95b88819 75#include <unistd.h>
0f2d19dd
JB
76#endif
77
1cc91f1b
JB
78#ifdef __STDC__
79#include <stdarg.h>
80#define var_start(x, y) va_start(x, y)
81#else
82#include <varargs.h>
83#define var_start(x, y) va_start(x)
84#endif
85
0f2d19dd
JB
86\f
87/* {heap tuning parameters}
a00c95d9 88 *
0f2d19dd
JB
89 * These are parameters for controlling memory allocation. The heap
90 * is the area out of which scm_cons, and object headers are allocated.
91 *
92 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
93 * 64 bit machine. The units of the _SIZE parameters are bytes.
94 * Cons pairs and object headers occupy one heap cell.
95 *
96 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
97 * allocated initially the heap will grow by half its current size
98 * each subsequent time more heap is needed.
99 *
100 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
101 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
102 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
103 * is in scm_init_storage() and alloc_some_heap() in sys.c
a00c95d9 104 *
0f2d19dd
JB
105 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
106 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
107 *
108 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
109 * is needed.
110 *
111 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
a00c95d9 112 * trigger a GC.
6064dcc6
MV
113 *
114 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
115 * reclaimed by a GC triggered by must_malloc. If less than this is
116 * reclaimed, the trigger threshold is raised. [I don't know what a
117 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
a00c95d9 118 * work around a oscillation that caused almost constant GC.]
0f2d19dd
JB
119 */
120
8fef55a8
MD
121/*
122 * Heap size 45000 and 40% min yield gives quick startup and no extra
123 * heap allocation. Having higher values on min yield may lead to
124 * large heaps, especially if code behaviour is varying its
125 * maximum consumption between different freelists.
126 */
aeacfc8f
MD
127int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell));
128int scm_default_min_yield_1 = 40;
4c48ba06 129#define SCM_CLUSTER_SIZE_1 2000L
4c48ba06 130
aeacfc8f 131int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell));
4c48ba06
MD
132/* The following value may seem large, but note that if we get to GC at
133 * all, this means that we have a numerically intensive application
134 */
aeacfc8f
MD
135int scm_default_min_yield_2 = 40;
136#define SCM_CLUSTER_SIZE_2 1000L
4c48ba06 137
aeacfc8f 138int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
4c48ba06 139
945fec60 140#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
0f2d19dd
JB
141#ifdef _QC
142# define SCM_HEAP_SEG_SIZE 32768L
143#else
144# ifdef sequent
4c48ba06 145# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
0f2d19dd 146# else
4c48ba06 147# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
0f2d19dd
JB
148# endif
149#endif
4c48ba06 150/* Make heap grow with factor 1.5 */
4a4c9785 151#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
0f2d19dd 152#define SCM_INIT_MALLOC_LIMIT 100000
6064dcc6 153#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
0f2d19dd
JB
154
155/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
156 bounds for allocated storage */
157
158#ifdef PROT386
159/*in 386 protected mode we must only adjust the offset */
a00c95d9
ML
160# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
161# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
0f2d19dd
JB
162#else
163# ifdef _UNICOS
a00c95d9
ML
164# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
165# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
0f2d19dd 166# else
a00c95d9
ML
167# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
168# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
0f2d19dd
JB
169# endif /* UNICOS */
170#endif /* PROT386 */
a00c95d9
ML
171#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
172#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
b37fe1c5
MD
173#define SCM_HEAP_SIZE \
174 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
1811ebce 175#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
0f2d19dd
JB
176
177
178\f
945fec60 179/* scm_freelists
0f2d19dd 180 */
945fec60 181
a00c95d9
ML
182typedef struct scm_freelist_t {
183 /* collected cells */
184 SCM cells;
a00c95d9
ML
185 /* number of cells left to collect before cluster is full */
186 unsigned int left_to_collect;
b37fe1c5
MD
187 /* number of clusters which have been allocated */
188 unsigned int clusters_allocated;
8fef55a8
MD
189 /* a list of freelists, each of size cluster_size,
190 * except the last one which may be shorter
191 */
a00c95d9
ML
192 SCM clusters;
193 SCM *clustertail;
b37fe1c5 194 /* this is the number of objects in each cluster, including the spine cell */
a00c95d9 195 int cluster_size;
8fef55a8 196 /* indicates that we should grow heap instead of GC:ing
a00c95d9
ML
197 */
198 int grow_heap_p;
8fef55a8 199 /* minimum yield on this list in order not to grow the heap
a00c95d9 200 */
8fef55a8
MD
201 long min_yield;
202 /* defines min_yield as percent of total heap size
a00c95d9 203 */
8fef55a8 204 int min_yield_fraction;
a00c95d9
ML
205 /* number of cells per object on this list */
206 int span;
207 /* number of collected cells during last GC */
1811ebce
MD
208 long collected;
209 /* number of collected cells during penultimate GC */
210 long collected_1;
a00c95d9
ML
211 /* total number of cells in heap segments
212 * belonging to this list.
213 */
1811ebce 214 long heap_size;
a00c95d9
ML
215} scm_freelist_t;
216
4a4c9785
MD
217SCM scm_freelist = SCM_EOL;
218scm_freelist_t scm_master_freelist = {
b37fe1c5 219 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
4a4c9785
MD
220};
221SCM scm_freelist2 = SCM_EOL;
222scm_freelist_t scm_master_freelist2 = {
b37fe1c5 223 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
4a4c9785 224};
0f2d19dd
JB
225
226/* scm_mtrigger
227 * is the number of bytes of must_malloc allocation needed to trigger gc.
228 */
15e9d186 229unsigned long scm_mtrigger;
0f2d19dd
JB
230
231
232/* scm_gc_heap_lock
233 * If set, don't expand the heap. Set only during gc, during which no allocation
234 * is supposed to take place anyway.
235 */
236int scm_gc_heap_lock = 0;
237
238/* GC Blocking
239 * Don't pause for collection if this is set -- just
240 * expand the heap.
241 */
242
243int scm_block_gc = 1;
244
245/* If fewer than MIN_GC_YIELD cells are recovered during a garbage
246 * collection (GC) more space is allocated for the heap.
247 */
945fec60 248#define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
0f2d19dd
JB
249
250/* During collection, this accumulates objects holding
251 * weak references.
252 */
ab4bef85 253SCM scm_weak_vectors;
0f2d19dd
JB
254
255/* GC Statistics Keeping
256 */
257unsigned long scm_cells_allocated = 0;
a5c314c8 258long scm_mallocated = 0;
b37fe1c5 259unsigned long scm_gc_cells_collected;
8b0d194f 260unsigned long scm_gc_yield;
37ddcaf6 261static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
0f2d19dd
JB
262unsigned long scm_gc_malloc_collected;
263unsigned long scm_gc_ports_collected;
264unsigned long scm_gc_rt;
265unsigned long scm_gc_time_taken = 0;
266
267SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
268SCM_SYMBOL (sym_heap_size, "cell-heap-size");
269SCM_SYMBOL (sym_mallocated, "bytes-malloced");
270SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
271SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
272SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
273
a00c95d9 274typedef struct scm_heap_seg_data_t
0f2d19dd 275{
cf2d30f6
JB
276 /* lower and upper bounds of the segment */
277 SCM_CELLPTR bounds[2];
278
279 /* address of the head-of-freelist pointer for this segment's cells.
280 All segments usually point to the same one, scm_freelist. */
4c48ba06 281 scm_freelist_t *freelist;
cf2d30f6 282
fe517a7d 283 /* number of cells per object in this segment */
945fec60 284 int span;
a00c95d9 285} scm_heap_seg_data_t;
0f2d19dd
JB
286
287
288
945fec60
MD
289static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
290static void alloc_some_heap (scm_freelist_t *);
0f2d19dd
JB
291
292
293\f
cf2d30f6
JB
294/* Debugging functions. */
295
bb2c57fa 296#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6
JB
297
298/* Return the number of the heap segment containing CELL. */
299static int
300which_seg (SCM cell)
301{
302 int i;
303
304 for (i = 0; i < scm_n_heap_segs; i++)
195e6201
DH
305 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
306 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
cf2d30f6
JB
307 return i;
308 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
945fec60 309 SCM_UNPACK (cell));
cf2d30f6
JB
310 abort ();
311}
312
313
8ded62a3
MD
314static void
315map_free_list (scm_freelist_t *master, SCM freelist)
316{
317 int last_seg = -1, count = 0;
318 SCM f;
a00c95d9 319
8ded62a3
MD
320 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
321 {
322 int this_seg = which_seg (f);
323
324 if (this_seg != last_seg)
325 {
326 if (last_seg != -1)
327 fprintf (stderr, " %5d %d-cells in segment %d\n",
328 count, master->span, last_seg);
329 last_seg = this_seg;
330 count = 0;
331 }
332 count++;
333 }
334 if (last_seg != -1)
335 fprintf (stderr, " %5d %d-cells in segment %d\n",
336 count, master->span, last_seg);
337}
cf2d30f6 338
a00c95d9 339SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c
MD
340 (),
341 "Print debugging information about the free-list.\n"
5384bc5b 342 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
acb0a19c
MD
343#define FUNC_NAME s_scm_map_free_list
344{
4c48ba06
MD
345 int i;
346 fprintf (stderr, "%d segments total (%d:%d",
347 scm_n_heap_segs,
348 scm_heap_table[0].span,
349 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
350 for (i = 1; i < scm_n_heap_segs; i++)
351 fprintf (stderr, ", %d:%d",
352 scm_heap_table[i].span,
353 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
354 fprintf (stderr, ")\n");
8ded62a3
MD
355 map_free_list (&scm_master_freelist, scm_freelist);
356 map_free_list (&scm_master_freelist2, scm_freelist2);
cf2d30f6
JB
357 fflush (stderr);
358
359 return SCM_UNSPECIFIED;
360}
1bbd0b84 361#undef FUNC_NAME
cf2d30f6 362
4c48ba06
MD
363static int last_cluster;
364static int last_size;
365
5384bc5b
MD
366static int
367free_list_length (char *title, int i, SCM freelist)
368{
369 SCM ls;
370 int n = 0;
371 for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
206d3de3 372 if (SCM_CELL_TYPE (ls) == scm_tc_free_cell)
5384bc5b
MD
373 ++n;
374 else
375 {
376 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
377 abort ();
378 }
4c48ba06
MD
379 if (n != last_size)
380 {
381 if (i > 0)
382 {
383 if (last_cluster == i - 1)
384 fprintf (stderr, "\t%d\n", last_size);
385 else
386 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
387 }
388 if (i >= 0)
389 fprintf (stderr, "%s %d", title, i);
390 else
391 fprintf (stderr, "%s\t%d\n", title, n);
392 last_cluster = i;
393 last_size = n;
394 }
5384bc5b
MD
395 return n;
396}
397
398static void
399free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
400{
401 SCM clusters;
4c48ba06 402 int i = 0, len, n = 0;
5384bc5b
MD
403 fprintf (stderr, "%s\n\n", title);
404 n += free_list_length ("free list", -1, freelist);
405 for (clusters = master->clusters;
406 SCM_NNULLP (clusters);
407 clusters = SCM_CDR (clusters))
4c48ba06
MD
408 {
409 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
410 n += len;
411 }
412 if (last_cluster == i - 1)
413 fprintf (stderr, "\t%d\n", last_size);
414 else
415 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
416 fprintf (stderr, "\ntotal %d objects\n\n", n);
5384bc5b
MD
417}
418
a00c95d9 419SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b
MD
420 (),
421 "Print debugging information about the free-list.\n"
422 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
423#define FUNC_NAME s_scm_free_list_length
424{
b37fe1c5
MD
425 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
426 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 427 return SCM_UNSPECIFIED;
5384bc5b
MD
428}
429#undef FUNC_NAME
430
bb2c57fa
MD
431#endif
432
433#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
434
435/* Number of calls to SCM_NEWCELL since startup. */
436static unsigned long scm_newcell_count;
acb0a19c 437static unsigned long scm_newcell2_count;
cf2d30f6
JB
438
439/* Search freelist for anything that isn't marked as a free cell.
440 Abort if we find something. */
8ded62a3
MD
441static void
442scm_check_freelist (SCM freelist)
443{
444 SCM f;
445 int i = 0;
446
447 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
448 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
449 {
450 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
451 scm_newcell_count, i);
452 fflush (stderr);
453 abort ();
454 }
455}
cf2d30f6
JB
456
457static int scm_debug_check_freelist = 0;
25748c78 458
a00c95d9 459SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 460 (SCM flag),
da4a1dba
GB
461 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
462 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
463 "compile-time flag was selected.\n")
1bbd0b84 464#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 465{
945fec60 466 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
467 return SCM_UNSPECIFIED;
468}
1bbd0b84 469#undef FUNC_NAME
25748c78
GB
470
471
4a4c9785
MD
472SCM
473scm_debug_newcell (void)
474{
475 SCM new;
476
477 scm_newcell_count++;
478 if (scm_debug_check_freelist)
479 {
8ded62a3 480 scm_check_freelist (scm_freelist);
4a4c9785
MD
481 scm_gc();
482 }
483
484 /* The rest of this is supposed to be identical to the SCM_NEWCELL
485 macro. */
486 if (SCM_IMP (scm_freelist))
487 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
488 else
489 {
490 new = scm_freelist;
491 scm_freelist = SCM_CDR (scm_freelist);
492 SCM_SETCAR (new, scm_tc16_allocated);
493 }
494
495 return new;
496}
497
498SCM
499scm_debug_newcell2 (void)
500{
501 SCM new;
502
503 scm_newcell2_count++;
504 if (scm_debug_check_freelist)
505 {
8ded62a3 506 scm_check_freelist (scm_freelist2);
4a4c9785
MD
507 scm_gc ();
508 }
509
510 /* The rest of this is supposed to be identical to the SCM_NEWCELL
511 macro. */
512 if (SCM_IMP (scm_freelist2))
513 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
514 else
515 {
516 new = scm_freelist2;
517 scm_freelist2 = SCM_CDR (scm_freelist2);
518 SCM_SETCAR (new, scm_tc16_allocated);
519 }
520
521 return new;
522}
523
fca7547b 524#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
525
526\f
0f2d19dd 527
b37fe1c5
MD
528static unsigned long
529master_cells_allocated (scm_freelist_t *master)
530{
531 int objects = master->clusters_allocated * (master->cluster_size - 1);
532 if (SCM_NULLP (master->clusters))
533 objects -= master->left_to_collect;
534 return master->span * objects;
535}
536
537static unsigned long
538freelist_length (SCM freelist)
539{
540 int n;
541 for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
542 ++n;
543 return n;
544}
545
546static unsigned long
547compute_cells_allocated ()
548{
549 return (scm_cells_allocated
550 + master_cells_allocated (&scm_master_freelist)
551 + master_cells_allocated (&scm_master_freelist2)
552 - scm_master_freelist.span * freelist_length (scm_freelist)
553 - scm_master_freelist2.span * freelist_length (scm_freelist2));
554}
b37fe1c5 555
0f2d19dd
JB
556/* {Scheme Interface to GC}
557 */
558
a00c95d9 559SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 560 (),
b380b885 561 "Returns an association list of statistics about Guile's current use of storage. ")
1bbd0b84 562#define FUNC_NAME s_scm_gc_stats
0f2d19dd
JB
563{
564 int i;
565 int n;
566 SCM heap_segs;
c209c88e
GB
567 long int local_scm_mtrigger;
568 long int local_scm_mallocated;
569 long int local_scm_heap_size;
570 long int local_scm_cells_allocated;
571 long int local_scm_gc_time_taken;
0f2d19dd
JB
572 SCM answer;
573
574 SCM_DEFER_INTS;
939794ce
DH
575
576 ++scm_block_gc;
577
0f2d19dd
JB
578 retry:
579 heap_segs = SCM_EOL;
580 n = scm_n_heap_segs;
581 for (i = scm_n_heap_segs; i--; )
582 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
583 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
584 heap_segs);
585 if (scm_n_heap_segs != n)
586 goto retry;
939794ce
DH
587
588 --scm_block_gc;
0f2d19dd 589
7febb4a2
MD
590 /* Below, we cons to produce the resulting list. We want a snapshot of
591 * the heap situation before consing.
592 */
0f2d19dd
JB
593 local_scm_mtrigger = scm_mtrigger;
594 local_scm_mallocated = scm_mallocated;
b37fe1c5 595 local_scm_heap_size = SCM_HEAP_SIZE;
b37fe1c5 596 local_scm_cells_allocated = compute_cells_allocated ();
0f2d19dd
JB
597 local_scm_gc_time_taken = scm_gc_time_taken;
598
599 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
600 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
601 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
602 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
603 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
604 scm_cons (sym_heap_segments, heap_segs),
605 SCM_UNDEFINED);
606 SCM_ALLOW_INTS;
607 return answer;
608}
1bbd0b84 609#undef FUNC_NAME
0f2d19dd
JB
610
611
a00c95d9 612void
6e8d25a6 613scm_gc_start (const char *what)
0f2d19dd
JB
614{
615 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
b37fe1c5 616 scm_gc_cells_collected = 0;
37ddcaf6 617 scm_gc_yield_1 = scm_gc_yield;
8b0d194f
MD
618 scm_gc_yield = (scm_cells_allocated
619 + master_cells_allocated (&scm_master_freelist)
620 + master_cells_allocated (&scm_master_freelist2));
0f2d19dd
JB
621 scm_gc_malloc_collected = 0;
622 scm_gc_ports_collected = 0;
623}
624
939794ce 625
a00c95d9 626void
0f2d19dd 627scm_gc_end ()
0f2d19dd
JB
628{
629 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
c209c88e 630 scm_gc_time_taken += scm_gc_rt;
0f2d19dd
JB
631}
632
633
a00c95d9 634SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 635 (SCM obj),
b380b885
MD
636 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
637 "returned by this function for @var{obj}")
1bbd0b84 638#define FUNC_NAME s_scm_object_address
0f2d19dd 639{
54778cd3 640 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 641}
1bbd0b84 642#undef FUNC_NAME
0f2d19dd
JB
643
644
a00c95d9 645SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
1bbd0b84 646 (),
b380b885
MD
647 "Scans all of SCM objects and reclaims for further use those that are\n"
648 "no longer accessible.")
1bbd0b84 649#define FUNC_NAME s_scm_gc
0f2d19dd
JB
650{
651 SCM_DEFER_INTS;
652 scm_igc ("call");
653 SCM_ALLOW_INTS;
654 return SCM_UNSPECIFIED;
655}
1bbd0b84 656#undef FUNC_NAME
0f2d19dd
JB
657
658
659\f
660/* {C Interface For When GC is Triggered}
661 */
662
b37fe1c5 663static void
8fef55a8 664adjust_min_yield (scm_freelist_t *freelist)
b37fe1c5 665{
8fef55a8 666 /* min yield is adjusted upwards so that next predicted total yield
bda1446c 667 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
668 * `min_yield_fraction' of total heap size. Note, however, that
669 * the absolute value of min_yield will correspond to `collected'
bda1446c 670 * on one master (the one which currently is triggering GC).
b37fe1c5 671 *
bda1446c
MD
672 * The reason why we look at total yield instead of cells collected
673 * on one list is that we want to take other freelists into account.
674 * On this freelist, we know that (local) yield = collected cells,
675 * but that's probably not the case on the other lists.
b37fe1c5
MD
676 *
677 * (We might consider computing a better prediction, for example
678 * by computing an average over multiple GC:s.)
679 */
8fef55a8 680 if (freelist->min_yield_fraction)
b37fe1c5 681 {
37ddcaf6 682 /* Pick largest of last two yields. */
8fef55a8
MD
683 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
684 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5
MD
685#ifdef DEBUGINFO
686 fprintf (stderr, " after GC = %d, delta = %d\n",
687 scm_cells_allocated,
688 delta);
689#endif
690 if (delta > 0)
8fef55a8 691 freelist->min_yield += delta;
b37fe1c5
MD
692 }
693}
694
4a4c9785 695/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
696 * common while the freelist will be individual for each thread.
697 */
4a4c9785
MD
698
699SCM
700scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
701{
702 SCM cell;
703 ++scm_ints_disabled;
4c48ba06
MD
704 do
705 {
706 if (SCM_NULLP (master->clusters))
707 {
708 if (master->grow_heap_p)
709 {
710 master->grow_heap_p = 0;
711 alloc_some_heap (master);
712 }
713 else
b37fe1c5 714 {
37ddcaf6
MD
715#ifdef DEBUGINFO
716 fprintf (stderr, "allocated = %d, ",
717 scm_cells_allocated
718 + master_cells_allocated (&scm_master_freelist)
719 + master_cells_allocated (&scm_master_freelist2));
720#endif
b37fe1c5 721 scm_igc ("cells");
8fef55a8 722 adjust_min_yield (master);
b37fe1c5 723 }
4c48ba06
MD
724 }
725 cell = SCM_CAR (master->clusters);
726 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 727 ++master->clusters_allocated;
4c48ba06
MD
728 }
729 while (SCM_NULLP (cell));
4a4c9785 730 --scm_ints_disabled;
4a4c9785 731 *freelist = SCM_CDR (cell);
54778cd3 732 SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
4a4c9785
MD
733 return cell;
734}
735
4c48ba06
MD
736#if 0
737/* This is a support routine which can be used to reserve a cluster
738 * for some special use, such as debugging. It won't be useful until
739 * free cells are preserved between garbage collections.
740 */
741
742void
743scm_alloc_cluster (scm_freelist_t *master)
744{
745 SCM freelist, cell;
746 cell = scm_gc_for_newcell (master, &freelist);
747 SCM_SETCDR (cell, freelist);
748 return cell;
749}
750#endif
751
801cb5e7
MD
752
753scm_c_hook_t scm_before_gc_c_hook;
754scm_c_hook_t scm_before_mark_c_hook;
755scm_c_hook_t scm_before_sweep_c_hook;
756scm_c_hook_t scm_after_sweep_c_hook;
757scm_c_hook_t scm_after_gc_c_hook;
758
0f2d19dd 759void
1bbd0b84 760scm_igc (const char *what)
0f2d19dd
JB
761{
762 int j;
763
801cb5e7 764 scm_c_hook_run (&scm_before_gc_c_hook, 0);
4c48ba06
MD
765#ifdef DEBUGINFO
766 fprintf (stderr,
767 SCM_NULLP (scm_freelist)
768 ? "*"
769 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
770#endif
42db06f0
MD
771#ifdef USE_THREADS
772 /* During the critical section, only the current thread may run. */
773 SCM_THREAD_CRITICAL_SECTION_START;
774#endif
775
e242dfd2 776 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 777
ab4bef85
JB
778 scm_gc_start (what);
779
780 if (!scm_stack_base || scm_block_gc)
781 {
782 scm_gc_end ();
783 return;
784 }
785
a5c314c8
JB
786 if (scm_mallocated < 0)
787 /* The byte count of allocated objects has underflowed. This is
788 probably because you forgot to report the sizes of objects you
789 have allocated, by calling scm_done_malloc or some such. When
790 the GC freed them, it subtracted their size from
791 scm_mallocated, which underflowed. */
792 abort ();
c45acc34 793
ab4bef85
JB
794 if (scm_gc_heap_lock)
795 /* We've invoked the collector while a GC is already in progress.
796 That should never happen. */
797 abort ();
0f2d19dd
JB
798
799 ++scm_gc_heap_lock;
ab4bef85 800
0f2d19dd
JB
801 /* flush dead entries from the continuation stack */
802 {
803 int x;
804 int bound;
805 SCM * elts;
806 elts = SCM_VELTS (scm_continuation_stack);
807 bound = SCM_LENGTH (scm_continuation_stack);
808 x = SCM_INUM (scm_continuation_stack_ptr);
809 while (x < bound)
810 {
811 elts[x] = SCM_BOOL_F;
812 ++x;
813 }
814 }
815
801cb5e7
MD
816 scm_c_hook_run (&scm_before_mark_c_hook, 0);
817
42db06f0 818#ifndef USE_THREADS
a00c95d9 819
0f2d19dd
JB
820 /* Protect from the C stack. This must be the first marking
821 * done because it provides information about what objects
822 * are "in-use" by the C code. "in-use" objects are those
823 * for which the values from SCM_LENGTH and SCM_CHARS must remain
824 * usable. This requirement is stricter than a liveness
825 * requirement -- in particular, it constrains the implementation
826 * of scm_vector_set_length_x.
827 */
828 SCM_FLUSH_REGISTER_WINDOWS;
829 /* This assumes that all registers are saved into the jmp_buf */
830 setjmp (scm_save_regs_gc_mark);
831 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
ce4a361d
JB
832 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
833 sizeof scm_save_regs_gc_mark)
834 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
835
836 {
6ba93e5e 837 scm_sizet stack_len = scm_stack_size (scm_stack_base);
0f2d19dd 838#ifdef SCM_STACK_GROWS_UP
6ba93e5e 839 scm_mark_locations (scm_stack_base, stack_len);
0f2d19dd 840#else
6ba93e5e 841 scm_mark_locations (scm_stack_base - stack_len, stack_len);
0f2d19dd
JB
842#endif
843 }
844
42db06f0
MD
845#else /* USE_THREADS */
846
847 /* Mark every thread's stack and registers */
945fec60 848 scm_threads_mark_stacks ();
42db06f0
MD
849
850#endif /* USE_THREADS */
0f2d19dd
JB
851
852 /* FIXME: insert a phase to un-protect string-data preserved
853 * in scm_vector_set_length_x.
854 */
855
856 j = SCM_NUM_PROTECTS;
857 while (j--)
858 scm_gc_mark (scm_sys_protects[j]);
859
9de33deb
MD
860 /* FIXME: we should have a means to register C functions to be run
861 * in different phases of GC
a00c95d9 862 */
9de33deb 863 scm_mark_subr_table ();
a00c95d9 864
42db06f0
MD
865#ifndef USE_THREADS
866 scm_gc_mark (scm_root->handle);
867#endif
a00c95d9 868
801cb5e7 869 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
0493cd89 870
0f2d19dd
JB
871 scm_gc_sweep ();
872
801cb5e7
MD
873 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
874
0f2d19dd
JB
875 --scm_gc_heap_lock;
876 scm_gc_end ();
42db06f0
MD
877
878#ifdef USE_THREADS
879 SCM_THREAD_CRITICAL_SECTION_END;
880#endif
801cb5e7 881 scm_c_hook_run (&scm_after_gc_c_hook, 0);
0f2d19dd
JB
882}
883
884\f
939794ce 885
a00c95d9 886/* {Mark/Sweep}
0f2d19dd
JB
887 */
888
889
890
891/* Mark an object precisely.
892 */
a00c95d9 893void
1bbd0b84 894scm_gc_mark (SCM p)
0f2d19dd
JB
895{
896 register long i;
897 register SCM ptr;
898
899 ptr = p;
900
901gc_mark_loop:
902 if (SCM_IMP (ptr))
903 return;
904
905gc_mark_nimp:
906 if (SCM_NCELLP (ptr))
f8392303 907 scm_wta (ptr, "rogue pointer in heap", NULL);
0f2d19dd
JB
908
909 switch (SCM_TYP7 (ptr))
910 {
911 case scm_tcs_cons_nimcar:
912 if (SCM_GCMARKP (ptr))
913 break;
914 SCM_SETGCMARK (ptr);
915 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
916 {
917 ptr = SCM_CAR (ptr);
918 goto gc_mark_nimp;
919 }
920 scm_gc_mark (SCM_CAR (ptr));
921 ptr = SCM_GCCDR (ptr);
922 goto gc_mark_nimp;
923 case scm_tcs_cons_imcar:
acb0a19c
MD
924 if (SCM_GCMARKP (ptr))
925 break;
926 SCM_SETGCMARK (ptr);
927 ptr = SCM_GCCDR (ptr);
928 goto gc_mark_loop;
e641afaf 929 case scm_tc7_pws:
0f2d19dd
JB
930 if (SCM_GCMARKP (ptr))
931 break;
932 SCM_SETGCMARK (ptr);
54778cd3 933 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
0f2d19dd
JB
934 ptr = SCM_GCCDR (ptr);
935 goto gc_mark_loop;
936 case scm_tcs_cons_gloc:
937 if (SCM_GCMARKP (ptr))
938 break;
939 SCM_SETGCMARK (ptr);
940 {
c8045e8d
DH
941 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
942 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
943 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
944 * pointer to a struct vtable data region. The fact that these are
945 * accessed in the same way restricts the possibilites to change the
946 * data layout of structs or heap cells.
947 */
948 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
949 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
950 switch (vtable_data [scm_vtable_index_vcell])
0f2d19dd
JB
951 {
952 default:
c8045e8d
DH
953 {
954 /* ptr is a gloc */
955 SCM gloc_car = SCM_PACK (word0);
956 scm_gc_mark (gloc_car);
957 ptr = SCM_GCCDR (ptr);
958 goto gc_mark_loop;
959 }
0f2d19dd
JB
960 case 1: /* ! */
961 case 0: /* ! */
962 {
c8045e8d
DH
963 /* ptr is a struct */
964 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
965 int len = SCM_LENGTH (layout);
966 char * fields_desc = SCM_CHARS (layout);
14d1400f
JB
967 /* We're using SCM_GCCDR here like STRUCT_DATA, except
968 that it removes the mark */
c8045e8d 969 scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));
a00c95d9 970
c8045e8d 971 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
aa0761ec 972 {
c8045e8d
DH
973 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
974 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
aa0761ec 975 }
ad75306c
MD
976 if (len)
977 {
c8045e8d
DH
978 int x;
979
980 for (x = 0; x < len - 2; x += 2, ++struct_data)
ad75306c 981 if (fields_desc[x] == 'p')
c8045e8d 982 scm_gc_mark (SCM_PACK (*struct_data));
ad75306c
MD
983 if (fields_desc[x] == 'p')
984 {
985 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
c8045e8d
DH
986 for (x = *struct_data; x; --x)
987 scm_gc_mark (SCM_PACK (*++struct_data));
ad75306c 988 else
c8045e8d 989 scm_gc_mark (SCM_PACK (*struct_data));
ad75306c
MD
990 }
991 }
c8045e8d 992 if (vtable_data [scm_vtable_index_vcell] == 0)
0f2d19dd 993 {
c8045e8d
DH
994 vtable_data [scm_vtable_index_vcell] = 1;
995 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
0f2d19dd
JB
996 goto gc_mark_loop;
997 }
998 }
999 }
1000 }
1001 break;
1002 case scm_tcs_closures:
1003 if (SCM_GCMARKP (ptr))
1004 break;
1005 SCM_SETGCMARK (ptr);
1006 if (SCM_IMP (SCM_CDR (ptr)))
1007 {
1008 ptr = SCM_CLOSCAR (ptr);
1009 goto gc_mark_nimp;
1010 }
1011 scm_gc_mark (SCM_CLOSCAR (ptr));
1012 ptr = SCM_GCCDR (ptr);
1013 goto gc_mark_nimp;
1014 case scm_tc7_vector:
1015 case scm_tc7_lvector:
1016#ifdef CCLO
1017 case scm_tc7_cclo:
1018#endif
1019 if (SCM_GC8MARKP (ptr))
1020 break;
1021 SCM_SETGC8MARK (ptr);
1022 i = SCM_LENGTH (ptr);
1023 if (i == 0)
1024 break;
1025 while (--i > 0)
1026 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1027 scm_gc_mark (SCM_VELTS (ptr)[i]);
1028 ptr = SCM_VELTS (ptr)[0];
1029 goto gc_mark_loop;
1030 case scm_tc7_contin:
1031 if SCM_GC8MARKP
1032 (ptr) break;
1033 SCM_SETGC8MARK (ptr);
c68296f8 1034 if (SCM_VELTS (ptr))
41b0806d 1035 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
c68296f8
MV
1036 (scm_sizet)
1037 (SCM_LENGTH (ptr) +
1038 (sizeof (SCM_STACKITEM) + -1 +
1039 sizeof (scm_contregs)) /
1040 sizeof (SCM_STACKITEM)));
0f2d19dd 1041 break;
afe5177e 1042#ifdef HAVE_ARRAYS
0f2d19dd
JB
1043 case scm_tc7_bvect:
1044 case scm_tc7_byvect:
1045 case scm_tc7_ivect:
1046 case scm_tc7_uvect:
1047 case scm_tc7_fvect:
1048 case scm_tc7_dvect:
1049 case scm_tc7_cvect:
1050 case scm_tc7_svect:
5c11cc9d 1051#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1052 case scm_tc7_llvect:
1053#endif
afe5177e 1054#endif
0f2d19dd 1055 case scm_tc7_string:
0f2d19dd
JB
1056 SCM_SETGC8MARK (ptr);
1057 break;
1058
1059 case scm_tc7_substring:
0f2d19dd
JB
1060 if (SCM_GC8MARKP(ptr))
1061 break;
1062 SCM_SETGC8MARK (ptr);
1063 ptr = SCM_CDR (ptr);
1064 goto gc_mark_loop;
1065
1066 case scm_tc7_wvect:
1067 if (SCM_GC8MARKP(ptr))
1068 break;
ab4bef85
JB
1069 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1070 scm_weak_vectors = ptr;
0f2d19dd
JB
1071 SCM_SETGC8MARK (ptr);
1072 if (SCM_IS_WHVEC_ANY (ptr))
1073 {
1074 int x;
1075 int len;
1076 int weak_keys;
1077 int weak_values;
1078
1079 len = SCM_LENGTH (ptr);
1080 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1081 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
a00c95d9 1082
0f2d19dd
JB
1083 for (x = 0; x < len; ++x)
1084 {
1085 SCM alist;
1086 alist = SCM_VELTS (ptr)[x];
46408039
JB
1087
1088 /* mark everything on the alist except the keys or
1089 * values, according to weak_values and weak_keys. */
0b5f3f34 1090 while ( SCM_CONSP (alist)
0f2d19dd 1091 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1092 && SCM_CONSP (SCM_CAR (alist)))
1093 {
1094 SCM kvpair;
1095 SCM next_alist;
1096
1097 kvpair = SCM_CAR (alist);
1098 next_alist = SCM_CDR (alist);
a00c95d9 1099 /*
0f2d19dd
JB
1100 * Do not do this:
1101 * SCM_SETGCMARK (alist);
1102 * SCM_SETGCMARK (kvpair);
1103 *
1104 * It may be that either the key or value is protected by
1105 * an escaped reference to part of the spine of this alist.
1106 * If we mark the spine here, and only mark one or neither of the
1107 * key and value, they may never be properly marked.
1108 * This leads to a horrible situation in which an alist containing
1109 * freelist cells is exported.
1110 *
1111 * So only mark the spines of these arrays last of all marking.
1112 * If somebody confuses us by constructing a weak vector
1113 * with a circular alist then we are hosed, but at least we
1114 * won't prematurely drop table entries.
1115 */
1116 if (!weak_keys)
1117 scm_gc_mark (SCM_CAR (kvpair));
1118 if (!weak_values)
1119 scm_gc_mark (SCM_GCCDR (kvpair));
1120 alist = next_alist;
1121 }
1122 if (SCM_NIMP (alist))
1123 scm_gc_mark (alist);
1124 }
1125 }
1126 break;
1127
1128 case scm_tc7_msymbol:
1129 if (SCM_GC8MARKP(ptr))
1130 break;
1131 SCM_SETGC8MARK (ptr);
1132 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1133 ptr = SCM_SYMBOL_PROPS (ptr);
1134 goto gc_mark_loop;
1135 case scm_tc7_ssymbol:
1136 if (SCM_GC8MARKP(ptr))
1137 break;
1138 SCM_SETGC8MARK (ptr);
1139 break;
1140 case scm_tcs_subrs:
9de33deb 1141 break;
0f2d19dd
JB
1142 case scm_tc7_port:
1143 i = SCM_PTOBNUM (ptr);
1144 if (!(i < scm_numptob))
1145 goto def;
1146 if (SCM_GC8MARKP (ptr))
1147 break;
dc53f026 1148 SCM_SETGC8MARK (ptr);
ebf7394e
GH
1149 if (SCM_PTAB_ENTRY(ptr))
1150 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
dc53f026
JB
1151 if (scm_ptobs[i].mark)
1152 {
1153 ptr = (scm_ptobs[i].mark) (ptr);
1154 goto gc_mark_loop;
1155 }
1156 else
1157 return;
0f2d19dd
JB
1158 break;
1159 case scm_tc7_smob:
1160 if (SCM_GC8MARKP (ptr))
1161 break;
dc53f026 1162 SCM_SETGC8MARK (ptr);
acb0a19c 1163 switch (SCM_GCTYP16 (ptr))
0f2d19dd
JB
1164 { /* should be faster than going through scm_smobs */
1165 case scm_tc_free_cell:
1166 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1bbd0b84 1167 case scm_tc16_allocated:
acb0a19c
MD
1168 case scm_tc16_big:
1169 case scm_tc16_real:
1170 case scm_tc16_complex:
0f2d19dd
JB
1171 break;
1172 default:
1173 i = SCM_SMOBNUM (ptr);
1174 if (!(i < scm_numsmob))
1175 goto def;
dc53f026
JB
1176 if (scm_smobs[i].mark)
1177 {
1178 ptr = (scm_smobs[i].mark) (ptr);
1179 goto gc_mark_loop;
1180 }
1181 else
1182 return;
0f2d19dd
JB
1183 }
1184 break;
1185 default:
1186 def:scm_wta (ptr, "unknown type in ", "gc_mark");
1187 }
1188}
1189
1190
1191/* Mark a Region Conservatively
1192 */
1193
a00c95d9 1194void
6e8d25a6 1195scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
0f2d19dd
JB
1196{
1197 register long m = n;
1198 register int i, j;
1199 register SCM_CELLPTR ptr;
1200
1201 while (0 <= --m)
c67baafd 1202 if (SCM_CELLP (* (SCM *) &x[m]))
0f2d19dd 1203 {
195e6201 1204 ptr = SCM2PTR (* (SCM *) &x[m]);
0f2d19dd
JB
1205 i = 0;
1206 j = scm_n_heap_segs - 1;
1207 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1208 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1209 {
1210 while (i <= j)
1211 {
1212 int seg_id;
1213 seg_id = -1;
1214 if ( (i == j)
1215 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1216 seg_id = i;
1217 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1218 seg_id = j;
1219 else
1220 {
1221 int k;
1222 k = (i + j) / 2;
1223 if (k == i)
1224 break;
1225 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1226 {
1227 j = k;
1228 ++i;
1229 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1230 continue;
1231 else
1232 break;
1233 }
1234 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1235 {
1236 i = k;
1237 --j;
1238 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1239 continue;
1240 else
1241 break;
1242 }
1243 }
47457e8a
DH
1244 if (scm_heap_table[seg_id].span == 1
1245 || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
1246 scm_gc_mark (* (SCM *) &x[m]);
0f2d19dd
JB
1247 break;
1248 }
1249
1250 }
1251 }
1252}
1253
1254
1a548472
DH
1255/* The function scm_cellp determines whether an SCM value can be regarded as a
1256 * pointer to a cell on the heap. Binary search is used in order to determine
1257 * the heap segment that contains the cell.
1258 */
2e11a577 1259int
6e8d25a6 1260scm_cellp (SCM value)
2e11a577 1261{
1a548472
DH
1262 if (SCM_CELLP (value)) {
1263 scm_cell * ptr = SCM2PTR (value);
1264 unsigned int i = 0;
1265 unsigned int j = scm_n_heap_segs - 1;
1266
1267 while (i < j) {
1268 int k = (i + j) / 2;
1269 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1270 j = k;
1271 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1272 i = k + 1;
1273 }
1274 }
2e11a577 1275
1a548472
DH
1276 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1277 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1a548472
DH
1278 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
1279 return 1;
1280 } else {
1281 return 0;
2e11a577 1282 }
1a548472
DH
1283 } else {
1284 return 0;
1285 }
2e11a577
MD
1286}
1287
1288
4c48ba06
MD
1289static void
1290gc_sweep_freelist_start (scm_freelist_t *freelist)
1291{
1292 freelist->cells = SCM_EOL;
1293 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1294 freelist->clusters_allocated = 0;
4c48ba06
MD
1295 freelist->clusters = SCM_EOL;
1296 freelist->clustertail = &freelist->clusters;
1811ebce 1297 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1298 freelist->collected = 0;
1299}
1300
1301static void
1302gc_sweep_freelist_finish (scm_freelist_t *freelist)
1303{
1811ebce 1304 int collected;
4c48ba06
MD
1305 *freelist->clustertail = freelist->cells;
1306 if (SCM_NNULLP (freelist->cells))
1307 {
1308 SCM c = freelist->cells;
1309 SCM_SETCAR (c, SCM_CDR (c));
1310 SCM_SETCDR (c, SCM_EOL);
1311 freelist->collected +=
1312 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1313 }
b37fe1c5 1314 scm_gc_cells_collected += freelist->collected;
a00c95d9 1315
8fef55a8 1316 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1317 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1318 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1319 * size. This means that a too low yield is compensated by more
1320 * heap on the list which is currently doing most work, which is
1321 * just what we want.
1322 */
1811ebce 1323 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1324 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06 1325}
0f2d19dd 1326
a00c95d9 1327void
0f2d19dd 1328scm_gc_sweep ()
0f2d19dd
JB
1329{
1330 register SCM_CELLPTR ptr;
0f2d19dd 1331 register SCM nfreelist;
4c48ba06 1332 register scm_freelist_t *freelist;
0f2d19dd 1333 register long m;
0f2d19dd 1334 register int span;
15e9d186 1335 long i;
0f2d19dd
JB
1336 scm_sizet seg_size;
1337
0f2d19dd 1338 m = 0;
0f2d19dd 1339
4c48ba06
MD
1340 gc_sweep_freelist_start (&scm_master_freelist);
1341 gc_sweep_freelist_start (&scm_master_freelist2);
a00c95d9 1342
cf2d30f6 1343 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1344 {
4c48ba06 1345 register unsigned int left_to_collect;
4c48ba06 1346 register scm_sizet j;
15e9d186 1347
cf2d30f6
JB
1348 /* Unmarked cells go onto the front of the freelist this heap
1349 segment points to. Rather than updating the real freelist
1350 pointer as we go along, we accumulate the new head in
1351 nfreelist. Then, if it turns out that the entire segment is
1352 free, we free (i.e., malloc's free) the whole segment, and
1353 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1354 freelist = scm_heap_table[i].freelist;
1355 nfreelist = freelist->cells;
4c48ba06 1356 left_to_collect = freelist->left_to_collect;
945fec60 1357 span = scm_heap_table[i].span;
cf2d30f6 1358
a00c95d9
ML
1359 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1360 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
0f2d19dd
JB
1361 for (j = seg_size + span; j -= span; ptr += span)
1362 {
96f6f4ae
DH
1363 SCM scmptr = PTR2SCM (ptr);
1364
0f2d19dd
JB
1365 switch SCM_TYP7 (scmptr)
1366 {
1367 case scm_tcs_cons_gloc:
0f2d19dd 1368 {
c8045e8d
DH
1369 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1370 * struct or a gloc. See the corresponding comment in
1371 * scm_gc_mark.
1372 */
1373 scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
1374 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1375 if (SCM_GCMARKP (scmptr))
0f2d19dd 1376 {
c8045e8d
DH
1377 if (vtable_data [scm_vtable_index_vcell] == 1)
1378 vtable_data [scm_vtable_index_vcell] = 0;
1379 goto cmrkcontinue;
1380 }
1381 else
1382 {
1383 if (vtable_data [scm_vtable_index_vcell] == 0
1384 || vtable_data [scm_vtable_index_vcell] == 1)
1385 {
1386 scm_struct_free_t free
1387 = (scm_struct_free_t) vtable_data[scm_struct_i_free];
1388 m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
1389 }
0f2d19dd
JB
1390 }
1391 }
1392 break;
1393 case scm_tcs_cons_imcar:
1394 case scm_tcs_cons_nimcar:
1395 case scm_tcs_closures:
e641afaf 1396 case scm_tc7_pws:
0f2d19dd
JB
1397 if (SCM_GCMARKP (scmptr))
1398 goto cmrkcontinue;
1399 break;
1400 case scm_tc7_wvect:
1401 if (SCM_GC8MARKP (scmptr))
1402 {
1403 goto c8mrkcontinue;
1404 }
1405 else
1406 {
ab4bef85
JB
1407 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1408 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
0f2d19dd
JB
1409 break;
1410 }
1411
1412 case scm_tc7_vector:
1413 case scm_tc7_lvector:
1414#ifdef CCLO
1415 case scm_tc7_cclo:
1416#endif
1417 if (SCM_GC8MARKP (scmptr))
1418 goto c8mrkcontinue;
1419
1420 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1421 freechars:
1422 scm_must_free (SCM_CHARS (scmptr));
1423 /* SCM_SETCHARS(scmptr, 0);*/
1424 break;
afe5177e 1425#ifdef HAVE_ARRAYS
0f2d19dd
JB
1426 case scm_tc7_bvect:
1427 if SCM_GC8MARKP (scmptr)
1428 goto c8mrkcontinue;
1429 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1430 goto freechars;
1431 case scm_tc7_byvect:
1432 if SCM_GC8MARKP (scmptr)
1433 goto c8mrkcontinue;
1434 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1435 goto freechars;
1436 case scm_tc7_ivect:
1437 case scm_tc7_uvect:
1438 if SCM_GC8MARKP (scmptr)
1439 goto c8mrkcontinue;
1440 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1441 goto freechars;
1442 case scm_tc7_svect:
1443 if SCM_GC8MARKP (scmptr)
1444 goto c8mrkcontinue;
1445 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1446 goto freechars;
5c11cc9d 1447#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1448 case scm_tc7_llvect:
1449 if SCM_GC8MARKP (scmptr)
1450 goto c8mrkcontinue;
1451 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1452 goto freechars;
1453#endif
1454 case scm_tc7_fvect:
1455 if SCM_GC8MARKP (scmptr)
1456 goto c8mrkcontinue;
1457 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1458 goto freechars;
1459 case scm_tc7_dvect:
1460 if SCM_GC8MARKP (scmptr)
1461 goto c8mrkcontinue;
1462 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1463 goto freechars;
1464 case scm_tc7_cvect:
1465 if SCM_GC8MARKP (scmptr)
1466 goto c8mrkcontinue;
1467 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1468 goto freechars;
afe5177e 1469#endif
0f2d19dd 1470 case scm_tc7_substring:
0f2d19dd
JB
1471 if (SCM_GC8MARKP (scmptr))
1472 goto c8mrkcontinue;
1473 break;
1474 case scm_tc7_string:
0f2d19dd
JB
1475 if (SCM_GC8MARKP (scmptr))
1476 goto c8mrkcontinue;
1477 m += SCM_HUGE_LENGTH (scmptr) + 1;
1478 goto freechars;
1479 case scm_tc7_msymbol:
1480 if (SCM_GC8MARKP (scmptr))
1481 goto c8mrkcontinue;
cf551a2b
DH
1482 m += (SCM_LENGTH (scmptr) + 1
1483 + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
0f2d19dd
JB
1484 scm_must_free ((char *)SCM_SLOTS (scmptr));
1485 break;
1486 case scm_tc7_contin:
1487 if SCM_GC8MARKP (scmptr)
1488 goto c8mrkcontinue;
0db18cf4 1489 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
c68296f8
MV
1490 if (SCM_VELTS (scmptr))
1491 goto freechars;
0f2d19dd
JB
1492 case scm_tc7_ssymbol:
1493 if SCM_GC8MARKP(scmptr)
1494 goto c8mrkcontinue;
1495 break;
1496 case scm_tcs_subrs:
1497 continue;
1498 case scm_tc7_port:
1499 if SCM_GC8MARKP (scmptr)
1500 goto c8mrkcontinue;
1501 if SCM_OPENP (scmptr)
1502 {
1503 int k = SCM_PTOBNUM (scmptr);
1504 if (!(k < scm_numptob))
1505 goto sweeperr;
1506 /* Keep "revealed" ports alive. */
945fec60 1507 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1508 continue;
1509 /* Yes, I really do mean scm_ptobs[k].free */
1510 /* rather than ftobs[k].close. .close */
1511 /* is for explicit CLOSE-PORT by user */
84af0382 1512 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1513 SCM_SETSTREAM (scmptr, 0);
1514 scm_remove_from_port_table (scmptr);
1515 scm_gc_ports_collected++;
24e68a57 1516 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
0f2d19dd
JB
1517 }
1518 break;
1519 case scm_tc7_smob:
1520 switch SCM_GCTYP16 (scmptr)
1521 {
1522 case scm_tc_free_cell:
acb0a19c 1523 case scm_tc16_real:
0f2d19dd
JB
1524 if SCM_GC8MARKP (scmptr)
1525 goto c8mrkcontinue;
1526 break;
1527#ifdef SCM_BIGDIG
acb0a19c 1528 case scm_tc16_big:
0f2d19dd
JB
1529 if SCM_GC8MARKP (scmptr)
1530 goto c8mrkcontinue;
1531 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1532 goto freechars;
1533#endif /* def SCM_BIGDIG */
acb0a19c 1534 case scm_tc16_complex:
0f2d19dd
JB
1535 if SCM_GC8MARKP (scmptr)
1536 goto c8mrkcontinue;
acb0a19c
MD
1537 m += 2 * sizeof (double);
1538 goto freechars;
0f2d19dd
JB
1539 default:
1540 if SCM_GC8MARKP (scmptr)
1541 goto c8mrkcontinue;
1542
1543 {
1544 int k;
1545 k = SCM_SMOBNUM (scmptr);
1546 if (!(k < scm_numsmob))
1547 goto sweeperr;
c8045e8d 1548 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1549 break;
1550 }
1551 }
1552 break;
1553 default:
1554 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1555 }
0f2d19dd
JB
1556#if 0
1557 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1558 exit (2);
1559#endif
4c48ba06 1560 if (!--left_to_collect)
4a4c9785
MD
1561 {
1562 SCM_SETCAR (scmptr, nfreelist);
4c48ba06
MD
1563 *freelist->clustertail = scmptr;
1564 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1565
4a4c9785 1566 nfreelist = SCM_EOL;
4c48ba06
MD
1567 freelist->collected += span * freelist->cluster_size;
1568 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1569 }
1570 else
4a4c9785
MD
1571 {
1572 /* Stick the new cell on the front of nfreelist. It's
1573 critical that we mark this cell as freed; otherwise, the
1574 conservative collector might trace it as some other type
1575 of object. */
54778cd3 1576 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
1577 SCM_SETCDR (scmptr, nfreelist);
1578 nfreelist = scmptr;
1579 }
a00c95d9 1580
0f2d19dd
JB
1581 continue;
1582 c8mrkcontinue:
1583 SCM_CLRGC8MARK (scmptr);
1584 continue;
1585 cmrkcontinue:
1586 SCM_CLRGCMARK (scmptr);
1587 }
1588#ifdef GC_FREE_SEGMENTS
1589 if (n == seg_size)
1590 {
15e9d186
JB
1591 register long j;
1592
4c48ba06 1593 freelist->heap_size -= seg_size;
cf2d30f6
JB
1594 free ((char *) scm_heap_table[i].bounds[0]);
1595 scm_heap_table[i].bounds[0] = 0;
1596 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1597 scm_heap_table[j - 1] = scm_heap_table[j];
1598 scm_n_heap_segs -= 1;
cf2d30f6 1599 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1600 }
1601 else
1602#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1603 {
1604 /* Update the real freelist pointer to point to the head of
1605 the list of free cells we've built for this segment. */
4c48ba06 1606 freelist->cells = nfreelist;
4c48ba06 1607 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1608 }
1609
fca7547b 1610#ifdef GUILE_DEBUG_FREELIST
4c48ba06 1611 scm_check_freelist (freelist == &scm_master_freelist
8ded62a3
MD
1612 ? scm_freelist
1613 : scm_freelist2);
cf2d30f6
JB
1614 scm_map_free_list ();
1615#endif
4a4c9785 1616 }
a00c95d9 1617
4c48ba06
MD
1618 gc_sweep_freelist_finish (&scm_master_freelist);
1619 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1620
8ded62a3
MD
1621 /* When we move to POSIX threads private freelists should probably
1622 be GC-protected instead. */
1623 scm_freelist = SCM_EOL;
1624 scm_freelist2 = SCM_EOL;
a00c95d9 1625
b37fe1c5 1626 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f 1627 scm_gc_yield -= scm_cells_allocated;
0f2d19dd
JB
1628 scm_mallocated -= m;
1629 scm_gc_malloc_collected = m;
1630}
1631
1632
1633\f
1634
1635/* {Front end to malloc}
1636 *
c68296f8 1637 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
0f2d19dd
JB
1638 *
1639 * These functions provide services comperable to malloc, realloc, and
1640 * free. They are for allocating malloced parts of scheme objects.
1641 * The primary purpose of the front end is to impose calls to gc.
1642 */
1643
bc9d9bb2 1644
0f2d19dd
JB
1645/* scm_must_malloc
1646 * Return newly malloced storage or throw an error.
1647 *
1648 * The parameter WHAT is a string for error reporting.
a00c95d9 1649 * If the threshold scm_mtrigger will be passed by this
0f2d19dd
JB
1650 * allocation, or if the first call to malloc fails,
1651 * garbage collect -- on the presumption that some objects
1652 * using malloced storage may be collected.
1653 *
1654 * The limit scm_mtrigger may be raised by this allocation.
1655 */
07806695 1656void *
e4ef2330 1657scm_must_malloc (scm_sizet size, const char *what)
0f2d19dd 1658{
07806695 1659 void *ptr;
15e9d186 1660 unsigned long nm = scm_mallocated + size;
e4ef2330
MD
1661
1662 if (nm <= scm_mtrigger)
0f2d19dd 1663 {
07806695 1664 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1665 if (NULL != ptr)
1666 {
1667 scm_mallocated = nm;
bc9d9bb2
MD
1668#ifdef GUILE_DEBUG_MALLOC
1669 scm_malloc_register (ptr, what);
1670#endif
0f2d19dd
JB
1671 return ptr;
1672 }
1673 }
6064dcc6 1674
0f2d19dd 1675 scm_igc (what);
e4ef2330 1676
0f2d19dd 1677 nm = scm_mallocated + size;
07806695 1678 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1679 if (NULL != ptr)
1680 {
1681 scm_mallocated = nm;
6064dcc6
MV
1682 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1683 if (nm > scm_mtrigger)
1684 scm_mtrigger = nm + nm / 2;
1685 else
1686 scm_mtrigger += scm_mtrigger / 2;
1687 }
bc9d9bb2
MD
1688#ifdef GUILE_DEBUG_MALLOC
1689 scm_malloc_register (ptr, what);
1690#endif
1691
0f2d19dd
JB
1692 return ptr;
1693 }
e4ef2330
MD
1694
1695 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1696 return 0; /* never reached */
0f2d19dd
JB
1697}
1698
1699
1700/* scm_must_realloc
1701 * is similar to scm_must_malloc.
1702 */
07806695
JB
1703void *
1704scm_must_realloc (void *where,
e4ef2330
MD
1705 scm_sizet old_size,
1706 scm_sizet size,
3eeba8d4 1707 const char *what)
0f2d19dd 1708{
07806695 1709 void *ptr;
e4ef2330
MD
1710 scm_sizet nm = scm_mallocated + size - old_size;
1711
1712 if (nm <= scm_mtrigger)
0f2d19dd 1713 {
07806695 1714 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1715 if (NULL != ptr)
1716 {
1717 scm_mallocated = nm;
bc9d9bb2
MD
1718#ifdef GUILE_DEBUG_MALLOC
1719 scm_malloc_reregister (where, ptr, what);
1720#endif
0f2d19dd
JB
1721 return ptr;
1722 }
1723 }
e4ef2330 1724
0f2d19dd 1725 scm_igc (what);
e4ef2330
MD
1726
1727 nm = scm_mallocated + size - old_size;
07806695 1728 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1729 if (NULL != ptr)
1730 {
1731 scm_mallocated = nm;
6064dcc6
MV
1732 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1733 if (nm > scm_mtrigger)
1734 scm_mtrigger = nm + nm / 2;
1735 else
1736 scm_mtrigger += scm_mtrigger / 2;
1737 }
bc9d9bb2
MD
1738#ifdef GUILE_DEBUG_MALLOC
1739 scm_malloc_reregister (where, ptr, what);
1740#endif
0f2d19dd
JB
1741 return ptr;
1742 }
e4ef2330
MD
1743
1744 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1745 return 0; /* never reached */
0f2d19dd
JB
1746}
1747
a00c95d9 1748void
07806695 1749scm_must_free (void *obj)
0f2d19dd 1750{
bc9d9bb2
MD
1751#ifdef GUILE_DEBUG_MALLOC
1752 scm_malloc_unregister (obj);
1753#endif
0f2d19dd
JB
1754 if (obj)
1755 free (obj);
1756 else
1757 scm_wta (SCM_INUM0, "already free", "");
1758}
0f2d19dd 1759
c68296f8
MV
1760/* Announce that there has been some malloc done that will be freed
1761 * during gc. A typical use is for a smob that uses some malloced
1762 * memory but can not get it from scm_must_malloc (for whatever
1763 * reason). When a new object of this smob is created you call
1764 * scm_done_malloc with the size of the object. When your smob free
1765 * function is called, be sure to include this size in the return
1766 * value. */
0f2d19dd 1767
c68296f8 1768void
6e8d25a6 1769scm_done_malloc (long size)
c68296f8
MV
1770{
1771 scm_mallocated += size;
1772
1773 if (scm_mallocated > scm_mtrigger)
1774 {
1775 scm_igc ("foreign mallocs");
1776 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1777 {
1778 if (scm_mallocated > scm_mtrigger)
1779 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1780 else
1781 scm_mtrigger += scm_mtrigger / 2;
1782 }
1783 }
1784}
1785
1786
1787\f
0f2d19dd
JB
1788
1789/* {Heap Segments}
1790 *
1791 * Each heap segment is an array of objects of a particular size.
1792 * Every segment has an associated (possibly shared) freelist.
1793 * A table of segment records is kept that records the upper and
1794 * lower extents of the segment; this is used during the conservative
1795 * phase of gc to identify probably gc roots (because they point
c68296f8 1796 * into valid segments at reasonable offsets). */
0f2d19dd
JB
1797
1798/* scm_expmem
1799 * is true if the first segment was smaller than INIT_HEAP_SEG.
1800 * If scm_expmem is set to one, subsequent segment allocations will
1801 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1802 */
1803int scm_expmem = 0;
1804
4c48ba06
MD
1805scm_sizet scm_max_segment_size;
1806
0f2d19dd
JB
1807/* scm_heap_org
1808 * is the lowest base address of any heap segment.
1809 */
1810SCM_CELLPTR scm_heap_org;
1811
a00c95d9 1812scm_heap_seg_data_t * scm_heap_table = 0;
0f2d19dd
JB
1813int scm_n_heap_segs = 0;
1814
0f2d19dd
JB
1815/* init_heap_seg
1816 * initializes a new heap segment and return the number of objects it contains.
1817 *
1818 * The segment origin, segment size in bytes, and the span of objects
1819 * in cells are input parameters. The freelist is both input and output.
1820 *
1821 * This function presume that the scm_heap_table has already been expanded
1822 * to accomodate a new segment record.
1823 */
1824
1825
a00c95d9 1826static scm_sizet
4c48ba06 1827init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
0f2d19dd
JB
1828{
1829 register SCM_CELLPTR ptr;
0f2d19dd 1830 SCM_CELLPTR seg_end;
15e9d186 1831 int new_seg_index;
acb0a19c 1832 int n_new_cells;
4c48ba06 1833 int span = freelist->span;
a00c95d9 1834
0f2d19dd
JB
1835 if (seg_org == NULL)
1836 return 0;
1837
a00c95d9 1838 ptr = CELL_UP (seg_org, span);
acb0a19c 1839
a00c95d9 1840 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 1841 */
a00c95d9 1842 seg_end = CELL_DN ((char *) seg_org + size, span);
0f2d19dd 1843
a00c95d9 1844 /* Find the right place and insert the segment record.
0f2d19dd
JB
1845 *
1846 */
1847 for (new_seg_index = 0;
1848 ( (new_seg_index < scm_n_heap_segs)
1849 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1850 new_seg_index++)
1851 ;
1852
1853 {
1854 int i;
1855 for (i = scm_n_heap_segs; i > new_seg_index; --i)
1856 scm_heap_table[i] = scm_heap_table[i - 1];
1857 }
a00c95d9 1858
0f2d19dd
JB
1859 ++scm_n_heap_segs;
1860
945fec60 1861 scm_heap_table[new_seg_index].span = span;
4c48ba06 1862 scm_heap_table[new_seg_index].freelist = freelist;
195e6201
DH
1863 scm_heap_table[new_seg_index].bounds[0] = ptr;
1864 scm_heap_table[new_seg_index].bounds[1] = seg_end;
0f2d19dd
JB
1865
1866
a00c95d9 1867 /* Compute the least valid object pointer w/in this segment
0f2d19dd 1868 */
a00c95d9 1869 ptr = CELL_UP (ptr, span);
0f2d19dd
JB
1870
1871
acb0a19c
MD
1872 /*n_new_cells*/
1873 n_new_cells = seg_end - ptr;
0f2d19dd 1874
4c48ba06 1875 freelist->heap_size += n_new_cells;
4a4c9785 1876
a00c95d9 1877 /* Partition objects in this segment into clusters */
4a4c9785
MD
1878 {
1879 SCM clusters;
1880 SCM *clusterp = &clusters;
4c48ba06 1881 int n_cluster_cells = span * freelist->cluster_size;
4a4c9785 1882
4c48ba06 1883 while (n_new_cells > span) /* at least one spine + one freecell */
4a4c9785 1884 {
4c48ba06
MD
1885 /* Determine end of cluster
1886 */
1887 if (n_new_cells >= n_cluster_cells)
1888 {
1889 seg_end = ptr + n_cluster_cells;
1890 n_new_cells -= n_cluster_cells;
1891 }
4a4c9785 1892 else
a00c95d9
ML
1893 /* [cmm] looks like the segment size doesn't divide cleanly by
1894 cluster size. bad cmm! */
1895 abort();
4a4c9785 1896
4c48ba06
MD
1897 /* Allocate cluster spine
1898 */
4a4c9785
MD
1899 *clusterp = PTR2SCM (ptr);
1900 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
1901 clusterp = SCM_CDRLOC (*clusterp);
4a4c9785 1902 ptr += span;
a00c95d9 1903
4a4c9785
MD
1904 while (ptr < seg_end)
1905 {
96f6f4ae
DH
1906 SCM scmptr = PTR2SCM (ptr);
1907
54778cd3 1908 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
1909 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
1910 ptr += span;
1911 }
4c48ba06 1912
4a4c9785
MD
1913 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
1914 }
a00c95d9 1915
4a4c9785
MD
1916 /* Patch up the last cluster pointer in the segment
1917 * to join it to the input freelist.
1918 */
4c48ba06
MD
1919 *clusterp = freelist->clusters;
1920 freelist->clusters = clusters;
4a4c9785
MD
1921 }
1922
4c48ba06
MD
1923#ifdef DEBUGINFO
1924 fprintf (stderr, "H");
1925#endif
0f2d19dd 1926 return size;
0f2d19dd
JB
1927}
1928
a00c95d9
ML
1929static scm_sizet
1930round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
1931{
1932 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
1933
1934 return
1935 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
1936 + ALIGNMENT_SLACK (freelist);
1937}
1938
a00c95d9 1939static void
4c48ba06 1940alloc_some_heap (scm_freelist_t *freelist)
0f2d19dd 1941{
a00c95d9 1942 scm_heap_seg_data_t * tmptable;
0f2d19dd 1943 SCM_CELLPTR ptr;
b37fe1c5 1944 long len;
a00c95d9 1945
0f2d19dd
JB
1946 /* Critical code sections (such as the garbage collector)
1947 * aren't supposed to add heap segments.
1948 */
1949 if (scm_gc_heap_lock)
1950 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
1951
1952 /* Expand the heap tables to have room for the new segment.
1953 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1954 * only if the allocation of the segment itself succeeds.
1955 */
a00c95d9 1956 len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
0f2d19dd 1957
a00c95d9 1958 SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
0f2d19dd
JB
1959 realloc ((char *)scm_heap_table, len)));
1960 if (!tmptable)
1961 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
1962 else
1963 scm_heap_table = tmptable;
1964
1965
1966 /* Pick a size for the new heap segment.
a00c95d9 1967 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
1968 * gc.h
1969 */
4c48ba06 1970 {
1811ebce
MD
1971 /* Assure that the new segment is predicted to be large enough.
1972 *
1973 * New yield should at least equal GC fraction of new heap size, i.e.
1974 *
1975 * y + dh > f * (h + dh)
1976 *
1977 * y : yield
8fef55a8 1978 * f : min yield fraction
1811ebce
MD
1979 * h : heap size
1980 * dh : size of new heap segment
1981 *
1982 * This gives dh > (f * h - y) / (1 - f)
bda1446c 1983 */
8fef55a8 1984 int f = freelist->min_yield_fraction;
1811ebce
MD
1985 long h = SCM_HEAP_SIZE;
1986 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
1987 len = SCM_EXPHEAP (freelist->heap_size);
1988#ifdef DEBUGINFO
1989 fprintf (stderr, "(%d < %d)", len, min_cells);
1990#endif
1991 if (len < min_cells)
1811ebce 1992 len = min_cells + freelist->cluster_size;
4c48ba06 1993 len *= sizeof (scm_cell);
1811ebce
MD
1994 /* force new sampling */
1995 freelist->collected = LONG_MAX;
4c48ba06 1996 }
a00c95d9 1997
4c48ba06
MD
1998 if (len > scm_max_segment_size)
1999 len = scm_max_segment_size;
0f2d19dd
JB
2000
2001 {
2002 scm_sizet smallest;
2003
a00c95d9 2004 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9 2005
0f2d19dd 2006 if (len < smallest)
a00c95d9 2007 len = smallest;
0f2d19dd
JB
2008
2009 /* Allocate with decaying ambition. */
2010 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2011 && (len >= smallest))
2012 {
1811ebce 2013 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2014 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2015 if (ptr)
2016 {
a00c95d9 2017 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2018 return;
2019 }
2020 len /= 2;
2021 }
2022 }
2023
2024 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
2025}
2026
2027
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
	    "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  /* Walk every cell of every installed heap segment, looking for gloc
   * cells whose symbol is NAME (or for all glocs, when NAME is #t).  */
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM cell = PTR2SCM (p);
	  if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
	    {
	      /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
	       * struct cell.  See the corresponding comment in scm_gc_mark.
	       */
	      scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
	      SCM gloc_car = SCM_PACK (word0); /* access as gloc */
	      SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
	      /* "Unhash" the binding by storing NAME itself back into the
	       * cell's first word.  Vcell values 0 and 1 are skipped --
	       * presumably they distinguish non-gloc (struct) cells;
	       * TODO confirm against scm_gc_mark.  */
	      if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
		  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
		{
		  SCM_SET_CELL_OBJECT_0 (cell, name);
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
0f2d19dd
JB
2068
2069
2070\f
2071/* {GC Protection Helper Functions}
2072 */
2073
2074
/* Keep the object pointed to by PTR visible to the conservative GC.
 * The body is deliberately empty: simply passing PTR to an out-of-line
 * function is what keeps the object reachable from the caller's stack
 * up to this point (see the comment before scm_return_first below).  */
void
scm_remember (SCM *ptr)
{ /* empty */ }
0f2d19dd 2078
1cc91f1b 2079
/*
  These crazy functions prevent garbage collection
  of arguments after the first argument by
  ensuring they remain live throughout the
  function because they are used in the last
  line of the code block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */

/* Return ELT unchanged; the variadic arguments exist only to stay
 * live (on the caller's stack) until this call returns.  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
2093
41b0806d
GB
/* Integer-valued variant of scm_return_first: returns I unchanged
 * while keeping the trailing arguments alive for the conservative
 * stack-scanning GC (see the comment before scm_return_first).  */
int
scm_return_first_int (int i, ...)
{
  int first = i;
  return first;
}
2099
0f2d19dd 2100
/* Permanently protect OBJ from garbage collection by consing it onto
 * the global scm_permobjs list.  There is no corresponding "release"
 * here; the protection lasts for the lifetime of the process.
 * Returns OBJ.  */
SCM
scm_permanent_object (SCM obj)
{
  /* DEFER/ALLOW interrupts around the list update; this critical
     section keeps scm_permobjs consistent.  */
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2109
2110
7bd4fbe2
MD
/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_unprotect_object (OBJ).  Calls to scm_protect/unprotect_object nest,
   i. e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more often
   than it has been protected before.  The function scm_protect_object returns
   OBJ.
*/

/* Implementation note: For every object X, there is a counter which
   scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
*/

SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  /* Look up (or create, with an initial count of 0) OBJ's entry in the
     scm_protects hash table, then bump the protection count stored in
     the handle's cdr.  */
  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}
2140
2141
/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    {
      /* No entry means OBJ was never protected (or was already fully
         unprotected) -- a caller error, so fail loudly.  */
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* Decrement the nesting count; drop the hash table entry
         entirely once it reaches zero.  */
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}
2174
c45acc34
JB
/* Nonzero once process termination has begun (set by cleanup below).  */
int terminating;

/* called on process termination.  The signature depends on which exit
 * hook mechanism the platform provides: atexit takes a no-argument
 * handler, on_exit passes the exit status and a user argument.  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  /* Flush buffered port output so nothing is lost at exit.  */
  scm_flush_all_ports ();
}
ef290276 2195
0f2d19dd 2196\f
/* Allocate and install the first heap segment for FREELIST.  Try
 * INIT_HEAP_SIZE bytes first; on failure fall back to the default
 * SCM_HEAP_SEG_SIZE.  Also derives the freelist's initial min_yield /
 * grow_heap_p GC parameters from the resulting heap size.  Returns 0
 * on success, 1 if neither segment could be installed.  */
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
  /* NOTE(review): malloc's result is passed unchecked; init_heap_seg
     presumably treats a NULL pointer as failure -- TODO confirm.  */
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    scm_expmem = 1;

  /* min_yield is min_yield_fraction percent of the current heap size;
     grow_heap_p records whether the heap is already below that mark.  */
  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
			   / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
2221
2222\f
4c48ba06
MD
2223static void
2224init_freelist (scm_freelist_t *freelist,
2225 int span,
2226 int cluster_size,
8fef55a8 2227 int min_yield)
4c48ba06
MD
2228{
2229 freelist->clusters = SCM_EOL;
2230 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2231 freelist->left_to_collect = 0;
2232 freelist->clusters_allocated = 0;
8fef55a8
MD
2233 freelist->min_yield = 0;
2234 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2235 freelist->span = span;
2236 freelist->collected = 0;
1811ebce 2237 freelist->collected_1 = 0;
4c48ba06
MD
2238 freelist->heap_size = 0;
2239}
2240
/* One-time initialisation of the storage subsystem: the two master
 * freelists (1-cell and 2-cell spans), the initial heap segments, the
 * GC hooks, the port table, the process-exit handler, and a number of
 * permanently rooted objects.  A zero argument selects the compiled-in
 * default for that parameter.  Returns 0 on success, 1 if an initial
 * allocation failed.  */
int
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
		  scm_sizet init_heap_size_2, int gc_trigger_2,
		  scm_sizet max_segment_size)
{
  scm_sizet j;

  if (!init_heap_size_1)
    init_heap_size_1 = scm_default_init_heap_size_1;
  if (!init_heap_size_2)
    init_heap_size_2 = scm_default_init_heap_size_2;

  /* Clear the GC root table and block GC until setup is complete.  */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  init_freelist (&scm_master_freelist,
		 1, SCM_CLUSTER_SIZE_1,
		 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
  init_freelist (&scm_master_freelist2,
		 2, SCM_CLUSTER_SIZE_2,
		 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
  scm_max_segment_size
    = max_segment_size ? max_segment_size : scm_default_max_segment_size;

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Room for two segment descriptors; alloc_some_heap reallocs the
     table as further segments are added.  */
  scm_heap_table = ((scm_heap_seg_data_t *)
		    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));

  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

  /* Arrange for ports to be flushed at process exit (see cleanup).  */
#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  /* A self-referential (circular) list of SCM_UNDEFINED.  */
  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
  scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif
  return 0;
}
939794ce 2322
0f2d19dd
JB
2323\f
2324
939794ce
DH
/* Scheme-visible hook run after each garbage collection (created in
 * scm_init_gc as "after-gc-hook").  */
SCM scm_after_gc_hook;

#if (SCM_DEBUG_DEPRECATED == 0)
static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
#endif /* SCM_DEBUG_DEPRECATED == 0 */
/* The system async whose marking (see mark_gc_async) schedules
 * gc_async_thunk to run.  */
static SCM gc_async;
2331
2332
/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);

#if (SCM_DEBUG_DEPRECATED == 0)

  /* The following code will be removed in Guile 1.5. */
  /* Backwards compatibility: if the deprecated gc-thunk vcell holds a
     procedure, apply it with no arguments.  */
  if (SCM_NFALSEP (scm_gc_vcell))
    {
      SCM proc = SCM_CDR (scm_gc_vcell);

      if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
	scm_apply (proc, SCM_EOL, SCM_EOL);
    }

#endif /* SCM_DEBUG_DEPRECATED == 0 */

  return SCM_UNSPECIFIED;
}
2357
2358
/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void * hook_data, void *func_data, void *data)
{
  /* The three parameters are unused; the signature is dictated by the
     C hook interface.  */
  scm_system_async_mark (gc_async);
  return NULL;
}
2370
2371
0f2d19dd
JB
/* Register the GC hooks and the after-gc async with the interpreter.
 * Called once at startup, after the storage subsystem is up.  */
void
scm_init_gc ()
{
  SCM after_gc_thunk;

  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

#if (SCM_DEBUG_DEPRECATED == 0)
  scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
#endif /* SCM_DEBUG_DEPRECATED == 0 */
  /* Dirk:FIXME:: We don't really want a binding here. */
  after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk);

  /* Every GC marks the async, so gc_async_thunk (and thus the
     after-gc-hook) runs as soon as the evaluator handles asyncs.  */
  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}
89e00824
ML
2390
2391/*
2392 Local Variables:
2393 c-file-style: "gnu"
2394 End:
2395*/