[bpt/guile.git] / libguile / gc.c
acb0a19c 1/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
a00c95d9 2 *
0f2d19dd
JB
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
a00c95d9 7 *
0f2d19dd
JB
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
a00c95d9 12 *
0f2d19dd
JB
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
82892bed
JB
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
0f2d19dd
JB
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
82892bed 40 * If you do not wish that, delete this exception notice. */
1bbd0b84
GB
41
42/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
37ddcaf6
MD
45/* #define DEBUGINFO */
46
0f2d19dd
JB
47\f
48#include <stdio.h>
a0599745 49#include "libguile/_scm.h"
0a7a7445 50#include "libguile/eval.h"
a0599745
MD
51#include "libguile/stime.h"
52#include "libguile/stackchk.h"
53#include "libguile/struct.h"
a0599745
MD
54#include "libguile/smob.h"
55#include "libguile/unif.h"
56#include "libguile/async.h"
57#include "libguile/ports.h"
58#include "libguile/root.h"
59#include "libguile/strings.h"
60#include "libguile/vectors.h"
801cb5e7 61#include "libguile/weaks.h"
686765af 62#include "libguile/hashtab.h"
a0599745
MD
63
64#include "libguile/validate.h"
65#include "libguile/gc.h"
fce59c93 66
bc9d9bb2 67#ifdef GUILE_DEBUG_MALLOC
a0599745 68#include "libguile/debug-malloc.h"
bc9d9bb2
MD
69#endif
70
0f2d19dd 71#ifdef HAVE_MALLOC_H
95b88819 72#include <malloc.h>
0f2d19dd
JB
73#endif
74
75#ifdef HAVE_UNISTD_H
95b88819 76#include <unistd.h>
0f2d19dd
JB
77#endif
78
1cc91f1b
JB
79#ifdef __STDC__
80#include <stdarg.h>
81#define var_start(x, y) va_start(x, y)
82#else
83#include <varargs.h>
84#define var_start(x, y) va_start(x)
85#endif
86
0f2d19dd
JB
87\f
88/* {heap tuning parameters}
a00c95d9 89 *
0f2d19dd
JB
90 * These are parameters for controlling memory allocation. The heap
 91 * is the area out of which scm_cons pairs and object headers are allocated.
92 *
93 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
94 * 64 bit machine. The units of the _SIZE parameters are bytes.
95 * Cons pairs and object headers occupy one heap cell.
96 *
97 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
98 * allocated initially the heap will grow by half its current size
99 * each subsequent time more heap is needed.
100 *
101 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
102 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
103 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
104 * is in scm_init_storage() and alloc_some_heap() in sys.c
a00c95d9 105 *
0f2d19dd
JB
106 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
107 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
108 *
109 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
110 * is needed.
111 *
112 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
a00c95d9 113 * trigger a GC.
6064dcc6
MV
114 *
115 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
116 * reclaimed by a GC triggered by must_malloc. If less than this is
117 * reclaimed, the trigger threshold is raised. [I don't know what a
118 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
a00c95d9 119 * work around an oscillation that caused almost constant GC.]
0f2d19dd
JB
120 */
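/* Illustrative arithmetic, not from the original source: SCM_EXPHEAP
 * below is defined as scm_heap_size / 2, so a heap of 45000 cells is
 * asked to grow by 22500 cells, i.e. to 1.5 times its current size,
 * whenever more heap is needed.  Likewise SCM_MTRIGGER_HYSTERESIS is
 * SCM_INIT_MALLOC_LIMIT / 10 = 10000 bytes: a GC triggered by
 * must_malloc has to reclaim at least that much malloced storage,
 * otherwise the trigger threshold scm_mtrigger is raised.
 */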
121
8fef55a8
MD
122/*
 123 * Heap size 45000 and 40% min yield gives quick startup and no extra
 124 * heap allocation.  Higher min yield values may lead to large heaps,
 125 * especially if the code varies its maximum consumption between the
 126 * different freelists.
127 */
aeacfc8f
MD
128int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell));
129int scm_default_min_yield_1 = 40;
4c48ba06 130#define SCM_CLUSTER_SIZE_1 2000L
4c48ba06 131
aeacfc8f 132int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell));
4c48ba06
MD
133/* The following value may seem large, but note that if we get to GC at
134 * all, this means that we have a numerically intensive application
135 */
aeacfc8f
MD
136int scm_default_min_yield_2 = 40;
137#define SCM_CLUSTER_SIZE_2 1000L
4c48ba06 138
aeacfc8f 139int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
4c48ba06 140
945fec60 141#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
0f2d19dd
JB
142#ifdef _QC
143# define SCM_HEAP_SEG_SIZE 32768L
144#else
145# ifdef sequent
4c48ba06 146# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
0f2d19dd 147# else
4c48ba06 148# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
0f2d19dd
JB
149# endif
150#endif
4c48ba06 151/* Make the heap grow by a factor of 1.5 */
4a4c9785 152#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
0f2d19dd 153#define SCM_INIT_MALLOC_LIMIT 100000
6064dcc6 154#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
0f2d19dd
JB
155
156/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
157 bounds for allocated storage */
158
159#ifdef PROT386
160/*in 386 protected mode we must only adjust the offset */
a00c95d9
ML
161# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
162# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
0f2d19dd
JB
163#else
164# ifdef _UNICOS
a00c95d9
ML
165# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
166# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
0f2d19dd 167# else
a00c95d9
ML
168# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
169# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
0f2d19dd
JB
170# endif /* UNICOS */
171#endif /* PROT386 */
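/* A worked example of the generic CELL_UP / CELL_DN case above,
 * for illustration only, assuming 8-byte cells (32-bit machine):
 *
 *   CELL_UP (0x1003, 1) == 0x1008   (round up to an 8-byte boundary)
 *   CELL_DN (0x1003, 1) == 0x1000   (round down to an 8-byte boundary)
 *   CELL_UP (0x1003, 2) == 0x1010   (2-cell span, 16-byte alignment)
 *
 * init_heap_seg below uses these macros to clip a freshly malloced
 * segment to cell-aligned inner bounds.
 */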
a00c95d9
ML
172#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
173#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
b37fe1c5
MD
174#define SCM_HEAP_SIZE \
175 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
1811ebce 176#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
0f2d19dd
JB
177
178
179\f
945fec60 180/* scm_freelists
0f2d19dd 181 */
945fec60 182
a00c95d9
ML
183typedef struct scm_freelist_t {
184 /* collected cells */
185 SCM cells;
a00c95d9
ML
186 /* number of cells left to collect before cluster is full */
187 unsigned int left_to_collect;
b37fe1c5
MD
188 /* number of clusters which have been allocated */
189 unsigned int clusters_allocated;
8fef55a8
MD
190 /* a list of freelists, each of size cluster_size,
191 * except the last one which may be shorter
192 */
a00c95d9
ML
193 SCM clusters;
194 SCM *clustertail;
b37fe1c5 195 /* this is the number of objects in each cluster, including the spine cell */
a00c95d9 196 int cluster_size;
8fef55a8 197 /* indicates that we should grow the heap instead of garbage collecting
a00c95d9
ML
198 */
199 int grow_heap_p;
8fef55a8 200 /* minimum yield on this list in order not to grow the heap
a00c95d9 201 */
8fef55a8
MD
202 long min_yield;
203 /* defines min_yield as percent of total heap size
a00c95d9 204 */
8fef55a8 205 int min_yield_fraction;
a00c95d9
ML
206 /* number of cells per object on this list */
207 int span;
208 /* number of collected cells during last GC */
1811ebce
MD
209 long collected;
210 /* number of collected cells during penultimate GC */
211 long collected_1;
a00c95d9
ML
212 /* total number of cells in heap segments
213 * belonging to this list.
214 */
1811ebce 215 long heap_size;
a00c95d9
ML
216} scm_freelist_t;
217
4a4c9785
MD
218SCM scm_freelist = SCM_EOL;
219scm_freelist_t scm_master_freelist = {
b37fe1c5 220 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
4a4c9785
MD
221};
222SCM scm_freelist2 = SCM_EOL;
223scm_freelist_t scm_master_freelist2 = {
b37fe1c5 224 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
4a4c9785 225};
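/* Reading the positional initializers above against the field order of
 * scm_freelist_t: cells = SCM_EOL, left_to_collect = 0,
 * clusters_allocated = 0, clusters = SCM_EOL, clustertail = 0,
 * cluster_size = SCM_CLUSTER_SIZE_1 (resp. SCM_CLUSTER_SIZE_2),
 * grow_heap_p = 0, min_yield = 0, min_yield_fraction = 0,
 * span = 1 (resp. 2), collected = 0, collected_1 = 0; heap_size is
 * left zero-initialized.  The min_yield fields are presumably filled
 * in elsewhere from the scm_default_* variables declared above.
 */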
0f2d19dd
JB
226
227/* scm_mtrigger
228 * is the number of bytes of must_malloc allocation needed to trigger gc.
229 */
15e9d186 230unsigned long scm_mtrigger;
0f2d19dd
JB
231
232
233/* scm_gc_heap_lock
234 * If set, don't expand the heap. Set only during gc, during which no allocation
235 * is supposed to take place anyway.
236 */
237int scm_gc_heap_lock = 0;
238
239/* GC Blocking
240 * Don't pause for collection if this is set -- just
241 * expand the heap.
242 */
0f2d19dd
JB
243int scm_block_gc = 1;
244
0f2d19dd
JB
245/* During collection, this accumulates objects holding
246 * weak references.
247 */
ab4bef85 248SCM scm_weak_vectors;
0f2d19dd
JB
249
250/* GC Statistics Keeping
251 */
252unsigned long scm_cells_allocated = 0;
a5c314c8 253long scm_mallocated = 0;
b37fe1c5 254unsigned long scm_gc_cells_collected;
8b0d194f 255unsigned long scm_gc_yield;
37ddcaf6 256static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
0f2d19dd
JB
257unsigned long scm_gc_malloc_collected;
258unsigned long scm_gc_ports_collected;
259unsigned long scm_gc_rt;
260unsigned long scm_gc_time_taken = 0;
261
262SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
263SCM_SYMBOL (sym_heap_size, "cell-heap-size");
264SCM_SYMBOL (sym_mallocated, "bytes-malloced");
265SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
266SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
267SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
268
a00c95d9 269typedef struct scm_heap_seg_data_t
0f2d19dd 270{
cf2d30f6
JB
271 /* lower and upper bounds of the segment */
272 SCM_CELLPTR bounds[2];
273
274 /* address of the head-of-freelist pointer for this segment's cells.
275 All segments usually point to the same one, scm_freelist. */
4c48ba06 276 scm_freelist_t *freelist;
cf2d30f6 277
fe517a7d 278 /* number of cells per object in this segment */
945fec60 279 int span;
a00c95d9 280} scm_heap_seg_data_t;
0f2d19dd
JB
281
282
283
945fec60 284static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
b6efc951
DH
285
286typedef enum { return_on_error, abort_on_error } policy_on_error;
287static void alloc_some_heap (scm_freelist_t *, policy_on_error);
0f2d19dd
JB
288
289
290\f
cf2d30f6
JB
291/* Debugging functions. */
292
bb2c57fa 293#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6
JB
294
295/* Return the number of the heap segment containing CELL. */
296static int
297which_seg (SCM cell)
298{
299 int i;
300
301 for (i = 0; i < scm_n_heap_segs; i++)
195e6201
DH
302 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
303 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
cf2d30f6
JB
304 return i;
305 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
945fec60 306 SCM_UNPACK (cell));
cf2d30f6
JB
307 abort ();
308}
309
310
8ded62a3
MD
311static void
312map_free_list (scm_freelist_t *master, SCM freelist)
313{
314 int last_seg = -1, count = 0;
315 SCM f;
a00c95d9 316
3f5d82cd 317 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
8ded62a3
MD
318 {
319 int this_seg = which_seg (f);
320
321 if (this_seg != last_seg)
322 {
323 if (last_seg != -1)
324 fprintf (stderr, " %5d %d-cells in segment %d\n",
325 count, master->span, last_seg);
326 last_seg = this_seg;
327 count = 0;
328 }
329 count++;
330 }
331 if (last_seg != -1)
332 fprintf (stderr, " %5d %d-cells in segment %d\n",
333 count, master->span, last_seg);
334}
cf2d30f6 335
a00c95d9 336SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c
MD
337 (),
338 "Print debugging information about the free-list.\n"
5384bc5b 339 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
acb0a19c
MD
340#define FUNC_NAME s_scm_map_free_list
341{
4c48ba06
MD
342 int i;
343 fprintf (stderr, "%d segments total (%d:%d",
344 scm_n_heap_segs,
345 scm_heap_table[0].span,
346 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
347 for (i = 1; i < scm_n_heap_segs; i++)
348 fprintf (stderr, ", %d:%d",
349 scm_heap_table[i].span,
350 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
351 fprintf (stderr, ")\n");
8ded62a3
MD
352 map_free_list (&scm_master_freelist, scm_freelist);
353 map_free_list (&scm_master_freelist2, scm_freelist2);
cf2d30f6
JB
354 fflush (stderr);
355
356 return SCM_UNSPECIFIED;
357}
1bbd0b84 358#undef FUNC_NAME
cf2d30f6 359
4c48ba06
MD
360static int last_cluster;
361static int last_size;
362
5384bc5b
MD
363static int
364free_list_length (char *title, int i, SCM freelist)
365{
366 SCM ls;
367 int n = 0;
3f5d82cd
DH
368 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
369 if (SCM_FREE_CELL_P (ls))
5384bc5b
MD
370 ++n;
371 else
372 {
373 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
374 abort ();
375 }
4c48ba06
MD
376 if (n != last_size)
377 {
378 if (i > 0)
379 {
380 if (last_cluster == i - 1)
381 fprintf (stderr, "\t%d\n", last_size);
382 else
383 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
384 }
385 if (i >= 0)
386 fprintf (stderr, "%s %d", title, i);
387 else
388 fprintf (stderr, "%s\t%d\n", title, n);
389 last_cluster = i;
390 last_size = n;
391 }
5384bc5b
MD
392 return n;
393}
394
395static void
396free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
397{
398 SCM clusters;
4c48ba06 399 int i = 0, len, n = 0;
5384bc5b
MD
400 fprintf (stderr, "%s\n\n", title);
401 n += free_list_length ("free list", -1, freelist);
402 for (clusters = master->clusters;
403 SCM_NNULLP (clusters);
404 clusters = SCM_CDR (clusters))
4c48ba06
MD
405 {
406 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
407 n += len;
408 }
409 if (last_cluster == i - 1)
410 fprintf (stderr, "\t%d\n", last_size);
411 else
412 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
413 fprintf (stderr, "\ntotal %d objects\n\n", n);
5384bc5b
MD
414}
415
a00c95d9 416SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b
MD
417 (),
418 "Print debugging information about the free-list.\n"
419 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
420#define FUNC_NAME s_scm_free_list_length
421{
b37fe1c5
MD
422 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
423 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 424 return SCM_UNSPECIFIED;
5384bc5b
MD
425}
426#undef FUNC_NAME
427
bb2c57fa
MD
428#endif
429
430#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
431
432/* Number of calls to SCM_NEWCELL since startup. */
433static unsigned long scm_newcell_count;
acb0a19c 434static unsigned long scm_newcell2_count;
cf2d30f6
JB
435
436/* Search freelist for anything that isn't marked as a free cell.
437 Abort if we find something. */
8ded62a3
MD
438static void
439scm_check_freelist (SCM freelist)
440{
441 SCM f;
442 int i = 0;
443
3f5d82cd
DH
444 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
445 if (!SCM_FREE_CELL_P (f))
8ded62a3
MD
446 {
447 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
448 scm_newcell_count, i);
8ded62a3
MD
449 abort ();
450 }
451}
cf2d30f6
JB
452
453static int scm_debug_check_freelist = 0;
25748c78 454
a00c95d9 455SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 456 (SCM flag),
da4a1dba
GB
457 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
458 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
459 "compile-time flag was selected.\n")
1bbd0b84 460#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 461{
945fec60 462 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
463 return SCM_UNSPECIFIED;
464}
1bbd0b84 465#undef FUNC_NAME
25748c78
GB
466
467
4a4c9785
MD
468SCM
469scm_debug_newcell (void)
470{
471 SCM new;
472
473 scm_newcell_count++;
474 if (scm_debug_check_freelist)
475 {
8ded62a3 476 scm_check_freelist (scm_freelist);
4a4c9785
MD
477 scm_gc();
478 }
479
480 /* The rest of this is supposed to be identical to the SCM_NEWCELL
481 macro. */
3f5d82cd 482 if (SCM_NULLP (scm_freelist))
4a4c9785
MD
483 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
484 else
485 {
486 new = scm_freelist;
3f5d82cd
DH
487 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
488 SCM_SET_FREE_CELL_TYPE (new, scm_tc16_allocated);
4a4c9785
MD
489 }
490
491 return new;
492}
493
494SCM
495scm_debug_newcell2 (void)
496{
497 SCM new;
498
499 scm_newcell2_count++;
500 if (scm_debug_check_freelist)
501 {
8ded62a3 502 scm_check_freelist (scm_freelist2);
4a4c9785
MD
503 scm_gc ();
504 }
505
506 /* The rest of this is supposed to be identical to the SCM_NEWCELL
507 macro. */
3f5d82cd 508 if (SCM_NULLP (scm_freelist2))
4a4c9785
MD
509 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
510 else
511 {
512 new = scm_freelist2;
3f5d82cd
DH
513 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
514 SCM_SET_FREE_CELL_TYPE (new, scm_tc16_allocated);
4a4c9785
MD
515 }
516
517 return new;
518}
519
fca7547b 520#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
521
522\f
0f2d19dd 523
b37fe1c5
MD
524static unsigned long
525master_cells_allocated (scm_freelist_t *master)
526{
527 int objects = master->clusters_allocated * (master->cluster_size - 1);
528 if (SCM_NULLP (master->clusters))
529 objects -= master->left_to_collect;
530 return master->span * objects;
531}
532
533static unsigned long
534freelist_length (SCM freelist)
535{
536 int n;
3f5d82cd 537 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
b37fe1c5
MD
538 ++n;
539 return n;
540}
541
542static unsigned long
543compute_cells_allocated ()
544{
545 return (scm_cells_allocated
546 + master_cells_allocated (&scm_master_freelist)
547 + master_cells_allocated (&scm_master_freelist2)
548 - scm_master_freelist.span * freelist_length (scm_freelist)
549 - scm_master_freelist2.span * freelist_length (scm_freelist2));
550}
b37fe1c5 551
0f2d19dd
JB
552/* {Scheme Interface to GC}
553 */
554
a00c95d9 555SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 556 (),
b380b885 557 "Returns an association list of statistics about Guile's current use of storage. ")
1bbd0b84 558#define FUNC_NAME s_scm_gc_stats
0f2d19dd
JB
559{
560 int i;
561 int n;
562 SCM heap_segs;
c209c88e
GB
563 long int local_scm_mtrigger;
564 long int local_scm_mallocated;
565 long int local_scm_heap_size;
566 long int local_scm_cells_allocated;
567 long int local_scm_gc_time_taken;
0f2d19dd
JB
568 SCM answer;
569
570 SCM_DEFER_INTS;
939794ce
DH
571
572 ++scm_block_gc;
573
0f2d19dd
JB
574 retry:
575 heap_segs = SCM_EOL;
576 n = scm_n_heap_segs;
577 for (i = scm_n_heap_segs; i--; )
578 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
579 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
580 heap_segs);
581 if (scm_n_heap_segs != n)
582 goto retry;
939794ce
DH
583
584 --scm_block_gc;
0f2d19dd 585
7febb4a2
MD
586 /* Below, we cons to produce the resulting list. We want a snapshot of
587 * the heap situation before consing.
588 */
0f2d19dd
JB
589 local_scm_mtrigger = scm_mtrigger;
590 local_scm_mallocated = scm_mallocated;
b37fe1c5 591 local_scm_heap_size = SCM_HEAP_SIZE;
b37fe1c5 592 local_scm_cells_allocated = compute_cells_allocated ();
0f2d19dd
JB
593 local_scm_gc_time_taken = scm_gc_time_taken;
594
595 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
596 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
597 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
598 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
599 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
600 scm_cons (sym_heap_segments, heap_segs),
601 SCM_UNDEFINED);
602 SCM_ALLOW_INTS;
603 return answer;
604}
1bbd0b84 605#undef FUNC_NAME
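/* Sketch of the association list returned by gc-stats; the keys and
 * their order follow the SCM_SYMBOLs and the scm_listify call above,
 * but the numbers are invented for illustration:
 *
 *   ((gc-time-taken . 132)
 *    (cells-allocated . 121806)
 *    (cell-heap-size . 362000)
 *    (bytes-malloced . 200000)
 *    (gc-malloc-threshold . 100000)
 *    (cell-heap-segments (<upper> . <lower>) ...))
 */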
0f2d19dd
JB
606
607
a00c95d9 608void
6e8d25a6 609scm_gc_start (const char *what)
0f2d19dd
JB
610{
611 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
b37fe1c5 612 scm_gc_cells_collected = 0;
37ddcaf6 613 scm_gc_yield_1 = scm_gc_yield;
8b0d194f
MD
614 scm_gc_yield = (scm_cells_allocated
615 + master_cells_allocated (&scm_master_freelist)
616 + master_cells_allocated (&scm_master_freelist2));
0f2d19dd
JB
617 scm_gc_malloc_collected = 0;
618 scm_gc_ports_collected = 0;
619}
620
939794ce 621
a00c95d9 622void
0f2d19dd 623scm_gc_end ()
0f2d19dd
JB
624{
625 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
c209c88e 626 scm_gc_time_taken += scm_gc_rt;
0f2d19dd
JB
627}
628
629
a00c95d9 630SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 631 (SCM obj),
b380b885
MD
632 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
633 "returned by this function for @var{obj}")
1bbd0b84 634#define FUNC_NAME s_scm_object_address
0f2d19dd 635{
54778cd3 636 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 637}
1bbd0b84 638#undef FUNC_NAME
0f2d19dd
JB
639
640
a00c95d9 641SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
1bbd0b84 642 (),
b380b885
MD
643 "Scans all of SCM objects and reclaims for further use those that are\n"
644 "no longer accessible.")
1bbd0b84 645#define FUNC_NAME s_scm_gc
0f2d19dd
JB
646{
647 SCM_DEFER_INTS;
648 scm_igc ("call");
649 SCM_ALLOW_INTS;
650 return SCM_UNSPECIFIED;
651}
1bbd0b84 652#undef FUNC_NAME
0f2d19dd
JB
653
654
655\f
656/* {C Interface For When GC is Triggered}
657 */
658
b37fe1c5 659static void
8fef55a8 660adjust_min_yield (scm_freelist_t *freelist)
b37fe1c5 661{
8fef55a8 662 /* min yield is adjusted upwards so that next predicted total yield
bda1446c 663 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
664 * `min_yield_fraction' of total heap size. Note, however, that
665 * the absolute value of min_yield will correspond to `collected'
bda1446c 666 * on one master (the one which currently is triggering GC).
b37fe1c5 667 *
bda1446c
MD
668 * The reason why we look at total yield instead of cells collected
669 * on one list is that we want to take other freelists into account.
670 * On this freelist, we know that (local) yield = collected cells,
671 * but that's probably not the case on the other lists.
b37fe1c5
MD
672 *
673 * (We might consider computing a better prediction, for example
674 * by computing an average over multiple GC:s.)
675 */
8fef55a8 676 if (freelist->min_yield_fraction)
b37fe1c5 677 {
37ddcaf6 678 /* Pick largest of last two yields. */
8fef55a8
MD
679 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
680 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5
MD
681#ifdef DEBUGINFO
682 fprintf (stderr, " after GC = %d, delta = %d\n",
683 scm_cells_allocated,
684 delta);
685#endif
686 if (delta > 0)
8fef55a8 687 freelist->min_yield += delta;
b37fe1c5
MD
688 }
689}
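/* Worked example of the adjustment above, with invented numbers: for a
 * total heap of 100000 cells and min_yield_fraction = 40, the target
 * yield is 40000 cells.  If the larger of the last two total yields
 * was only 30000 cells, delta = 10000 > 0 and min_yield is raised by
 * 10000, making it more likely that the next sweep on this freelist
 * requests heap growth instead (see gc_sweep_freelist_finish below).
 */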
690
b6efc951 691
4a4c9785 692/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
693 * common while the freelist will be individual for each thread.
694 */
4a4c9785
MD
695
696SCM
697scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
698{
699 SCM cell;
700 ++scm_ints_disabled;
4c48ba06
MD
701 do
702 {
c7387918 703 if (SCM_NULLP (master->clusters))
4c48ba06 704 {
150c200b 705 if (master->grow_heap_p || scm_block_gc)
4c48ba06 706 {
b6efc951
DH
707 /* In order to reduce gc frequency, try to allocate a new heap
708 * segment first, even if gc might find some free cells. If we
709 * can't obtain a new heap segment, we will try gc later.
710 */
4c48ba06 711 master->grow_heap_p = 0;
b6efc951 712 alloc_some_heap (master, return_on_error);
4c48ba06 713 }
b6efc951 714 if (SCM_NULLP (master->clusters))
b37fe1c5 715 {
b6efc951
DH
716 /* The heap was not grown, either because it wasn't scheduled to
717 * grow, or because there was not enough memory available. In
718 * both cases we have to try gc to get some free cells.
719 */
37ddcaf6
MD
720#ifdef DEBUGINFO
721 fprintf (stderr, "allocated = %d, ",
722 scm_cells_allocated
723 + master_cells_allocated (&scm_master_freelist)
724 + master_cells_allocated (&scm_master_freelist2));
725#endif
b37fe1c5 726 scm_igc ("cells");
8fef55a8 727 adjust_min_yield (master);
c7387918
DH
728 if (SCM_NULLP (master->clusters))
729 {
b6efc951
DH
730 /* gc could not free any cells. Now, we _must_ allocate a
731 * new heap segment, because there is no other possibility
732 * to provide a new cell for the caller.
733 */
734 alloc_some_heap (master, abort_on_error);
c7387918 735 }
b37fe1c5 736 }
4c48ba06
MD
737 }
738 cell = SCM_CAR (master->clusters);
739 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 740 ++master->clusters_allocated;
4c48ba06
MD
741 }
742 while (SCM_NULLP (cell));
4a4c9785 743 --scm_ints_disabled;
3f5d82cd
DH
744 *freelist = SCM_FREE_CELL_CDR (cell);
745 SCM_SET_FREE_CELL_TYPE (cell, scm_tc16_allocated);
4a4c9785
MD
746 return cell;
747}
748
b6efc951 749
4c48ba06
MD
750#if 0
751/* This is a support routine which can be used to reserve a cluster
752 * for some special use, such as debugging. It won't be useful until
753 * free cells are preserved between garbage collections.
754 */
755
756void
757scm_alloc_cluster (scm_freelist_t *master)
758{
759 SCM freelist, cell;
760 cell = scm_gc_for_newcell (master, &freelist);
761 SCM_SETCDR (cell, freelist);
762 return cell;
763}
764#endif
765
801cb5e7
MD
766
767scm_c_hook_t scm_before_gc_c_hook;
768scm_c_hook_t scm_before_mark_c_hook;
769scm_c_hook_t scm_before_sweep_c_hook;
770scm_c_hook_t scm_after_sweep_c_hook;
771scm_c_hook_t scm_after_gc_c_hook;
772
b6efc951 773
0f2d19dd 774void
1bbd0b84 775scm_igc (const char *what)
0f2d19dd
JB
776{
777 int j;
778
801cb5e7 779 scm_c_hook_run (&scm_before_gc_c_hook, 0);
4c48ba06
MD
780#ifdef DEBUGINFO
781 fprintf (stderr,
782 SCM_NULLP (scm_freelist)
783 ? "*"
784 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
785#endif
42db06f0
MD
786#ifdef USE_THREADS
787 /* During the critical section, only the current thread may run. */
788 SCM_THREAD_CRITICAL_SECTION_START;
789#endif
790
e242dfd2 791 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 792
ab4bef85
JB
793 scm_gc_start (what);
794
795 if (!scm_stack_base || scm_block_gc)
796 {
797 scm_gc_end ();
798 return;
799 }
800
a5c314c8
JB
801 if (scm_mallocated < 0)
802 /* The byte count of allocated objects has underflowed. This is
803 probably because you forgot to report the sizes of objects you
804 have allocated, by calling scm_done_malloc or some such. When
805 the GC freed them, it subtracted their size from
806 scm_mallocated, which underflowed. */
807 abort ();
c45acc34 808
ab4bef85
JB
809 if (scm_gc_heap_lock)
810 /* We've invoked the collector while a GC is already in progress.
811 That should never happen. */
812 abort ();
0f2d19dd
JB
813
814 ++scm_gc_heap_lock;
ab4bef85 815
0f2d19dd
JB
816 /* flush dead entries from the continuation stack */
817 {
818 int x;
819 int bound;
820 SCM * elts;
821 elts = SCM_VELTS (scm_continuation_stack);
822 bound = SCM_LENGTH (scm_continuation_stack);
823 x = SCM_INUM (scm_continuation_stack_ptr);
824 while (x < bound)
825 {
826 elts[x] = SCM_BOOL_F;
827 ++x;
828 }
829 }
830
801cb5e7
MD
831 scm_c_hook_run (&scm_before_mark_c_hook, 0);
832
42db06f0 833#ifndef USE_THREADS
a00c95d9 834
0f2d19dd
JB
835 /* Protect from the C stack. This must be the first marking
836 * done because it provides information about what objects
837 * are "in-use" by the C code. "in-use" objects are those
838 * for which the values from SCM_LENGTH and SCM_CHARS must remain
839 * usable. This requirement is stricter than a liveness
840 * requirement -- in particular, it constrains the implementation
841 * of scm_vector_set_length_x.
842 */
843 SCM_FLUSH_REGISTER_WINDOWS;
844 /* This assumes that all registers are saved into the jmp_buf */
845 setjmp (scm_save_regs_gc_mark);
846 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
ce4a361d
JB
847 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
848 sizeof scm_save_regs_gc_mark)
849 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
850
851 {
6ba93e5e 852 scm_sizet stack_len = scm_stack_size (scm_stack_base);
0f2d19dd 853#ifdef SCM_STACK_GROWS_UP
6ba93e5e 854 scm_mark_locations (scm_stack_base, stack_len);
0f2d19dd 855#else
6ba93e5e 856 scm_mark_locations (scm_stack_base - stack_len, stack_len);
0f2d19dd
JB
857#endif
858 }
859
42db06f0
MD
860#else /* USE_THREADS */
861
862 /* Mark every thread's stack and registers */
945fec60 863 scm_threads_mark_stacks ();
42db06f0
MD
864
865#endif /* USE_THREADS */
0f2d19dd
JB
866
867 /* FIXME: insert a phase to un-protect string-data preserved
868 * in scm_vector_set_length_x.
869 */
870
871 j = SCM_NUM_PROTECTS;
872 while (j--)
873 scm_gc_mark (scm_sys_protects[j]);
874
9de33deb
MD
875 /* FIXME: we should have a means to register C functions to be run
876 * in different phases of GC
a00c95d9 877 */
9de33deb 878 scm_mark_subr_table ();
a00c95d9 879
42db06f0
MD
880#ifndef USE_THREADS
881 scm_gc_mark (scm_root->handle);
882#endif
a00c95d9 883
801cb5e7 884 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
0493cd89 885
0f2d19dd
JB
886 scm_gc_sweep ();
887
801cb5e7
MD
888 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
889
0f2d19dd
JB
890 --scm_gc_heap_lock;
891 scm_gc_end ();
42db06f0
MD
892
893#ifdef USE_THREADS
894 SCM_THREAD_CRITICAL_SECTION_END;
895#endif
801cb5e7 896 scm_c_hook_run (&scm_after_gc_c_hook, 0);
0f2d19dd
JB
897}
898
899\f
939794ce 900
a00c95d9 901/* {Mark/Sweep}
0f2d19dd
JB
902 */
903
904
905
906/* Mark an object precisely.
907 */
a00c95d9 908void
1bbd0b84 909scm_gc_mark (SCM p)
acf4331f 910#define FUNC_NAME "scm_gc_mark"
0f2d19dd
JB
911{
912 register long i;
913 register SCM ptr;
914
915 ptr = p;
916
917gc_mark_loop:
918 if (SCM_IMP (ptr))
919 return;
920
921gc_mark_nimp:
3f5d82cd 922 if (!SCM_CELLP (ptr))
acf4331f 923 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
0f2d19dd
JB
924
925 switch (SCM_TYP7 (ptr))
926 {
927 case scm_tcs_cons_nimcar:
928 if (SCM_GCMARKP (ptr))
929 break;
930 SCM_SETGCMARK (ptr);
931 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
932 {
933 ptr = SCM_CAR (ptr);
934 goto gc_mark_nimp;
935 }
936 scm_gc_mark (SCM_CAR (ptr));
937 ptr = SCM_GCCDR (ptr);
938 goto gc_mark_nimp;
939 case scm_tcs_cons_imcar:
acb0a19c
MD
940 if (SCM_GCMARKP (ptr))
941 break;
942 SCM_SETGCMARK (ptr);
943 ptr = SCM_GCCDR (ptr);
944 goto gc_mark_loop;
e641afaf 945 case scm_tc7_pws:
0f2d19dd
JB
946 if (SCM_GCMARKP (ptr))
947 break;
948 SCM_SETGCMARK (ptr);
54778cd3 949 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
0f2d19dd
JB
950 ptr = SCM_GCCDR (ptr);
951 goto gc_mark_loop;
952 case scm_tcs_cons_gloc:
953 if (SCM_GCMARKP (ptr))
954 break;
955 SCM_SETGCMARK (ptr);
956 {
c8045e8d
DH
957 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
958 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
959 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
960 * pointer to a struct vtable data region. The fact that these are
 961 * accessed in the same way restricts the possibilities to change the
962 * data layout of structs or heap cells.
963 */
964 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
965 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
966 switch (vtable_data [scm_vtable_index_vcell])
0f2d19dd
JB
967 {
968 default:
c8045e8d
DH
969 {
970 /* ptr is a gloc */
971 SCM gloc_car = SCM_PACK (word0);
972 scm_gc_mark (gloc_car);
973 ptr = SCM_GCCDR (ptr);
974 goto gc_mark_loop;
975 }
0f2d19dd
JB
976 case 1: /* ! */
977 case 0: /* ! */
978 {
c8045e8d
DH
979 /* ptr is a struct */
980 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
981 int len = SCM_LENGTH (layout);
982 char * fields_desc = SCM_CHARS (layout);
14d1400f
JB
983 /* We're using SCM_GCCDR here like STRUCT_DATA, except
984 that it removes the mark */
c8045e8d 985 scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));
a00c95d9 986
c8045e8d 987 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
aa0761ec 988 {
c8045e8d
DH
989 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
990 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
aa0761ec 991 }
ad75306c
MD
992 if (len)
993 {
c8045e8d
DH
994 int x;
995
996 for (x = 0; x < len - 2; x += 2, ++struct_data)
ad75306c 997 if (fields_desc[x] == 'p')
c8045e8d 998 scm_gc_mark (SCM_PACK (*struct_data));
ad75306c
MD
999 if (fields_desc[x] == 'p')
1000 {
1001 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
c8045e8d
DH
1002 for (x = *struct_data; x; --x)
1003 scm_gc_mark (SCM_PACK (*++struct_data));
ad75306c 1004 else
c8045e8d 1005 scm_gc_mark (SCM_PACK (*struct_data));
ad75306c
MD
1006 }
1007 }
c8045e8d 1008 if (vtable_data [scm_vtable_index_vcell] == 0)
0f2d19dd 1009 {
c8045e8d
DH
1010 vtable_data [scm_vtable_index_vcell] = 1;
1011 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
0f2d19dd
JB
1012 goto gc_mark_loop;
1013 }
1014 }
1015 }
1016 }
1017 break;
1018 case scm_tcs_closures:
1019 if (SCM_GCMARKP (ptr))
1020 break;
1021 SCM_SETGCMARK (ptr);
1022 if (SCM_IMP (SCM_CDR (ptr)))
1023 {
1024 ptr = SCM_CLOSCAR (ptr);
1025 goto gc_mark_nimp;
1026 }
1027 scm_gc_mark (SCM_CLOSCAR (ptr));
1028 ptr = SCM_GCCDR (ptr);
1029 goto gc_mark_nimp;
1030 case scm_tc7_vector:
1031 case scm_tc7_lvector:
1032#ifdef CCLO
1033 case scm_tc7_cclo:
1034#endif
1035 if (SCM_GC8MARKP (ptr))
1036 break;
1037 SCM_SETGC8MARK (ptr);
1038 i = SCM_LENGTH (ptr);
1039 if (i == 0)
1040 break;
1041 while (--i > 0)
1042 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1043 scm_gc_mark (SCM_VELTS (ptr)[i]);
1044 ptr = SCM_VELTS (ptr)[0];
1045 goto gc_mark_loop;
 1046 case scm_tc7_contin:
 1047 if (SCM_GC8MARKP (ptr))
 1048 break;
1049 SCM_SETGC8MARK (ptr);
c68296f8 1050 if (SCM_VELTS (ptr))
41b0806d 1051 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
c68296f8
MV
1052 (scm_sizet)
1053 (SCM_LENGTH (ptr) +
1054 (sizeof (SCM_STACKITEM) + -1 +
1055 sizeof (scm_contregs)) /
1056 sizeof (SCM_STACKITEM)));
0f2d19dd 1057 break;
afe5177e 1058#ifdef HAVE_ARRAYS
0f2d19dd
JB
1059 case scm_tc7_bvect:
1060 case scm_tc7_byvect:
1061 case scm_tc7_ivect:
1062 case scm_tc7_uvect:
1063 case scm_tc7_fvect:
1064 case scm_tc7_dvect:
1065 case scm_tc7_cvect:
1066 case scm_tc7_svect:
5c11cc9d 1067#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1068 case scm_tc7_llvect:
1069#endif
afe5177e 1070#endif
0f2d19dd 1071 case scm_tc7_string:
0f2d19dd
JB
1072 SCM_SETGC8MARK (ptr);
1073 break;
1074
1075 case scm_tc7_substring:
0f2d19dd
JB
1076 if (SCM_GC8MARKP(ptr))
1077 break;
1078 SCM_SETGC8MARK (ptr);
1079 ptr = SCM_CDR (ptr);
1080 goto gc_mark_loop;
1081
1082 case scm_tc7_wvect:
1083 if (SCM_GC8MARKP(ptr))
1084 break;
ab4bef85
JB
1085 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1086 scm_weak_vectors = ptr;
0f2d19dd
JB
1087 SCM_SETGC8MARK (ptr);
1088 if (SCM_IS_WHVEC_ANY (ptr))
1089 {
1090 int x;
1091 int len;
1092 int weak_keys;
1093 int weak_values;
1094
1095 len = SCM_LENGTH (ptr);
1096 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1097 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
a00c95d9 1098
0f2d19dd
JB
1099 for (x = 0; x < len; ++x)
1100 {
1101 SCM alist;
1102 alist = SCM_VELTS (ptr)[x];
46408039
JB
1103
1104 /* mark everything on the alist except the keys or
1105 * values, according to weak_values and weak_keys. */
0b5f3f34 1106 while ( SCM_CONSP (alist)
0f2d19dd 1107 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1108 && SCM_CONSP (SCM_CAR (alist)))
1109 {
1110 SCM kvpair;
1111 SCM next_alist;
1112
1113 kvpair = SCM_CAR (alist);
1114 next_alist = SCM_CDR (alist);
a00c95d9 1115 /*
0f2d19dd
JB
1116 * Do not do this:
1117 * SCM_SETGCMARK (alist);
1118 * SCM_SETGCMARK (kvpair);
1119 *
1120 * It may be that either the key or value is protected by
1121 * an escaped reference to part of the spine of this alist.
1122 * If we mark the spine here, and only mark one or neither of the
1123 * key and value, they may never be properly marked.
1124 * This leads to a horrible situation in which an alist containing
1125 * freelist cells is exported.
1126 *
1127 * So only mark the spines of these arrays last of all marking.
1128 * If somebody confuses us by constructing a weak vector
1129 * with a circular alist then we are hosed, but at least we
1130 * won't prematurely drop table entries.
1131 */
1132 if (!weak_keys)
1133 scm_gc_mark (SCM_CAR (kvpair));
1134 if (!weak_values)
1135 scm_gc_mark (SCM_GCCDR (kvpair));
1136 alist = next_alist;
1137 }
1138 if (SCM_NIMP (alist))
1139 scm_gc_mark (alist);
1140 }
1141 }
1142 break;
1143
1144 case scm_tc7_msymbol:
1145 if (SCM_GC8MARKP(ptr))
1146 break;
1147 SCM_SETGC8MARK (ptr);
1148 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1149 ptr = SCM_SYMBOL_PROPS (ptr);
1150 goto gc_mark_loop;
1151 case scm_tc7_ssymbol:
1152 if (SCM_GC8MARKP(ptr))
1153 break;
1154 SCM_SETGC8MARK (ptr);
1155 break;
1156 case scm_tcs_subrs:
9de33deb 1157 break;
0f2d19dd
JB
1158 case scm_tc7_port:
1159 i = SCM_PTOBNUM (ptr);
1160 if (!(i < scm_numptob))
1161 goto def;
1162 if (SCM_GC8MARKP (ptr))
1163 break;
dc53f026 1164 SCM_SETGC8MARK (ptr);
ebf7394e
GH
1165 if (SCM_PTAB_ENTRY(ptr))
1166 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
dc53f026
JB
1167 if (scm_ptobs[i].mark)
1168 {
1169 ptr = (scm_ptobs[i].mark) (ptr);
1170 goto gc_mark_loop;
1171 }
1172 else
1173 return;
0f2d19dd
JB
1174 break;
1175 case scm_tc7_smob:
1176 if (SCM_GC8MARKP (ptr))
1177 break;
dc53f026 1178 SCM_SETGC8MARK (ptr);
acb0a19c 1179 switch (SCM_GCTYP16 (ptr))
0f2d19dd
JB
1180 { /* should be faster than going through scm_smobs */
1181 case scm_tc_free_cell:
1182 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1bbd0b84 1183 case scm_tc16_allocated:
acb0a19c
MD
1184 case scm_tc16_big:
1185 case scm_tc16_real:
1186 case scm_tc16_complex:
0f2d19dd
JB
1187 break;
1188 default:
1189 i = SCM_SMOBNUM (ptr);
1190 if (!(i < scm_numsmob))
1191 goto def;
dc53f026
JB
1192 if (scm_smobs[i].mark)
1193 {
1194 ptr = (scm_smobs[i].mark) (ptr);
1195 goto gc_mark_loop;
1196 }
1197 else
1198 return;
0f2d19dd
JB
1199 }
1200 break;
1201 default:
acf4331f
DH
1202 def:
1203 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd
JB
1204 }
1205}
acf4331f 1206#undef FUNC_NAME
0f2d19dd
JB
1207
1208
1209/* Mark a Region Conservatively
1210 */
1211
a00c95d9 1212void
6e8d25a6 1213scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
0f2d19dd
JB
1214{
1215 register long m = n;
1216 register int i, j;
1217 register SCM_CELLPTR ptr;
1218
1219 while (0 <= --m)
c67baafd 1220 if (SCM_CELLP (* (SCM *) &x[m]))
0f2d19dd 1221 {
195e6201 1222 ptr = SCM2PTR (* (SCM *) &x[m]);
0f2d19dd
JB
1223 i = 0;
1224 j = scm_n_heap_segs - 1;
1225 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1226 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1227 {
1228 while (i <= j)
1229 {
1230 int seg_id;
1231 seg_id = -1;
1232 if ( (i == j)
1233 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1234 seg_id = i;
1235 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1236 seg_id = j;
1237 else
1238 {
1239 int k;
1240 k = (i + j) / 2;
1241 if (k == i)
1242 break;
1243 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1244 {
1245 j = k;
1246 ++i;
1247 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1248 continue;
1249 else
1250 break;
1251 }
1252 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1253 {
1254 i = k;
1255 --j;
1256 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1257 continue;
1258 else
1259 break;
1260 }
1261 }
47457e8a
DH
1262 if (scm_heap_table[seg_id].span == 1
1263 || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
1264 scm_gc_mark (* (SCM *) &x[m]);
0f2d19dd
JB
1265 break;
1266 }
0f2d19dd
JB
1267 }
1268 }
1269}
1270
1271
1a548472
DH
1272/* The function scm_cellp determines whether an SCM value can be regarded as a
1273 * pointer to a cell on the heap. Binary search is used in order to determine
1274 * the heap segment that contains the cell.
1275 */
2e11a577 1276int
6e8d25a6 1277scm_cellp (SCM value)
2e11a577 1278{
1a548472
DH
1279 if (SCM_CELLP (value)) {
1280 scm_cell * ptr = SCM2PTR (value);
1281 unsigned int i = 0;
1282 unsigned int j = scm_n_heap_segs - 1;
1283
1284 while (i < j) {
1285 int k = (i + j) / 2;
1286 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1287 j = k;
1288 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1289 i = k + 1;
1290 }
1291 }
2e11a577 1292
1a548472
DH
1293 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1294 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1a548472
DH
1295 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
1296 return 1;
1297 } else {
1298 return 0;
2e11a577 1299 }
1a548472
DH
1300 } else {
1301 return 0;
1302 }
2e11a577
MD
1303}
1304
1305
4c48ba06
MD
1306static void
1307gc_sweep_freelist_start (scm_freelist_t *freelist)
1308{
1309 freelist->cells = SCM_EOL;
1310 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1311 freelist->clusters_allocated = 0;
4c48ba06
MD
1312 freelist->clusters = SCM_EOL;
1313 freelist->clustertail = &freelist->clusters;
1811ebce 1314 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1315 freelist->collected = 0;
1316}
1317
1318static void
1319gc_sweep_freelist_finish (scm_freelist_t *freelist)
1320{
1811ebce 1321 int collected;
4c48ba06 1322 *freelist->clustertail = freelist->cells;
3f5d82cd 1323 if (!SCM_NULLP (freelist->cells))
4c48ba06
MD
1324 {
1325 SCM c = freelist->cells;
1326 SCM_SETCAR (c, SCM_CDR (c));
1327 SCM_SETCDR (c, SCM_EOL);
1328 freelist->collected +=
1329 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1330 }
b37fe1c5 1331 scm_gc_cells_collected += freelist->collected;
a00c95d9 1332
8fef55a8 1333 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1334 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1335 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1336 * size. This means that a too low yield is compensated by more
1337 * heap on the list which is currently doing most work, which is
1338 * just what we want.
1339 */
1811ebce 1340 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1341 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06 1342}
0f2d19dd 1343
a00c95d9 1344void
0f2d19dd 1345scm_gc_sweep ()
acf4331f 1346#define FUNC_NAME "scm_gc_sweep"
0f2d19dd
JB
1347{
1348 register SCM_CELLPTR ptr;
0f2d19dd 1349 register SCM nfreelist;
4c48ba06 1350 register scm_freelist_t *freelist;
0f2d19dd 1351 register long m;
0f2d19dd 1352 register int span;
15e9d186 1353 long i;
0f2d19dd
JB
1354 scm_sizet seg_size;
1355
0f2d19dd 1356 m = 0;
0f2d19dd 1357
4c48ba06
MD
1358 gc_sweep_freelist_start (&scm_master_freelist);
1359 gc_sweep_freelist_start (&scm_master_freelist2);
a00c95d9 1360
cf2d30f6 1361 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1362 {
4c48ba06 1363 register unsigned int left_to_collect;
4c48ba06 1364 register scm_sizet j;
15e9d186 1365
cf2d30f6
JB
1366 /* Unmarked cells go onto the front of the freelist this heap
1367 segment points to. Rather than updating the real freelist
1368 pointer as we go along, we accumulate the new head in
1369 nfreelist. Then, if it turns out that the entire segment is
1370 free, we free (i.e., malloc's free) the whole segment, and
1371 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1372 freelist = scm_heap_table[i].freelist;
1373 nfreelist = freelist->cells;
4c48ba06 1374 left_to_collect = freelist->left_to_collect;
945fec60 1375 span = scm_heap_table[i].span;
cf2d30f6 1376
a00c95d9
ML
1377 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1378 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
0f2d19dd
JB
1379 for (j = seg_size + span; j -= span; ptr += span)
1380 {
96f6f4ae
DH
1381 SCM scmptr = PTR2SCM (ptr);
1382
0f2d19dd
JB
1383 switch SCM_TYP7 (scmptr)
1384 {
1385 case scm_tcs_cons_gloc:
0f2d19dd 1386 {
c8045e8d
DH
1387 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1388 * struct or a gloc. See the corresponding comment in
1389 * scm_gc_mark.
1390 */
1391 scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
1392 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1393 if (SCM_GCMARKP (scmptr))
0f2d19dd 1394 {
c8045e8d
DH
1395 if (vtable_data [scm_vtable_index_vcell] == 1)
1396 vtable_data [scm_vtable_index_vcell] = 0;
1397 goto cmrkcontinue;
1398 }
1399 else
1400 {
1401 if (vtable_data [scm_vtable_index_vcell] == 0
1402 || vtable_data [scm_vtable_index_vcell] == 1)
1403 {
1404 scm_struct_free_t free
1405 = (scm_struct_free_t) vtable_data[scm_struct_i_free];
1406 m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
1407 }
0f2d19dd
JB
1408 }
1409 }
1410 break;
1411 case scm_tcs_cons_imcar:
1412 case scm_tcs_cons_nimcar:
1413 case scm_tcs_closures:
e641afaf 1414 case scm_tc7_pws:
0f2d19dd
JB
1415 if (SCM_GCMARKP (scmptr))
1416 goto cmrkcontinue;
1417 break;
1418 case scm_tc7_wvect:
1419 if (SCM_GC8MARKP (scmptr))
1420 {
1421 goto c8mrkcontinue;
1422 }
1423 else
1424 {
ab4bef85
JB
1425 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1426 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
0f2d19dd
JB
1427 break;
1428 }
1429
1430 case scm_tc7_vector:
1431 case scm_tc7_lvector:
1432#ifdef CCLO
1433 case scm_tc7_cclo:
1434#endif
1435 if (SCM_GC8MARKP (scmptr))
1436 goto c8mrkcontinue;
1437
1438 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1439 freechars:
1440 scm_must_free (SCM_CHARS (scmptr));
1441 /* SCM_SETCHARS(scmptr, 0);*/
1442 break;
afe5177e 1443#ifdef HAVE_ARRAYS
0f2d19dd
JB
1444 case scm_tc7_bvect:
1445 if SCM_GC8MARKP (scmptr)
1446 goto c8mrkcontinue;
1447 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1448 goto freechars;
1449 case scm_tc7_byvect:
1450 if SCM_GC8MARKP (scmptr)
1451 goto c8mrkcontinue;
1452 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1453 goto freechars;
1454 case scm_tc7_ivect:
1455 case scm_tc7_uvect:
1456 if SCM_GC8MARKP (scmptr)
1457 goto c8mrkcontinue;
1458 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1459 goto freechars;
1460 case scm_tc7_svect:
1461 if SCM_GC8MARKP (scmptr)
1462 goto c8mrkcontinue;
1463 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1464 goto freechars;
5c11cc9d 1465#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1466 case scm_tc7_llvect:
1467 if SCM_GC8MARKP (scmptr)
1468 goto c8mrkcontinue;
1469 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1470 goto freechars;
1471#endif
1472 case scm_tc7_fvect:
1473 if SCM_GC8MARKP (scmptr)
1474 goto c8mrkcontinue;
1475 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1476 goto freechars;
1477 case scm_tc7_dvect:
1478 if SCM_GC8MARKP (scmptr)
1479 goto c8mrkcontinue;
1480 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1481 goto freechars;
1482 case scm_tc7_cvect:
1483 if SCM_GC8MARKP (scmptr)
1484 goto c8mrkcontinue;
1485 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1486 goto freechars;
afe5177e 1487#endif
0f2d19dd 1488 case scm_tc7_substring:
0f2d19dd
JB
1489 if (SCM_GC8MARKP (scmptr))
1490 goto c8mrkcontinue;
1491 break;
1492 case scm_tc7_string:
0f2d19dd
JB
1493 if (SCM_GC8MARKP (scmptr))
1494 goto c8mrkcontinue;
1495 m += SCM_HUGE_LENGTH (scmptr) + 1;
1496 goto freechars;
1497 case scm_tc7_msymbol:
1498 if (SCM_GC8MARKP (scmptr))
1499 goto c8mrkcontinue;
cf551a2b
DH
1500 m += (SCM_LENGTH (scmptr) + 1
1501 + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
0f2d19dd
JB
1502 scm_must_free ((char *)SCM_SLOTS (scmptr));
1503 break;
1504 case scm_tc7_contin:
1505 if SCM_GC8MARKP (scmptr)
1506 goto c8mrkcontinue;
0db18cf4 1507 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
c68296f8
MV
1508 if (SCM_VELTS (scmptr))
1509 goto freechars;
0f2d19dd
JB
1510 case scm_tc7_ssymbol:
1511 if SCM_GC8MARKP(scmptr)
1512 goto c8mrkcontinue;
1513 break;
1514 case scm_tcs_subrs:
1515 continue;
1516 case scm_tc7_port:
1517 if SCM_GC8MARKP (scmptr)
1518 goto c8mrkcontinue;
1519 if SCM_OPENP (scmptr)
1520 {
1521 int k = SCM_PTOBNUM (scmptr);
1522 if (!(k < scm_numptob))
1523 goto sweeperr;
1524 /* Keep "revealed" ports alive. */
945fec60 1525 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1526 continue;
1527 /* Yes, I really do mean scm_ptobs[k].free */
1528 /* rather than ftobs[k].close. .close */
1529 /* is for explicit CLOSE-PORT by user */
84af0382 1530 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1531 SCM_SETSTREAM (scmptr, 0);
1532 scm_remove_from_port_table (scmptr);
1533 scm_gc_ports_collected++;
24e68a57 1534 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
0f2d19dd
JB
1535 }
1536 break;
1537 case scm_tc7_smob:
1538 switch SCM_GCTYP16 (scmptr)
1539 {
1540 case scm_tc_free_cell:
acb0a19c 1541 case scm_tc16_real:
0f2d19dd
JB
1542 if SCM_GC8MARKP (scmptr)
1543 goto c8mrkcontinue;
1544 break;
1545#ifdef SCM_BIGDIG
acb0a19c 1546 case scm_tc16_big:
0f2d19dd
JB
1547 if SCM_GC8MARKP (scmptr)
1548 goto c8mrkcontinue;
1549 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1550 goto freechars;
1551#endif /* def SCM_BIGDIG */
acb0a19c 1552 case scm_tc16_complex:
0f2d19dd
JB
1553 if SCM_GC8MARKP (scmptr)
1554 goto c8mrkcontinue;
acb0a19c
MD
1555 m += 2 * sizeof (double);
1556 goto freechars;
0f2d19dd
JB
1557 default:
1558 if SCM_GC8MARKP (scmptr)
1559 goto c8mrkcontinue;
1560
1561 {
1562 int k;
1563 k = SCM_SMOBNUM (scmptr);
1564 if (!(k < scm_numsmob))
1565 goto sweeperr;
c8045e8d 1566 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1567 break;
1568 }
1569 }
1570 break;
1571 default:
acf4331f
DH
1572 sweeperr:
1573 SCM_MISC_ERROR ("unknown type", SCM_EOL);
0f2d19dd 1574 }
0f2d19dd 1575#if 0
3f5d82cd 1576 if (SCM_FREE_CELL_P (scmptr))
0f2d19dd
JB
1577 exit (2);
1578#endif
4c48ba06 1579 if (!--left_to_collect)
4a4c9785
MD
1580 {
1581 SCM_SETCAR (scmptr, nfreelist);
4c48ba06
MD
1582 *freelist->clustertail = scmptr;
1583 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1584
4a4c9785 1585 nfreelist = SCM_EOL;
4c48ba06
MD
1586 freelist->collected += span * freelist->cluster_size;
1587 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1588 }
1589 else
4a4c9785
MD
1590 {
1591 /* Stick the new cell on the front of nfreelist. It's
1592 critical that we mark this cell as freed; otherwise, the
1593 conservative collector might trace it as some other type
1594 of object. */
54778cd3 1595 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
3f5d82cd 1596 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
4a4c9785
MD
1597 nfreelist = scmptr;
1598 }
a00c95d9 1599
0f2d19dd
JB
1600 continue;
1601 c8mrkcontinue:
1602 SCM_CLRGC8MARK (scmptr);
1603 continue;
1604 cmrkcontinue:
1605 SCM_CLRGCMARK (scmptr);
1606 }
1607#ifdef GC_FREE_SEGMENTS
1608 if (n == seg_size)
1609 {
15e9d186
JB
1610 register long j;
1611
4c48ba06 1612 freelist->heap_size -= seg_size;
cf2d30f6
JB
1613 free ((char *) scm_heap_table[i].bounds[0]);
1614 scm_heap_table[i].bounds[0] = 0;
1615 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1616 scm_heap_table[j - 1] = scm_heap_table[j];
1617 scm_n_heap_segs -= 1;
cf2d30f6 1618 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1619 }
1620 else
1621#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1622 {
1623 /* Update the real freelist pointer to point to the head of
1624 the list of free cells we've built for this segment. */
4c48ba06 1625 freelist->cells = nfreelist;
4c48ba06 1626 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1627 }
1628
fca7547b 1629#ifdef GUILE_DEBUG_FREELIST
4c48ba06 1630 scm_check_freelist (freelist == &scm_master_freelist
8ded62a3
MD
1631 ? scm_freelist
1632 : scm_freelist2);
cf2d30f6
JB
1633 scm_map_free_list ();
1634#endif
4a4c9785 1635 }
a00c95d9 1636
4c48ba06
MD
1637 gc_sweep_freelist_finish (&scm_master_freelist);
1638 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1639
8ded62a3
MD
1640 /* When we move to POSIX threads private freelists should probably
1641 be GC-protected instead. */
1642 scm_freelist = SCM_EOL;
1643 scm_freelist2 = SCM_EOL;
a00c95d9 1644
b37fe1c5 1645 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f 1646 scm_gc_yield -= scm_cells_allocated;
0f2d19dd
JB
1647 scm_mallocated -= m;
1648 scm_gc_malloc_collected = m;
1649}
acf4331f 1650#undef FUNC_NAME
0f2d19dd
JB
1651
1652
1653\f
1654
1655/* {Front end to malloc}
1656 *
c68296f8 1657 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
0f2d19dd
JB
1658 *
 1659 * These functions provide services comparable to malloc, realloc, and
1660 * free. They are for allocating malloced parts of scheme objects.
1661 * The primary purpose of the front end is to impose calls to gc.
1662 */
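/* A usage sketch, not from the original source; the "image" type is a
 * hypothetical smob:
 *
 *   struct image *im = scm_must_malloc (sizeof (struct image), "image");
 *
 * and later, in the smob's free function:
 *
 *   scm_must_free (im);
 *   return sizeof (struct image);
 *
 * scm_must_malloc charges the allocation against scm_mtrigger and may
 * run the collector; returning the size from the smob free function
 * lets scm_gc_sweep subtract it from scm_mallocated again (see the
 * underflow check in scm_igc above).
 */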
1663
bc9d9bb2 1664
0f2d19dd
JB
1665/* scm_must_malloc
1666 * Return newly malloced storage or throw an error.
1667 *
1668 * The parameter WHAT is a string for error reporting.
a00c95d9 1669 * If the threshold scm_mtrigger would be exceeded by this
0f2d19dd
JB
1670 * allocation, or if the first call to malloc fails,
1671 * garbage collect -- on the presumption that some objects
1672 * using malloced storage may be collected.
1673 *
1674 * The limit scm_mtrigger may be raised by this allocation.
1675 */
07806695 1676void *
e4ef2330 1677scm_must_malloc (scm_sizet size, const char *what)
0f2d19dd 1678{
07806695 1679 void *ptr;
15e9d186 1680 unsigned long nm = scm_mallocated + size;
e4ef2330
MD
1681
1682 if (nm <= scm_mtrigger)
0f2d19dd 1683 {
07806695 1684 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1685 if (NULL != ptr)
1686 {
1687 scm_mallocated = nm;
bc9d9bb2
MD
1688#ifdef GUILE_DEBUG_MALLOC
1689 scm_malloc_register (ptr, what);
1690#endif
0f2d19dd
JB
1691 return ptr;
1692 }
1693 }
6064dcc6 1694
0f2d19dd 1695 scm_igc (what);
e4ef2330 1696
0f2d19dd 1697 nm = scm_mallocated + size;
07806695 1698 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1699 if (NULL != ptr)
1700 {
1701 scm_mallocated = nm;
6064dcc6
MV
1702 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1703 if (nm > scm_mtrigger)
1704 scm_mtrigger = nm + nm / 2;
1705 else
1706 scm_mtrigger += scm_mtrigger / 2;
1707 }
bc9d9bb2
MD
1708#ifdef GUILE_DEBUG_MALLOC
1709 scm_malloc_register (ptr, what);
1710#endif
1711
0f2d19dd
JB
1712 return ptr;
1713 }
e4ef2330 1714
acf4331f 1715 scm_memory_error (what);
0f2d19dd
JB
1716}
1717
1718
1719/* scm_must_realloc
1720 * is similar to scm_must_malloc.
1721 */
07806695
JB
1722void *
1723scm_must_realloc (void *where,
e4ef2330
MD
1724 scm_sizet old_size,
1725 scm_sizet size,
3eeba8d4 1726 const char *what)
0f2d19dd 1727{
07806695 1728 void *ptr;
e4ef2330
MD
1729 scm_sizet nm = scm_mallocated + size - old_size;
1730
1731 if (nm <= scm_mtrigger)
0f2d19dd 1732 {
07806695 1733 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1734 if (NULL != ptr)
1735 {
1736 scm_mallocated = nm;
bc9d9bb2
MD
1737#ifdef GUILE_DEBUG_MALLOC
1738 scm_malloc_reregister (where, ptr, what);
1739#endif
0f2d19dd
JB
1740 return ptr;
1741 }
1742 }
e4ef2330 1743
0f2d19dd 1744 scm_igc (what);
e4ef2330
MD
1745
1746 nm = scm_mallocated + size - old_size;
07806695 1747 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1748 if (NULL != ptr)
1749 {
1750 scm_mallocated = nm;
6064dcc6
MV
1751 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1752 if (nm > scm_mtrigger)
1753 scm_mtrigger = nm + nm / 2;
1754 else
1755 scm_mtrigger += scm_mtrigger / 2;
1756 }
bc9d9bb2
MD
1757#ifdef GUILE_DEBUG_MALLOC
1758 scm_malloc_reregister (where, ptr, what);
1759#endif
0f2d19dd
JB
1760 return ptr;
1761 }
e4ef2330 1762
acf4331f 1763 scm_memory_error (what);
0f2d19dd
JB
1764}
1765
acf4331f 1766
a00c95d9 1767void
07806695 1768scm_must_free (void *obj)
acf4331f 1769#define FUNC_NAME "scm_must_free"
0f2d19dd 1770{
bc9d9bb2
MD
1771#ifdef GUILE_DEBUG_MALLOC
1772 scm_malloc_unregister (obj);
1773#endif
0f2d19dd
JB
1774 if (obj)
1775 free (obj);
1776 else
acf4331f 1777 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
0f2d19dd 1778}
acf4331f
DH
1779#undef FUNC_NAME
1780
0f2d19dd 1781
c68296f8
MV
1782/* Announce that there has been some malloc done that will be freed
1783 * during gc. A typical use is for a smob that uses some malloced
1784 * memory but can not get it from scm_must_malloc (for whatever
1785 * reason). When a new object of this smob is created you call
1786 * scm_done_malloc with the size of the object. When your smob free
1787 * function is called, be sure to include this size in the return
1788 * value. */
0f2d19dd 1789
c68296f8 1790void
6e8d25a6 1791scm_done_malloc (long size)
c68296f8
MV
1792{
1793 scm_mallocated += size;
1794
1795 if (scm_mallocated > scm_mtrigger)
1796 {
1797 scm_igc ("foreign mallocs");
1798 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1799 {
1800 if (scm_mallocated > scm_mtrigger)
1801 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1802 else
1803 scm_mtrigger += scm_mtrigger / 2;
1804 }
1805 }
1806}
1807
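/* Illustrative sketch, not part of the original file: a hypothetical smob
 * constructor that obtains memory with plain malloc and reports its size
 * with scm_done_malloc.  The matching smob free function would free the
 * buffer and include SIZE in its return value, keeping the scm_mallocated
 * accounting balanced.
 */
static void *
make_foreign_buffer (scm_sizet size)
{
  void *buf;
  SCM_SYSCALL (buf = malloc (size));
  if (buf != NULL)
    scm_done_malloc ((long) size);
  return buf;
}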
1808
1809\f
0f2d19dd
JB
1810
1811/* {Heap Segments}
1812 *
1813 * Each heap segment is an array of objects of a particular size.
1814 * Every segment has an associated (possibly shared) freelist.
1815 * A table of segment records is kept that records the upper and
1816 * lower extents of the segment; this is used during the conservative
 1817 * phase of gc to identify probable gc roots (because they point
c68296f8 1818 * into valid segments at reasonable offsets). */
0f2d19dd
JB
1819
1820/* scm_expmem
1821 * is true if the first segment was smaller than INIT_HEAP_SEG.
1822 * If scm_expmem is set to one, subsequent segment allocations will
1823 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1824 */
1825int scm_expmem = 0;
1826
4c48ba06
MD
1827scm_sizet scm_max_segment_size;
1828
0f2d19dd
JB
1829/* scm_heap_org
1830 * is the lowest base address of any heap segment.
1831 */
1832SCM_CELLPTR scm_heap_org;
1833
a00c95d9 1834scm_heap_seg_data_t * scm_heap_table = 0;
b6efc951 1835static unsigned int heap_segment_table_size = 0;
0f2d19dd
JB
1836int scm_n_heap_segs = 0;
1837
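/* Illustrative sketch, not part of the original file: the conservative
 * scan only treats a word as a probable root if it points into one of
 * the segments recorded in scm_heap_table, roughly as follows.
 */
static int
points_into_some_segment_p (SCM_CELLPTR p)
{
  int i;
  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], p)
        && p < scm_heap_table[i].bounds[1])
      return 1;
  return 0;
}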
0f2d19dd
JB
1838/* init_heap_seg
 1839 * initializes a new heap segment and returns the size of the segment in bytes (or 0 if SEG_ORG is null).
1840 *
1841 * The segment origin, segment size in bytes, and the span of objects
1842 * in cells are input parameters. The freelist is both input and output.
1843 *
 1844 * This function presumes that the scm_heap_table has already been expanded
 1845 * to accommodate a new segment record.
1846 */
1847
1848
a00c95d9 1849static scm_sizet
4c48ba06 1850init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
0f2d19dd
JB
1851{
1852 register SCM_CELLPTR ptr;
0f2d19dd 1853 SCM_CELLPTR seg_end;
15e9d186 1854 int new_seg_index;
acb0a19c 1855 int n_new_cells;
4c48ba06 1856 int span = freelist->span;
a00c95d9 1857
0f2d19dd
JB
1858 if (seg_org == NULL)
1859 return 0;
1860
a00c95d9 1861 ptr = CELL_UP (seg_org, span);
acb0a19c 1862
a00c95d9 1863 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 1864 */
a00c95d9 1865 seg_end = CELL_DN ((char *) seg_org + size, span);
0f2d19dd 1866
a00c95d9 1867 /* Find the right place and insert the segment record.
0f2d19dd
JB
1868 *
1869 */
1870 for (new_seg_index = 0;
1871 ( (new_seg_index < scm_n_heap_segs)
1872 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1873 new_seg_index++)
1874 ;
1875
1876 {
1877 int i;
1878 for (i = scm_n_heap_segs; i > new_seg_index; --i)
1879 scm_heap_table[i] = scm_heap_table[i - 1];
1880 }
a00c95d9 1881
0f2d19dd
JB
1882 ++scm_n_heap_segs;
1883
945fec60 1884 scm_heap_table[new_seg_index].span = span;
4c48ba06 1885 scm_heap_table[new_seg_index].freelist = freelist;
195e6201
DH
1886 scm_heap_table[new_seg_index].bounds[0] = ptr;
1887 scm_heap_table[new_seg_index].bounds[1] = seg_end;
0f2d19dd
JB
1888
1889
a00c95d9 1890 /* Compute the least valid object pointer w/in this segment
0f2d19dd 1891 */
a00c95d9 1892 ptr = CELL_UP (ptr, span);
0f2d19dd
JB
1893
1894
acb0a19c
MD
1895 /*n_new_cells*/
1896 n_new_cells = seg_end - ptr;
0f2d19dd 1897
4c48ba06 1898 freelist->heap_size += n_new_cells;
4a4c9785 1899
a00c95d9 1900 /* Partition objects in this segment into clusters */
4a4c9785
MD
1901 {
1902 SCM clusters;
1903 SCM *clusterp = &clusters;
4c48ba06 1904 int n_cluster_cells = span * freelist->cluster_size;
4a4c9785 1905
4c48ba06 1906 while (n_new_cells > span) /* at least one spine + one freecell */
4a4c9785 1907 {
4c48ba06
MD
1908 /* Determine end of cluster
1909 */
1910 if (n_new_cells >= n_cluster_cells)
1911 {
1912 seg_end = ptr + n_cluster_cells;
1913 n_new_cells -= n_cluster_cells;
1914 }
4a4c9785 1915 else
a00c95d9
ML
1916 /* [cmm] looks like the segment size doesn't divide cleanly by
1917 cluster size. bad cmm! */
1918 abort();
4a4c9785 1919
4c48ba06
MD
1920 /* Allocate cluster spine
1921 */
4a4c9785
MD
1922 *clusterp = PTR2SCM (ptr);
1923 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
1924 clusterp = SCM_CDRLOC (*clusterp);
4a4c9785 1925 ptr += span;
a00c95d9 1926
4a4c9785
MD
1927 while (ptr < seg_end)
1928 {
96f6f4ae
DH
1929 SCM scmptr = PTR2SCM (ptr);
1930
54778cd3 1931 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
3f5d82cd 1932 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (ptr + span));
4a4c9785
MD
1933 ptr += span;
1934 }
4c48ba06 1935
3f5d82cd 1936 SCM_SET_FREE_CELL_CDR (PTR2SCM (ptr - span), SCM_EOL);
4a4c9785 1937 }
a00c95d9 1938
4a4c9785
MD
1939 /* Patch up the last cluster pointer in the segment
1940 * to join it to the input freelist.
1941 */
4c48ba06
MD
1942 *clusterp = freelist->clusters;
1943 freelist->clusters = clusters;
4a4c9785
MD
1944 }
1945
4c48ba06
MD
1946#ifdef DEBUGINFO
1947 fprintf (stderr, "H");
1948#endif
0f2d19dd 1949 return size;
0f2d19dd
JB
1950}
1951
a00c95d9
ML
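/* Round LEN up to a whole number of clusters for FREELIST, plus the
 * alignment slack for the segment.  For example (illustrative numbers
 * only), with a cluster size of 4096 bytes a request of 10000 bytes is
 * rounded up to 12288 bytes plus ALIGNMENT_SLACK (freelist).
 */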
1952static scm_sizet
1953round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
1954{
1955 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
1956
1957 return
1958 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
1959 + ALIGNMENT_SLACK (freelist);
1960}
1961
a00c95d9 1962static void
b6efc951 1963alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
acf4331f 1964#define FUNC_NAME "alloc_some_heap"
0f2d19dd 1965{
0f2d19dd 1966 SCM_CELLPTR ptr;
b37fe1c5 1967 long len;
a00c95d9 1968
b6efc951
DH
1969 if (scm_gc_heap_lock)
1970 {
1971 /* Critical code sections (such as the garbage collector) aren't
1972 * supposed to add heap segments.
1973 */
1974 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
1975 abort ();
1976 }
0f2d19dd 1977
b6efc951
DH
1978 if (scm_n_heap_segs == heap_segment_table_size)
1979 {
1980 /* We have to expand the heap segment table to have room for the new
1981 * segment. Do not yet increment scm_n_heap_segs -- that is done by
1982 * init_heap_seg only if the allocation of the segment itself succeeds.
1983 */
1984 unsigned int new_table_size = scm_n_heap_segs + 1;
1985 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
1986 scm_heap_seg_data_t * new_heap_table;
1987
1988 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
1989 realloc ((char *)scm_heap_table, size)));
1990 if (!new_heap_table)
1991 {
1992 if (error_policy == abort_on_error)
1993 {
1994 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
1995 abort ();
1996 }
1997 else
1998 {
1999 return;
2000 }
2001 }
2002 else
2003 {
2004 scm_heap_table = new_heap_table;
2005 heap_segment_table_size = new_table_size;
2006 }
2007 }
0f2d19dd
JB
2008
2009
2010 /* Pick a size for the new heap segment.
a00c95d9 2011 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
2012 * gc.h
2013 */
4c48ba06 2014 {
1811ebce
MD
 2015 /* Ensure that the new segment is predicted to be large enough.
2016 *
2017 * New yield should at least equal GC fraction of new heap size, i.e.
2018 *
2019 * y + dh > f * (h + dh)
2020 *
2021 * y : yield
8fef55a8 2022 * f : min yield fraction
1811ebce
MD
2023 * h : heap size
2024 * dh : size of new heap segment
2025 *
2026 * This gives dh > (f * h - y) / (1 - f)
bda1446c 2027 */
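    /* For example (illustrative numbers only): with f = 40, h = 500000
     * cells and a yield y of 100000 cells, dh must exceed
     * (0.40 * 500000 - 100000) / (1 - 0.40), i.e. about 166667 cells.
     * The integer expression below computes
     * (40 * 500000 - 100 * 100000) / (99 - 40) = 169491 cells,
     * rounding the requirement up slightly.
     */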
8fef55a8 2028 int f = freelist->min_yield_fraction;
1811ebce
MD
2029 long h = SCM_HEAP_SIZE;
2030 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2031 len = SCM_EXPHEAP (freelist->heap_size);
2032#ifdef DEBUGINFO
 2033 fprintf (stderr, "(%ld < %ld)", len, min_cells);
2034#endif
2035 if (len < min_cells)
1811ebce 2036 len = min_cells + freelist->cluster_size;
4c48ba06 2037 len *= sizeof (scm_cell);
1811ebce
MD
2038 /* force new sampling */
2039 freelist->collected = LONG_MAX;
4c48ba06 2040 }
a00c95d9 2041
4c48ba06
MD
2042 if (len > scm_max_segment_size)
2043 len = scm_max_segment_size;
0f2d19dd
JB
2044
2045 {
2046 scm_sizet smallest;
2047
a00c95d9 2048 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
a00c95d9 2049
0f2d19dd 2050 if (len < smallest)
a00c95d9 2051 len = smallest;
0f2d19dd
JB
2052
2053 /* Allocate with decaying ambition. */
2054 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2055 && (len >= smallest))
2056 {
1811ebce 2057 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2058 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2059 if (ptr)
2060 {
a00c95d9 2061 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2062 return;
2063 }
2064 len /= 2;
2065 }
2066 }
2067
b6efc951
DH
2068 if (error_policy == abort_on_error)
2069 {
2070 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2071 abort ();
2072 }
0f2d19dd 2073}
acf4331f 2074#undef FUNC_NAME
0f2d19dd
JB
2075
2076
a00c95d9 2077SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
1bbd0b84 2078 (SCM name),
b380b885 2079 "")
1bbd0b84 2080#define FUNC_NAME s_scm_unhash_name
0f2d19dd
JB
2081{
2082 int x;
2083 int bound;
3b3b36dd 2084 SCM_VALIDATE_SYMBOL (1,name);
0f2d19dd
JB
2085 SCM_DEFER_INTS;
2086 bound = scm_n_heap_segs;
2087 for (x = 0; x < bound; ++x)
2088 {
2089 SCM_CELLPTR p;
2090 SCM_CELLPTR pbound;
195e6201
DH
2091 p = scm_heap_table[x].bounds[0];
2092 pbound = scm_heap_table[x].bounds[1];
0f2d19dd
JB
2093 while (p < pbound)
2094 {
c8045e8d
DH
2095 SCM cell = PTR2SCM (p);
2096 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
0f2d19dd 2097 {
c8045e8d
DH
2098 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2099 * struct cell. See the corresponding comment in scm_gc_mark.
2100 */
2101 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2102 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2103 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
9a09deb1 2104 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
c8045e8d 2105 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
0f2d19dd 2106 {
c8045e8d 2107 SCM_SET_CELL_OBJECT_0 (cell, name);
0f2d19dd
JB
2108 }
2109 }
2110 ++p;
2111 }
2112 }
2113 SCM_ALLOW_INTS;
2114 return name;
2115}
1bbd0b84 2116#undef FUNC_NAME
0f2d19dd
JB
2117
2118
2119\f
2120/* {GC Protection Helper Functions}
2121 */
2122
2123
0f2d19dd 2124void
6e8d25a6
GB
2125scm_remember (SCM *ptr)
2126{ /* empty */ }
0f2d19dd 2127
1cc91f1b 2128
c209c88e 2129/*
41b0806d
GB
2130 These crazy functions prevent garbage collection
2131 of arguments after the first argument by
2132 ensuring they remain live throughout the
2133 function because they are used in the last
2134 line of the code block.
2135 It'd be better to have a nice compiler hint to
2136 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2137SCM
2138scm_return_first (SCM elt, ...)
0f2d19dd
JB
2139{
2140 return elt;
2141}
2142
41b0806d
GB
2143int
2144scm_return_first_int (int i, ...)
2145{
2146 return i;
2147}
2148
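/* For example (illustrative only), a primitive whose result is derived
 * from STR can end with
 *
 *     return scm_return_first (result, str);
 *
 * so that STR is still an argument of the function's final expression
 * and therefore remains visible to the conservative stack scan until
 * the primitive returns.
 */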
0f2d19dd 2149
0f2d19dd 2150SCM
6e8d25a6 2151scm_permanent_object (SCM obj)
0f2d19dd
JB
2152{
2153 SCM_REDEFER_INTS;
2154 scm_permobjs = scm_cons (obj, scm_permobjs);
2155 SCM_REALLOW_INTS;
2156 return obj;
2157}
2158
2159
7bd4fbe2
MD
2160/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2161 other references are dropped, until the object is unprotected by calling
2162 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2163 i. e. it is possible to protect the same object several times, but it is
2164 necessary to unprotect the object the same number of times to actually get
2165 the object unprotected. It is an error to unprotect an object more often
2166 than it has been protected before. The function scm_protect_object returns
2167 OBJ.
2168*/
2169
2170/* Implementation note: For every object X, there is a counter which
2171 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2172*/
686765af 2173
ef290276 2174SCM
6e8d25a6 2175scm_protect_object (SCM obj)
ef290276 2176{
686765af
ML
2177 SCM handle;
2178
2179 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2180 SCM_REDEFER_INTS;
686765af 2181
0f0f0899
MD
2182 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2183 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
686765af 2184
2dd6a83a 2185 SCM_REALLOW_INTS;
686765af 2186
ef290276
JB
2187 return obj;
2188}
2189
2190
2191/* Remove any protection for OBJ established by a prior call to
dab7f566 2192 scm_protect_object. This function returns OBJ.
ef290276 2193
dab7f566 2194 See scm_protect_object for more information. */
ef290276 2195SCM
6e8d25a6 2196scm_unprotect_object (SCM obj)
ef290276 2197{
686765af
ML
2198 SCM handle;
2199
2200 /* This critical section barrier will be replaced by a mutex. */
2dd6a83a 2201 SCM_REDEFER_INTS;
686765af
ML
2202
2203 handle = scm_hashq_get_handle (scm_protects, obj);
0f0f0899
MD
2204
2205 if (SCM_IMP (handle))
686765af 2206 {
0f0f0899
MD
2207 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2208 abort ();
686765af 2209 }
6a199940
DH
2210 else
2211 {
2212 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2213 if (count == 0)
2214 scm_hashq_remove_x (scm_protects, obj);
2215 else
2216 SCM_SETCDR (handle, SCM_MAKINUM (count));
2217 }
686765af 2218
2dd6a83a 2219 SCM_REALLOW_INTS;
ef290276
JB
2220
2221 return obj;
2222}
2223
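/* Illustrative sketch, not part of the original file: keeping a Scheme
 * procedure reachable from C while it is registered as a callback.  The
 * variable and both functions are hypothetical.
 */
static SCM my_callback;

static void
register_callback (SCM proc)
{
  my_callback = scm_protect_object (proc);
}

static void
unregister_callback (void)
{
  scm_unprotect_object (my_callback);
  my_callback = SCM_BOOL_F;
}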
c45acc34
JB
2224int terminating;
2225
2226/* called on process termination. */
e52ceaac
MD
2227#ifdef HAVE_ATEXIT
2228static void
2229cleanup (void)
2230#else
2231#ifdef HAVE_ON_EXIT
51157deb
MD
2232extern int on_exit (void (*procp) (), int arg);
2233
e52ceaac
MD
2234static void
2235cleanup (int status, void *arg)
2236#else
2237#error Dont know how to setup a cleanup handler on your system.
2238#endif
2239#endif
c45acc34
JB
2240{
2241 terminating = 1;
2242 scm_flush_all_ports ();
2243}
ef290276 2244
0f2d19dd 2245\f
acb0a19c 2246static int
4c48ba06 2247make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
acb0a19c 2248{
a00c95d9
ML
2249 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2250 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2251 rounded_size,
4c48ba06 2252 freelist))
acb0a19c 2253 {
a00c95d9
ML
2254 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2255 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2256 rounded_size,
4c48ba06 2257 freelist))
acb0a19c
MD
2258 return 1;
2259 }
2260 else
2261 scm_expmem = 1;
2262
8fef55a8
MD
2263 if (freelist->min_yield_fraction)
2264 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
b37fe1c5 2265 / 100);
8fef55a8 2266 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
a00c95d9 2267
acb0a19c
MD
2268 return 0;
2269}
2270
2271\f
4c48ba06
MD
2272static void
2273init_freelist (scm_freelist_t *freelist,
2274 int span,
2275 int cluster_size,
8fef55a8 2276 int min_yield)
4c48ba06
MD
2277{
2278 freelist->clusters = SCM_EOL;
2279 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2280 freelist->left_to_collect = 0;
2281 freelist->clusters_allocated = 0;
8fef55a8
MD
2282 freelist->min_yield = 0;
2283 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2284 freelist->span = span;
2285 freelist->collected = 0;
1811ebce 2286 freelist->collected_1 = 0;
4c48ba06
MD
2287 freelist->heap_size = 0;
2288}
2289
4a4c9785 2290int
4c48ba06
MD
2291scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2292 scm_sizet init_heap_size_2, int gc_trigger_2,
2293 scm_sizet max_segment_size)
0f2d19dd
JB
2294{
2295 scm_sizet j;
2296
4c48ba06 2297 if (!init_heap_size_1)
aeacfc8f 2298 init_heap_size_1 = scm_default_init_heap_size_1;
4c48ba06 2299 if (!init_heap_size_2)
aeacfc8f 2300 init_heap_size_2 = scm_default_init_heap_size_2;
4c48ba06 2301
0f2d19dd
JB
2302 j = SCM_NUM_PROTECTS;
2303 while (j)
2304 scm_sys_protects[--j] = SCM_BOOL_F;
2305 scm_block_gc = 1;
4a4c9785 2306
4a4c9785 2307 scm_freelist = SCM_EOL;
4c48ba06
MD
2308 scm_freelist2 = SCM_EOL;
2309 init_freelist (&scm_master_freelist,
2310 1, SCM_CLUSTER_SIZE_1,
aeacfc8f 2311 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
4c48ba06
MD
2312 init_freelist (&scm_master_freelist2,
2313 2, SCM_CLUSTER_SIZE_2,
aeacfc8f 2314 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
4c48ba06 2315 scm_max_segment_size
aeacfc8f 2316 = max_segment_size ? max_segment_size : scm_default_max_segment_size;
4a4c9785 2317
0f2d19dd
JB
2318 scm_expmem = 0;
2319
2320 j = SCM_HEAP_SEG_SIZE;
2321 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
a00c95d9
ML
2322 scm_heap_table = ((scm_heap_seg_data_t *)
2323 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
b6efc951 2324 heap_segment_table_size = 2;
acb0a19c 2325
4c48ba06
MD
2326 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2327 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
4a4c9785 2328 return 1;
acb0a19c 2329
801cb5e7 2330 /* scm_hplims[0] can change. do not remove scm_heap_org */
a00c95d9 2331 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
acb0a19c 2332
801cb5e7
MD
2333 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2334 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2335 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2336 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2337 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
0f2d19dd
JB
2338
2339 /* Initialise the list of ports. */
840ae05d
JB
2340 scm_port_table = (scm_port **)
2341 malloc (sizeof (scm_port *) * scm_port_table_room);
0f2d19dd
JB
2342 if (!scm_port_table)
2343 return 1;
2344
a18bcd0e 2345#ifdef HAVE_ATEXIT
c45acc34 2346 atexit (cleanup);
e52ceaac
MD
2347#else
2348#ifdef HAVE_ON_EXIT
2349 on_exit (cleanup, 0);
2350#endif
a18bcd0e 2351#endif
0f2d19dd
JB
2352
2353 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
24e68a57 2354 SCM_SETCDR (scm_undefineds, scm_undefineds);
0f2d19dd
JB
2355
2356 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2357 scm_nullstr = scm_makstr (0L, 0);
a8741caa 2358 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
54778cd3
DH
2359 scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2360 scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
2361 scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
8960e0a0 2362 scm_stand_in_procs = SCM_EOL;
0f2d19dd 2363 scm_permobjs = SCM_EOL;
686765af 2364 scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
54778cd3
DH
2365 scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2366 scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
0f2d19dd
JB
2367#ifdef SCM_BIGDIG
2368 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2369#endif
2370 return 0;
2371}
939794ce 2372
0f2d19dd
JB
2373\f
2374
939794ce
DH
2375SCM scm_after_gc_hook;
2376
2377#if (SCM_DEBUG_DEPRECATED == 0)
2378static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2379#endif /* SCM_DEBUG_DEPRECATED == 0 */
2380static SCM gc_async;
2381
2382
2383/* The function gc_async_thunk causes the execution of the after-gc-hook. It
2384 * is run after the gc, as soon as the asynchronous events are handled by the
2385 * evaluator.
2386 */
2387static SCM
2388gc_async_thunk (void)
2389{
2390 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2391
2392#if (SCM_DEBUG_DEPRECATED == 0)
2393
2394 /* The following code will be removed in Guile 1.5. */
2395 if (SCM_NFALSEP (scm_gc_vcell))
2396 {
2397 SCM proc = SCM_CDR (scm_gc_vcell);
2398
2399 if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
2400 scm_apply (proc, SCM_EOL, SCM_EOL);
2401 }
2402
2403#endif /* SCM_DEBUG_DEPRECATED == 0 */
2404
2405 return SCM_UNSPECIFIED;
2406}
2407
2408
2409/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2410 * the garbage collection. The only purpose of this function is to mark the
2411 * gc_async (which will eventually lead to the execution of the
2412 * gc_async_thunk).
2413 */
2414static void *
2415mark_gc_async (void * hook_data, void *func_data, void *data)
2416{
2417 scm_system_async_mark (gc_async);
2418 return NULL;
2419}
2420
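/* Illustrative sketch, not part of the original file: client C code
 * could attach its own function to scm_after_gc_c_hook in the same way
 * mark_gc_async is attached below, e.g. with
 *
 *     scm_c_hook_add (&scm_after_gc_c_hook, my_after_gc_fn, NULL, 0);
 */
static void *
my_after_gc_fn (void *hook_data, void *func_data, void *data)
{
  fprintf (stderr, "gc finished\n");
  return NULL;
}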
2421
0f2d19dd
JB
2422void
2423scm_init_gc ()
0f2d19dd 2424{
939794ce
DH
2425 SCM after_gc_thunk;
2426
801cb5e7 2427 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
939794ce
DH
2428
2429#if (SCM_DEBUG_DEPRECATED == 0)
2430 scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
2431#endif /* SCM_DEBUG_DEPRECATED == 0 */
2432 /* Dirk:FIXME:: We don't really want a binding here. */
2433 after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
2434 gc_async = scm_system_async (after_gc_thunk);
2435
2436 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2437
a0599745 2438#include "libguile/gc.x"
0f2d19dd 2439}
89e00824
ML
2440
2441/*
2442 Local Variables:
2443 c-file-style: "gnu"
2444 End:
2445*/