libguile/gc.c
/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */

/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
   gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */

/* #define DEBUGINFO */

\f
#include <stdio.h>
#include "libguile/_scm.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"

#include "libguile/validate.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f
/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which scm_cons, and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of heap.  If this much heap is
 * allocated initially the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type scm_sizet.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more heap
 * is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * Heap size 45000 and 40% min yield gives quick startup and no extra
 * heap allocation.  Having higher values on min yield may lead to
 * large heaps, especially if code behaviour is varying its
 * maximum consumption between different freelists.
 */
#define SCM_INIT_HEAP_SIZE_1 (45000L * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_1 2000L
#define SCM_MIN_YIELD_1 40

#define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_2 1000L
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application
 */
#define SCM_MIN_YIELD_2 40

#define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
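
/* Worked example of the sizes above (an editorial sketch, not used by the
 * collector): on a 32 bit machine a cell is 8 bytes, so SCM_INIT_HEAP_SIZE_1
 * requests 45000 * 8 = 360000 bytes up front.  SCM_EXPHEAP (size) = size / 2
 * means every expansion asks for half of the current heap again, i.e. growth
 * by a factor of 1.5: 360000 -> +180000 -> 540000 bytes, and so on.  Malloc
 * driven GC starts with a trigger of SCM_INIT_MALLOC_LIMIT = 100000 bytes
 * and a hysteresis of 100000 / 10 = 10000 bytes.
 */
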

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
   bounds for allocated storage */

#ifdef PROT386
/* in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
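
/* Worked example of the generic CELL_UP/CELL_DN above (an editorial sketch,
 * assuming 8 byte cells and span 1): the mask is ~(8*1 - 1) = ~7, so
 * CELL_UP (0x1005, 1) rounds up to 0x1008 and CELL_DN (0x1005, 1) rounds
 * down to 0x1000.  With span 2 the alignment boundary doubles to
 * sizeof (scm_cell) * 2 = 16 bytes.
 */
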
#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
#ifdef GUILE_NEW_GC_SCHEME
#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#else
#define SCM_HEAP_SIZE (scm_freelist.heap_size + scm_freelist2.heap_size)
#endif
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))


\f
/* scm_freelists
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
#ifdef GUILE_NEW_GC_SCHEME
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow the heap instead of collecting */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap */
  long min_yield;
  /* defines min_yield as a percentage of the total heap size */
  int min_yield_fraction;
#endif
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;

#ifdef GUILE_NEW_GC_SCHEME
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
#else
scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
#endif
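
/* Reading the aggregate initializers above against the struct definition
 * (an editorial note): the fields are filled in declaration order, i.e.
 * cells = SCM_EOL, left_to_collect = 0, clusters_allocated = 0,
 * clusters = SCM_EOL, clustertail = 0, cluster_size = SCM_CLUSTER_SIZE_1
 * (respectively _2), grow_heap_p = 0, min_yield = 0, min_yield_fraction = 0,
 * span = 1 (respectively 2), collected = collected_1 = 0, and the trailing
 * heap_size member is left to default to 0.
 */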

/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;


/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */

int scm_block_gc = 1;

/* If fewer than MIN_GC_YIELD cells are recovered during a garbage
 * collection (GC) more space is allocated for the heap.
 */
#define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
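
/* For example (an editorial sketch): a freelist whose heap segments total
 * 64000 cells has MIN_GC_YIELD = 16000, so in the old GC scheme a collection
 * that recovers fewer than 16000 cells makes scm_gc_for_alloc below call
 * alloc_some_heap and grow the heap rather than rely on the freelist alone.
 */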

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
#ifdef GUILE_NEW_GC_SCHEME
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
#endif
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_rt;
unsigned long scm_gc_time_taken = 0;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
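
/* These symbols name the keys of the association list returned by the
 * gc-stats procedure defined further below.  A call from Scheme yields
 * something of this shape (an editorial sketch; the values are placeholders):
 *
 *   ((gc-time-taken . <ticks>)
 *    (cells-allocated . <ncells>)
 *    (cell-heap-size . <ncells>)
 *    (bytes-malloced . <nbytes>)
 *    (gc-malloc-threshold . <nbytes>)
 *    (cell-heap-segments (<upper-bound> . <lower-bound>) ...))
 */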

typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist. */
  scm_freelist_t *freelist;

  /* number of SCM words per object in this segment */
  int span;

  /* If SEG_DATA->valid is non-zero, the conservative marking
     functions will apply SEG_DATA->valid to the purported pointer and
     SEG_DATA, and mark the object iff the function returns non-zero.
     At the moment, I don't think anyone uses this. */
  int (*valid) ();
} scm_heap_seg_data_t;



static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
static void alloc_some_heap (scm_freelist_t *);

\f
/* Debugging functions.  */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

/* Return the number of the heap segment containing CELL. */
static int
which_seg (SCM cell)
{
  int i;

  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
      return i;
  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
           SCM_UNPACK (cell));
  abort ();
}


#ifdef GUILE_NEW_GC_SCHEME
static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, " %5d %d-cells in segment %d\n",
                     count, master->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, " %5d %d-cells in segment %d\n",
             count, master->span, last_seg);
}
#else
static void
map_free_list (scm_freelist_t *freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, " %5d %d-cells in segment %d\n",
                     count, freelist->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, " %5d %d-cells in segment %d\n",
             count, freelist->span, last_seg);
}
#endif

SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;
  fprintf (stderr, "%d segments total (%d:%d",
           scm_n_heap_segs,
           scm_heap_table[0].span,
           scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
             scm_heap_table[i].span,
             scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
#ifdef GUILE_NEW_GC_SCHEME
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
#else
  map_free_list (&scm_freelist);
  map_free_list (&scm_freelist2);
#endif
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#ifdef GUILE_NEW_GC_SCHEME
static int last_cluster;
static int last_size;

static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
    if (SCM_UNPACK_CAR (ls) == scm_tc_free_cell)
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %d\n", title, n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%d\n", last_size);
          else
            fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %d", title, i);
      else
        fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}

static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}

SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
#endif

#endif
484
485#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
486
487/* Number of calls to SCM_NEWCELL since startup. */
488static unsigned long scm_newcell_count;
acb0a19c 489static unsigned long scm_newcell2_count;
cf2d30f6
JB
490
491/* Search freelist for anything that isn't marked as a free cell.
492 Abort if we find something. */
8ded62a3
MD
493#ifdef GUILE_NEW_GC_SCHEME
494static void
495scm_check_freelist (SCM freelist)
496{
497 SCM f;
498 int i = 0;
499
500 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
501 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
502 {
503 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
504 scm_newcell_count, i);
505 fflush (stderr);
506 abort ();
507 }
508}
509#else
cf2d30f6 510static void
4c48ba06 511scm_check_freelist (scm_freelist_t *freelist)
cf2d30f6
JB
512{
513 SCM f;
514 int i = 0;
515
4c48ba06 516 for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
cf2d30f6
JB
517 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
518 {
519 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
520 scm_newcell_count, i);
521 fflush (stderr);
522 abort ();
523 }
524}
8ded62a3 525#endif
cf2d30f6
JB
526
527static int scm_debug_check_freelist = 0;
25748c78 528
a00c95d9 529SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 530 (SCM flag),
da4a1dba
GB
531 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
532 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
533 "compile-time flag was selected.\n")
1bbd0b84 534#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 535{
945fec60 536 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
537 return SCM_UNSPECIFIED;
538}
1bbd0b84 539#undef FUNC_NAME
25748c78
GB
540
541
4a4c9785
MD
542#ifdef GUILE_NEW_GC_SCHEME
543
544SCM
545scm_debug_newcell (void)
546{
547 SCM new;
548
549 scm_newcell_count++;
550 if (scm_debug_check_freelist)
551 {
8ded62a3 552 scm_check_freelist (scm_freelist);
4a4c9785
MD
553 scm_gc();
554 }
555
556 /* The rest of this is supposed to be identical to the SCM_NEWCELL
557 macro. */
558 if (SCM_IMP (scm_freelist))
559 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
560 else
561 {
562 new = scm_freelist;
563 scm_freelist = SCM_CDR (scm_freelist);
564 SCM_SETCAR (new, scm_tc16_allocated);
565 }
566
567 return new;
568}
569
570SCM
571scm_debug_newcell2 (void)
572{
573 SCM new;
574
575 scm_newcell2_count++;
576 if (scm_debug_check_freelist)
577 {
8ded62a3 578 scm_check_freelist (scm_freelist2);
4a4c9785
MD
579 scm_gc ();
580 }
581
582 /* The rest of this is supposed to be identical to the SCM_NEWCELL
583 macro. */
584 if (SCM_IMP (scm_freelist2))
585 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
586 else
587 {
588 new = scm_freelist2;
589 scm_freelist2 = SCM_CDR (scm_freelist2);
590 SCM_SETCAR (new, scm_tc16_allocated);
591 }
592
593 return new;
594}
595
596#else /* GUILE_NEW_GC_SCHEME */
597
f2333166
JB
598SCM
599scm_debug_newcell (void)
cf2d30f6 600{
f2333166
JB
601 SCM new;
602
cf2d30f6 603 scm_newcell_count++;
945fec60
MD
604 if (scm_debug_check_freelist)
605 {
606 scm_check_freelist (&scm_freelist);
607 scm_gc();
608 }
cf2d30f6
JB
609
610 /* The rest of this is supposed to be identical to the SCM_NEWCELL
611 macro. */
945fec60
MD
612 if (SCM_IMP (scm_freelist.cells))
613 new = scm_gc_for_newcell (&scm_freelist);
cf2d30f6
JB
614 else
615 {
945fec60
MD
616 new = scm_freelist.cells;
617 scm_freelist.cells = SCM_CDR (scm_freelist.cells);
d9de3881 618 SCM_SETCAR (new, scm_tc16_allocated);
cf2d30f6
JB
619 ++scm_cells_allocated;
620 }
f2333166
JB
621
622 return new;
cf2d30f6
JB
623}
624
acb0a19c
MD
625SCM
626scm_debug_newcell2 (void)
627{
628 SCM new;
629
630 scm_newcell2_count++;
631 if (scm_debug_check_freelist) {
945fec60 632 scm_check_freelist (&scm_freelist2);
acb0a19c
MD
633 scm_gc();
634 }
635
636 /* The rest of this is supposed to be identical to the SCM_NEWCELL2
637 macro. */
945fec60
MD
638 if (SCM_IMP (scm_freelist2.cells))
639 new = scm_gc_for_newcell (&scm_freelist2);
acb0a19c
MD
640 else
641 {
945fec60
MD
642 new = scm_freelist2.cells;
643 scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
acb0a19c
MD
644 SCM_SETCAR (new, scm_tc16_allocated);
645 scm_cells_allocated += 2;
646 }
647
648 return new;
649}
650
4a4c9785 651#endif /* GUILE_NEW_GC_SCHEME */
fca7547b 652#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
653
654\f
0f2d19dd 655
b37fe1c5
MD
656#ifdef GUILE_NEW_GC_SCHEME
657static unsigned long
658master_cells_allocated (scm_freelist_t *master)
659{
660 int objects = master->clusters_allocated * (master->cluster_size - 1);
661 if (SCM_NULLP (master->clusters))
662 objects -= master->left_to_collect;
663 return master->span * objects;
664}
665
666static unsigned long
667freelist_length (SCM freelist)
668{
669 int n;
670 for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
671 ++n;
672 return n;
673}
674
675static unsigned long
676compute_cells_allocated ()
677{
678 return (scm_cells_allocated
679 + master_cells_allocated (&scm_master_freelist)
680 + master_cells_allocated (&scm_master_freelist2)
681 - scm_master_freelist.span * freelist_length (scm_freelist)
682 - scm_master_freelist2.span * freelist_length (scm_freelist2));
683}
684#endif
685
0f2d19dd
JB
686/* {Scheme Interface to GC}
687 */
688
a00c95d9 689SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 690 (),
b380b885 691 "Returns an association list of statistics about Guile's current use of storage. ")
1bbd0b84 692#define FUNC_NAME s_scm_gc_stats
0f2d19dd
JB
693{
694 int i;
695 int n;
696 SCM heap_segs;
c209c88e
GB
697 long int local_scm_mtrigger;
698 long int local_scm_mallocated;
699 long int local_scm_heap_size;
700 long int local_scm_cells_allocated;
701 long int local_scm_gc_time_taken;
0f2d19dd
JB
702 SCM answer;
703
704 SCM_DEFER_INTS;
705 scm_block_gc = 1;
706 retry:
707 heap_segs = SCM_EOL;
708 n = scm_n_heap_segs;
709 for (i = scm_n_heap_segs; i--; )
710 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
711 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
712 heap_segs);
713 if (scm_n_heap_segs != n)
714 goto retry;
715 scm_block_gc = 0;
716
7febb4a2
MD
717 /* Below, we cons to produce the resulting list. We want a snapshot of
718 * the heap situation before consing.
719 */
0f2d19dd
JB
720 local_scm_mtrigger = scm_mtrigger;
721 local_scm_mallocated = scm_mallocated;
b37fe1c5 722 local_scm_heap_size = SCM_HEAP_SIZE;
4a4c9785 723#ifdef GUILE_NEW_GC_SCHEME
b37fe1c5 724 local_scm_cells_allocated = compute_cells_allocated ();
4a4c9785 725#else
0f2d19dd 726 local_scm_cells_allocated = scm_cells_allocated;
b37fe1c5 727#endif
0f2d19dd
JB
728 local_scm_gc_time_taken = scm_gc_time_taken;
729
730 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
731 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
732 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
733 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
734 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
735 scm_cons (sym_heap_segments, heap_segs),
736 SCM_UNDEFINED);
737 SCM_ALLOW_INTS;
738 return answer;
739}
1bbd0b84 740#undef FUNC_NAME
0f2d19dd
JB
741
742
a00c95d9 743void
6e8d25a6 744scm_gc_start (const char *what)
0f2d19dd
JB
745{
746 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
b37fe1c5 747 scm_gc_cells_collected = 0;
8b0d194f 748#ifdef GUILE_NEW_GC_SCHEME
37ddcaf6 749 scm_gc_yield_1 = scm_gc_yield;
8b0d194f
MD
750 scm_gc_yield = (scm_cells_allocated
751 + master_cells_allocated (&scm_master_freelist)
752 + master_cells_allocated (&scm_master_freelist2));
753#endif
0f2d19dd
JB
754 scm_gc_malloc_collected = 0;
755 scm_gc_ports_collected = 0;
756}
757
a00c95d9 758void
0f2d19dd 759scm_gc_end ()
0f2d19dd
JB
760{
761 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
c209c88e 762 scm_gc_time_taken += scm_gc_rt;
9ea54cc6 763 scm_system_async_mark (scm_gc_async);
0f2d19dd
JB
764}
765
766
a00c95d9 767SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 768 (SCM obj),
b380b885
MD
769 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
770 "returned by this function for @var{obj}")
1bbd0b84 771#define FUNC_NAME s_scm_object_address
0f2d19dd 772{
54778cd3 773 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 774}
1bbd0b84 775#undef FUNC_NAME
0f2d19dd
JB
776
777
a00c95d9 778SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
1bbd0b84 779 (),
b380b885
MD
780 "Scans all of SCM objects and reclaims for further use those that are\n"
781 "no longer accessible.")
1bbd0b84 782#define FUNC_NAME s_scm_gc
0f2d19dd
JB
783{
784 SCM_DEFER_INTS;
785 scm_igc ("call");
786 SCM_ALLOW_INTS;
787 return SCM_UNSPECIFIED;
788}
1bbd0b84 789#undef FUNC_NAME
0f2d19dd
JB
790
791
792\f
793/* {C Interface For When GC is Triggered}
794 */
795
4a4c9785
MD
796#ifdef GUILE_NEW_GC_SCHEME
797
b37fe1c5 798static void
8fef55a8 799adjust_min_yield (scm_freelist_t *freelist)
b37fe1c5 800{
8fef55a8 801 /* min yield is adjusted upwards so that next predicted total yield
bda1446c 802 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
803 * `min_yield_fraction' of total heap size. Note, however, that
804 * the absolute value of min_yield will correspond to `collected'
bda1446c 805 * on one master (the one which currently is triggering GC).
b37fe1c5 806 *
bda1446c
MD
807 * The reason why we look at total yield instead of cells collected
808 * on one list is that we want to take other freelists into account.
809 * On this freelist, we know that (local) yield = collected cells,
810 * but that's probably not the case on the other lists.
b37fe1c5
MD
811 *
812 * (We might consider computing a better prediction, for example
813 * by computing an average over multiple GC:s.)
814 */
8fef55a8 815 if (freelist->min_yield_fraction)
b37fe1c5 816 {
37ddcaf6 817 /* Pick largest of last two yields. */
8fef55a8
MD
818 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
819 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5
MD
820#ifdef DEBUGINFO
821 fprintf (stderr, " after GC = %d, delta = %d\n",
822 scm_cells_allocated,
823 delta);
824#endif
825 if (delta > 0)
8fef55a8 826 freelist->min_yield += delta;
b37fe1c5
MD
827 }
828}
829
4a4c9785 830/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
831 * common while the freelist will be individual for each thread.
832 */
4a4c9785
MD
833
834SCM
835scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
836{
837 SCM cell;
838 ++scm_ints_disabled;
4c48ba06
MD
839 do
840 {
841 if (SCM_NULLP (master->clusters))
842 {
843 if (master->grow_heap_p)
844 {
845 master->grow_heap_p = 0;
846 alloc_some_heap (master);
847 }
848 else
b37fe1c5 849 {
37ddcaf6
MD
850#ifdef DEBUGINFO
851 fprintf (stderr, "allocated = %d, ",
852 scm_cells_allocated
853 + master_cells_allocated (&scm_master_freelist)
854 + master_cells_allocated (&scm_master_freelist2));
855#endif
b37fe1c5 856 scm_igc ("cells");
8fef55a8 857 adjust_min_yield (master);
b37fe1c5 858 }
4c48ba06
MD
859 }
860 cell = SCM_CAR (master->clusters);
861 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 862 ++master->clusters_allocated;
4c48ba06
MD
863 }
864 while (SCM_NULLP (cell));
4a4c9785 865 --scm_ints_disabled;
4a4c9785 866 *freelist = SCM_CDR (cell);
54778cd3 867 SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
4a4c9785
MD
868 return cell;
869}
870
4c48ba06
MD
871#if 0
872/* This is a support routine which can be used to reserve a cluster
873 * for some special use, such as debugging. It won't be useful until
874 * free cells are preserved between garbage collections.
875 */
876
877void
878scm_alloc_cluster (scm_freelist_t *master)
879{
880 SCM freelist, cell;
881 cell = scm_gc_for_newcell (master, &freelist);
882 SCM_SETCDR (cell, freelist);
883 return cell;
884}
885#endif
886
4a4c9785
MD
887#else /* GUILE_NEW_GC_SCHEME */
888
0f2d19dd 889void
4c48ba06 890scm_gc_for_alloc (scm_freelist_t *freelist)
0f2d19dd
JB
891{
892 SCM_REDEFER_INTS;
893 scm_igc ("cells");
945fec60 894#ifdef GUILE_DEBUG_FREELIST
acb0a19c 895 fprintf (stderr, "Collected: %d, min_yield: %d\n",
4c48ba06 896 freelist->collected, MIN_GC_YIELD (freelist));
acb0a19c 897#endif
4c48ba06
MD
898 if ((freelist->collected < MIN_GC_YIELD (freelist))
899 || SCM_IMP (freelist->cells))
900 alloc_some_heap (freelist);
0f2d19dd
JB
901 SCM_REALLOW_INTS;
902}
903
904
a00c95d9 905SCM
4c48ba06 906scm_gc_for_newcell (scm_freelist_t *freelist)
0f2d19dd
JB
907{
908 SCM fl;
4c48ba06
MD
909 scm_gc_for_alloc (freelist);
910 fl = freelist->cells;
911 freelist->cells = SCM_CDR (fl);
acb0a19c 912 SCM_SETCAR (fl, scm_tc16_allocated);
0f2d19dd
JB
913 return fl;
914}
915
4a4c9785
MD
916#endif /* GUILE_NEW_GC_SCHEME */
917
801cb5e7
MD
918SCM scm_after_gc_hook;
919
920scm_c_hook_t scm_before_gc_c_hook;
921scm_c_hook_t scm_before_mark_c_hook;
922scm_c_hook_t scm_before_sweep_c_hook;
923scm_c_hook_t scm_after_sweep_c_hook;
924scm_c_hook_t scm_after_gc_c_hook;
925
0f2d19dd 926void
1bbd0b84 927scm_igc (const char *what)
0f2d19dd
JB
928{
929 int j;
930
801cb5e7 931 scm_c_hook_run (&scm_before_gc_c_hook, 0);
4c48ba06
MD
932#ifdef DEBUGINFO
933 fprintf (stderr,
934 SCM_NULLP (scm_freelist)
935 ? "*"
936 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
937#endif
42db06f0
MD
938#ifdef USE_THREADS
939 /* During the critical section, only the current thread may run. */
940 SCM_THREAD_CRITICAL_SECTION_START;
941#endif
942
e242dfd2 943 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 944
ab4bef85
JB
945 scm_gc_start (what);
946
947 if (!scm_stack_base || scm_block_gc)
948 {
949 scm_gc_end ();
950 return;
951 }
952
a5c314c8
JB
953 if (scm_mallocated < 0)
954 /* The byte count of allocated objects has underflowed. This is
955 probably because you forgot to report the sizes of objects you
956 have allocated, by calling scm_done_malloc or some such. When
957 the GC freed them, it subtracted their size from
958 scm_mallocated, which underflowed. */
959 abort ();
c45acc34 960
ab4bef85
JB
961 if (scm_gc_heap_lock)
962 /* We've invoked the collector while a GC is already in progress.
963 That should never happen. */
964 abort ();
0f2d19dd
JB
965
966 ++scm_gc_heap_lock;
ab4bef85 967
0f2d19dd
JB
968 /* unprotect any struct types with no instances */
969#if 0
970 {
971 SCM type_list;
972 SCM * pos;
973
974 pos = &scm_type_obj_list;
975 type_list = scm_type_obj_list;
976 while (type_list != SCM_EOL)
977 if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
978 {
24e68a57 979 pos = SCM_CDRLOC (type_list);
0f2d19dd
JB
980 type_list = SCM_CDR (type_list);
981 }
982 else
983 {
984 *pos = SCM_CDR (type_list);
985 type_list = SCM_CDR (type_list);
986 }
987 }
988#endif
989
990 /* flush dead entries from the continuation stack */
991 {
992 int x;
993 int bound;
994 SCM * elts;
995 elts = SCM_VELTS (scm_continuation_stack);
996 bound = SCM_LENGTH (scm_continuation_stack);
997 x = SCM_INUM (scm_continuation_stack_ptr);
998 while (x < bound)
999 {
1000 elts[x] = SCM_BOOL_F;
1001 ++x;
1002 }
1003 }
1004
801cb5e7
MD
1005 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1006
42db06f0 1007#ifndef USE_THREADS
a00c95d9 1008
0f2d19dd
JB
1009 /* Protect from the C stack. This must be the first marking
1010 * done because it provides information about what objects
1011 * are "in-use" by the C code. "in-use" objects are those
1012 * for which the values from SCM_LENGTH and SCM_CHARS must remain
1013 * usable. This requirement is stricter than a liveness
1014 * requirement -- in particular, it constrains the implementation
1015 * of scm_vector_set_length_x.
1016 */
1017 SCM_FLUSH_REGISTER_WINDOWS;
1018 /* This assumes that all registers are saved into the jmp_buf */
1019 setjmp (scm_save_regs_gc_mark);
1020 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
ce4a361d
JB
1021 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
1022 sizeof scm_save_regs_gc_mark)
1023 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
1024
1025 {
1026 /* stack_len is long rather than scm_sizet in order to guarantee that
1027 &stack_len is long aligned */
1028#ifdef SCM_STACK_GROWS_UP
1029#ifdef nosve
1030 long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
1031#else
1032 long stack_len = scm_stack_size (scm_stack_base);
1033#endif
1034 scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
1035#else
1036#ifdef nosve
1037 long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
1038#else
1039 long stack_len = scm_stack_size (scm_stack_base);
1040#endif
1041 scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
1042#endif
1043 }
1044
42db06f0
MD
1045#else /* USE_THREADS */
1046
1047 /* Mark every thread's stack and registers */
945fec60 1048 scm_threads_mark_stacks ();
42db06f0
MD
1049
1050#endif /* USE_THREADS */
0f2d19dd
JB
1051
1052 /* FIXME: insert a phase to un-protect string-data preserved
1053 * in scm_vector_set_length_x.
1054 */
1055
1056 j = SCM_NUM_PROTECTS;
1057 while (j--)
1058 scm_gc_mark (scm_sys_protects[j]);
1059
9de33deb
MD
1060 /* FIXME: we should have a means to register C functions to be run
1061 * in different phases of GC
a00c95d9 1062 */
9de33deb 1063 scm_mark_subr_table ();
a00c95d9 1064
42db06f0
MD
1065#ifndef USE_THREADS
1066 scm_gc_mark (scm_root->handle);
1067#endif
a00c95d9 1068
801cb5e7 1069 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
0493cd89 1070
0f2d19dd
JB
1071 scm_gc_sweep ();
1072
801cb5e7
MD
1073 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1074
0f2d19dd
JB
1075 --scm_gc_heap_lock;
1076 scm_gc_end ();
42db06f0
MD
1077
1078#ifdef USE_THREADS
1079 SCM_THREAD_CRITICAL_SECTION_END;
1080#endif
801cb5e7 1081 scm_c_hook_run (&scm_after_gc_c_hook, 0);
0f2d19dd
JB
1082}
1083
1084\f
a00c95d9 1085/* {Mark/Sweep}
0f2d19dd
JB
1086 */
1087
1088
1089
1090/* Mark an object precisely.
1091 */
a00c95d9 1092void
1bbd0b84 1093scm_gc_mark (SCM p)
0f2d19dd
JB
1094{
1095 register long i;
1096 register SCM ptr;
1097
1098 ptr = p;
1099
1100gc_mark_loop:
1101 if (SCM_IMP (ptr))
1102 return;
1103
1104gc_mark_nimp:
1105 if (SCM_NCELLP (ptr))
f8392303 1106 scm_wta (ptr, "rogue pointer in heap", NULL);
0f2d19dd
JB
1107
1108 switch (SCM_TYP7 (ptr))
1109 {
1110 case scm_tcs_cons_nimcar:
1111 if (SCM_GCMARKP (ptr))
1112 break;
1113 SCM_SETGCMARK (ptr);
1114 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
1115 {
1116 ptr = SCM_CAR (ptr);
1117 goto gc_mark_nimp;
1118 }
1119 scm_gc_mark (SCM_CAR (ptr));
1120 ptr = SCM_GCCDR (ptr);
1121 goto gc_mark_nimp;
1122 case scm_tcs_cons_imcar:
acb0a19c
MD
1123 if (SCM_GCMARKP (ptr))
1124 break;
1125 SCM_SETGCMARK (ptr);
1126 ptr = SCM_GCCDR (ptr);
1127 goto gc_mark_loop;
e641afaf 1128 case scm_tc7_pws:
0f2d19dd
JB
1129 if (SCM_GCMARKP (ptr))
1130 break;
1131 SCM_SETGCMARK (ptr);
54778cd3 1132 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
0f2d19dd
JB
1133 ptr = SCM_GCCDR (ptr);
1134 goto gc_mark_loop;
1135 case scm_tcs_cons_gloc:
1136 if (SCM_GCMARKP (ptr))
1137 break;
1138 SCM_SETGCMARK (ptr);
1139 {
c8045e8d
DH
1140 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1141 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1142 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1143 * pointer to a struct vtable data region. The fact that these are
1144 * accessed in the same way restricts the possibilites to change the
1145 * data layout of structs or heap cells.
1146 */
1147 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1148 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1149 switch (vtable_data [scm_vtable_index_vcell])
0f2d19dd
JB
1150 {
1151 default:
c8045e8d
DH
1152 {
1153 /* ptr is a gloc */
1154 SCM gloc_car = SCM_PACK (word0);
1155 scm_gc_mark (gloc_car);
1156 ptr = SCM_GCCDR (ptr);
1157 goto gc_mark_loop;
1158 }
0f2d19dd
JB
1159 case 1: /* ! */
1160 case 0: /* ! */
1161 {
c8045e8d
DH
1162 /* ptr is a struct */
1163 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
1164 int len = SCM_LENGTH (layout);
1165 char * fields_desc = SCM_CHARS (layout);
14d1400f
JB
1166 /* We're using SCM_GCCDR here like STRUCT_DATA, except
1167 that it removes the mark */
c8045e8d 1168 scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));
a00c95d9 1169
c8045e8d 1170 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
aa0761ec 1171 {
c8045e8d
DH
1172 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
1173 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
aa0761ec 1174 }
ad75306c
MD
1175 if (len)
1176 {
c8045e8d
DH
1177 int x;
1178
1179 for (x = 0; x < len - 2; x += 2, ++struct_data)
ad75306c 1180 if (fields_desc[x] == 'p')
c8045e8d 1181 scm_gc_mark (SCM_PACK (*struct_data));
ad75306c
MD
1182 if (fields_desc[x] == 'p')
1183 {
1184 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
c8045e8d
DH
1185 for (x = *struct_data; x; --x)
1186 scm_gc_mark (SCM_PACK (*++struct_data));
ad75306c 1187 else
c8045e8d 1188 scm_gc_mark (SCM_PACK (*struct_data));
ad75306c
MD
1189 }
1190 }
c8045e8d 1191 if (vtable_data [scm_vtable_index_vcell] == 0)
0f2d19dd 1192 {
c8045e8d
DH
1193 vtable_data [scm_vtable_index_vcell] = 1;
1194 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
0f2d19dd
JB
1195 goto gc_mark_loop;
1196 }
1197 }
1198 }
1199 }
1200 break;
1201 case scm_tcs_closures:
1202 if (SCM_GCMARKP (ptr))
1203 break;
1204 SCM_SETGCMARK (ptr);
1205 if (SCM_IMP (SCM_CDR (ptr)))
1206 {
1207 ptr = SCM_CLOSCAR (ptr);
1208 goto gc_mark_nimp;
1209 }
1210 scm_gc_mark (SCM_CLOSCAR (ptr));
1211 ptr = SCM_GCCDR (ptr);
1212 goto gc_mark_nimp;
1213 case scm_tc7_vector:
1214 case scm_tc7_lvector:
1215#ifdef CCLO
1216 case scm_tc7_cclo:
1217#endif
1218 if (SCM_GC8MARKP (ptr))
1219 break;
1220 SCM_SETGC8MARK (ptr);
1221 i = SCM_LENGTH (ptr);
1222 if (i == 0)
1223 break;
1224 while (--i > 0)
1225 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1226 scm_gc_mark (SCM_VELTS (ptr)[i]);
1227 ptr = SCM_VELTS (ptr)[0];
1228 goto gc_mark_loop;
1229 case scm_tc7_contin:
1230 if SCM_GC8MARKP
1231 (ptr) break;
1232 SCM_SETGC8MARK (ptr);
c68296f8 1233 if (SCM_VELTS (ptr))
41b0806d 1234 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
c68296f8
MV
1235 (scm_sizet)
1236 (SCM_LENGTH (ptr) +
1237 (sizeof (SCM_STACKITEM) + -1 +
1238 sizeof (scm_contregs)) /
1239 sizeof (SCM_STACKITEM)));
0f2d19dd 1240 break;
afe5177e 1241#ifdef HAVE_ARRAYS
0f2d19dd
JB
1242 case scm_tc7_bvect:
1243 case scm_tc7_byvect:
1244 case scm_tc7_ivect:
1245 case scm_tc7_uvect:
1246 case scm_tc7_fvect:
1247 case scm_tc7_dvect:
1248 case scm_tc7_cvect:
1249 case scm_tc7_svect:
5c11cc9d 1250#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1251 case scm_tc7_llvect:
1252#endif
afe5177e 1253#endif
0f2d19dd 1254 case scm_tc7_string:
0f2d19dd
JB
1255 SCM_SETGC8MARK (ptr);
1256 break;
1257
1258 case scm_tc7_substring:
0f2d19dd
JB
1259 if (SCM_GC8MARKP(ptr))
1260 break;
1261 SCM_SETGC8MARK (ptr);
1262 ptr = SCM_CDR (ptr);
1263 goto gc_mark_loop;
1264
1265 case scm_tc7_wvect:
1266 if (SCM_GC8MARKP(ptr))
1267 break;
ab4bef85
JB
1268 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1269 scm_weak_vectors = ptr;
0f2d19dd
JB
1270 SCM_SETGC8MARK (ptr);
1271 if (SCM_IS_WHVEC_ANY (ptr))
1272 {
1273 int x;
1274 int len;
1275 int weak_keys;
1276 int weak_values;
1277
1278 len = SCM_LENGTH (ptr);
1279 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1280 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
a00c95d9 1281
0f2d19dd
JB
1282 for (x = 0; x < len; ++x)
1283 {
1284 SCM alist;
1285 alist = SCM_VELTS (ptr)[x];
46408039
JB
1286
1287 /* mark everything on the alist except the keys or
1288 * values, according to weak_values and weak_keys. */
0b5f3f34 1289 while ( SCM_CONSP (alist)
0f2d19dd 1290 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1291 && SCM_CONSP (SCM_CAR (alist)))
1292 {
1293 SCM kvpair;
1294 SCM next_alist;
1295
1296 kvpair = SCM_CAR (alist);
1297 next_alist = SCM_CDR (alist);
a00c95d9 1298 /*
0f2d19dd
JB
1299 * Do not do this:
1300 * SCM_SETGCMARK (alist);
1301 * SCM_SETGCMARK (kvpair);
1302 *
1303 * It may be that either the key or value is protected by
1304 * an escaped reference to part of the spine of this alist.
1305 * If we mark the spine here, and only mark one or neither of the
1306 * key and value, they may never be properly marked.
1307 * This leads to a horrible situation in which an alist containing
1308 * freelist cells is exported.
1309 *
1310 * So only mark the spines of these arrays last of all marking.
1311 * If somebody confuses us by constructing a weak vector
1312 * with a circular alist then we are hosed, but at least we
1313 * won't prematurely drop table entries.
1314 */
1315 if (!weak_keys)
1316 scm_gc_mark (SCM_CAR (kvpair));
1317 if (!weak_values)
1318 scm_gc_mark (SCM_GCCDR (kvpair));
1319 alist = next_alist;
1320 }
1321 if (SCM_NIMP (alist))
1322 scm_gc_mark (alist);
1323 }
1324 }
1325 break;
1326
1327 case scm_tc7_msymbol:
1328 if (SCM_GC8MARKP(ptr))
1329 break;
1330 SCM_SETGC8MARK (ptr);
1331 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1332 ptr = SCM_SYMBOL_PROPS (ptr);
1333 goto gc_mark_loop;
1334 case scm_tc7_ssymbol:
1335 if (SCM_GC8MARKP(ptr))
1336 break;
1337 SCM_SETGC8MARK (ptr);
1338 break;
1339 case scm_tcs_subrs:
9de33deb 1340 break;
0f2d19dd
JB
1341 case scm_tc7_port:
1342 i = SCM_PTOBNUM (ptr);
1343 if (!(i < scm_numptob))
1344 goto def;
1345 if (SCM_GC8MARKP (ptr))
1346 break;
dc53f026 1347 SCM_SETGC8MARK (ptr);
ebf7394e
GH
1348 if (SCM_PTAB_ENTRY(ptr))
1349 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
dc53f026
JB
1350 if (scm_ptobs[i].mark)
1351 {
1352 ptr = (scm_ptobs[i].mark) (ptr);
1353 goto gc_mark_loop;
1354 }
1355 else
1356 return;
0f2d19dd
JB
1357 break;
1358 case scm_tc7_smob:
1359 if (SCM_GC8MARKP (ptr))
1360 break;
dc53f026 1361 SCM_SETGC8MARK (ptr);
acb0a19c 1362 switch (SCM_GCTYP16 (ptr))
0f2d19dd
JB
1363 { /* should be faster than going through scm_smobs */
1364 case scm_tc_free_cell:
1365 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1bbd0b84 1366 case scm_tc16_allocated:
acb0a19c
MD
1367 case scm_tc16_big:
1368 case scm_tc16_real:
1369 case scm_tc16_complex:
0f2d19dd
JB
1370 break;
1371 default:
1372 i = SCM_SMOBNUM (ptr);
1373 if (!(i < scm_numsmob))
1374 goto def;
dc53f026
JB
1375 if (scm_smobs[i].mark)
1376 {
1377 ptr = (scm_smobs[i].mark) (ptr);
1378 goto gc_mark_loop;
1379 }
1380 else
1381 return;
0f2d19dd
JB
1382 }
1383 break;
1384 default:
1385 def:scm_wta (ptr, "unknown type in ", "gc_mark");
1386 }
1387}
1388
1389
1390/* Mark a Region Conservatively
1391 */
1392
a00c95d9 1393void
6e8d25a6 1394scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
0f2d19dd
JB
1395{
1396 register long m = n;
1397 register int i, j;
1398 register SCM_CELLPTR ptr;
1399
1400 while (0 <= --m)
c67baafd 1401 if (SCM_CELLP (* (SCM *) &x[m]))
0f2d19dd 1402 {
195e6201 1403 ptr = SCM2PTR (* (SCM *) &x[m]);
0f2d19dd
JB
1404 i = 0;
1405 j = scm_n_heap_segs - 1;
1406 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1407 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1408 {
1409 while (i <= j)
1410 {
1411 int seg_id;
1412 seg_id = -1;
1413 if ( (i == j)
1414 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1415 seg_id = i;
1416 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1417 seg_id = j;
1418 else
1419 {
1420 int k;
1421 k = (i + j) / 2;
1422 if (k == i)
1423 break;
1424 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1425 {
1426 j = k;
1427 ++i;
1428 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1429 continue;
1430 else
1431 break;
1432 }
1433 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1434 {
1435 i = k;
1436 --j;
1437 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1438 continue;
1439 else
1440 break;
1441 }
1442 }
c67baafd 1443 if (!scm_heap_table[seg_id].valid
0f2d19dd
JB
1444 || scm_heap_table[seg_id].valid (ptr,
1445 &scm_heap_table[seg_id]))
c67baafd
MD
1446 if (scm_heap_table[seg_id].span == 1
1447 || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
1448 scm_gc_mark (* (SCM *) &x[m]);
0f2d19dd
JB
1449 break;
1450 }
1451
1452 }
1453 }
1454}
1455
1456
1a548472
DH
1457/* The function scm_cellp determines whether an SCM value can be regarded as a
1458 * pointer to a cell on the heap. Binary search is used in order to determine
1459 * the heap segment that contains the cell.
1460 */
2e11a577 1461int
6e8d25a6 1462scm_cellp (SCM value)
2e11a577 1463{
1a548472
DH
1464 if (SCM_CELLP (value)) {
1465 scm_cell * ptr = SCM2PTR (value);
1466 unsigned int i = 0;
1467 unsigned int j = scm_n_heap_segs - 1;
1468
1469 while (i < j) {
1470 int k = (i + j) / 2;
1471 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1472 j = k;
1473 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1474 i = k + 1;
1475 }
1476 }
2e11a577 1477
1a548472
DH
1478 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1479 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1480 && (!scm_heap_table[i].valid || scm_heap_table[i].valid (ptr, &scm_heap_table[i]))
1481 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
1482 return 1;
1483 } else {
1484 return 0;
2e11a577 1485 }
1a548472
DH
1486 } else {
1487 return 0;
1488 }
2e11a577
MD
1489}
1490
1491
4c48ba06
MD
1492#ifdef GUILE_NEW_GC_SCHEME
1493static void
1494gc_sweep_freelist_start (scm_freelist_t *freelist)
1495{
1496 freelist->cells = SCM_EOL;
1497 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1498 freelist->clusters_allocated = 0;
4c48ba06
MD
1499 freelist->clusters = SCM_EOL;
1500 freelist->clustertail = &freelist->clusters;
1811ebce 1501 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1502 freelist->collected = 0;
1503}
1504
1505static void
1506gc_sweep_freelist_finish (scm_freelist_t *freelist)
1507{
1811ebce 1508 int collected;
4c48ba06
MD
1509 *freelist->clustertail = freelist->cells;
1510 if (SCM_NNULLP (freelist->cells))
1511 {
1512 SCM c = freelist->cells;
1513 SCM_SETCAR (c, SCM_CDR (c));
1514 SCM_SETCDR (c, SCM_EOL);
1515 freelist->collected +=
1516 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1517 }
b37fe1c5 1518 scm_gc_cells_collected += freelist->collected;
a00c95d9 1519
8fef55a8 1520 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1521 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1522 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1523 * size. This means that a too low yield is compensated by more
1524 * heap on the list which is currently doing most work, which is
1525 * just what we want.
1526 */
1811ebce 1527 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1528 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06
MD
1529}
1530#endif
0f2d19dd 1531
a00c95d9 1532void
0f2d19dd 1533scm_gc_sweep ()
0f2d19dd
JB
1534{
1535 register SCM_CELLPTR ptr;
0f2d19dd 1536 register SCM nfreelist;
4c48ba06 1537 register scm_freelist_t *freelist;
0f2d19dd 1538 register long m;
0f2d19dd 1539 register int span;
15e9d186 1540 long i;
0f2d19dd
JB
1541 scm_sizet seg_size;
1542
0f2d19dd 1543 m = 0;
0f2d19dd 1544
4a4c9785 1545#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1546 gc_sweep_freelist_start (&scm_master_freelist);
1547 gc_sweep_freelist_start (&scm_master_freelist2);
4a4c9785 1548#else
cf2d30f6
JB
1549 /* Reset all free list pointers. We'll reconstruct them completely
1550 while scanning. */
1551 for (i = 0; i < scm_n_heap_segs; i++)
4c48ba06 1552 scm_heap_table[i].freelist->cells = SCM_EOL;
4a4c9785 1553#endif
a00c95d9 1554
cf2d30f6 1555 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1556 {
0df07278 1557#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1558 register unsigned int left_to_collect;
1559#else
1560 register scm_sizet n = 0;
0df07278 1561#endif
4c48ba06 1562 register scm_sizet j;
15e9d186 1563
cf2d30f6
JB
1564 /* Unmarked cells go onto the front of the freelist this heap
1565 segment points to. Rather than updating the real freelist
1566 pointer as we go along, we accumulate the new head in
1567 nfreelist. Then, if it turns out that the entire segment is
1568 free, we free (i.e., malloc's free) the whole segment, and
1569 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1570 freelist = scm_heap_table[i].freelist;
1571 nfreelist = freelist->cells;
4a4c9785 1572#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1573 left_to_collect = freelist->left_to_collect;
4a4c9785 1574#endif
945fec60 1575 span = scm_heap_table[i].span;
cf2d30f6 1576
a00c95d9
ML
1577 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1578 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
0f2d19dd
JB
1579 for (j = seg_size + span; j -= span; ptr += span)
1580 {
96f6f4ae
DH
1581 SCM scmptr = PTR2SCM (ptr);
1582
0f2d19dd
JB
1583 switch SCM_TYP7 (scmptr)
1584 {
1585 case scm_tcs_cons_gloc:
0f2d19dd 1586 {
c8045e8d
DH
1587 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1588 * struct or a gloc. See the corresponding comment in
1589 * scm_gc_mark.
1590 */
1591 scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
1592 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1593 if (SCM_GCMARKP (scmptr))
0f2d19dd 1594 {
c8045e8d
DH
1595 if (vtable_data [scm_vtable_index_vcell] == 1)
1596 vtable_data [scm_vtable_index_vcell] = 0;
1597 goto cmrkcontinue;
1598 }
1599 else
1600 {
1601 if (vtable_data [scm_vtable_index_vcell] == 0
1602 || vtable_data [scm_vtable_index_vcell] == 1)
1603 {
1604 scm_struct_free_t free
1605 = (scm_struct_free_t) vtable_data[scm_struct_i_free];
1606 m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
1607 }
0f2d19dd
JB
1608 }
1609 }
1610 break;
1611 case scm_tcs_cons_imcar:
1612 case scm_tcs_cons_nimcar:
1613 case scm_tcs_closures:
e641afaf 1614 case scm_tc7_pws:
0f2d19dd
JB
1615 if (SCM_GCMARKP (scmptr))
1616 goto cmrkcontinue;
1617 break;
1618 case scm_tc7_wvect:
1619 if (SCM_GC8MARKP (scmptr))
1620 {
1621 goto c8mrkcontinue;
1622 }
1623 else
1624 {
ab4bef85
JB
1625 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1626 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
0f2d19dd
JB
1627 break;
1628 }
1629
1630 case scm_tc7_vector:
1631 case scm_tc7_lvector:
1632#ifdef CCLO
1633 case scm_tc7_cclo:
1634#endif
1635 if (SCM_GC8MARKP (scmptr))
1636 goto c8mrkcontinue;
1637
1638 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1639 freechars:
1640 scm_must_free (SCM_CHARS (scmptr));
1641 /* SCM_SETCHARS(scmptr, 0);*/
1642 break;
afe5177e 1643#ifdef HAVE_ARRAYS
0f2d19dd
JB
1644 case scm_tc7_bvect:
1645 if SCM_GC8MARKP (scmptr)
1646 goto c8mrkcontinue;
1647 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1648 goto freechars;
1649 case scm_tc7_byvect:
1650 if SCM_GC8MARKP (scmptr)
1651 goto c8mrkcontinue;
1652 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1653 goto freechars;
1654 case scm_tc7_ivect:
1655 case scm_tc7_uvect:
1656 if SCM_GC8MARKP (scmptr)
1657 goto c8mrkcontinue;
1658 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1659 goto freechars;
1660 case scm_tc7_svect:
1661 if SCM_GC8MARKP (scmptr)
1662 goto c8mrkcontinue;
1663 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1664 goto freechars;
5c11cc9d 1665#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1666 case scm_tc7_llvect:
1667 if SCM_GC8MARKP (scmptr)
1668 goto c8mrkcontinue;
1669 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1670 goto freechars;
1671#endif
1672 case scm_tc7_fvect:
1673 if SCM_GC8MARKP (scmptr)
1674 goto c8mrkcontinue;
1675 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1676 goto freechars;
1677 case scm_tc7_dvect:
1678 if SCM_GC8MARKP (scmptr)
1679 goto c8mrkcontinue;
1680 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1681 goto freechars;
1682 case scm_tc7_cvect:
1683 if SCM_GC8MARKP (scmptr)
1684 goto c8mrkcontinue;
1685 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1686 goto freechars;
afe5177e 1687#endif
0f2d19dd 1688 case scm_tc7_substring:
0f2d19dd
JB
1689 if (SCM_GC8MARKP (scmptr))
1690 goto c8mrkcontinue;
1691 break;
1692 case scm_tc7_string:
0f2d19dd
JB
1693 if (SCM_GC8MARKP (scmptr))
1694 goto c8mrkcontinue;
1695 m += SCM_HUGE_LENGTH (scmptr) + 1;
1696 goto freechars;
1697 case scm_tc7_msymbol:
1698 if (SCM_GC8MARKP (scmptr))
1699 goto c8mrkcontinue;
cf551a2b
DH
1700 m += (SCM_LENGTH (scmptr) + 1
1701 + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
0f2d19dd
JB
1702 scm_must_free ((char *)SCM_SLOTS (scmptr));
1703 break;
1704 case scm_tc7_contin:
1705 if SCM_GC8MARKP (scmptr)
1706 goto c8mrkcontinue;
0db18cf4 1707 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
c68296f8
MV
1708 if (SCM_VELTS (scmptr))
1709 goto freechars;
0f2d19dd
JB
1710 case scm_tc7_ssymbol:
1711 if SCM_GC8MARKP(scmptr)
1712 goto c8mrkcontinue;
1713 break;
1714 case scm_tcs_subrs:
1715 continue;
1716 case scm_tc7_port:
1717 if SCM_GC8MARKP (scmptr)
1718 goto c8mrkcontinue;
1719 if SCM_OPENP (scmptr)
1720 {
1721 int k = SCM_PTOBNUM (scmptr);
1722 if (!(k < scm_numptob))
1723 goto sweeperr;
1724 /* Keep "revealed" ports alive. */
945fec60 1725 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1726 continue;
1727 /* Yes, I really do mean scm_ptobs[k].free */
1728 /* rather than ftobs[k].close. .close */
1729 /* is for explicit CLOSE-PORT by user */
84af0382 1730 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1731 SCM_SETSTREAM (scmptr, 0);
1732 scm_remove_from_port_table (scmptr);
1733 scm_gc_ports_collected++;
24e68a57 1734 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
0f2d19dd
JB
1735 }
1736 break;
1737 case scm_tc7_smob:
1738 switch SCM_GCTYP16 (scmptr)
1739 {
1740 case scm_tc_free_cell:
acb0a19c 1741 case scm_tc16_real:
0f2d19dd
JB
1742 if SCM_GC8MARKP (scmptr)
1743 goto c8mrkcontinue;
1744 break;
1745#ifdef SCM_BIGDIG
acb0a19c 1746 case scm_tc16_big:
0f2d19dd
JB
1747 if SCM_GC8MARKP (scmptr)
1748 goto c8mrkcontinue;
1749 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1750 goto freechars;
1751#endif /* def SCM_BIGDIG */
acb0a19c 1752 case scm_tc16_complex:
0f2d19dd
JB
1753 if SCM_GC8MARKP (scmptr)
1754 goto c8mrkcontinue;
acb0a19c
MD
1755 m += 2 * sizeof (double);
1756 goto freechars;
0f2d19dd
JB
1757 default:
1758 if SCM_GC8MARKP (scmptr)
1759 goto c8mrkcontinue;
1760
1761 {
1762 int k;
1763 k = SCM_SMOBNUM (scmptr);
1764 if (!(k < scm_numsmob))
1765 goto sweeperr;
c8045e8d 1766 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1767 break;
1768 }
1769 }
1770 break;
1771 default:
1772 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1773 }
0f2d19dd
JB
1774#if 0
1775 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1776 exit (2);
1777#endif
4a4c9785
MD
1778#ifndef GUILE_NEW_GC_SCHEME
1779 n += span;
1780#else
4c48ba06 1781 if (!--left_to_collect)
4a4c9785
MD
1782 {
1783 SCM_SETCAR (scmptr, nfreelist);
4c48ba06
MD
1784 *freelist->clustertail = scmptr;
1785 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1786
4a4c9785 1787 nfreelist = SCM_EOL;
4c48ba06
MD
1788 freelist->collected += span * freelist->cluster_size;
1789 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1790 }
1791 else
1792#endif
1793 {
1794 /* Stick the new cell on the front of nfreelist. It's
1795 critical that we mark this cell as freed; otherwise, the
1796 conservative collector might trace it as some other type
1797 of object. */
54778cd3 1798 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
1799 SCM_SETCDR (scmptr, nfreelist);
1800 nfreelist = scmptr;
1801 }
a00c95d9 1802
0f2d19dd
JB
1803 continue;
1804 c8mrkcontinue:
1805 SCM_CLRGC8MARK (scmptr);
1806 continue;
1807 cmrkcontinue:
1808 SCM_CLRGCMARK (scmptr);
1809 }
1810#ifdef GC_FREE_SEGMENTS
1811 if (n == seg_size)
1812 {
15e9d186
JB
1813 register long j;
1814
4c48ba06 1815 freelist->heap_size -= seg_size;
cf2d30f6
JB
1816 free ((char *) scm_heap_table[i].bounds[0]);
1817 scm_heap_table[i].bounds[0] = 0;
1818 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1819 scm_heap_table[j - 1] = scm_heap_table[j];
1820 scm_n_heap_segs -= 1;
cf2d30f6 1821 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1822 }
1823 else
1824#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1825 {
1826 /* Update the real freelist pointer to point to the head of
1827 the list of free cells we've built for this segment. */
4c48ba06 1828 freelist->cells = nfreelist;
4a4c9785 1829#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1830 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1831#endif
1832 }
1833
4c48ba06
MD
1834#ifndef GUILE_NEW_GC_SCHEME
1835 freelist->collected += n;
4a4c9785 1836#endif
0f2d19dd 1837
fca7547b 1838#ifdef GUILE_DEBUG_FREELIST
8ded62a3 1839#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1840 scm_check_freelist (freelist == &scm_master_freelist
8ded62a3
MD
1841 ? scm_freelist
1842 : scm_freelist2);
1843#else
4c48ba06 1844 scm_check_freelist (freelist);
8ded62a3 1845#endif
cf2d30f6
JB
1846 scm_map_free_list ();
1847#endif
4a4c9785 1848 }
a00c95d9 1849
4a4c9785 1850#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1851 gc_sweep_freelist_finish (&scm_master_freelist);
1852 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1853
8ded62a3
MD
1854 /* When we move to POSIX threads private freelists should probably
1855 be GC-protected instead. */
1856 scm_freelist = SCM_EOL;
1857 scm_freelist2 = SCM_EOL;
4a4c9785 1858#endif
a00c95d9 1859
b37fe1c5 1860 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f
MD
1861#ifdef GUILE_NEW_GC_SCHEME
1862 scm_gc_yield -= scm_cells_allocated;
1863#endif
0f2d19dd
JB
1864 scm_mallocated -= m;
1865 scm_gc_malloc_collected = m;
1866}
1867
1868
1869\f
1870
1871/* {Front end to malloc}
1872 *
c68296f8 1873 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
0f2d19dd
JB
1874 *
 1875 * These functions provide services comparable to malloc, realloc, and
1876 * free. They are for allocating malloced parts of scheme objects.
1877 * The primary purpose of the front end is to impose calls to gc.
1878 */
1879
bc9d9bb2 1880
0f2d19dd
JB
1881/* scm_must_malloc
1882 * Return newly malloced storage or throw an error.
1883 *
1884 * The parameter WHAT is a string for error reporting.
a00c95d9 1885 * If the threshold scm_mtrigger would be exceeded by this
0f2d19dd
JB
1886 * allocation, or if the first call to malloc fails,
1887 * garbage collect -- on the presumption that some objects
1888 * using malloced storage may be collected.
1889 *
1890 * The limit scm_mtrigger may be raised by this allocation.
1891 */
07806695 1892void *
e4ef2330 1893scm_must_malloc (scm_sizet size, const char *what)
0f2d19dd 1894{
07806695 1895 void *ptr;
15e9d186 1896 unsigned long nm = scm_mallocated + size;
e4ef2330
MD
1897
1898 if (nm <= scm_mtrigger)
0f2d19dd 1899 {
07806695 1900 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1901 if (NULL != ptr)
1902 {
1903 scm_mallocated = nm;
bc9d9bb2
MD
1904#ifdef GUILE_DEBUG_MALLOC
1905 scm_malloc_register (ptr, what);
1906#endif
0f2d19dd
JB
1907 return ptr;
1908 }
1909 }
6064dcc6 1910
0f2d19dd 1911 scm_igc (what);
e4ef2330 1912
0f2d19dd 1913 nm = scm_mallocated + size;
07806695 1914 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
1915 if (NULL != ptr)
1916 {
1917 scm_mallocated = nm;
6064dcc6
MV
1918 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1919 if (nm > scm_mtrigger)
1920 scm_mtrigger = nm + nm / 2;
1921 else
1922 scm_mtrigger += scm_mtrigger / 2;
1923 }
bc9d9bb2
MD
1924#ifdef GUILE_DEBUG_MALLOC
1925 scm_malloc_register (ptr, what);
1926#endif
1927
0f2d19dd
JB
1928 return ptr;
1929 }
e4ef2330
MD
1930
1931 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1932 return 0; /* never reached */
0f2d19dd
JB
1933}
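/* Illustrative sketch of typical use: allocating the malloced part of a
 * Scheme object with scm_must_malloc and releasing it with scm_must_free.
 * The struct and field names here are hypothetical.
 */
#if 0
{
  struct image { int width, height; unsigned char *pixels; } *img;

  img = (struct image *) scm_must_malloc (sizeof (struct image), "image");
  img->width = 64;
  img->height = 64;
  img->pixels = (unsigned char *) scm_must_malloc ((scm_sizet) (64 * 64),
                                                   "image pixels");
  /* ... later, e.g. from the object's free function ... */
  scm_must_free ((void *) img->pixels);
  scm_must_free ((void *) img);
}
#endif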
1934
1935
1936/* scm_must_realloc
1937 * is similar to scm_must_malloc.
1938 */
07806695
JB
1939void *
1940scm_must_realloc (void *where,
e4ef2330
MD
1941 scm_sizet old_size,
1942 scm_sizet size,
3eeba8d4 1943 const char *what)
0f2d19dd 1944{
07806695 1945 void *ptr;
e4ef2330
MD
1946 scm_sizet nm = scm_mallocated + size - old_size;
1947
1948 if (nm <= scm_mtrigger)
0f2d19dd 1949 {
07806695 1950 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1951 if (NULL != ptr)
1952 {
1953 scm_mallocated = nm;
bc9d9bb2
MD
1954#ifdef GUILE_DEBUG_MALLOC
1955 scm_malloc_reregister (where, ptr, what);
1956#endif
0f2d19dd
JB
1957 return ptr;
1958 }
1959 }
e4ef2330 1960
0f2d19dd 1961 scm_igc (what);
e4ef2330
MD
1962
1963 nm = scm_mallocated + size - old_size;
07806695 1964 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
1965 if (NULL != ptr)
1966 {
1967 scm_mallocated = nm;
6064dcc6
MV
1968 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1969 if (nm > scm_mtrigger)
1970 scm_mtrigger = nm + nm / 2;
1971 else
1972 scm_mtrigger += scm_mtrigger / 2;
1973 }
bc9d9bb2
MD
1974#ifdef GUILE_DEBUG_MALLOC
1975 scm_malloc_reregister (where, ptr, what);
1976#endif
0f2d19dd
JB
1977 return ptr;
1978 }
e4ef2330
MD
1979
1980 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1981 return 0; /* never reached */
0f2d19dd
JB
1982}
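/* Illustrative sketch: growing a buffer with scm_must_realloc.  Unlike
 * plain realloc, the old size must be supplied so that scm_mallocated
 * stays accurate.  The variable names are hypothetical.
 */
#if 0
{
  scm_sizet old_len = 256, new_len = 512;
  char *buf = (char *) scm_must_malloc (old_len, "buffer");

  buf = (char *) scm_must_realloc ((void *) buf, old_len, new_len, "buffer");
  /* ... */
  scm_must_free ((void *) buf);
}
#endif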
1983
a00c95d9 1984void
07806695 1985scm_must_free (void *obj)
0f2d19dd 1986{
bc9d9bb2
MD
1987#ifdef GUILE_DEBUG_MALLOC
1988 scm_malloc_unregister (obj);
1989#endif
0f2d19dd
JB
1990 if (obj)
1991 free (obj);
1992 else
1993 scm_wta (SCM_INUM0, "already free", "");
1994}
0f2d19dd 1995
c68296f8
MV
1996/* Announce that there has been some malloc done that will be freed
1997 * during gc. A typical use is for a smob that uses some malloced
 1998 * memory but cannot get it from scm_must_malloc (for whatever
1999 * reason). When a new object of this smob is created you call
2000 * scm_done_malloc with the size of the object. When your smob free
2001 * function is called, be sure to include this size in the return
2002 * value. */
0f2d19dd 2003
c68296f8 2004void
6e8d25a6 2005scm_done_malloc (long size)
c68296f8
MV
2006{
2007 scm_mallocated += size;
2008
2009 if (scm_mallocated > scm_mtrigger)
2010 {
2011 scm_igc ("foreign mallocs");
2012 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2013 {
2014 if (scm_mallocated > scm_mtrigger)
2015 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2016 else
2017 scm_mtrigger += scm_mtrigger / 2;
2018 }
2019 }
2020}
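/* Illustrative sketch of the usage described in the comment above: memory
 * obtained from plain malloc is reported with scm_done_malloc, and the
 * corresponding smob free function later returns the same size so that
 * gc_sweep subtracts it from scm_mallocated again.  The size 1024 is an
 * arbitrary example value.
 */
#if 0
{
  char *data = (char *) malloc (1024);   /* not via scm_must_malloc */
  if (data != NULL)
    scm_done_malloc (1024L);             /* tell the collector about it */
  /* ... and the smob's free function would end with `return 1024;'. */
}
#endif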
2021
2022
2023\f
0f2d19dd
JB
2024
2025/* {Heap Segments}
2026 *
2027 * Each heap segment is an array of objects of a particular size.
2028 * Every segment has an associated (possibly shared) freelist.
2029 * A table of segment records is kept that records the upper and
2030 * lower extents of the segment; this is used during the conservative
 2031 * phase of gc to identify probable gc roots (because they point
c68296f8 2032 * into valid segments at reasonable offsets). */
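/* Illustrative sketch of the test implied above: a word is only treated as
 * a possible root if it points between the recorded bounds of some
 * segment.  The real marking code additionally checks cell alignment and
 * the segment's span, which this sketch omits.
 */
#if 0
static int
points_into_some_segment (SCM_CELLPTR ptr)
{
  int i;
  for (i = 0; i < scm_n_heap_segs; i++)
    if (scm_heap_table[i].bounds[0] <= ptr
        && ptr < scm_heap_table[i].bounds[1])
      return 1;
  return 0;
}
#endif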
0f2d19dd
JB
2033
2034/* scm_expmem
2035 * is true if the first segment was smaller than INIT_HEAP_SEG.
2036 * If scm_expmem is set to one, subsequent segment allocations will
2037 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2038 */
2039int scm_expmem = 0;
2040
4c48ba06
MD
2041scm_sizet scm_max_segment_size;
2042
0f2d19dd
JB
2043/* scm_heap_org
2044 * is the lowest base address of any heap segment.
2045 */
2046SCM_CELLPTR scm_heap_org;
2047
a00c95d9 2048scm_heap_seg_data_t * scm_heap_table = 0;
0f2d19dd
JB
2049int scm_n_heap_segs = 0;
2050
0f2d19dd
JB
2051/* init_heap_seg
 2052 * initializes a new heap segment and returns the number of objects it contains.
2053 *
2054 * The segment origin, segment size in bytes, and the span of objects
2055 * in cells are input parameters. The freelist is both input and output.
2056 *
 2057 * This function presumes that the scm_heap_table has already been expanded
 2058 * to accommodate a new segment record.
2059 */
2060
2061
a00c95d9 2062static scm_sizet
4c48ba06 2063init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
0f2d19dd
JB
2064{
2065 register SCM_CELLPTR ptr;
0f2d19dd 2066 SCM_CELLPTR seg_end;
15e9d186 2067 int new_seg_index;
acb0a19c 2068 int n_new_cells;
4c48ba06 2069 int span = freelist->span;
a00c95d9 2070
0f2d19dd
JB
2071 if (seg_org == NULL)
2072 return 0;
2073
a00c95d9 2074 ptr = CELL_UP (seg_org, span);
acb0a19c 2075
a00c95d9 2076 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 2077 */
a00c95d9 2078 seg_end = CELL_DN ((char *) seg_org + size, span);
0f2d19dd 2079
a00c95d9 2080 /* Find the right place and insert the segment record.
0f2d19dd
JB
2081 *
2082 */
2083 for (new_seg_index = 0;
2084 ( (new_seg_index < scm_n_heap_segs)
2085 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2086 new_seg_index++)
2087 ;
2088
2089 {
2090 int i;
2091 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2092 scm_heap_table[i] = scm_heap_table[i - 1];
2093 }
a00c95d9 2094
0f2d19dd
JB
2095 ++scm_n_heap_segs;
2096
2097 scm_heap_table[new_seg_index].valid = 0;
945fec60 2098 scm_heap_table[new_seg_index].span = span;
4c48ba06 2099 scm_heap_table[new_seg_index].freelist = freelist;
195e6201
DH
2100 scm_heap_table[new_seg_index].bounds[0] = ptr;
2101 scm_heap_table[new_seg_index].bounds[1] = seg_end;
0f2d19dd
JB
2102
2103
a00c95d9 2104 /* Compute the least valid object pointer w/in this segment
0f2d19dd 2105 */
a00c95d9 2106 ptr = CELL_UP (ptr, span);
0f2d19dd
JB
2107
2108
acb0a19c
MD
2109 /*n_new_cells*/
2110 n_new_cells = seg_end - ptr;
0f2d19dd 2111
4a4c9785
MD
2112#ifdef GUILE_NEW_GC_SCHEME
2113
4c48ba06 2114 freelist->heap_size += n_new_cells;
4a4c9785 2115
a00c95d9 2116 /* Partition objects in this segment into clusters */
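  /* Sketch of the layout this loop builds (illustrative only):
   *
   *   freelist->clusters
   *        |
   *        v
   *     [spine]--cdr-->[spine]--cdr--> ... -->(previously built clusters)
   *        |              |
   *       car            car
   *        v              v
   *      cell--cdr-->...-->SCM_EOL     each chain holds cluster_size - 1
   *                                    free cells of width `span'
   */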
4a4c9785
MD
2117 {
2118 SCM clusters;
2119 SCM *clusterp = &clusters;
4c48ba06 2120 int n_cluster_cells = span * freelist->cluster_size;
4a4c9785 2121
4c48ba06 2122 while (n_new_cells > span) /* at least one spine + one freecell */
4a4c9785 2123 {
4c48ba06
MD
2124 /* Determine end of cluster
2125 */
2126 if (n_new_cells >= n_cluster_cells)
2127 {
2128 seg_end = ptr + n_cluster_cells;
2129 n_new_cells -= n_cluster_cells;
2130 }
4a4c9785 2131 else
a00c95d9
ML
2132 /* [cmm] looks like the segment size doesn't divide cleanly by
2133 cluster size. bad cmm! */
2134 abort();
4a4c9785 2135
4c48ba06
MD
2136 /* Allocate cluster spine
2137 */
4a4c9785
MD
2138 *clusterp = PTR2SCM (ptr);
2139 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
2140 clusterp = SCM_CDRLOC (*clusterp);
4a4c9785 2141 ptr += span;
a00c95d9 2142
4a4c9785
MD
2143 while (ptr < seg_end)
2144 {
96f6f4ae
DH
2145 SCM scmptr = PTR2SCM (ptr);
2146
54778cd3 2147 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
2148 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2149 ptr += span;
2150 }
4c48ba06 2151
4a4c9785
MD
2152 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
2153 }
a00c95d9 2154
4a4c9785
MD
2155 /* Patch up the last cluster pointer in the segment
2156 * to join it to the input freelist.
2157 */
4c48ba06
MD
2158 *clusterp = freelist->clusters;
2159 freelist->clusters = clusters;
4a4c9785
MD
2160 }
2161
2162#else /* GUILE_NEW_GC_SCHEME */
2163
a00c95d9 2164 /* Prepend objects in this segment to the freelist.
0f2d19dd
JB
2165 */
2166 while (ptr < seg_end)
2167 {
96f6f4ae
DH
2168 SCM scmptr = PTR2SCM (ptr);
2169
24e68a57 2170 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
945fec60
MD
2171 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2172 ptr += span;
0f2d19dd
JB
2173 }
2174
945fec60 2175 ptr -= span;
0f2d19dd
JB
2176
2177 /* Patch up the last freelist pointer in the segment
2178 * to join it to the input freelist.
2179 */
4c48ba06 2180 SCM_SETCDR (PTR2SCM (ptr), freelist->cells);
a00c95d9 2181 freelist->cells = PTR2SCM (CELL_UP (seg_org, span));
4c48ba06
MD
2182
2183 freelist->heap_size += n_new_cells;
0f2d19dd 2184
4a4c9785 2185#endif /* GUILE_NEW_GC_SCHEME */
4c48ba06
MD
2186
2187#ifdef DEBUGINFO
2188 fprintf (stderr, "H");
2189#endif
0f2d19dd 2190 return size;
0f2d19dd
JB
2191}
2192
a00c95d9
ML
2193#ifndef GUILE_NEW_GC_SCHEME
2194#define round_to_cluster_size(freelist, len) len
2195#else
2196
2197static scm_sizet
2198round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2199{
2200 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2201
2202 return
2203 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2204 + ALIGNMENT_SLACK (freelist);
2205}
2206
2207#endif
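/* Worked example with illustrative numbers: if CLUSTER_SIZE_IN_BYTES
 * (freelist) were 4096 and len were 10000, the expression above yields
 * (10000 + 4095) / 4096 * 4096 = 12288 bytes, plus ALIGNMENT_SLACK
 * (freelist) of extra room so the first cell can still be aligned.
 */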
0f2d19dd 2208
a00c95d9 2209static void
4c48ba06 2210alloc_some_heap (scm_freelist_t *freelist)
0f2d19dd 2211{
a00c95d9 2212 scm_heap_seg_data_t * tmptable;
0f2d19dd 2213 SCM_CELLPTR ptr;
b37fe1c5 2214 long len;
a00c95d9 2215
0f2d19dd
JB
2216 /* Critical code sections (such as the garbage collector)
2217 * aren't supposed to add heap segments.
2218 */
2219 if (scm_gc_heap_lock)
2220 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
2221
2222 /* Expand the heap tables to have room for the new segment.
2223 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
2224 * only if the allocation of the segment itself succeeds.
2225 */
a00c95d9 2226 len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
0f2d19dd 2227
a00c95d9 2228 SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
0f2d19dd
JB
2229 realloc ((char *)scm_heap_table, len)));
2230 if (!tmptable)
2231 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
2232 else
2233 scm_heap_table = tmptable;
2234
2235
2236 /* Pick a size for the new heap segment.
a00c95d9 2237 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
2238 * gc.h
2239 */
4c48ba06
MD
2240#ifdef GUILE_NEW_GC_SCHEME
2241 {
1811ebce
MD
 2242 /* Ensure that the new segment is predicted to be large enough.
2243 *
2244 * New yield should at least equal GC fraction of new heap size, i.e.
2245 *
2246 * y + dh > f * (h + dh)
2247 *
2248 * y : yield
8fef55a8 2249 * f : min yield fraction
1811ebce
MD
2250 * h : heap size
2251 * dh : size of new heap segment
2252 *
2253 * This gives dh > (f * h - y) / (1 - f)
bda1446c 2254 */
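    /* With f expressed as a percentage, the bound above becomes
     *
     *    dh > (f * h - 100 * y) / (100 - f)
     *
     * For example (illustrative numbers only), f = 40, h = 400000 cells
     * and y = 100000 cells give dh > (40 * 400000 - 100 * 100000) / 60,
     * i.e. at least 100000 new cells.  The computation below divides by
     * (99 - f), erring slightly towards a larger segment.
     */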
8fef55a8 2255 int f = freelist->min_yield_fraction;
1811ebce
MD
2256 long h = SCM_HEAP_SIZE;
2257 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2258 len = SCM_EXPHEAP (freelist->heap_size);
2259#ifdef DEBUGINFO
2260 fprintf (stderr, "(%d < %d)", len, min_cells);
2261#endif
2262 if (len < min_cells)
1811ebce 2263 len = min_cells + freelist->cluster_size;
4c48ba06 2264 len *= sizeof (scm_cell);
1811ebce
MD
2265 /* force new sampling */
2266 freelist->collected = LONG_MAX;
4c48ba06 2267 }
a00c95d9 2268
4c48ba06
MD
2269 if (len > scm_max_segment_size)
2270 len = scm_max_segment_size;
2271#else
0f2d19dd
JB
2272 if (scm_expmem)
2273 {
4c48ba06
MD
2274 len = (scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell));
2275 if ((scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell))
945fec60 2276 != len)
0f2d19dd
JB
2277 len = 0;
2278 }
2279 else
2280 len = SCM_HEAP_SEG_SIZE;
4c48ba06 2281#endif /* GUILE_NEW_GC_SCHEME */
0f2d19dd
JB
2282
2283 {
2284 scm_sizet smallest;
2285
a00c95d9 2286#ifndef GUILE_NEW_GC_SCHEME
4c48ba06 2287 smallest = (freelist->span * sizeof (scm_cell));
a00c95d9
ML
2288#else
2289 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2290#endif
2291
0f2d19dd 2292 if (len < smallest)
a00c95d9 2293 len = smallest;
0f2d19dd
JB
2294
2295 /* Allocate with decaying ambition. */
2296 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2297 && (len >= smallest))
2298 {
1811ebce 2299 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2300 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2301 if (ptr)
2302 {
a00c95d9 2303 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2304 return;
2305 }
2306 len /= 2;
2307 }
2308 }
2309
2310 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
2311}
2312
2313
a00c95d9 2314SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
1bbd0b84 2315 (SCM name),
b380b885 2316 "")
1bbd0b84 2317#define FUNC_NAME s_scm_unhash_name
0f2d19dd
JB
2318{
2319 int x;
2320 int bound;
3b3b36dd 2321 SCM_VALIDATE_SYMBOL (1,name);
0f2d19dd
JB
2322 SCM_DEFER_INTS;
2323 bound = scm_n_heap_segs;
2324 for (x = 0; x < bound; ++x)
2325 {
2326 SCM_CELLPTR p;
2327 SCM_CELLPTR pbound;
195e6201
DH
2328 p = scm_heap_table[x].bounds[0];
2329 pbound = scm_heap_table[x].bounds[1];
0f2d19dd
JB
2330 while (p < pbound)
2331 {
c8045e8d
DH
2332 SCM cell = PTR2SCM (p);
2333 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
0f2d19dd 2334 {
c8045e8d
DH
2335 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2336 * struct cell. See the corresponding comment in scm_gc_mark.
2337 */
2338 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2339 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2340 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
2341 if ((SCM_TRUE_P (name) || SCM_EQ_P (SCM_CAR (gloc_car), name))
2342 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
0f2d19dd 2343 {
c8045e8d 2344 SCM_SET_CELL_OBJECT_0 (cell, name);
0f2d19dd
JB
2345 }
2346 }
2347 ++p;
2348 }
2349 }
2350 SCM_ALLOW_INTS;
2351 return name;
2352}
1bbd0b84 2353#undef FUNC_NAME
0f2d19dd
JB
2354
2355
2356\f
2357/* {GC Protection Helper Functions}
2358 */
2359
2360
0f2d19dd 2361void
6e8d25a6
GB
2362scm_remember (SCM *ptr)
2363{ /* empty */ }
0f2d19dd 2364
1cc91f1b 2365
c209c88e 2366/*
41b0806d
GB
2367 These crazy functions prevent garbage collection
2368 of arguments after the first argument by
2369 ensuring they remain live throughout the
2370 function because they are used in the last
2371 line of the code block.
2372 It'd be better to have a nice compiler hint to
2373 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2374SCM
2375scm_return_first (SCM elt, ...)
0f2d19dd
JB
2376{
2377 return elt;
2378}
2379
41b0806d
GB
2380int
2381scm_return_first_int (int i, ...)
2382{
2383 return i;
2384}
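/* Illustrative sketch: keeping an argument alive to the end of a C
 * function that only uses its malloced storage.  `do_something' is a
 * hypothetical helper.
 */
#if 0
static SCM
use_chars (SCM str)
{
  char *chars = SCM_CHARS (str);

  do_something (chars);
  /* Mentioning str in the final expression keeps the conservative
     collector from reclaiming it while `chars' is still in use.  */
  return scm_return_first (SCM_BOOL_T, str);
}
#endif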
2385
0f2d19dd 2386
0f2d19dd 2387SCM
6e8d25a6 2388scm_permanent_object (SCM obj)
0f2d19dd
JB
2389{
2390 SCM_REDEFER_INTS;
2391 scm_permobjs = scm_cons (obj, scm_permobjs);
2392 SCM_REALLOW_INTS;
2393 return obj;
2394}
2395
2396
ef290276
JB
2397/* Protect OBJ from the garbage collector. OBJ will not be freed,
2398 even if all other references are dropped, until someone applies
2399 scm_unprotect_object to it. This function returns OBJ.
2400
c209c88e
GB
2401 Calls to scm_protect_object nest. For every object OBJ, there is a
2402 counter which scm_protect_object(OBJ) increments and
2403 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
dab7f566
JB
2404 an object's counter is greater than zero, the garbage collector
2405 will not free it.
2406
2407 Of course, that's not how it's implemented. scm_protect_object and
2408 scm_unprotect_object just maintain a list of references to things.
2409 Since the GC knows about this list, all objects it mentions stay
2410 alive. scm_protect_object adds its argument to the list;
2411 scm_unprotect_object removes the first occurrence of its argument
 2412 from the list. */
ef290276 2413SCM
6e8d25a6 2414scm_protect_object (SCM obj)
ef290276 2415{
ef290276
JB
2416 scm_protects = scm_cons (obj, scm_protects);
2417
2418 return obj;
2419}
2420
2421
2422/* Remove any protection for OBJ established by a prior call to
dab7f566 2423 scm_protect_object. This function returns OBJ.
ef290276 2424
dab7f566 2425 See scm_protect_object for more information. */
ef290276 2426SCM
6e8d25a6 2427scm_unprotect_object (SCM obj)
ef290276 2428{
dab7f566
JB
2429 SCM *tail_ptr = &scm_protects;
2430
0c95b57d 2431 while (SCM_CONSP (*tail_ptr))
c8045e8d 2432 if (SCM_EQ_P (SCM_CAR (*tail_ptr), obj))
dab7f566
JB
2433 {
2434 *tail_ptr = SCM_CDR (*tail_ptr);
2435 break;
2436 }
2437 else
2438 tail_ptr = SCM_CDRLOC (*tail_ptr);
ef290276
JB
2439
2440 return obj;
2441}
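/* Illustrative sketch of the protection scheme described above: an object
 * reachable only from C data is protected while registered and unprotected
 * when the C reference is dropped.  `registered_callback' is hypothetical.
 */
#if 0
static SCM registered_callback;

void
register_callback (SCM proc)
{
  registered_callback = scm_protect_object (proc);
}

void
unregister_callback (void)
{
  scm_unprotect_object (registered_callback);
  registered_callback = SCM_BOOL_F;
}
#endif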
2442
c45acc34
JB
2443int terminating;
2444
2445/* called on process termination. */
e52ceaac
MD
2446#ifdef HAVE_ATEXIT
2447static void
2448cleanup (void)
2449#else
2450#ifdef HAVE_ON_EXIT
51157deb
MD
2451extern int on_exit (void (*procp) (), int arg);
2452
e52ceaac
MD
2453static void
2454cleanup (int status, void *arg)
2455#else
2456#error Dont know how to setup a cleanup handler on your system.
2457#endif
2458#endif
c45acc34
JB
2459{
2460 terminating = 1;
2461 scm_flush_all_ports ();
2462}
ef290276 2463
0f2d19dd 2464\f
acb0a19c 2465static int
4c48ba06 2466make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
acb0a19c 2467{
a00c95d9
ML
2468 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2469 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2470 rounded_size,
4c48ba06 2471 freelist))
acb0a19c 2472 {
a00c95d9
ML
2473 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2474 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2475 rounded_size,
4c48ba06 2476 freelist))
acb0a19c
MD
2477 return 1;
2478 }
2479 else
2480 scm_expmem = 1;
2481
b37fe1c5 2482#ifdef GUILE_NEW_GC_SCHEME
8fef55a8
MD
2483 if (freelist->min_yield_fraction)
2484 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
b37fe1c5 2485 / 100);
8fef55a8 2486 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
b37fe1c5 2487#endif
a00c95d9 2488
acb0a19c
MD
2489 return 0;
2490}
2491
2492\f
4a4c9785 2493#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
2494static void
2495init_freelist (scm_freelist_t *freelist,
2496 int span,
2497 int cluster_size,
8fef55a8 2498 int min_yield)
4c48ba06
MD
2499{
2500 freelist->clusters = SCM_EOL;
2501 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2502 freelist->left_to_collect = 0;
2503 freelist->clusters_allocated = 0;
8fef55a8
MD
2504 freelist->min_yield = 0;
2505 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2506 freelist->span = span;
2507 freelist->collected = 0;
1811ebce 2508 freelist->collected_1 = 0;
4c48ba06
MD
2509 freelist->heap_size = 0;
2510}
2511
4a4c9785 2512int
4c48ba06
MD
2513scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2514 scm_sizet init_heap_size_2, int gc_trigger_2,
2515 scm_sizet max_segment_size)
4a4c9785 2516#else
0f2d19dd 2517int
b37fe1c5 2518scm_init_storage (scm_sizet init_heap_size_1, scm_sizet init_heap_size_2)
4a4c9785 2519#endif
0f2d19dd
JB
2520{
2521 scm_sizet j;
2522
4c48ba06
MD
2523 if (!init_heap_size_1)
2524 init_heap_size_1 = SCM_INIT_HEAP_SIZE_1;
2525 if (!init_heap_size_2)
2526 init_heap_size_2 = SCM_INIT_HEAP_SIZE_2;
2527
0f2d19dd
JB
2528 j = SCM_NUM_PROTECTS;
2529 while (j)
2530 scm_sys_protects[--j] = SCM_BOOL_F;
2531 scm_block_gc = 1;
4a4c9785
MD
2532
2533#ifdef GUILE_NEW_GC_SCHEME
2534 scm_freelist = SCM_EOL;
4c48ba06
MD
2535 scm_freelist2 = SCM_EOL;
2536 init_freelist (&scm_master_freelist,
2537 1, SCM_CLUSTER_SIZE_1,
8fef55a8 2538 gc_trigger_1 ? gc_trigger_1 : SCM_MIN_YIELD_1);
4c48ba06
MD
2539 init_freelist (&scm_master_freelist2,
2540 2, SCM_CLUSTER_SIZE_2,
8fef55a8 2541 gc_trigger_2 ? gc_trigger_2 : SCM_MIN_YIELD_2);
4c48ba06
MD
2542 scm_max_segment_size
2543 = max_segment_size ? max_segment_size : SCM_MAX_SEGMENT_SIZE;
4a4c9785 2544#else
945fec60
MD
2545 scm_freelist.cells = SCM_EOL;
2546 scm_freelist.span = 1;
2547 scm_freelist.collected = 0;
2548 scm_freelist.heap_size = 0;
4a4c9785 2549
945fec60
MD
2550 scm_freelist2.cells = SCM_EOL;
2551 scm_freelist2.span = 2;
2552 scm_freelist2.collected = 0;
2553 scm_freelist2.heap_size = 0;
4a4c9785
MD
2554#endif
2555
0f2d19dd
JB
2556 scm_expmem = 0;
2557
2558 j = SCM_HEAP_SEG_SIZE;
2559 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
a00c95d9
ML
2560 scm_heap_table = ((scm_heap_seg_data_t *)
2561 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
acb0a19c 2562
4a4c9785 2563#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
2564 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2565 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
4a4c9785
MD
2566 return 1;
2567#else
4c48ba06
MD
2568 if (make_initial_segment (init_heap_size_1, &scm_freelist) ||
2569 make_initial_segment (init_heap_size_2, &scm_freelist2))
acb0a19c 2570 return 1;
4a4c9785 2571#endif
acb0a19c 2572
801cb5e7 2573 /* scm_hplims[0] can change. do not remove scm_heap_org */
a00c95d9 2574 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
acb0a19c 2575
801cb5e7
MD
2576 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2577 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2578 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2579 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2580 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
0f2d19dd
JB
2581
2582 /* Initialise the list of ports. */
840ae05d
JB
2583 scm_port_table = (scm_port **)
2584 malloc (sizeof (scm_port *) * scm_port_table_room);
0f2d19dd
JB
2585 if (!scm_port_table)
2586 return 1;
2587
a18bcd0e 2588#ifdef HAVE_ATEXIT
c45acc34 2589 atexit (cleanup);
e52ceaac
MD
2590#else
2591#ifdef HAVE_ON_EXIT
2592 on_exit (cleanup, 0);
2593#endif
a18bcd0e 2594#endif
0f2d19dd
JB
2595
2596 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
24e68a57 2597 SCM_SETCDR (scm_undefineds, scm_undefineds);
0f2d19dd
JB
2598
2599 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2600 scm_nullstr = scm_makstr (0L, 0);
a8741caa 2601 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
54778cd3
DH
2602 scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2603 scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
2604 scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
8960e0a0 2605 scm_stand_in_procs = SCM_EOL;
0f2d19dd 2606 scm_permobjs = SCM_EOL;
ef290276 2607 scm_protects = SCM_EOL;
3b2b8760 2608 scm_asyncs = SCM_EOL;
54778cd3
DH
2609 scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2610 scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
0f2d19dd
JB
2611#ifdef SCM_BIGDIG
2612 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2613#endif
2614 return 0;
2615}
2616\f
2617
0f2d19dd
JB
2618void
2619scm_init_gc ()
0f2d19dd 2620{
801cb5e7 2621 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
a0599745 2622#include "libguile/gc.x"
0f2d19dd 2623}
89e00824
ML
2624
2625/*
2626 Local Variables:
2627 c-file-style: "gnu"
2628 End:
2629*/