/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice. */

/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
   gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */

/* #define DEBUGINFO */

\f
#include <stdio.h>
#include "libguile/_scm.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/weaks.h"
#include "libguile/guardians.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"

#include "libguile/validate.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f
/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which scm_cons, and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of heap.  If this much heap is
 * allocated initially the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type scm_sizet.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more heap
 * is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * Heap size 45000 and 40% min yield gives quick startup and no extra
 * heap allocation.  Having higher values on min yield may lead to
 * large heaps, especially if code behaviour is varying its
 * maximum consumption between different freelists.
 */
#define SCM_INIT_HEAP_SIZE_1 (45000L * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_1 2000L
#define SCM_MIN_YIELD_1 40

#define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_2 1000L
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application
 */
#define SCM_MIN_YIELD_2 40

#define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)

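/* For illustration: with SCM_EXPHEAP defined as scm_heap_size / 2, a heap
 * of 45000 cells asks for another 22500 cells when it needs to grow, i.e.
 * it grows by a factor of 1.5.  Likewise, SCM_MTRIGGER_HYSTERESIS works
 * out to 10000 bytes here: a malloc-triggered GC that reclaims less than
 * that raises the malloc trigger threshold instead of leaving it alone.
 */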
/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
   bounds for allocated storage */

#ifdef PROT386
/*in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
#ifdef GUILE_NEW_GC_SCHEME
#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#else
#define SCM_HEAP_SIZE (scm_freelist.heap_size + scm_freelist2.heap_size)
#endif
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

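/* For illustration: on a 32 bit machine where sizeof (scm_cell) is 8,
 * CELL_UP (p, 1) rounds an address up to the next 8-byte boundary and
 * CELL_DN (p, 1) rounds it down, so 0x1003 becomes 0x1008 and 0x1000
 * respectively.  With span == 2 (double cells) the same masks operate
 * on 16-byte boundaries.
 */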
\f
/* scm_freelists
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
#ifdef GUILE_NEW_GC_SCHEME
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
#endif
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;

#ifdef GUILE_NEW_GC_SCHEME
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
#else
scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
#endif

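/* A rough sketch of how the GUILE_NEW_GC_SCHEME fields hang together:
 * `clusters' is a list of spine cells, each spine holding one cluster;
 * the CAR of a spine chains that cluster's free cells and its CDR links
 * to the next spine.  `clustertail' points at the CDR slot of the last
 * spine so the sweep phase can append a finished cluster in constant
 * time.  scm_gc_for_newcell detaches the first cluster's chain and hands
 * it out through the SCM-level scm_freelist / scm_freelist2 variables,
 * which serve as the (eventually per-thread) local freelists.
 */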
/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;


/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */

int scm_block_gc = 1;

/* If fewer than MIN_GC_YIELD cells are recovered during a garbage
 * collection (GC) more space is allocated for the heap.
 */
#define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
#ifdef GUILE_NEW_GC_SCHEME
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
#endif
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_rt;
unsigned long scm_gc_time_taken = 0;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");

typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of SCM words per object in this segment */
  int span;

  /* If SEG_DATA->valid is non-zero, the conservative marking
     functions will apply SEG_DATA->valid to the purported pointer and
     SEG_DATA, and mark the object iff the function returns non-zero.
     At the moment, I don't think anyone uses this. */
  int (*valid) ();
} scm_heap_seg_data_t;



static void scm_mark_weak_vector_spines (void);
static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
static void alloc_some_heap (scm_freelist_t *);


\f
/* Debugging functions.  */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

/* Return the number of the heap segment containing CELL. */
static int
which_seg (SCM cell)
{
  int i;

  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
      return i;
  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
           SCM_UNPACK (cell));
  abort ();
}


#ifdef GUILE_NEW_GC_SCHEME
static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, "  %5d %d-cells in segment %d\n",
                     count, master->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, "  %5d %d-cells in segment %d\n",
             count, master->span, last_seg);
}
#else
static void
map_free_list (scm_freelist_t *freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, "  %5d %d-cells in segment %d\n",
                     count, freelist->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, "  %5d %d-cells in segment %d\n",
             count, freelist->span, last_seg);
}
#endif

SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;
  fprintf (stderr, "%d segments total (%d:%d",
           scm_n_heap_segs,
           scm_heap_table[0].span,
           scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
             scm_heap_table[i].span,
             scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
#ifdef GUILE_NEW_GC_SCHEME
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
#else
  map_free_list (&scm_freelist);
  map_free_list (&scm_freelist2);
#endif
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#ifdef GUILE_NEW_GC_SCHEME
static int last_cluster;
static int last_size;

static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
    if (SCM_UNPACK_CAR (ls) == scm_tc_free_cell)
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %d\n", title, n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%d\n", last_size);
          else
            fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %d", title, i);
      else
        fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}

static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}

SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
#endif

#endif

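/* In a build configured with --enable-guile-debug the procedures above can
 * be called from the REPL to inspect the allocator, for example
 * (illustrative session):
 *
 *   guile> (map-free-list)      ; free cells per heap segment
 *   guile> (free-list-length)   ; per-cluster free list lengths
 *
 * Both print to stderr and return an unspecified value.
 */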
#ifdef GUILE_DEBUG_FREELIST

/* Number of calls to SCM_NEWCELL since startup.  */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  */
#ifdef GUILE_NEW_GC_SCHEME
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
    if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        fflush (stderr);
        abort ();
      }
}
#else
static void
scm_check_freelist (scm_freelist_t *freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
    if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        fflush (stderr);
        abort ();
      }
}
#endif

static int scm_debug_check_freelist = 0;

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
            "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
            "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


#ifdef GUILE_NEW_GC_SCHEME

SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_CDR (scm_freelist);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_CDR (scm_freelist2);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}

#else /* GUILE_NEW_GC_SCHEME */

SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (&scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist.cells))
    new = scm_gc_for_newcell (&scm_freelist);
  else
    {
      new = scm_freelist.cells;
      scm_freelist.cells = SCM_CDR (scm_freelist.cells);
      SCM_SETCAR (new, scm_tc16_allocated);
      ++scm_cells_allocated;
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist) {
    scm_check_freelist (&scm_freelist2);
    scm_gc();
  }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL2
     macro.  */
  if (SCM_IMP (scm_freelist2.cells))
    new = scm_gc_for_newcell (&scm_freelist2);
  else
    {
      new = scm_freelist2.cells;
      scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
      SCM_SETCAR (new, scm_tc16_allocated);
      scm_cells_allocated += 2;
    }

  return new;
}

#endif /* GUILE_NEW_GC_SCHEME */
#endif /* GUILE_DEBUG_FREELIST */

\f

#ifdef GUILE_NEW_GC_SCHEME
static unsigned long
master_cells_allocated (scm_freelist_t *master)
{
  int objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}

static unsigned long
freelist_length (SCM freelist)
{
  int n;
  for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
    ++n;
  return n;
}

static unsigned long
compute_cells_allocated ()
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}
#endif

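/* For illustration, the arithmetic above: every cluster handed out by
 * scm_gc_for_newcell accounts for cluster_size - 1 usable cells (the
 * spine cell is not handed out), scaled by span because a 2-cell object
 * occupies two heap cells.  compute_cells_allocated then subtracts
 * span * freelist_length (...) for cells still sitting unused on the
 * local scm_freelist / scm_freelist2 chains, so the figure reported by
 * gc-stats reflects cells actually in use rather than cells merely
 * detached from the master freelists.
 */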
/* {Scheme Interface to GC}
 */

SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Returns an association list of statistics about Guile's current use of storage. ")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  SCM answer;

  SCM_DEFER_INTS;
  scm_block_gc = 1;
 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;
  scm_block_gc = 0;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
#ifdef GUILE_NEW_GC_SCHEME
  local_scm_cells_allocated = compute_cells_allocated ();
#else
  local_scm_cells_allocated = scm_cells_allocated;
#endif
  local_scm_gc_time_taken = scm_gc_time_taken;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                        scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                        scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                        scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                        scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                        scm_cons (sym_heap_segments, heap_segs),
                        SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME

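/* An illustrative (not verbatim) gc-stats result, using the keys bound by
 * the SCM_SYMBOL definitions above; the numbers are made up, the shape
 * follows the scm_listify call in scm_gc_stats:
 *
 *   guile> (gc-stats)
 *   ((gc-time-taken . 13) (cells-allocated . 155721)
 *    (cell-heap-size . 180000) (bytes-malloced . 136226)
 *    (gc-malloc-threshold . 100000)
 *    (cell-heap-segments (<upper> . <lower>) ...))
 */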
void
scm_gc_start (const char *what)
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
  scm_gc_cells_collected = 0;
#ifdef GUILE_NEW_GC_SCHEME
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
#endif
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}

void
scm_gc_end ()
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
  scm_gc_time_taken += scm_gc_rt;
  scm_system_async_mark (scm_gc_async);
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

\f
/* {C Interface For When GC is Triggered}
 */

#ifdef GUILE_NEW_GC_SCHEME

static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   *  by computing an average over multiple GC:s.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
               scm_cells_allocated,
               delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}

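/* A made-up example of the adjustment above: with a total heap of 100000
 * cells and min_yield_fraction == 40, the target yield is 40000 cells.
 * If the better of the last two yields (SCM_MAX (scm_gc_yield_1,
 * scm_gc_yield)) was only 30000 cells, delta is 10000 and min_yield is
 * raised by that amount; a subsequent sweep that still falls short then
 * sets grow_heap_p, so the heap is grown instead of being collected again.
 */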
/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p)
            {
              master->grow_heap_p = 0;
              alloc_some_heap (master);
            }
          else
            {
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %d, ",
                       scm_cells_allocated
                       + master_cells_allocated (&scm_master_freelist)
                       + master_cells_allocated (&scm_master_freelist2));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
            }
        }
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));
  --scm_ints_disabled;
  *freelist = SCM_CDR (cell);
  SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
  return cell;
}

#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif

#else /* GUILE_NEW_GC_SCHEME */

void
scm_gc_for_alloc (scm_freelist_t *freelist)
{
  SCM_REDEFER_INTS;
  scm_igc ("cells");
#ifdef GUILE_DEBUG_FREELIST
  fprintf (stderr, "Collected: %d, min_yield: %d\n",
           freelist->collected, MIN_GC_YIELD (freelist));
#endif
  if ((freelist->collected < MIN_GC_YIELD (freelist))
      || SCM_IMP (freelist->cells))
    alloc_some_heap (freelist);
  SCM_REALLOW_INTS;
}


SCM
scm_gc_for_newcell (scm_freelist_t *freelist)
{
  SCM fl;
  scm_gc_for_alloc (freelist);
  fl = freelist->cells;
  freelist->cells = SCM_CDR (fl);
  SCM_SETCAR (fl, scm_tc16_allocated);
  return fl;
}

#endif /* GUILE_NEW_GC_SCHEME */

void
scm_igc (const char *what)
{
  int j;

#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
#ifdef USE_THREADS
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  scm_gc_start (what);

  if (!scm_stack_base || scm_block_gc)
    {
      scm_gc_end ();
      return;
    }

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed. */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  scm_weak_vectors = SCM_EOL;

  scm_guardian_gc_init ();

  /* unprotect any struct types with no instances */
#if 0
  {
    SCM type_list;
    SCM * pos;

    pos = &scm_type_obj_list;
    type_list = scm_type_obj_list;
    while (type_list != SCM_EOL)
      if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
        {
          pos = SCM_CDRLOC (type_list);
          type_list = SCM_CDR (type_list);
        }
      else
        {
          *pos = SCM_CDR (type_list);
          type_list = SCM_CDR (type_list);
        }
  }
#endif

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

#ifndef USE_THREADS

  /* Protect from the C stack.  This must be the first marking
   * done because it provides information about what objects
   * are "in-use" by the C code.   "in-use" objects are those
   * for which the values from SCM_LENGTH and SCM_CHARS must remain
   * usable.  This requirement is stricter than a liveness
   * requirement -- in particular, it constrains the implementation
   * of scm_vector_set_length_x.
   */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      ((scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
                                    sizeof scm_save_regs_gc_mark)
                       / sizeof (SCM_STACKITEM)));

  {
    /* stack_len is long rather than scm_sizet in order to guarantee that
       &stack_len is long aligned */
#ifdef SCM_STACK_GROWS_UP
#ifdef nosve
    long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
#else
    long stack_len = scm_stack_size (scm_stack_base);
#endif
    scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
#else
#ifdef nosve
    long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
#else
    long stack_len = scm_stack_size (scm_stack_base);
#endif
    scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* FIXME: insert a phase to un-protect string-data preserved
   * in scm_vector_set_length_x.
   */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  scm_mark_weak_vector_spines ();

  scm_guardian_zombify ();

  scm_gc_sweep ();

  --scm_gc_heap_lock;
  scm_gc_end ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
}

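/* Summary of the phases performed by scm_igc above: the C stack and
 * registers (or every thread's stack when USE_THREADS is defined) are
 * marked first, then the scm_sys_protects roots, the subr table, the
 * root object, and the spines of weak vectors; guardians are zombified,
 * and finally scm_gc_sweep reclaims everything left unmarked.
 */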
\f
/* {Mark/Sweep}
 */


/* Mark an object precisely.
 */
void
scm_gc_mark (SCM p)
{
  register long i;
  register SCM ptr;

  ptr = p;

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:
  if (SCM_NCELLP (ptr))
    scm_wta (ptr, "rogue pointer in heap", NULL);

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
        {
          ptr = SCM_CAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      {
        /* Dirk:FIXME:: The following code is super ugly:  ptr may be a struct
         * or a gloc.  If it is a gloc, the cell word #0 of ptr is a pointer
         * to a heap cell.  If it is a struct, the cell word #0 of ptr is a
         * pointer to a struct vtable data region.  The fact that these are
         * accessed in the same way restricts the possibilities to change the
         * data layout of structs or heap cells.
         */
        scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
        switch (vtable_data [scm_vtable_index_vcell])
          {
          default:
            {
              /* ptr is a gloc */
              SCM gloc_car = SCM_PACK (word0);
              scm_gc_mark (gloc_car);
              ptr = SCM_GCCDR (ptr);
              goto gc_mark_loop;
            }
          case 1: /* ! */
          case 0: /* ! */
            {
              /* ptr is a struct */
              SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
              int len = SCM_LENGTH (layout);
              char * fields_desc = SCM_CHARS (layout);
              /* We're using SCM_GCCDR here like STRUCT_DATA, except
                 that it removes the mark */
              scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));

              if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
                {
                  scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
                  scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
                }
              if (len)
                {
                  int x;

                  for (x = 0; x < len - 2; x += 2, ++struct_data)
                    if (fields_desc[x] == 'p')
                      scm_gc_mark (SCM_PACK (*struct_data));
                  if (fields_desc[x] == 'p')
                    {
                      if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                        for (x = *struct_data; x; --x)
                          scm_gc_mark (SCM_PACK (*++struct_data));
                      else
                        scm_gc_mark (SCM_PACK (*struct_data));
                    }
                }
              if (vtable_data [scm_vtable_index_vcell] == 0)
                {
                  vtable_data [scm_vtable_index_vcell] = 1;
                  ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
                  goto gc_mark_loop;
                }
            }
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CLOSCAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
    case scm_tc7_lvector:
#ifdef CCLO
    case scm_tc7_cclo:
#endif
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      i = SCM_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          scm_gc_mark (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
    case scm_tc7_contin:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      if (SCM_VELTS (ptr))
        scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
                            (scm_sizet)
                            (SCM_LENGTH (ptr) +
                             (sizeof (SCM_STACKITEM) + -1 +
                              sizeof (scm_contregs)) /
                             sizeof (SCM_STACKITEM)));
      break;
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      SCM_SETGC8MARK (ptr);
      break;

    case scm_tc7_substring:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      SCM_SETGC8MARK (ptr);
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    scm_gc_mark (SCM_CAR (kvpair));
                  if (!weak_values)
                    scm_gc_mark (SCM_GCCDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                scm_gc_mark (alist);
            }
        }
      break;

    case scm_tc7_msymbol:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
      ptr = SCM_SYMBOL_PROPS (ptr);
      goto gc_mark_loop;
    case scm_tc7_ssymbol:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      break;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
        goto def;
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      if (SCM_PTAB_ENTRY(ptr))
        scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      switch (SCM_GCTYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_allocated:
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
          if (!(i < scm_numsmob))
            goto def;
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
    def:scm_wta (ptr, "unknown type in ", "gc_mark");
    }
}


/* Mark a Region Conservatively
 */

void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  register long m = n;
  register int i, j;
  register SCM_CELLPTR ptr;

  while (0 <= --m)
    if (SCM_CELLP (* (SCM *) &x[m]))
      {
        ptr = SCM2PTR (* (SCM *) &x[m]);
        i = 0;
        j = scm_n_heap_segs - 1;
        if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
            && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
          {
            while (i <= j)
              {
                int seg_id;
                seg_id = -1;
                if ((i == j)
                    || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                  seg_id = i;
                else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                  seg_id = j;
                else
                  {
                    int k;
                    k = (i + j) / 2;
                    if (k == i)
                      break;
                    if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                      {
                        j = k;
                        ++i;
                        if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                          continue;
                        else
                          break;
                      }
                    else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                      {
                        i = k;
                        --j;
                        if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                          continue;
                        else
                          break;
                      }
                  }
                if (!scm_heap_table[seg_id].valid
                    || scm_heap_table[seg_id].valid (ptr,
                                                     &scm_heap_table[seg_id]))
                  if (scm_heap_table[seg_id].span == 1
                      || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
                    scm_gc_mark (* (SCM *) &x[m]);
                break;
              }

          }
      }
}


/* The following is a C predicate which determines if an SCM value can be
   regarded as a pointer to a cell on the heap.  The code is duplicated
   from scm_mark_locations.  */

int
scm_cellp (SCM value)
{
  register int i, j;
  register SCM_CELLPTR ptr;

  if (SCM_CELLP (value))
    {
      ptr = SCM2PTR (value);
      i = 0;
      j = scm_n_heap_segs - 1;
      if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
          && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
        {
          while (i <= j)
            {
              int seg_id;
              seg_id = -1;
              if ((i == j)
                  || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                seg_id = i;
              else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                seg_id = j;
              else
                {
                  int k;
                  k = (i + j) / 2;
                  if (k == i)
                    break;
                  if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                    {
                      j = k;
                      ++i;
                      if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                        continue;
                      else
                        break;
                    }
                  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                    {
                      i = k;
                      --j;
                      if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                        continue;
                      else
                        break;
                    }
                }
              if (!scm_heap_table[seg_id].valid
                  || scm_heap_table[seg_id].valid (ptr,
                                                   &scm_heap_table[seg_id]))
                if (scm_heap_table[seg_id].span == 1
                    || SCM_DOUBLE_CELLP (value))
                  scm_gc_mark (value);
              break;
            }

        }
    }
  return 0;
}


static void
scm_mark_weak_vector_spines ()
{
  SCM w;

  for (w = scm_weak_vectors; !SCM_NULLP (w); w = SCM_WVECT_GC_CHAIN (w))
    {
      if (SCM_IS_WHVEC_ANY (w))
        {
          SCM *ptr;
          SCM obj;
          int j;
          int n;

          obj = w;
          ptr = SCM_VELTS (w);
          n = SCM_LENGTH (w);
          for (j = 0; j < n; ++j)
            {
              SCM alist;

              alist = ptr[j];
              while (SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM_SETGCMARK (alist);
                  SCM_SETGCMARK (SCM_CAR (alist));
                  alist = SCM_GCCDR (alist);
                }
            }
        }
    }
}

#ifdef GUILE_NEW_GC_SCHEME
static void
gc_sweep_freelist_start (scm_freelist_t *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}

static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  int collected;
  *freelist->clustertail = freelist->cells;
  if (SCM_NNULLP (freelist->cells))
    {
      SCM c = freelist->cells;
      SCM_SETCAR (c, SCM_CDR (c));
      SCM_SETCDR (c, SCM_EOL);
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}
#endif

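/* A made-up example of the decision in gc_sweep_freelist_finish: if a
 * sweep of the 1-cell freelist recovers 25000 cells but min_yield is
 * 40000 (and the previous sweep did no better), grow_heap_p is set, so
 * the next shortage on this freelist calls alloc_some_heap rather than
 * starting another full collection.
 */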
a00c95d9 1596void
0f2d19dd 1597scm_gc_sweep ()
0f2d19dd
JB
1598{
1599 register SCM_CELLPTR ptr;
0f2d19dd 1600 register SCM nfreelist;
4c48ba06 1601 register scm_freelist_t *freelist;
0f2d19dd 1602 register long m;
0f2d19dd 1603 register int span;
15e9d186 1604 long i;
0f2d19dd
JB
1605 scm_sizet seg_size;
1606
0f2d19dd 1607 m = 0;
0f2d19dd 1608
4a4c9785 1609#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1610 gc_sweep_freelist_start (&scm_master_freelist);
1611 gc_sweep_freelist_start (&scm_master_freelist2);
4a4c9785 1612#else
cf2d30f6
JB
1613 /* Reset all free list pointers. We'll reconstruct them completely
1614 while scanning. */
1615 for (i = 0; i < scm_n_heap_segs; i++)
4c48ba06 1616 scm_heap_table[i].freelist->cells = SCM_EOL;
4a4c9785 1617#endif
a00c95d9 1618
cf2d30f6 1619 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1620 {
0df07278 1621#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1622 register unsigned int left_to_collect;
1623#else
1624 register scm_sizet n = 0;
0df07278 1625#endif
4c48ba06 1626 register scm_sizet j;
15e9d186 1627
cf2d30f6
JB
1628 /* Unmarked cells go onto the front of the freelist this heap
1629 segment points to. Rather than updating the real freelist
1630 pointer as we go along, we accumulate the new head in
1631 nfreelist. Then, if it turns out that the entire segment is
1632 free, we free (i.e., malloc's free) the whole segment, and
1633 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1634 freelist = scm_heap_table[i].freelist;
1635 nfreelist = freelist->cells;
4a4c9785 1636#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1637 left_to_collect = freelist->left_to_collect;
4a4c9785 1638#endif
945fec60 1639 span = scm_heap_table[i].span;
cf2d30f6 1640
a00c95d9
ML
1641 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1642 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
0f2d19dd
JB
1643 for (j = seg_size + span; j -= span; ptr += span)
1644 {
96f6f4ae
DH
1645 SCM scmptr = PTR2SCM (ptr);
1646
0f2d19dd
JB
1647 switch SCM_TYP7 (scmptr)
1648 {
1649 case scm_tcs_cons_gloc:
0f2d19dd 1650 {
c8045e8d
DH
1651 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1652 * struct or a gloc. See the corresponding comment in
1653 * scm_gc_mark.
1654 */
1655 scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
1656 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1657 if (SCM_GCMARKP (scmptr))
0f2d19dd 1658 {
c8045e8d
DH
1659 if (vtable_data [scm_vtable_index_vcell] == 1)
1660 vtable_data [scm_vtable_index_vcell] = 0;
1661 goto cmrkcontinue;
1662 }
1663 else
1664 {
1665 if (vtable_data [scm_vtable_index_vcell] == 0
1666 || vtable_data [scm_vtable_index_vcell] == 1)
1667 {
1668 scm_struct_free_t free
1669 = (scm_struct_free_t) vtable_data[scm_struct_i_free];
1670 m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
1671 }
0f2d19dd
JB
1672 }
1673 }
1674 break;
1675 case scm_tcs_cons_imcar:
1676 case scm_tcs_cons_nimcar:
1677 case scm_tcs_closures:
e641afaf 1678 case scm_tc7_pws:
0f2d19dd
JB
1679 if (SCM_GCMARKP (scmptr))
1680 goto cmrkcontinue;
1681 break;
1682 case scm_tc7_wvect:
1683 if (SCM_GC8MARKP (scmptr))
1684 {
1685 goto c8mrkcontinue;
1686 }
1687 else
1688 {
ab4bef85
JB
1689 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1690 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
0f2d19dd
JB
1691 break;
1692 }
1693
1694 case scm_tc7_vector:
1695 case scm_tc7_lvector:
1696#ifdef CCLO
1697 case scm_tc7_cclo:
1698#endif
1699 if (SCM_GC8MARKP (scmptr))
1700 goto c8mrkcontinue;
1701
1702 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1703 freechars:
1704 scm_must_free (SCM_CHARS (scmptr));
1705 /* SCM_SETCHARS(scmptr, 0);*/
1706 break;
afe5177e 1707#ifdef HAVE_ARRAYS
0f2d19dd
JB
1708 case scm_tc7_bvect:
1709 if SCM_GC8MARKP (scmptr)
1710 goto c8mrkcontinue;
1711 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1712 goto freechars;
1713 case scm_tc7_byvect:
1714 if SCM_GC8MARKP (scmptr)
1715 goto c8mrkcontinue;
1716 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1717 goto freechars;
1718 case scm_tc7_ivect:
1719 case scm_tc7_uvect:
1720 if SCM_GC8MARKP (scmptr)
1721 goto c8mrkcontinue;
1722 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1723 goto freechars;
1724 case scm_tc7_svect:
1725 if SCM_GC8MARKP (scmptr)
1726 goto c8mrkcontinue;
1727 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1728 goto freechars;
5c11cc9d 1729#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1730 case scm_tc7_llvect:
1731 if SCM_GC8MARKP (scmptr)
1732 goto c8mrkcontinue;
1733 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1734 goto freechars;
1735#endif
1736 case scm_tc7_fvect:
1737 if SCM_GC8MARKP (scmptr)
1738 goto c8mrkcontinue;
1739 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1740 goto freechars;
1741 case scm_tc7_dvect:
1742 if SCM_GC8MARKP (scmptr)
1743 goto c8mrkcontinue;
1744 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1745 goto freechars;
1746 case scm_tc7_cvect:
1747 if SCM_GC8MARKP (scmptr)
1748 goto c8mrkcontinue;
1749 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1750 goto freechars;
afe5177e 1751#endif
0f2d19dd 1752 case scm_tc7_substring:
0f2d19dd
JB
1753 if (SCM_GC8MARKP (scmptr))
1754 goto c8mrkcontinue;
1755 break;
1756 case scm_tc7_string:
0f2d19dd
JB
1757 if (SCM_GC8MARKP (scmptr))
1758 goto c8mrkcontinue;
1759 m += SCM_HUGE_LENGTH (scmptr) + 1;
1760 goto freechars;
1761 case scm_tc7_msymbol:
1762 if (SCM_GC8MARKP (scmptr))
1763 goto c8mrkcontinue;
cf551a2b
DH
1764 m += (SCM_LENGTH (scmptr) + 1
1765 + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
0f2d19dd
JB
1766 scm_must_free ((char *)SCM_SLOTS (scmptr));
1767 break;
1768 case scm_tc7_contin:
1769 if SCM_GC8MARKP (scmptr)
1770 goto c8mrkcontinue;
0db18cf4 1771 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
c68296f8
MV
1772 if (SCM_VELTS (scmptr))
1773 goto freechars;
0f2d19dd
JB
1774 case scm_tc7_ssymbol:
1775 if SCM_GC8MARKP(scmptr)
1776 goto c8mrkcontinue;
1777 break;
1778 case scm_tcs_subrs:
1779 continue;
1780 case scm_tc7_port:
1781 if SCM_GC8MARKP (scmptr)
1782 goto c8mrkcontinue;
1783 if SCM_OPENP (scmptr)
1784 {
1785 int k = SCM_PTOBNUM (scmptr);
1786 if (!(k < scm_numptob))
1787 goto sweeperr;
1788 /* Keep "revealed" ports alive. */
945fec60 1789 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1790 continue;
1791 /* Yes, I really do mean scm_ptobs[k].free */
1792 /* rather than ftobs[k].close. .close */
1793 /* is for explicit CLOSE-PORT by user */
84af0382 1794 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1795 SCM_SETSTREAM (scmptr, 0);
1796 scm_remove_from_port_table (scmptr);
1797 scm_gc_ports_collected++;
24e68a57 1798 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
0f2d19dd
JB
1799 }
1800 break;
1801 case scm_tc7_smob:
1802 switch SCM_GCTYP16 (scmptr)
1803 {
1804 case scm_tc_free_cell:
acb0a19c 1805 case scm_tc16_real:
0f2d19dd
JB
1806 if SCM_GC8MARKP (scmptr)
1807 goto c8mrkcontinue;
1808 break;
1809#ifdef SCM_BIGDIG
acb0a19c 1810 case scm_tc16_big:
0f2d19dd
JB
1811 if SCM_GC8MARKP (scmptr)
1812 goto c8mrkcontinue;
1813 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1814 goto freechars;
1815#endif /* def SCM_BIGDIG */
acb0a19c 1816 case scm_tc16_complex:
0f2d19dd
JB
1817 if SCM_GC8MARKP (scmptr)
1818 goto c8mrkcontinue;
acb0a19c
MD
1819 m += 2 * sizeof (double);
1820 goto freechars;
0f2d19dd
JB
1821 default:
1822 if SCM_GC8MARKP (scmptr)
1823 goto c8mrkcontinue;
1824
1825 {
1826 int k;
1827 k = SCM_SMOBNUM (scmptr);
1828 if (!(k < scm_numsmob))
1829 goto sweeperr;
c8045e8d 1830 m += (scm_smobs[k].free) (scmptr);
0f2d19dd
JB
1831 break;
1832 }
1833 }
1834 break;
1835 default:
1836 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1837 }
0f2d19dd
JB
1838#if 0
1839 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1840 exit (2);
1841#endif
4a4c9785
MD
1842#ifndef GUILE_NEW_GC_SCHEME
1843 n += span;
1844#else
4c48ba06 1845 if (!--left_to_collect)
4a4c9785
MD
1846 {
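	      /* A full cluster has been gathered: make this cell the
		 cluster's spine (its car holds the free cells collected so
		 far), chain it onto the freelist's list of clusters, and
		 start gathering a fresh cluster.  */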
1847 SCM_SETCAR (scmptr, nfreelist);
4c48ba06
MD
1848 *freelist->clustertail = scmptr;
1849 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1850
4a4c9785 1851 nfreelist = SCM_EOL;
4c48ba06
MD
1852 freelist->collected += span * freelist->cluster_size;
1853 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1854 }
1855 else
1856#endif
1857 {
1858 /* Stick the new cell on the front of nfreelist. It's
1859 critical that we mark this cell as freed; otherwise, the
1860 conservative collector might trace it as some other type
1861 of object. */
54778cd3 1862 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
1863 SCM_SETCDR (scmptr, nfreelist);
1864 nfreelist = scmptr;
1865 }
a00c95d9 1866
0f2d19dd
JB
1867 continue;
1868 c8mrkcontinue:
1869 SCM_CLRGC8MARK (scmptr);
1870 continue;
1871 cmrkcontinue:
1872 SCM_CLRGCMARK (scmptr);
1873 }
1874#ifdef GC_FREE_SEGMENTS
1875 if (n == seg_size)
1876 {
15e9d186
JB
1877 register long j;
1878
4c48ba06 1879 freelist->heap_size -= seg_size;
cf2d30f6
JB
1880 free ((char *) scm_heap_table[i].bounds[0]);
1881 scm_heap_table[i].bounds[0] = 0;
1882 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1883 scm_heap_table[j - 1] = scm_heap_table[j];
1884 scm_n_heap_segs -= 1;
cf2d30f6 1885 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1886 }
1887 else
1888#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1889 {
1890 /* Update the real freelist pointer to point to the head of
1891 the list of free cells we've built for this segment. */
4c48ba06 1892 freelist->cells = nfreelist;
4a4c9785 1893#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1894 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1895#endif
1896 }
1897
4c48ba06
MD
1898#ifndef GUILE_NEW_GC_SCHEME
1899 freelist->collected += n;
4a4c9785 1900#endif
0f2d19dd 1901
fca7547b 1902#ifdef GUILE_DEBUG_FREELIST
8ded62a3 1903#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1904 scm_check_freelist (freelist == &scm_master_freelist
8ded62a3
MD
1905 ? scm_freelist
1906 : scm_freelist2);
1907#else
4c48ba06 1908 scm_check_freelist (freelist);
8ded62a3 1909#endif
cf2d30f6
JB
1910 scm_map_free_list ();
1911#endif
4a4c9785 1912 }
a00c95d9 1913
4a4c9785 1914#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1915 gc_sweep_freelist_finish (&scm_master_freelist);
1916 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1917
8ded62a3
MD
1918 /* When we move to POSIX threads, private freelists should probably
1919 be GC-protected instead. */
1920 scm_freelist = SCM_EOL;
1921 scm_freelist2 = SCM_EOL;
4a4c9785 1922#endif
a00c95d9 1923
0f2d19dd
JB
1924 /* Scan weak vectors. */
1925 {
ab4bef85 1926 SCM *ptr, w;
54778cd3 1927 for (w = scm_weak_vectors; !SCM_NULLP (w); w = SCM_WVECT_GC_CHAIN (w))
0f2d19dd 1928 {
ab4bef85 1929 if (!SCM_IS_WHVEC_ANY (w))
0f2d19dd 1930 {
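	  /* An ordinary weak vector: any element whose referent was swept
	     above is now a free cell, so overwrite it with #f.  */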
15e9d186
JB
1931 register long j, n;
1932
ab4bef85
JB
1933 ptr = SCM_VELTS (w);
1934 n = SCM_LENGTH (w);
0f2d19dd 1935 for (j = 0; j < n; ++j)
0c95b57d 1936 if (SCM_FREEP (ptr[j]))
0f2d19dd
JB
1937 ptr[j] = SCM_BOOL_F;
1938 }
1939 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1940 {
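	  /* A weak hash table: each element is an alist bucket.  Splice out
	     every entry whose weakly held key or value has just been swept
	     (i.e. is now a free cell).  */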
ab4bef85
JB
1941 SCM obj = w;
1942 register long n = SCM_LENGTH (w);
15e9d186
JB
1943 register long j;
1944
ab4bef85 1945 ptr = SCM_VELTS (w);
15e9d186 1946
0f2d19dd
JB
1947 for (j = 0; j < n; ++j)
1948 {
1949 SCM * fixup;
1950 SCM alist;
1951 int weak_keys;
1952 int weak_values;
a00c95d9 1953
0f2d19dd
JB
1954 weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
1955 weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
1956
1957 fixup = ptr + j;
1958 alist = *fixup;
1959
0b5f3f34 1960 while ( SCM_CONSP (alist)
0f2d19dd
JB
1961 && SCM_CONSP (SCM_CAR (alist)))
1962 {
1963 SCM key;
1964 SCM value;
1965
1966 key = SCM_CAAR (alist);
1967 value = SCM_CDAR (alist);
0c95b57d
GB
1968 if ( (weak_keys && SCM_FREEP (key))
1969 || (weak_values && SCM_FREEP (value)))
0f2d19dd
JB
1970 {
1971 *fixup = SCM_CDR (alist);
1972 }
1973 else
24e68a57 1974 fixup = SCM_CDRLOC (alist);
0f2d19dd
JB
1975 alist = SCM_CDR (alist);
1976 }
1977 }
1978 }
1979 }
1980 }
b37fe1c5 1981 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f
MD
1982#ifdef GUILE_NEW_GC_SCHEME
1983 scm_gc_yield -= scm_cells_allocated;
1984#endif
0f2d19dd
JB
1985 scm_mallocated -= m;
1986 scm_gc_malloc_collected = m;
1987}
1988
1989
1990\f
1991
1992/* {Front end to malloc}
1993 *
c68296f8 1994 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
0f2d19dd
JB
1995 *
1996 * These functions provide services comparable to malloc, realloc, and
1997 * free. They are for allocating the malloced parts of Scheme objects.
1998 * The primary purpose of the front end is to trigger gc when malloced memory grows past scm_mtrigger.
1999 */
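/* Illustrative sketch, not part of the original source: the usual pairing
 * of these calls in code that attaches malloced storage to a Scheme object.
 * The struct image type and its fields are hypothetical; only
 * scm_must_malloc and scm_must_free below are real.
 */
#if 0
struct image { int width, height; char *pixels; };

static struct image *
make_image_data (int width, int height)
{
  struct image *im = scm_must_malloc (sizeof (struct image), "image");
  im->width = width;
  im->height = height;
  im->pixels = scm_must_malloc ((scm_sizet) (width * height), "image pixels");
  return im;
}

static scm_sizet
free_image_data (struct image *im)
{
  scm_sizet freed = sizeof (struct image) + im->width * im->height;
  scm_must_free (im->pixels);
  scm_must_free (im);
  return freed;                 /* report how much malloced memory went away */
}
#endif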
2000
bc9d9bb2 2001
0f2d19dd
JB
2002/* scm_must_malloc
2003 * Return newly malloced storage or throw an error.
2004 *
2005 * The parameter WHAT is a string for error reporting.
a00c95d9 2006 * If the threshold scm_mtrigger would be exceeded by this
0f2d19dd
JB
2007 * allocation, or if the first call to malloc fails,
2008 * garbage collect -- on the presumption that some objects
2009 * using malloced storage may be collected.
2010 *
2011 * The limit scm_mtrigger may be raised by this allocation.
2012 */
07806695 2013void *
e4ef2330 2014scm_must_malloc (scm_sizet size, const char *what)
0f2d19dd 2015{
07806695 2016 void *ptr;
15e9d186 2017 unsigned long nm = scm_mallocated + size;
e4ef2330
MD
2018
2019 if (nm <= scm_mtrigger)
0f2d19dd 2020 {
07806695 2021 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
2022 if (NULL != ptr)
2023 {
2024 scm_mallocated = nm;
bc9d9bb2
MD
2025#ifdef GUILE_DEBUG_MALLOC
2026 scm_malloc_register (ptr, what);
2027#endif
0f2d19dd
JB
2028 return ptr;
2029 }
2030 }
6064dcc6 2031
0f2d19dd 2032 scm_igc (what);
e4ef2330 2033
0f2d19dd 2034 nm = scm_mallocated + size;
07806695 2035 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
2036 if (NULL != ptr)
2037 {
2038 scm_mallocated = nm;
6064dcc6
MV
2039 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2040 if (nm > scm_mtrigger)
2041 scm_mtrigger = nm + nm / 2;
2042 else
2043 scm_mtrigger += scm_mtrigger / 2;
2044 }
bc9d9bb2
MD
2045#ifdef GUILE_DEBUG_MALLOC
2046 scm_malloc_register (ptr, what);
2047#endif
2048
0f2d19dd
JB
2049 return ptr;
2050 }
e4ef2330
MD
2051
2052 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
2053 return 0; /* never reached */
0f2d19dd
JB
2054}
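/* The trigger adjustment above works like this -- illustrative numbers,
 * not from the original source: if scm_mtrigger is 100000 and the post-gc
 * total nm reaches 120000, then nm > scm_mtrigger, so the trigger is
 * raised to nm + nm / 2 = 180000.  If nm only lands in the hysteresis
 * band just below scm_mtrigger, the trigger instead grows by half of its
 * current value, to 150000.  Either way the next forced gc is pushed well
 * past the current malloc usage.
 */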
2055
2056
2057/* scm_must_realloc
2058 * is similar to scm_must_malloc.
2059 */
07806695
JB
2060void *
2061scm_must_realloc (void *where,
e4ef2330
MD
2062 scm_sizet old_size,
2063 scm_sizet size,
3eeba8d4 2064 const char *what)
0f2d19dd 2065{
07806695 2066 void *ptr;
e4ef2330
MD
2067 scm_sizet nm = scm_mallocated + size - old_size;
2068
2069 if (nm <= scm_mtrigger)
0f2d19dd 2070 {
07806695 2071 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
2072 if (NULL != ptr)
2073 {
2074 scm_mallocated = nm;
bc9d9bb2
MD
2075#ifdef GUILE_DEBUG_MALLOC
2076 scm_malloc_reregister (where, ptr, what);
2077#endif
0f2d19dd
JB
2078 return ptr;
2079 }
2080 }
e4ef2330 2081
0f2d19dd 2082 scm_igc (what);
e4ef2330
MD
2083
2084 nm = scm_mallocated + size - old_size;
07806695 2085 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
2086 if (NULL != ptr)
2087 {
2088 scm_mallocated = nm;
6064dcc6
MV
2089 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2090 if (nm > scm_mtrigger)
2091 scm_mtrigger = nm + nm / 2;
2092 else
2093 scm_mtrigger += scm_mtrigger / 2;
2094 }
bc9d9bb2
MD
2095#ifdef GUILE_DEBUG_MALLOC
2096 scm_malloc_reregister (where, ptr, what);
2097#endif
0f2d19dd
JB
2098 return ptr;
2099 }
e4ef2330
MD
2100
2101 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
2102 return 0; /* never reached */
0f2d19dd
JB
2103}
2104
a00c95d9 2105void
07806695 2106scm_must_free (void *obj)
0f2d19dd 2107{
bc9d9bb2
MD
2108#ifdef GUILE_DEBUG_MALLOC
2109 scm_malloc_unregister (obj);
2110#endif
0f2d19dd
JB
2111 if (obj)
2112 free (obj);
2113 else
2114 scm_wta (SCM_INUM0, "already free", "");
2115}
0f2d19dd 2116
c68296f8
MV
2117/* Announce that there has been some malloc done that will be freed
2118 * during gc. A typical use is for a smob that uses some malloced
2119 * memory but cannot get it from scm_must_malloc (for whatever
2120 * reason). When a new object of this smob is created you call
2121 * scm_done_malloc with the size of the object. When your smob free
2122 * function is called, be sure to include this size in the return
2123 * value. */
0f2d19dd 2124
c68296f8 2125void
6e8d25a6 2126scm_done_malloc (long size)
c68296f8
MV
2127{
2128 scm_mallocated += size;
2129
2130 if (scm_mallocated > scm_mtrigger)
2131 {
2132 scm_igc ("foreign mallocs");
2133 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2134 {
2135 if (scm_mallocated > scm_mtrigger)
2136 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2137 else
2138 scm_mtrigger += scm_mtrigger / 2;
2139 }
2140 }
2141}
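/* Illustrative sketch, not part of the original source: a smob whose
 * malloced memory is handed to it by an external library, so it is
 * announced with scm_done_malloc instead of going through
 * scm_must_malloc.  The functions external_library_alloc,
 * external_library_free, wrap_as_smob, get_blob_mem and the constant
 * BLOB_SIZE are all hypothetical; only scm_done_malloc is real.
 */
#if 0
#define BLOB_SIZE 4096

static SCM
make_blob (void)
{
  char *mem = external_library_alloc (BLOB_SIZE);  /* not scm_must_malloc */
  SCM z = wrap_as_smob (mem);                      /* hypothetical smob constructor */
  scm_done_malloc (BLOB_SIZE);   /* tell the gc this much malloced memory exists */
  return z;
}

static scm_sizet
free_blob (SCM obj)
{
  external_library_free (get_blob_mem (obj));
  return BLOB_SIZE;              /* balance the earlier scm_done_malloc */
}
#endif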
2142
2143
2144\f
0f2d19dd
JB
2145
2146/* {Heap Segments}
2147 *
2148 * Each heap segment is an array of objects of a particular size.
2149 * Every segment has an associated (possibly shared) freelist.
2150 * A table of segment records is kept that records the upper and
2151 * lower extents of the segment; this is used during the conservative
2152 * phase of gc to identify probable gc roots (because they point
c68296f8 2153 * into valid segments at reasonable offsets). */
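/* Illustrative sketch, not part of the original source: the kind of bounds
 * check that makes the segment table useful to the conservative mark
 * phase.  A stack or register word W is only worth tracing if it falls
 * inside some segment; the real mark code applies further validity checks
 * beyond this one.
 */
#if 0
static int
points_into_some_segment (SCM_CELLPTR w)
{
  int i;
  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], w)
	&& !SCM_PTR_LE (scm_heap_table[i].bounds[1], w))
      return 1;                 /* bounds[0] <= w < bounds[1] */
  return 0;
}
#endif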
0f2d19dd
JB
2154
2155/* scm_expmem
2156 * is true if the first segment was smaller than INIT_HEAP_SEG.
2157 * If scm_expmem is set to one, subsequent segment allocations will
2158 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2159 */
2160int scm_expmem = 0;
2161
4c48ba06
MD
2162scm_sizet scm_max_segment_size;
2163
0f2d19dd
JB
2164/* scm_heap_org
2165 * is the lowest base address of any heap segment.
2166 */
2167SCM_CELLPTR scm_heap_org;
2168
a00c95d9 2169scm_heap_seg_data_t * scm_heap_table = 0;
0f2d19dd
JB
2170int scm_n_heap_segs = 0;
2171
0f2d19dd
JB
2172/* init_heap_seg
2173 * initializes a new heap segment and returns its size in bytes (or 0 on failure).
2174 *
2175 * The segment origin, segment size in bytes, and the span of objects
2176 * in cells are input parameters. The freelist is both input and output.
2177 *
2178 * This function presumes that the scm_heap_table has already been expanded
2179 * to accommodate a new segment record.
2180 */
2181
2182
a00c95d9 2183static scm_sizet
4c48ba06 2184init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
0f2d19dd
JB
2185{
2186 register SCM_CELLPTR ptr;
0f2d19dd 2187 SCM_CELLPTR seg_end;
15e9d186 2188 int new_seg_index;
acb0a19c 2189 int n_new_cells;
4c48ba06 2190 int span = freelist->span;
a00c95d9 2191
0f2d19dd
JB
2192 if (seg_org == NULL)
2193 return 0;
2194
a00c95d9 2195 ptr = CELL_UP (seg_org, span);
acb0a19c 2196
a00c95d9 2197 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 2198 */
a00c95d9 2199 seg_end = CELL_DN ((char *) seg_org + size, span);
0f2d19dd 2200
a00c95d9 2201 /* Find the right place and insert the segment record.
0f2d19dd
JB
2202 *
2203 */
2204 for (new_seg_index = 0;
2205 ( (new_seg_index < scm_n_heap_segs)
2206 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2207 new_seg_index++)
2208 ;
2209
2210 {
2211 int i;
2212 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2213 scm_heap_table[i] = scm_heap_table[i - 1];
2214 }
a00c95d9 2215
0f2d19dd
JB
2216 ++scm_n_heap_segs;
2217
2218 scm_heap_table[new_seg_index].valid = 0;
945fec60 2219 scm_heap_table[new_seg_index].span = span;
4c48ba06 2220 scm_heap_table[new_seg_index].freelist = freelist;
195e6201
DH
2221 scm_heap_table[new_seg_index].bounds[0] = ptr;
2222 scm_heap_table[new_seg_index].bounds[1] = seg_end;
0f2d19dd
JB
2223
2224
a00c95d9 2225 /* Compute the least valid object pointer w/in this segment
0f2d19dd 2226 */
a00c95d9 2227 ptr = CELL_UP (ptr, span);
0f2d19dd
JB
2228
2229
acb0a19c
MD
2230 /*n_new_cells*/
2231 n_new_cells = seg_end - ptr;
0f2d19dd 2232
4a4c9785
MD
2233#ifdef GUILE_NEW_GC_SCHEME
2234
4c48ba06 2235 freelist->heap_size += n_new_cells;
4a4c9785 2236
a00c95d9 2237 /* Partition objects in this segment into clusters */
4a4c9785
MD
2238 {
2239 SCM clusters;
2240 SCM *clusterp = &clusters;
4c48ba06 2241 int n_cluster_cells = span * freelist->cluster_size;
4a4c9785 2242
4c48ba06 2243 while (n_new_cells > span) /* at least one spine + one freecell */
4a4c9785 2244 {
4c48ba06
MD
2245 /* Determine end of cluster
2246 */
2247 if (n_new_cells >= n_cluster_cells)
2248 {
2249 seg_end = ptr + n_cluster_cells;
2250 n_new_cells -= n_cluster_cells;
2251 }
4a4c9785 2252 else
a00c95d9
ML
2253 /* [cmm] looks like the segment size doesn't divide cleanly by
2254 cluster size. bad cmm! */
2255 abort();
4a4c9785 2256
4c48ba06
MD
2257 /* Allocate cluster spine
2258 */
4a4c9785
MD
2259 *clusterp = PTR2SCM (ptr);
2260 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
2261 clusterp = SCM_CDRLOC (*clusterp);
4a4c9785 2262 ptr += span;
a00c95d9 2263
4a4c9785
MD
2264 while (ptr < seg_end)
2265 {
96f6f4ae
DH
2266 SCM scmptr = PTR2SCM (ptr);
2267
54778cd3 2268 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
2269 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2270 ptr += span;
2271 }
4c48ba06 2272
4a4c9785
MD
2273 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
2274 }
a00c95d9 2275
4a4c9785
MD
2276 /* Patch up the last cluster pointer in the segment
2277 * to join it to the input freelist.
2278 */
4c48ba06
MD
2279 *clusterp = freelist->clusters;
2280 freelist->clusters = clusters;
4a4c9785
MD
2281 }
2282
2283#else /* GUILE_NEW_GC_SCHEME */
2284
a00c95d9 2285 /* Prepend objects in this segment to the freelist.
0f2d19dd
JB
2286 */
2287 while (ptr < seg_end)
2288 {
96f6f4ae
DH
2289 SCM scmptr = PTR2SCM (ptr);
2290
24e68a57 2291 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
945fec60
MD
2292 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2293 ptr += span;
0f2d19dd
JB
2294 }
2295
945fec60 2296 ptr -= span;
0f2d19dd
JB
2297
2298 /* Patch up the last freelist pointer in the segment
2299 * to join it to the input freelist.
2300 */
4c48ba06 2301 SCM_SETCDR (PTR2SCM (ptr), freelist->cells);
a00c95d9 2302 freelist->cells = PTR2SCM (CELL_UP (seg_org, span));
4c48ba06
MD
2303
2304 freelist->heap_size += n_new_cells;
0f2d19dd 2305
4a4c9785 2306#endif /* GUILE_NEW_GC_SCHEME */
4c48ba06
MD
2307
2308#ifdef DEBUGINFO
2309 fprintf (stderr, "H");
2310#endif
0f2d19dd 2311 return size;
0f2d19dd
JB
2312}
2313
a00c95d9
ML
2314#ifndef GUILE_NEW_GC_SCHEME
2315#define round_to_cluster_size(freelist, len) len
2316#else
2317
2318static scm_sizet
2319round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2320{
2321 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2322
2323 return
2324 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2325 + ALIGNMENT_SLACK (freelist);
2326}
2327
2328#endif
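/* Worked example with hypothetical numbers, not from the original source:
 * if CLUSTER_SIZE_IN_BYTES (freelist) is 4096 and len is 10000, then
 * (10000 + 4095) / 4096 * 4096 = 12288, i.e. the request is rounded up to
 * a whole number of clusters before ALIGNMENT_SLACK is added.
 */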
0f2d19dd 2329
a00c95d9 2330static void
4c48ba06 2331alloc_some_heap (scm_freelist_t *freelist)
0f2d19dd 2332{
a00c95d9 2333 scm_heap_seg_data_t * tmptable;
0f2d19dd 2334 SCM_CELLPTR ptr;
b37fe1c5 2335 long len;
a00c95d9 2336
0f2d19dd
JB
2337 /* Critical code sections (such as the garbage collector)
2338 * aren't supposed to add heap segments.
2339 */
2340 if (scm_gc_heap_lock)
2341 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
2342
2343 /* Expand the heap tables to have room for the new segment.
2344 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
2345 * only if the allocation of the segment itself succeeds.
2346 */
a00c95d9 2347 len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
0f2d19dd 2348
a00c95d9 2349 SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
0f2d19dd
JB
2350 realloc ((char *)scm_heap_table, len)));
2351 if (!tmptable)
2352 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
2353 else
2354 scm_heap_table = tmptable;
2355
2356
2357 /* Pick a size for the new heap segment.
a00c95d9 2358 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
2359 * gc.h
2360 */
4c48ba06
MD
2361#ifdef GUILE_NEW_GC_SCHEME
2362 {
1811ebce
MD
2363 /* Ensure that the new segment is predicted to be large enough.
2364 *
2365 * New yield should at least equal GC fraction of new heap size, i.e.
2366 *
2367 * y + dh > f * (h + dh)
2368 *
2369 * y : yield
8fef55a8 2370 * f : min yield fraction
1811ebce
MD
2371 * h : heap size
2372 * dh : size of new heap segment
2373 *
2374 * This gives dh > (f * h - y) / (1 - f)
bda1446c 2375 */
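    /* Since f is stored as a percentage, the bound above becomes
     * dh > (f * h - 100 * y) / (100 - f); the expression below divides by
     * (99 - f) instead, which makes min_cells slightly larger and so errs
     * on the side of a roomier segment.  With hypothetical numbers f = 40,
     * h = 100000 cells and y = 20000 cells this gives
     * min_cells = 2000000 / 59 = 33898 cells (the exact bound would be
     * about 33333).
     */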
8fef55a8 2376 int f = freelist->min_yield_fraction;
1811ebce
MD
2377 long h = SCM_HEAP_SIZE;
2378 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2379 len = SCM_EXPHEAP (freelist->heap_size);
2380#ifdef DEBUGINFO
2381 fprintf (stderr, "(%d < %d)", len, min_cells);
2382#endif
2383 if (len < min_cells)
1811ebce 2384 len = min_cells + freelist->cluster_size;
4c48ba06 2385 len *= sizeof (scm_cell);
1811ebce
MD
2386 /* force new sampling */
2387 freelist->collected = LONG_MAX;
4c48ba06 2388 }
a00c95d9 2389
4c48ba06
MD
2390 if (len > scm_max_segment_size)
2391 len = scm_max_segment_size;
2392#else
0f2d19dd
JB
2393 if (scm_expmem)
2394 {
4c48ba06
MD
2395 len = (scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell));
2396 if ((scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell))
945fec60 2397 != len)
0f2d19dd
JB
2398 len = 0;
2399 }
2400 else
2401 len = SCM_HEAP_SEG_SIZE;
4c48ba06 2402#endif /* GUILE_NEW_GC_SCHEME */
0f2d19dd
JB
2403
2404 {
2405 scm_sizet smallest;
2406
a00c95d9 2407#ifndef GUILE_NEW_GC_SCHEME
4c48ba06 2408 smallest = (freelist->span * sizeof (scm_cell));
a00c95d9
ML
2409#else
2410 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2411#endif
2412
0f2d19dd 2413 if (len < smallest)
a00c95d9 2414 len = smallest;
0f2d19dd
JB
2415
2416 /* Allocate with decaying ambition. */
2417 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2418 && (len >= smallest))
2419 {
1811ebce 2420 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2421 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2422 if (ptr)
2423 {
a00c95d9 2424 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2425 return;
2426 }
2427 len /= 2;
2428 }
2429 }
2430
2431 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
2432}
2433
2434
a00c95d9 2435SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
1bbd0b84 2436 (SCM name),
b380b885 2437 "")
1bbd0b84 2438#define FUNC_NAME s_scm_unhash_name
0f2d19dd
JB
2439{
2440 int x;
2441 int bound;
3b3b36dd 2442 SCM_VALIDATE_SYMBOL (1,name);
0f2d19dd
JB
2443 SCM_DEFER_INTS;
2444 bound = scm_n_heap_segs;
2445 for (x = 0; x < bound; ++x)
2446 {
2447 SCM_CELLPTR p;
2448 SCM_CELLPTR pbound;
195e6201
DH
2449 p = scm_heap_table[x].bounds[0];
2450 pbound = scm_heap_table[x].bounds[1];
0f2d19dd
JB
2451 while (p < pbound)
2452 {
c8045e8d
DH
2453 SCM cell = PTR2SCM (p);
2454 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
0f2d19dd 2455 {
c8045e8d
DH
2456 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2457 * struct cell. See the corresponding comment in scm_gc_mark.
2458 */
2459 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2460 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2461 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
2462 if ((SCM_TRUE_P (name) || SCM_EQ_P (SCM_CAR (gloc_car), name))
2463 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
0f2d19dd 2464 {
c8045e8d 2465 SCM_SET_CELL_OBJECT_0 (cell, name);
0f2d19dd
JB
2466 }
2467 }
2468 ++p;
2469 }
2470 }
2471 SCM_ALLOW_INTS;
2472 return name;
2473}
1bbd0b84 2474#undef FUNC_NAME
0f2d19dd
JB
2475
2476
2477\f
2478/* {GC Protection Helper Functions}
2479 */
2480
2481
0f2d19dd 2482void
6e8d25a6
GB
2483scm_remember (SCM *ptr)
2484{ /* empty */ }
0f2d19dd 2485
1cc91f1b 2486
c209c88e 2487/*
41b0806d
GB
2488 These crazy functions prevent garbage collection
2489 of arguments after the first argument by
2490 ensuring they remain live throughout the
2491 function because they are used in the last
2492 line of the code block.
2493 It'd be better to have a nice compiler hint to
2494 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2495SCM
2496scm_return_first (SCM elt, ...)
0f2d19dd
JB
2497{
2498 return elt;
2499}
2500
41b0806d
GB
2501int
2502scm_return_first_int (int i, ...)
2503{
2504 return i;
2505}
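/* Illustrative sketch, not part of the original source: a typical use.
 * scm_makfromstr may trigger gc while only the raw character pointer of
 * STR is otherwise live; naming STR as a trailing argument keeps it
 * visibly alive on the stack until the copy has been made.  (The helper
 * itself is hypothetical; scm_makfromstr is assumed to have its usual
 * (src, len, slack) signature.)
 */
#if 0
static SCM
copy_string_contents (SCM str)
{
  return scm_return_first (scm_makfromstr (SCM_CHARS (str),
					   (scm_sizet) SCM_LENGTH (str),
					   0),
			   str);
}
#endif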
2506
0f2d19dd 2507
0f2d19dd 2508SCM
6e8d25a6 2509scm_permanent_object (SCM obj)
0f2d19dd
JB
2510{
2511 SCM_REDEFER_INTS;
2512 scm_permobjs = scm_cons (obj, scm_permobjs);
2513 SCM_REALLOW_INTS;
2514 return obj;
2515}
2516
2517
ef290276
JB
2518/* Protect OBJ from the garbage collector. OBJ will not be freed,
2519 even if all other references are dropped, until someone applies
2520 scm_unprotect_object to it. This function returns OBJ.
2521
c209c88e
GB
2522 Calls to scm_protect_object nest. For every object OBJ, there is a
2523 counter which scm_protect_object(OBJ) increments and
2524 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
dab7f566
JB
2525 an object's counter is greater than zero, the garbage collector
2526 will not free it.
2527
2528 Of course, that's not how it's implemented. scm_protect_object and
2529 scm_unprotect_object just maintain a list of references to things.
2530 Since the GC knows about this list, all objects it mentions stay
2531 alive. scm_protect_object adds its argument to the list;
2532 scm_unprotect_object removes the first occurrence of its argument
2533 from the list. */
ef290276 2534SCM
6e8d25a6 2535scm_protect_object (SCM obj)
ef290276 2536{
ef290276
JB
2537 scm_protects = scm_cons (obj, scm_protects);
2538
2539 return obj;
2540}
2541
2542
2543/* Remove any protection for OBJ established by a prior call to
dab7f566 2544 scm_protect_object. This function returns OBJ.
ef290276 2545
dab7f566 2546 See scm_protect_object for more information. */
ef290276 2547SCM
6e8d25a6 2548scm_unprotect_object (SCM obj)
ef290276 2549{
dab7f566
JB
2550 SCM *tail_ptr = &scm_protects;
2551
0c95b57d 2552 while (SCM_CONSP (*tail_ptr))
c8045e8d 2553 if (SCM_EQ_P (SCM_CAR (*tail_ptr), obj))
dab7f566
JB
2554 {
2555 *tail_ptr = SCM_CDR (*tail_ptr);
2556 break;
2557 }
2558 else
2559 tail_ptr = SCM_CDRLOC (*tail_ptr);
ef290276
JB
2560
2561 return obj;
2562}
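/* Illustrative sketch, not part of the original source: a C extension
 * keeping a callback in a static SCM variable, which the conservative gc
 * cannot see.  The variable and setter are hypothetical; only
 * scm_protect_object and scm_unprotect_object are real.
 */
#if 0
static SCM current_callback = SCM_BOOL_F;

void
set_current_callback (SCM proc)
{
  scm_protect_object (proc);                     /* keep the new callback alive */
  if (!SCM_EQ_P (current_callback, SCM_BOOL_F))
    scm_unprotect_object (current_callback);     /* drop one protection of the old one */
  current_callback = proc;
}
#endif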
2563
c45acc34
JB
2564int terminating;
2565
2566/* called on process termination. */
e52ceaac
MD
2567#ifdef HAVE_ATEXIT
2568static void
2569cleanup (void)
2570#else
2571#ifdef HAVE_ON_EXIT
51157deb
MD
2572extern int on_exit (void (*procp) (), int arg);
2573
e52ceaac
MD
2574static void
2575cleanup (int status, void *arg)
2576#else
2577#error Dont know how to setup a cleanup handler on your system.
2578#endif
2579#endif
c45acc34
JB
2580{
2581 terminating = 1;
2582 scm_flush_all_ports ();
2583}
ef290276 2584
0f2d19dd 2585\f
acb0a19c 2586static int
4c48ba06 2587make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
acb0a19c 2588{
a00c95d9
ML
2589 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2590 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2591 rounded_size,
4c48ba06 2592 freelist))
acb0a19c 2593 {
a00c95d9
ML
2594 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2595 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2596 rounded_size,
4c48ba06 2597 freelist))
acb0a19c
MD
2598 return 1;
2599 }
2600 else
2601 scm_expmem = 1;
2602
b37fe1c5 2603#ifdef GUILE_NEW_GC_SCHEME
8fef55a8
MD
2604 if (freelist->min_yield_fraction)
2605 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
b37fe1c5 2606 / 100);
8fef55a8 2607 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
b37fe1c5 2608#endif
a00c95d9 2609
acb0a19c
MD
2610 return 0;
2611}
2612
2613\f
4a4c9785 2614#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
2615static void
2616init_freelist (scm_freelist_t *freelist,
2617 int span,
2618 int cluster_size,
8fef55a8 2619 int min_yield)
4c48ba06
MD
2620{
2621 freelist->clusters = SCM_EOL;
2622 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2623 freelist->left_to_collect = 0;
2624 freelist->clusters_allocated = 0;
8fef55a8
MD
2625 freelist->min_yield = 0;
2626 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2627 freelist->span = span;
2628 freelist->collected = 0;
1811ebce 2629 freelist->collected_1 = 0;
4c48ba06
MD
2630 freelist->heap_size = 0;
2631}
2632
4a4c9785 2633int
4c48ba06
MD
2634scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2635 scm_sizet init_heap_size_2, int gc_trigger_2,
2636 scm_sizet max_segment_size)
4a4c9785 2637#else
0f2d19dd 2638int
b37fe1c5 2639scm_init_storage (scm_sizet init_heap_size_1, scm_sizet init_heap_size_2)
4a4c9785 2640#endif
0f2d19dd
JB
2641{
2642 scm_sizet j;
2643
4c48ba06
MD
2644 if (!init_heap_size_1)
2645 init_heap_size_1 = SCM_INIT_HEAP_SIZE_1;
2646 if (!init_heap_size_2)
2647 init_heap_size_2 = SCM_INIT_HEAP_SIZE_2;
2648
0f2d19dd
JB
2649 j = SCM_NUM_PROTECTS;
2650 while (j)
2651 scm_sys_protects[--j] = SCM_BOOL_F;
2652 scm_block_gc = 1;
4a4c9785
MD
2653
2654#ifdef GUILE_NEW_GC_SCHEME
2655 scm_freelist = SCM_EOL;
4c48ba06
MD
2656 scm_freelist2 = SCM_EOL;
2657 init_freelist (&scm_master_freelist,
2658 1, SCM_CLUSTER_SIZE_1,
8fef55a8 2659 gc_trigger_1 ? gc_trigger_1 : SCM_MIN_YIELD_1);
4c48ba06
MD
2660 init_freelist (&scm_master_freelist2,
2661 2, SCM_CLUSTER_SIZE_2,
8fef55a8 2662 gc_trigger_2 ? gc_trigger_2 : SCM_MIN_YIELD_2);
4c48ba06
MD
2663 scm_max_segment_size
2664 = max_segment_size ? max_segment_size : SCM_MAX_SEGMENT_SIZE;
4a4c9785 2665#else
945fec60
MD
2666 scm_freelist.cells = SCM_EOL;
2667 scm_freelist.span = 1;
2668 scm_freelist.collected = 0;
2669 scm_freelist.heap_size = 0;
4a4c9785 2670
945fec60
MD
2671 scm_freelist2.cells = SCM_EOL;
2672 scm_freelist2.span = 2;
2673 scm_freelist2.collected = 0;
2674 scm_freelist2.heap_size = 0;
4a4c9785
MD
2675#endif
2676
0f2d19dd
JB
2677 scm_expmem = 0;
2678
2679 j = SCM_HEAP_SEG_SIZE;
2680 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
a00c95d9
ML
2681 scm_heap_table = ((scm_heap_seg_data_t *)
2682 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
acb0a19c 2683
4a4c9785 2684#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
2685 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2686 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
4a4c9785
MD
2687 return 1;
2688#else
4c48ba06
MD
2689 if (make_initial_segment (init_heap_size_1, &scm_freelist) ||
2690 make_initial_segment (init_heap_size_2, &scm_freelist2))
acb0a19c 2691 return 1;
4a4c9785 2692#endif
acb0a19c 2693
a00c95d9 2694 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
acb0a19c 2695
0f2d19dd 2696 /* scm_hplims[0] can change. do not remove scm_heap_org */
ab4bef85 2697 scm_weak_vectors = SCM_EOL;
0f2d19dd
JB
2698
2699 /* Initialise the list of ports. */
840ae05d
JB
2700 scm_port_table = (scm_port **)
2701 malloc (sizeof (scm_port *) * scm_port_table_room);
0f2d19dd
JB
2702 if (!scm_port_table)
2703 return 1;
2704
a18bcd0e 2705#ifdef HAVE_ATEXIT
c45acc34 2706 atexit (cleanup);
e52ceaac
MD
2707#else
2708#ifdef HAVE_ON_EXIT
2709 on_exit (cleanup, 0);
2710#endif
a18bcd0e 2711#endif
0f2d19dd
JB
2712
2713 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
24e68a57 2714 SCM_SETCDR (scm_undefineds, scm_undefineds);
0f2d19dd
JB
2715
2716 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2717 scm_nullstr = scm_makstr (0L, 0);
a8741caa 2718 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
54778cd3
DH
2719 scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2720 scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
2721 scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
8960e0a0 2722 scm_stand_in_procs = SCM_EOL;
0f2d19dd 2723 scm_permobjs = SCM_EOL;
ef290276 2724 scm_protects = SCM_EOL;
3b2b8760 2725 scm_asyncs = SCM_EOL;
54778cd3
DH
2726 scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2727 scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
0f2d19dd
JB
2728#ifdef SCM_BIGDIG
2729 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2730#endif
2731 return 0;
2732}
2733\f
2734
0f2d19dd
JB
2735void
2736scm_init_gc ()
0f2d19dd 2737{
a0599745 2738#include "libguile/gc.x"
0f2d19dd 2739}
89e00824
ML
2740
2741/*
2742 Local Variables:
2743 c-file-style: "gnu"
2744 End:
2745*/