Lots of fixes to make guile (at some time) compile with strict typing.
[bpt/guile.git] / libguile / gc.c
acb0a19c 1/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
a00c95d9 2 *
0f2d19dd
JB
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
a00c95d9 7 *
0f2d19dd
JB
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
a00c95d9 12 *
0f2d19dd
JB
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
82892bed
JB
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
0f2d19dd
JB
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
82892bed 40 * If you do not wish that, delete this exception notice. */
1bbd0b84
GB
41
42/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
37ddcaf6
MD
45/* #define DEBUGINFO */
46
0f2d19dd
JB
47\f
48#include <stdio.h>
49#include "_scm.h"
20e6290e
JB
50#include "stime.h"
51#include "stackchk.h"
52#include "struct.h"
20e6290e 53#include "weaks.h"
0493cd89 54#include "guardians.h"
20e6290e
JB
55#include "smob.h"
56#include "unif.h"
57#include "async.h"
f04d8caf 58#include "ports.h"
ba11fd4c 59#include "root.h"
7ab3fdd5 60#include "strings.h"
003d1fd0 61#include "vectors.h"
0f2d19dd 62
b6791b2e 63#include "validate.h"
fce59c93
JB
64#include "gc.h"
65
0f2d19dd 66#ifdef HAVE_MALLOC_H
95b88819 67#include <malloc.h>
0f2d19dd
JB
68#endif
69
70#ifdef HAVE_UNISTD_H
95b88819 71#include <unistd.h>
0f2d19dd
JB
72#endif
73
1cc91f1b
JB
74#ifdef __STDC__
75#include <stdarg.h>
76#define var_start(x, y) va_start(x, y)
77#else
78#include <varargs.h>
79#define var_start(x, y) va_start(x)
80#endif
81
0f2d19dd
JB
82\f
83/* {heap tuning parameters}
a00c95d9 84 *
0f2d19dd
JB
85 * These are parameters for controlling memory allocation. The heap
86 * is the area out of which cons pairs (via scm_cons) and object headers are allocated.
87 *
88 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
89 * 64 bit machine. The units of the _SIZE parameters are bytes.
90 * Cons pairs and object headers occupy one heap cell.
91 *
92 * SCM_INIT_HEAP_SIZE is the initial size of the heap. If this much heap is
93 * allocated initially, the heap will grow by half its current size
94 * each subsequent time more heap is needed.
95 *
96 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
97 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
98 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
99 * is in scm_init_storage() and alloc_some_heap() in sys.c
a00c95d9 100 *
0f2d19dd
JB
101 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
102 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
103 *
104 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
105 * is needed.
106 *
107 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
a00c95d9 108 * trigger a GC.
6064dcc6
MV
109 *
110 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
111 * reclaimed by a GC triggered by must_malloc. If less than this is
112 * reclaimed, the trigger threshold is raised. [I don't know what a
113 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
a00c95d9 114 * work around an oscillation that caused almost constant GC.]
0f2d19dd
JB
115 */
116
8fef55a8
MD
117/*
118 * A heap size of 45000 cells and a 40% min yield give quick startup and
119 * no extra heap allocation. Higher min yield values may lead to
120 * large heaps, especially if the code's maximum consumption varies
121 * between the different freelists.
122 */
123#define SCM_INIT_HEAP_SIZE_1 (45000L * sizeof (scm_cell))
4c48ba06 124#define SCM_CLUSTER_SIZE_1 2000L
8fef55a8 125#define SCM_MIN_YIELD_1 40
4c48ba06
MD
126
127#define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
128#define SCM_CLUSTER_SIZE_2 1000L
129/* The following value may seem large, but note that if we get to GC at
130 * all, this means that we have a numerically intensive application
131 */
8fef55a8 132#define SCM_MIN_YIELD_2 40
4c48ba06
MD
133
134#define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */
135
945fec60 136#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
0f2d19dd
JB
137#ifdef _QC
138# define SCM_HEAP_SEG_SIZE 32768L
139#else
140# ifdef sequent
4c48ba06 141# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
0f2d19dd 142# else
4c48ba06 143# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
0f2d19dd
JB
144# endif
145#endif
4c48ba06 146/* Make the heap grow by a factor of 1.5 */
4a4c9785 147#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
0f2d19dd 148#define SCM_INIT_MALLOC_LIMIT 100000
6064dcc6 149#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
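
/* Illustrative sketch (not part of the original source): how the growth
 * policy described in the heap tuning comment above plays out.  Since
 * SCM_EXPHEAP (size) is size / 2, each expansion asks for half of the
 * current heap again, i.e. the heap grows by a factor of roughly 1.5.
 * The helper name is hypothetical; the real work is done by
 * alloc_some_heap (), declared further down but not shown in this part
 * of the file.
 */
#if 0
static scm_sizet
example_heap_growth_bytes (scm_sizet current_heap_cells)
{
  /* Grow by half the current heap, expressed in bytes ...              */
  scm_sizet growth = SCM_EXPHEAP (current_heap_cells) * sizeof (scm_cell);
  /* ... but never accept a segment smaller than SCM_MIN_HEAP_SEG_SIZE. */
  if (growth < SCM_MIN_HEAP_SEG_SIZE)
    growth = SCM_MIN_HEAP_SEG_SIZE;
  return growth;
}
#endif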
0f2d19dd
JB
150
151/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell-aligned inner
152 bounds for allocated storage. */
153
154#ifdef PROT386
155/* In 386 protected mode we need only adjust the offset. */
a00c95d9
ML
156# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
157# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
0f2d19dd
JB
158#else
159# ifdef _UNICOS
a00c95d9
ML
160# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
161# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
0f2d19dd 162# else
a00c95d9
ML
163# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
164# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
0f2d19dd
JB
165# endif /* UNICOS */
166#endif /* PROT386 */
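
/* Illustrative sketch (not from the original source): how init_heap_seg,
 * per the comment above, can use CELL_UP and CELL_DN to find the largest
 * run of whole, span-aligned cells strictly inside a raw block of
 * storage.  The helper name and parameters are hypothetical.
 */
#if 0
static scm_sizet
example_usable_cells (char *raw, scm_sizet raw_len, int span)
{
  SCM_CELLPTR lo = CELL_UP (raw, span);            /* round start up    */
  SCM_CELLPTR hi = CELL_DN (raw + raw_len, span);  /* round end down    */
  return hi - lo;                                  /* whole cells left  */
}
#endif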
a00c95d9
ML
167#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
168#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
b37fe1c5
MD
169#ifdef GUILE_NEW_GC_SCHEME
170#define SCM_HEAP_SIZE \
171 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
172#else
173#define SCM_HEAP_SIZE (scm_freelist.heap_size + scm_freelist2.heap_size)
174#endif
1811ebce 175#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
0f2d19dd
JB
176
177
178\f
945fec60 179/* scm_freelists
0f2d19dd 180 */
945fec60 181
a00c95d9
ML
182typedef struct scm_freelist_t {
183 /* collected cells */
184 SCM cells;
185#ifdef GUILE_NEW_GC_SCHEME
186 /* number of cells left to collect before cluster is full */
187 unsigned int left_to_collect;
b37fe1c5
MD
188 /* number of clusters which have been allocated */
189 unsigned int clusters_allocated;
8fef55a8
MD
190 /* a list of freelists, each of size cluster_size,
191 * except the last one which may be shorter
192 */
a00c95d9
ML
193 SCM clusters;
194 SCM *clustertail;
b37fe1c5 195 /* this is the number of objects in each cluster, including the spine cell */
a00c95d9 196 int cluster_size;
8fef55a8 197 /* indicates that we should grow the heap instead of collecting
a00c95d9
ML
198 */
199 int grow_heap_p;
8fef55a8 200 /* minimum yield on this list in order not to grow the heap
a00c95d9 201 */
8fef55a8
MD
202 long min_yield;
203 /* defines min_yield as percent of total heap size
a00c95d9 204 */
8fef55a8 205 int min_yield_fraction;
a00c95d9
ML
206#endif
207 /* number of cells per object on this list */
208 int span;
209 /* number of collected cells during last GC */
1811ebce
MD
210 long collected;
211 /* number of collected cells during penultimate GC */
212 long collected_1;
a00c95d9
ML
213 /* total number of cells in heap segments
214 * belonging to this list.
215 */
1811ebce 216 long heap_size;
a00c95d9
ML
217} scm_freelist_t;
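
/* Illustrative sketch (not part of the original source): how the cluster
 * fields above hang together under GUILE_NEW_GC_SCHEME.  `clusters' is a
 * list whose elements are themselves freelists of free cells, threaded
 * through the cells' CDRs; this mirrors the traversal done by
 * free_list_length () in the debugging code below.  The helper name is
 * hypothetical.
 */
#if 0
static long
example_count_clustered_cells (scm_freelist_t *master)
{
  long n = 0;
  SCM clusters, cell;
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    for (cell = SCM_CAR (clusters); SCM_NNULLP (cell); cell = SCM_CDR (cell))
      n += master->span;     /* each free cell stands for `span' heap cells */
  return n;
}
#endif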
218
4a4c9785
MD
219#ifdef GUILE_NEW_GC_SCHEME
220SCM scm_freelist = SCM_EOL;
221scm_freelist_t scm_master_freelist = {
b37fe1c5 222 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
4a4c9785
MD
223};
224SCM scm_freelist2 = SCM_EOL;
225scm_freelist_t scm_master_freelist2 = {
b37fe1c5 226 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
4a4c9785
MD
227};
228#else
945fec60
MD
229scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
230scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
4a4c9785 231#endif
0f2d19dd
JB
232
233/* scm_mtrigger
234 * is the number of bytes of must_malloc allocation needed to trigger gc.
235 */
15e9d186 236unsigned long scm_mtrigger;
0f2d19dd
JB
237
238
239/* scm_gc_heap_lock
240 * If set, don't expand the heap. Set only during gc, during which no allocation
241 * is supposed to take place anyway.
242 */
243int scm_gc_heap_lock = 0;
244
245/* GC Blocking
246 * Don't pause for collection if this is set -- just
247 * expand the heap.
248 */
249
250int scm_block_gc = 1;
251
252/* If fewer than MIN_GC_YIELD cells are recovered during a garbage
253 * collection (GC) more space is allocated for the heap.
254 */
945fec60 255#define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
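/* For example (illustrative numbers only): with a freelist whose heap
 * segments total 100000 cells, MIN_GC_YIELD is 25000 cells; if a
 * collection recovers fewer than that, scm_gc_for_alloc () below calls
 * alloc_some_heap () to enlarge the heap.  (This macro is only used on
 * the old, non-GUILE_NEW_GC_SCHEME path.)
 */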
0f2d19dd
JB
256
257/* During collection, this accumulates objects holding
258 * weak references.
259 */
ab4bef85 260SCM scm_weak_vectors;
0f2d19dd
JB
261
262/* GC Statistics Keeping
263 */
264unsigned long scm_cells_allocated = 0;
a5c314c8 265long scm_mallocated = 0;
b37fe1c5 266unsigned long scm_gc_cells_collected;
8b0d194f
MD
267#ifdef GUILE_NEW_GC_SCHEME
268unsigned long scm_gc_yield;
37ddcaf6 269static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
8b0d194f 270#endif
0f2d19dd
JB
271unsigned long scm_gc_malloc_collected;
272unsigned long scm_gc_ports_collected;
273unsigned long scm_gc_rt;
274unsigned long scm_gc_time_taken = 0;
275
276SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
277SCM_SYMBOL (sym_heap_size, "cell-heap-size");
278SCM_SYMBOL (sym_mallocated, "bytes-malloced");
279SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
280SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
281SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
282
a00c95d9 283typedef struct scm_heap_seg_data_t
0f2d19dd 284{
cf2d30f6
JB
285 /* lower and upper bounds of the segment */
286 SCM_CELLPTR bounds[2];
287
288 /* address of the head-of-freelist pointer for this segment's cells.
289 All segments usually point to the same one, scm_freelist. */
4c48ba06 290 scm_freelist_t *freelist;
cf2d30f6
JB
291
292 /* number of SCM words per object in this segment */
945fec60 293 int span;
cf2d30f6
JB
294
295 /* If SEG_DATA->valid is non-zero, the conservative marking
296 functions will apply SEG_DATA->valid to the purported pointer and
297 SEG_DATA, and mark the object iff the function returns non-zero.
298 At the moment, I don't think anyone uses this. */
0f2d19dd 299 int (*valid) ();
a00c95d9 300} scm_heap_seg_data_t;
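
/* Illustrative sketch of a `valid' hook (hypothetical, since the comment
 * above notes that nothing currently installs one): scm_mark_locations ()
 * and scm_cellp () would call it as valid (ptr, &scm_heap_table[seg_id]).
 */
#if 0
static int
example_valid_hook (SCM_CELLPTR ptr, scm_heap_seg_data_t *seg)
{
  /* Accept any purported pointer lying inside this segment's bounds. */
  return SCM_PTR_LE (seg->bounds[0], ptr) && SCM_PTR_GT (seg->bounds[1], ptr);
}
#endif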
0f2d19dd
JB
301
302
303
304
945fec60
MD
305static void scm_mark_weak_vector_spines (void);
306static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
307static void alloc_some_heap (scm_freelist_t *);
0f2d19dd
JB
308
309
310\f
cf2d30f6
JB
311/* Debugging functions. */
312
bb2c57fa 313#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
cf2d30f6
JB
314
315/* Return the number of the heap segment containing CELL. */
316static int
317which_seg (SCM cell)
318{
319 int i;
320
321 for (i = 0; i < scm_n_heap_segs; i++)
322 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
323 && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
324 return i;
325 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
945fec60 326 SCM_UNPACK (cell));
cf2d30f6
JB
327 abort ();
328}
329
330
8ded62a3
MD
331#ifdef GUILE_NEW_GC_SCHEME
332static void
333map_free_list (scm_freelist_t *master, SCM freelist)
334{
335 int last_seg = -1, count = 0;
336 SCM f;
a00c95d9 337
8ded62a3
MD
338 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
339 {
340 int this_seg = which_seg (f);
341
342 if (this_seg != last_seg)
343 {
344 if (last_seg != -1)
345 fprintf (stderr, " %5d %d-cells in segment %d\n",
346 count, master->span, last_seg);
347 last_seg = this_seg;
348 count = 0;
349 }
350 count++;
351 }
352 if (last_seg != -1)
353 fprintf (stderr, " %5d %d-cells in segment %d\n",
354 count, master->span, last_seg);
355}
356#else
acb0a19c 357static void
4c48ba06 358map_free_list (scm_freelist_t *freelist)
cf2d30f6
JB
359{
360 int last_seg = -1, count = 0;
361 SCM f;
a00c95d9 362
4c48ba06 363 for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f))
cf2d30f6
JB
364 {
365 int this_seg = which_seg (f);
366
367 if (this_seg != last_seg)
368 {
369 if (last_seg != -1)
acb0a19c 370 fprintf (stderr, " %5d %d-cells in segment %d\n",
4c48ba06 371 count, freelist->span, last_seg);
cf2d30f6
JB
372 last_seg = this_seg;
373 count = 0;
374 }
375 count++;
376 }
377 if (last_seg != -1)
acb0a19c 378 fprintf (stderr, " %5d %d-cells in segment %d\n",
4c48ba06 379 count, freelist->span, last_seg);
acb0a19c 380}
8ded62a3 381#endif
cf2d30f6 382
a00c95d9 383SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
acb0a19c
MD
384 (),
385 "Print debugging information about the free-list.\n"
5384bc5b 386 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
acb0a19c
MD
387#define FUNC_NAME s_scm_map_free_list
388{
4c48ba06
MD
389 int i;
390 fprintf (stderr, "%d segments total (%d:%d",
391 scm_n_heap_segs,
392 scm_heap_table[0].span,
393 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
394 for (i = 1; i < scm_n_heap_segs; i++)
395 fprintf (stderr, ", %d:%d",
396 scm_heap_table[i].span,
397 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
398 fprintf (stderr, ")\n");
4a4c9785 399#ifdef GUILE_NEW_GC_SCHEME
8ded62a3
MD
400 map_free_list (&scm_master_freelist, scm_freelist);
401 map_free_list (&scm_master_freelist2, scm_freelist2);
4a4c9785 402#else
945fec60
MD
403 map_free_list (&scm_freelist);
404 map_free_list (&scm_freelist2);
4a4c9785 405#endif
cf2d30f6
JB
406 fflush (stderr);
407
408 return SCM_UNSPECIFIED;
409}
1bbd0b84 410#undef FUNC_NAME
cf2d30f6 411
a8cffa9f 412#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
413static int last_cluster;
414static int last_size;
415
5384bc5b
MD
416static int
417free_list_length (char *title, int i, SCM freelist)
418{
419 SCM ls;
420 int n = 0;
421 for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
422 if (SCM_UNPACK_CAR (ls) == scm_tc_free_cell)
423 ++n;
424 else
425 {
426 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
427 abort ();
428 }
4c48ba06
MD
429 if (n != last_size)
430 {
431 if (i > 0)
432 {
433 if (last_cluster == i - 1)
434 fprintf (stderr, "\t%d\n", last_size);
435 else
436 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
437 }
438 if (i >= 0)
439 fprintf (stderr, "%s %d", title, i);
440 else
441 fprintf (stderr, "%s\t%d\n", title, n);
442 last_cluster = i;
443 last_size = n;
444 }
5384bc5b
MD
445 return n;
446}
447
448static void
449free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
450{
451 SCM clusters;
4c48ba06 452 int i = 0, len, n = 0;
5384bc5b
MD
453 fprintf (stderr, "%s\n\n", title);
454 n += free_list_length ("free list", -1, freelist);
455 for (clusters = master->clusters;
456 SCM_NNULLP (clusters);
457 clusters = SCM_CDR (clusters))
4c48ba06
MD
458 {
459 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
460 n += len;
461 }
462 if (last_cluster == i - 1)
463 fprintf (stderr, "\t%d\n", last_size);
464 else
465 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
466 fprintf (stderr, "\ntotal %d objects\n\n", n);
5384bc5b
MD
467}
468
a00c95d9 469SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
5384bc5b
MD
470 (),
471 "Print debugging information about the free-list.\n"
472 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
473#define FUNC_NAME s_scm_free_list_length
474{
b37fe1c5
MD
475 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
476 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
12e5fb3b 477 return SCM_UNSPECIFIED;
5384bc5b
MD
478}
479#undef FUNC_NAME
a8cffa9f 480#endif
5384bc5b 481
bb2c57fa
MD
482#endif
483
484#ifdef GUILE_DEBUG_FREELIST
cf2d30f6
JB
485
486/* Number of calls to SCM_NEWCELL since startup. */
487static unsigned long scm_newcell_count;
acb0a19c 488static unsigned long scm_newcell2_count;
cf2d30f6
JB
489
490/* Search freelist for anything that isn't marked as a free cell.
491 Abort if we find something. */
8ded62a3
MD
492#ifdef GUILE_NEW_GC_SCHEME
493static void
494scm_check_freelist (SCM freelist)
495{
496 SCM f;
497 int i = 0;
498
499 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
500 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
501 {
502 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
503 scm_newcell_count, i);
504 fflush (stderr);
505 abort ();
506 }
507}
508#else
cf2d30f6 509static void
4c48ba06 510scm_check_freelist (scm_freelist_t *freelist)
cf2d30f6
JB
511{
512 SCM f;
513 int i = 0;
514
4c48ba06 515 for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
cf2d30f6
JB
516 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
517 {
518 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
519 scm_newcell_count, i);
520 fflush (stderr);
521 abort ();
522 }
523}
8ded62a3 524#endif
cf2d30f6
JB
525
526static int scm_debug_check_freelist = 0;
25748c78 527
a00c95d9 528SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
1bbd0b84 529 (SCM flag),
da4a1dba
GB
530 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
531 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
532 "compile-time flag was selected.\n")
1bbd0b84 533#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
25748c78 534{
945fec60 535 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
25748c78
GB
536 return SCM_UNSPECIFIED;
537}
1bbd0b84 538#undef FUNC_NAME
25748c78
GB
539
540
4a4c9785
MD
541#ifdef GUILE_NEW_GC_SCHEME
542
543SCM
544scm_debug_newcell (void)
545{
546 SCM new;
547
548 scm_newcell_count++;
549 if (scm_debug_check_freelist)
550 {
8ded62a3 551 scm_check_freelist (scm_freelist);
4a4c9785
MD
552 scm_gc();
553 }
554
555 /* The rest of this is supposed to be identical to the SCM_NEWCELL
556 macro. */
557 if (SCM_IMP (scm_freelist))
558 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
559 else
560 {
561 new = scm_freelist;
562 scm_freelist = SCM_CDR (scm_freelist);
563 SCM_SETCAR (new, scm_tc16_allocated);
564 }
565
566 return new;
567}
568
569SCM
570scm_debug_newcell2 (void)
571{
572 SCM new;
573
574 scm_newcell2_count++;
575 if (scm_debug_check_freelist)
576 {
8ded62a3 577 scm_check_freelist (scm_freelist2);
4a4c9785
MD
578 scm_gc ();
579 }
580
581 /* The rest of this is supposed to be identical to the SCM_NEWCELL
582 macro. */
583 if (SCM_IMP (scm_freelist2))
584 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
585 else
586 {
587 new = scm_freelist2;
588 scm_freelist2 = SCM_CDR (scm_freelist2);
589 SCM_SETCAR (new, scm_tc16_allocated);
590 }
591
592 return new;
593}
594
595#else /* GUILE_NEW_GC_SCHEME */
596
f2333166
JB
597SCM
598scm_debug_newcell (void)
cf2d30f6 599{
f2333166
JB
600 SCM new;
601
cf2d30f6 602 scm_newcell_count++;
945fec60
MD
603 if (scm_debug_check_freelist)
604 {
605 scm_check_freelist (&scm_freelist);
606 scm_gc();
607 }
cf2d30f6
JB
608
609 /* The rest of this is supposed to be identical to the SCM_NEWCELL
610 macro. */
945fec60
MD
611 if (SCM_IMP (scm_freelist.cells))
612 new = scm_gc_for_newcell (&scm_freelist);
cf2d30f6
JB
613 else
614 {
945fec60
MD
615 new = scm_freelist.cells;
616 scm_freelist.cells = SCM_CDR (scm_freelist.cells);
d9de3881 617 SCM_SETCAR (new, scm_tc16_allocated);
cf2d30f6
JB
618 ++scm_cells_allocated;
619 }
f2333166
JB
620
621 return new;
cf2d30f6
JB
622}
623
acb0a19c
MD
624SCM
625scm_debug_newcell2 (void)
626{
627 SCM new;
628
629 scm_newcell2_count++;
630 if (scm_debug_check_freelist) {
945fec60 631 scm_check_freelist (&scm_freelist2);
acb0a19c
MD
632 scm_gc();
633 }
634
635 /* The rest of this is supposed to be identical to the SCM_NEWCELL2
636 macro. */
945fec60
MD
637 if (SCM_IMP (scm_freelist2.cells))
638 new = scm_gc_for_newcell (&scm_freelist2);
acb0a19c
MD
639 else
640 {
945fec60
MD
641 new = scm_freelist2.cells;
642 scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
acb0a19c
MD
643 SCM_SETCAR (new, scm_tc16_allocated);
644 scm_cells_allocated += 2;
645 }
646
647 return new;
648}
649
4a4c9785 650#endif /* GUILE_NEW_GC_SCHEME */
fca7547b 651#endif /* GUILE_DEBUG_FREELIST */
cf2d30f6
JB
652
653\f
0f2d19dd 654
b37fe1c5
MD
655#ifdef GUILE_NEW_GC_SCHEME
656static unsigned long
657master_cells_allocated (scm_freelist_t *master)
658{
659 int objects = master->clusters_allocated * (master->cluster_size - 1);
660 if (SCM_NULLP (master->clusters))
661 objects -= master->left_to_collect;
662 return master->span * objects;
663}
664
665static unsigned long
666freelist_length (SCM freelist)
667{
668 int n;
669 for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
670 ++n;
671 return n;
672}
673
674static unsigned long
675compute_cells_allocated ()
676{
677 return (scm_cells_allocated
678 + master_cells_allocated (&scm_master_freelist)
679 + master_cells_allocated (&scm_master_freelist2)
680 - scm_master_freelist.span * freelist_length (scm_freelist)
681 - scm_master_freelist2.span * freelist_length (scm_freelist2));
682}
683#endif
684
0f2d19dd
JB
685/* {Scheme Interface to GC}
686 */
687
a00c95d9 688SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
1bbd0b84 689 (),
b380b885 690 "Returns an association list of statistics about Guile's current use of storage. ")
1bbd0b84 691#define FUNC_NAME s_scm_gc_stats
0f2d19dd
JB
692{
693 int i;
694 int n;
695 SCM heap_segs;
c209c88e
GB
696 long int local_scm_mtrigger;
697 long int local_scm_mallocated;
698 long int local_scm_heap_size;
699 long int local_scm_cells_allocated;
700 long int local_scm_gc_time_taken;
0f2d19dd
JB
701 SCM answer;
702
703 SCM_DEFER_INTS;
704 scm_block_gc = 1;
705 retry:
706 heap_segs = SCM_EOL;
707 n = scm_n_heap_segs;
708 for (i = scm_n_heap_segs; i--; )
709 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
710 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
711 heap_segs);
712 if (scm_n_heap_segs != n)
713 goto retry;
714 scm_block_gc = 0;
715
7febb4a2
MD
716 /* Below, we cons to produce the resulting list. We want a snapshot of
717 * the heap situation before consing.
718 */
0f2d19dd
JB
719 local_scm_mtrigger = scm_mtrigger;
720 local_scm_mallocated = scm_mallocated;
b37fe1c5 721 local_scm_heap_size = SCM_HEAP_SIZE;
4a4c9785 722#ifdef GUILE_NEW_GC_SCHEME
b37fe1c5 723 local_scm_cells_allocated = compute_cells_allocated ();
4a4c9785 724#else
0f2d19dd 725 local_scm_cells_allocated = scm_cells_allocated;
b37fe1c5 726#endif
0f2d19dd
JB
727 local_scm_gc_time_taken = scm_gc_time_taken;
728
729 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
730 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
731 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
732 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
733 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
734 scm_cons (sym_heap_segments, heap_segs),
735 SCM_UNDEFINED);
736 SCM_ALLOW_INTS;
737 return answer;
738}
1bbd0b84 739#undef FUNC_NAME
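
/* Illustrative sketch (not from the original source): reading one entry
 * out of the alist returned above from C, using the sym_heap_size symbol
 * defined earlier in this file.  scm_assq () is assumed to be available
 * here, as it is elsewhere in libguile.
 */
#if 0
{
  SCM stats = scm_gc_stats ();
  /* Look up one statistic, e.g. (cell-heap-size . <number-of-cells>). */
  SCM entry = scm_assq (sym_heap_size, stats);
  SCM heap_cells = SCM_NIMP (entry) ? SCM_CDR (entry) : SCM_BOOL_F;
}
#endif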
0f2d19dd
JB
740
741
a00c95d9 742void
6e8d25a6 743scm_gc_start (const char *what)
0f2d19dd
JB
744{
745 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
b37fe1c5 746 scm_gc_cells_collected = 0;
8b0d194f 747#ifdef GUILE_NEW_GC_SCHEME
37ddcaf6 748 scm_gc_yield_1 = scm_gc_yield;
8b0d194f
MD
749 scm_gc_yield = (scm_cells_allocated
750 + master_cells_allocated (&scm_master_freelist)
751 + master_cells_allocated (&scm_master_freelist2));
752#endif
0f2d19dd
JB
753 scm_gc_malloc_collected = 0;
754 scm_gc_ports_collected = 0;
755}
756
a00c95d9 757void
0f2d19dd 758scm_gc_end ()
0f2d19dd
JB
759{
760 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
c209c88e 761 scm_gc_time_taken += scm_gc_rt;
9ea54cc6 762 scm_system_async_mark (scm_gc_async);
0f2d19dd
JB
763}
764
765
a00c95d9 766SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
1bbd0b84 767 (SCM obj),
b380b885
MD
768 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
769 "returned by this function for @var{obj}")
1bbd0b84 770#define FUNC_NAME s_scm_object_address
0f2d19dd 771{
54778cd3 772 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
0f2d19dd 773}
1bbd0b84 774#undef FUNC_NAME
0f2d19dd
JB
775
776
a00c95d9 777SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
1bbd0b84 778 (),
b380b885
MD
779 "Scans all of SCM objects and reclaims for further use those that are\n"
780 "no longer accessible.")
1bbd0b84 781#define FUNC_NAME s_scm_gc
0f2d19dd
JB
782{
783 SCM_DEFER_INTS;
784 scm_igc ("call");
785 SCM_ALLOW_INTS;
786 return SCM_UNSPECIFIED;
787}
1bbd0b84 788#undef FUNC_NAME
0f2d19dd
JB
789
790
791\f
792/* {C Interface For When GC is Triggered}
793 */
794
4a4c9785
MD
795#ifdef GUILE_NEW_GC_SCHEME
796
b37fe1c5 797static void
8fef55a8 798adjust_min_yield (scm_freelist_t *freelist)
b37fe1c5 799{
8fef55a8 800 /* min yield is adjusted upwards so that the next predicted total yield
bda1446c 801 * (allocated cells actually freed by GC) becomes
8fef55a8
MD
802 * `min_yield_fraction' percent of the total heap size. Note, however, that
803 * the absolute value of min_yield will correspond to `collected'
bda1446c 804 * on one master (the one which currently is triggering GC).
b37fe1c5 805 *
bda1446c
MD
806 * The reason why we look at total yield instead of cells collected
807 * on one list is that we want to take other freelists into account.
808 * On this freelist, we know that (local) yield = collected cells,
809 * but that's probably not the case on the other lists.
b37fe1c5
MD
810 *
811 * (We might consider computing a better prediction, for example
812 * by computing an average over multiple GCs.)
813 */
8fef55a8 814 if (freelist->min_yield_fraction)
b37fe1c5 815 {
37ddcaf6 816 /* Pick largest of last two yields. */
8fef55a8
MD
817 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
818 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
b37fe1c5
MD
819#ifdef DEBUGINFO
820 fprintf (stderr, " after GC = %d, delta = %d\n",
821 scm_cells_allocated,
822 delta);
823#endif
824 if (delta > 0)
8fef55a8 825 freelist->min_yield += delta;
b37fe1c5
MD
826 }
827}
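
/* Worked example of the adjustment above (illustrative numbers only):
 * with min_yield_fraction = 40 and a total heap of 100000 cells, the
 * target total yield is 40000 cells.  If the larger of the last two
 * yields was only 30000 cells, delta is 10000 and min_yield is raised
 * by that much, making it more likely that the next sweep sets
 * grow_heap_p and that scm_gc_for_newcell () below grows the heap
 * instead of collecting again.
 */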
828
4a4c9785 829/* When we get POSIX threads support, the master will be global and
4c48ba06
MD
830 * common to all threads, while the freelists will be private to each thread.
831 */
4a4c9785
MD
832
833SCM
834scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
835{
836 SCM cell;
837 ++scm_ints_disabled;
4c48ba06
MD
838 do
839 {
840 if (SCM_NULLP (master->clusters))
841 {
842 if (master->grow_heap_p)
843 {
844 master->grow_heap_p = 0;
845 alloc_some_heap (master);
846 }
847 else
b37fe1c5 848 {
37ddcaf6
MD
849#ifdef DEBUGINFO
850 fprintf (stderr, "allocated = %d, ",
851 scm_cells_allocated
852 + master_cells_allocated (&scm_master_freelist)
853 + master_cells_allocated (&scm_master_freelist2));
854#endif
b37fe1c5 855 scm_igc ("cells");
8fef55a8 856 adjust_min_yield (master);
b37fe1c5 857 }
4c48ba06
MD
858 }
859 cell = SCM_CAR (master->clusters);
860 master->clusters = SCM_CDR (master->clusters);
b37fe1c5 861 ++master->clusters_allocated;
4c48ba06
MD
862 }
863 while (SCM_NULLP (cell));
4a4c9785 864 --scm_ints_disabled;
4a4c9785 865 *freelist = SCM_CDR (cell);
54778cd3 866 SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
4a4c9785
MD
867 return cell;
868}
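
/* In other words: scm_gc_for_newcell () above detaches the next cluster
 * from the master (collecting or growing the heap first if none is
 * available), returns the cluster's first cell as the fresh allocation
 * (typed scm_tc16_allocated), and installs the remaining cells as the
 * caller's private freelist. */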
869
4c48ba06
MD
870#if 0
871/* This is a support routine which can be used to reserve a cluster
872 * for some special use, such as debugging. It won't be useful until
873 * free cells are preserved between garbage collections.
874 */
875
876void
877scm_alloc_cluster (scm_freelist_t *master)
878{
879 SCM freelist, cell;
880 cell = scm_gc_for_newcell (master, &freelist);
881 SCM_SETCDR (cell, freelist);
882 return cell;
883}
884#endif
885
4a4c9785
MD
886#else /* GUILE_NEW_GC_SCHEME */
887
0f2d19dd 888void
4c48ba06 889scm_gc_for_alloc (scm_freelist_t *freelist)
0f2d19dd
JB
890{
891 SCM_REDEFER_INTS;
892 scm_igc ("cells");
945fec60 893#ifdef GUILE_DEBUG_FREELIST
acb0a19c 894 fprintf (stderr, "Collected: %d, min_yield: %d\n",
4c48ba06 895 freelist->collected, MIN_GC_YIELD (freelist));
acb0a19c 896#endif
4c48ba06
MD
897 if ((freelist->collected < MIN_GC_YIELD (freelist))
898 || SCM_IMP (freelist->cells))
899 alloc_some_heap (freelist);
0f2d19dd
JB
900 SCM_REALLOW_INTS;
901}
902
903
a00c95d9 904SCM
4c48ba06 905scm_gc_for_newcell (scm_freelist_t *freelist)
0f2d19dd
JB
906{
907 SCM fl;
4c48ba06
MD
908 scm_gc_for_alloc (freelist);
909 fl = freelist->cells;
910 freelist->cells = SCM_CDR (fl);
acb0a19c 911 SCM_SETCAR (fl, scm_tc16_allocated);
0f2d19dd
JB
912 return fl;
913}
914
4a4c9785
MD
915#endif /* GUILE_NEW_GC_SCHEME */
916
0f2d19dd 917void
1bbd0b84 918scm_igc (const char *what)
0f2d19dd
JB
919{
920 int j;
921
4c48ba06
MD
922#ifdef DEBUGINFO
923 fprintf (stderr,
924 SCM_NULLP (scm_freelist)
925 ? "*"
926 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
927#endif
42db06f0
MD
928#ifdef USE_THREADS
929 /* During the critical section, only the current thread may run. */
930 SCM_THREAD_CRITICAL_SECTION_START;
931#endif
932
e242dfd2 933 /* fprintf (stderr, "gc: %s\n", what); */
c68296f8 934
ab4bef85
JB
935 scm_gc_start (what);
936
937 if (!scm_stack_base || scm_block_gc)
938 {
939 scm_gc_end ();
940 return;
941 }
942
a5c314c8
JB
943 if (scm_mallocated < 0)
944 /* The byte count of allocated objects has underflowed. This is
945 probably because you forgot to report the sizes of objects you
946 have allocated, by calling scm_done_malloc or some such. When
947 the GC freed them, it subtracted their size from
948 scm_mallocated, which underflowed. */
949 abort ();
c45acc34 950
ab4bef85
JB
951 if (scm_gc_heap_lock)
952 /* We've invoked the collector while a GC is already in progress.
953 That should never happen. */
954 abort ();
0f2d19dd
JB
955
956 ++scm_gc_heap_lock;
ab4bef85
JB
957
958 scm_weak_vectors = SCM_EOL;
0f2d19dd 959
0493cd89
MD
960 scm_guardian_gc_init ();
961
0f2d19dd
JB
962 /* unprotect any struct types with no instances */
963#if 0
964 {
965 SCM type_list;
966 SCM * pos;
967
968 pos = &scm_type_obj_list;
969 type_list = scm_type_obj_list;
970 while (type_list != SCM_EOL)
971 if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
972 {
24e68a57 973 pos = SCM_CDRLOC (type_list);
0f2d19dd
JB
974 type_list = SCM_CDR (type_list);
975 }
976 else
977 {
978 *pos = SCM_CDR (type_list);
979 type_list = SCM_CDR (type_list);
980 }
981 }
982#endif
983
984 /* flush dead entries from the continuation stack */
985 {
986 int x;
987 int bound;
988 SCM * elts;
989 elts = SCM_VELTS (scm_continuation_stack);
990 bound = SCM_LENGTH (scm_continuation_stack);
991 x = SCM_INUM (scm_continuation_stack_ptr);
992 while (x < bound)
993 {
994 elts[x] = SCM_BOOL_F;
995 ++x;
996 }
997 }
998
42db06f0 999#ifndef USE_THREADS
a00c95d9 1000
0f2d19dd
JB
1001 /* Protect from the C stack. This must be the first marking
1002 * done because it provides information about what objects
1003 * are "in-use" by the C code. "in-use" objects are those
1004 * for which the values from SCM_LENGTH and SCM_CHARS must remain
1005 * usable. This requirement is stricter than a liveness
1006 * requirement -- in particular, it constrains the implementation
1007 * of scm_vector_set_length_x.
1008 */
1009 SCM_FLUSH_REGISTER_WINDOWS;
1010 /* This assumes that all registers are saved into the jmp_buf */
1011 setjmp (scm_save_regs_gc_mark);
1012 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
ce4a361d
JB
1013 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
1014 sizeof scm_save_regs_gc_mark)
1015 / sizeof (SCM_STACKITEM)));
0f2d19dd
JB
1016
1017 {
1018 /* stack_len is long rather than scm_sizet in order to guarantee that
1019 &stack_len is long aligned */
1020#ifdef SCM_STACK_GROWS_UP
1021#ifdef nosve
1022 long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
1023#else
1024 long stack_len = scm_stack_size (scm_stack_base);
1025#endif
1026 scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
1027#else
1028#ifdef nosve
1029 long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
1030#else
1031 long stack_len = scm_stack_size (scm_stack_base);
1032#endif
1033 scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
1034#endif
1035 }
1036
42db06f0
MD
1037#else /* USE_THREADS */
1038
1039 /* Mark every thread's stack and registers */
945fec60 1040 scm_threads_mark_stacks ();
42db06f0
MD
1041
1042#endif /* USE_THREADS */
0f2d19dd
JB
1043
1044 /* FIXME: insert a phase to un-protect string-data preserved
1045 * in scm_vector_set_length_x.
1046 */
1047
1048 j = SCM_NUM_PROTECTS;
1049 while (j--)
1050 scm_gc_mark (scm_sys_protects[j]);
1051
9de33deb
MD
1052 /* FIXME: we should have a means to register C functions to be run
1053 * in different phases of GC
a00c95d9 1054 */
9de33deb 1055 scm_mark_subr_table ();
a00c95d9 1056
42db06f0
MD
1057#ifndef USE_THREADS
1058 scm_gc_mark (scm_root->handle);
1059#endif
a00c95d9 1060
0f2d19dd
JB
1061 scm_mark_weak_vector_spines ();
1062
0493cd89
MD
1063 scm_guardian_zombify ();
1064
0f2d19dd
JB
1065 scm_gc_sweep ();
1066
1067 --scm_gc_heap_lock;
1068 scm_gc_end ();
42db06f0
MD
1069
1070#ifdef USE_THREADS
1071 SCM_THREAD_CRITICAL_SECTION_END;
1072#endif
0f2d19dd
JB
1073}
1074
1075\f
a00c95d9 1076/* {Mark/Sweep}
0f2d19dd
JB
1077 */
1078
1079
1080
1081/* Mark an object precisely.
1082 */
a00c95d9 1083void
1bbd0b84 1084scm_gc_mark (SCM p)
0f2d19dd
JB
1085{
1086 register long i;
1087 register SCM ptr;
1088
1089 ptr = p;
1090
1091gc_mark_loop:
1092 if (SCM_IMP (ptr))
1093 return;
1094
1095gc_mark_nimp:
1096 if (SCM_NCELLP (ptr))
f8392303 1097 scm_wta (ptr, "rogue pointer in heap", NULL);
0f2d19dd
JB
1098
1099 switch (SCM_TYP7 (ptr))
1100 {
1101 case scm_tcs_cons_nimcar:
1102 if (SCM_GCMARKP (ptr))
1103 break;
1104 SCM_SETGCMARK (ptr);
1105 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
1106 {
1107 ptr = SCM_CAR (ptr);
1108 goto gc_mark_nimp;
1109 }
1110 scm_gc_mark (SCM_CAR (ptr));
1111 ptr = SCM_GCCDR (ptr);
1112 goto gc_mark_nimp;
1113 case scm_tcs_cons_imcar:
acb0a19c
MD
1114 if (SCM_GCMARKP (ptr))
1115 break;
1116 SCM_SETGCMARK (ptr);
1117 ptr = SCM_GCCDR (ptr);
1118 goto gc_mark_loop;
e641afaf 1119 case scm_tc7_pws:
0f2d19dd
JB
1120 if (SCM_GCMARKP (ptr))
1121 break;
1122 SCM_SETGCMARK (ptr);
54778cd3 1123 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
0f2d19dd
JB
1124 ptr = SCM_GCCDR (ptr);
1125 goto gc_mark_loop;
1126 case scm_tcs_cons_gloc:
1127 if (SCM_GCMARKP (ptr))
1128 break;
1129 SCM_SETGCMARK (ptr);
1130 {
1131 SCM vcell;
77490b49 1132 vcell = (SCM) SCM_STRUCT_VTABLE_DATA (ptr);
f1267706 1133 switch (SCM_UNPACK (SCM_CDR (vcell)))
0f2d19dd
JB
1134 {
1135 default:
1136 scm_gc_mark (vcell);
1137 ptr = SCM_GCCDR (ptr);
1138 goto gc_mark_loop;
1139 case 1: /* ! */
1140 case 0: /* ! */
1141 {
1142 SCM layout;
1143 SCM * vtable_data;
1144 int len;
1145 char * fields_desc;
ad75306c
MD
1146 register SCM * mem;
1147 register int x;
0f2d19dd 1148
77490b49 1149 vtable_data = (SCM *) vcell;
4bfdf158 1150 layout = vtable_data[scm_vtable_index_layout];
0f2d19dd
JB
1151 len = SCM_LENGTH (layout);
1152 fields_desc = SCM_CHARS (layout);
14d1400f
JB
1153 /* We're using SCM_GCCDR here like STRUCT_DATA, except
1154 that it removes the mark */
77490b49 1155 mem = (SCM *) SCM_GCCDR (ptr);
a00c95d9 1156
f1267706 1157 if (SCM_UNPACK (vtable_data[scm_struct_i_flags]) & SCM_STRUCTF_ENTITY)
aa0761ec 1158 {
73e10adf 1159 scm_gc_mark (mem[scm_struct_i_procedure]);
e641afaf 1160 scm_gc_mark (mem[scm_struct_i_setter]);
aa0761ec 1161 }
ad75306c
MD
1162 if (len)
1163 {
1164 for (x = 0; x < len - 2; x += 2, ++mem)
1165 if (fields_desc[x] == 'p')
1166 scm_gc_mark (*mem);
1167 if (fields_desc[x] == 'p')
1168 {
1169 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
77490b49 1170 for (x = (long int) *mem; x; --x)
ad75306c
MD
1171 scm_gc_mark (*++mem);
1172 else
1173 scm_gc_mark (*mem);
1174 }
1175 }
0f2d19dd
JB
1176 if (!SCM_CDR (vcell))
1177 {
1178 SCM_SETGCMARK (vcell);
4bfdf158 1179 ptr = vtable_data[scm_vtable_index_vtable];
0f2d19dd
JB
1180 goto gc_mark_loop;
1181 }
1182 }
1183 }
1184 }
1185 break;
1186 case scm_tcs_closures:
1187 if (SCM_GCMARKP (ptr))
1188 break;
1189 SCM_SETGCMARK (ptr);
1190 if (SCM_IMP (SCM_CDR (ptr)))
1191 {
1192 ptr = SCM_CLOSCAR (ptr);
1193 goto gc_mark_nimp;
1194 }
1195 scm_gc_mark (SCM_CLOSCAR (ptr));
1196 ptr = SCM_GCCDR (ptr);
1197 goto gc_mark_nimp;
1198 case scm_tc7_vector:
1199 case scm_tc7_lvector:
1200#ifdef CCLO
1201 case scm_tc7_cclo:
1202#endif
1203 if (SCM_GC8MARKP (ptr))
1204 break;
1205 SCM_SETGC8MARK (ptr);
1206 i = SCM_LENGTH (ptr);
1207 if (i == 0)
1208 break;
1209 while (--i > 0)
1210 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1211 scm_gc_mark (SCM_VELTS (ptr)[i]);
1212 ptr = SCM_VELTS (ptr)[0];
1213 goto gc_mark_loop;
1214 case scm_tc7_contin:
1215 if SCM_GC8MARKP
1216 (ptr) break;
1217 SCM_SETGC8MARK (ptr);
c68296f8 1218 if (SCM_VELTS (ptr))
41b0806d 1219 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
c68296f8
MV
1220 (scm_sizet)
1221 (SCM_LENGTH (ptr) +
1222 (sizeof (SCM_STACKITEM) + -1 +
1223 sizeof (scm_contregs)) /
1224 sizeof (SCM_STACKITEM)));
0f2d19dd 1225 break;
afe5177e 1226#ifdef HAVE_ARRAYS
0f2d19dd
JB
1227 case scm_tc7_bvect:
1228 case scm_tc7_byvect:
1229 case scm_tc7_ivect:
1230 case scm_tc7_uvect:
1231 case scm_tc7_fvect:
1232 case scm_tc7_dvect:
1233 case scm_tc7_cvect:
1234 case scm_tc7_svect:
5c11cc9d 1235#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1236 case scm_tc7_llvect:
1237#endif
afe5177e 1238#endif
0f2d19dd 1239 case scm_tc7_string:
0f2d19dd
JB
1240 SCM_SETGC8MARK (ptr);
1241 break;
1242
1243 case scm_tc7_substring:
0f2d19dd
JB
1244 if (SCM_GC8MARKP(ptr))
1245 break;
1246 SCM_SETGC8MARK (ptr);
1247 ptr = SCM_CDR (ptr);
1248 goto gc_mark_loop;
1249
1250 case scm_tc7_wvect:
1251 if (SCM_GC8MARKP(ptr))
1252 break;
ab4bef85
JB
1253 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1254 scm_weak_vectors = ptr;
0f2d19dd
JB
1255 SCM_SETGC8MARK (ptr);
1256 if (SCM_IS_WHVEC_ANY (ptr))
1257 {
1258 int x;
1259 int len;
1260 int weak_keys;
1261 int weak_values;
1262
1263 len = SCM_LENGTH (ptr);
1264 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1265 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
a00c95d9 1266
0f2d19dd
JB
1267 for (x = 0; x < len; ++x)
1268 {
1269 SCM alist;
1270 alist = SCM_VELTS (ptr)[x];
46408039
JB
1271
1272 /* mark everything on the alist except the keys or
1273 * values, according to weak_values and weak_keys. */
0b5f3f34 1274 while ( SCM_CONSP (alist)
0f2d19dd 1275 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1276 && SCM_CONSP (SCM_CAR (alist)))
1277 {
1278 SCM kvpair;
1279 SCM next_alist;
1280
1281 kvpair = SCM_CAR (alist);
1282 next_alist = SCM_CDR (alist);
a00c95d9 1283 /*
0f2d19dd
JB
1284 * Do not do this:
1285 * SCM_SETGCMARK (alist);
1286 * SCM_SETGCMARK (kvpair);
1287 *
1288 * It may be that either the key or value is protected by
1289 * an escaped reference to part of the spine of this alist.
1290 * If we mark the spine here, and only mark one or neither of the
1291 * key and value, they may never be properly marked.
1292 * This leads to a horrible situation in which an alist containing
1293 * freelist cells is exported.
1294 *
1295 * So only mark the spines of these arrays last of all marking.
1296 * If somebody confuses us by constructing a weak vector
1297 * with a circular alist then we are hosed, but at least we
1298 * won't prematurely drop table entries.
1299 */
1300 if (!weak_keys)
1301 scm_gc_mark (SCM_CAR (kvpair));
1302 if (!weak_values)
1303 scm_gc_mark (SCM_GCCDR (kvpair));
1304 alist = next_alist;
1305 }
1306 if (SCM_NIMP (alist))
1307 scm_gc_mark (alist);
1308 }
1309 }
1310 break;
1311
1312 case scm_tc7_msymbol:
1313 if (SCM_GC8MARKP(ptr))
1314 break;
1315 SCM_SETGC8MARK (ptr);
1316 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1317 ptr = SCM_SYMBOL_PROPS (ptr);
1318 goto gc_mark_loop;
1319 case scm_tc7_ssymbol:
1320 if (SCM_GC8MARKP(ptr))
1321 break;
1322 SCM_SETGC8MARK (ptr);
1323 break;
1324 case scm_tcs_subrs:
9de33deb 1325 break;
0f2d19dd
JB
1326 case scm_tc7_port:
1327 i = SCM_PTOBNUM (ptr);
1328 if (!(i < scm_numptob))
1329 goto def;
1330 if (SCM_GC8MARKP (ptr))
1331 break;
dc53f026 1332 SCM_SETGC8MARK (ptr);
ebf7394e
GH
1333 if (SCM_PTAB_ENTRY(ptr))
1334 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
dc53f026
JB
1335 if (scm_ptobs[i].mark)
1336 {
1337 ptr = (scm_ptobs[i].mark) (ptr);
1338 goto gc_mark_loop;
1339 }
1340 else
1341 return;
0f2d19dd
JB
1342 break;
1343 case scm_tc7_smob:
1344 if (SCM_GC8MARKP (ptr))
1345 break;
dc53f026 1346 SCM_SETGC8MARK (ptr);
acb0a19c 1347 switch (SCM_GCTYP16 (ptr))
0f2d19dd
JB
1348 { /* should be faster than going through scm_smobs */
1349 case scm_tc_free_cell:
1350 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1bbd0b84 1351 case scm_tc16_allocated:
acb0a19c
MD
1352 case scm_tc16_big:
1353 case scm_tc16_real:
1354 case scm_tc16_complex:
0f2d19dd
JB
1355 break;
1356 default:
1357 i = SCM_SMOBNUM (ptr);
1358 if (!(i < scm_numsmob))
1359 goto def;
dc53f026
JB
1360 if (scm_smobs[i].mark)
1361 {
1362 ptr = (scm_smobs[i].mark) (ptr);
1363 goto gc_mark_loop;
1364 }
1365 else
1366 return;
0f2d19dd
JB
1367 }
1368 break;
1369 default:
1370 def:scm_wta (ptr, "unknown type in ", "gc_mark");
1371 }
1372}
1373
1374
1375/* Mark a Region Conservatively
1376 */
1377
a00c95d9 1378void
6e8d25a6 1379scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
0f2d19dd
JB
1380{
1381 register long m = n;
1382 register int i, j;
1383 register SCM_CELLPTR ptr;
1384
1385 while (0 <= --m)
c67baafd 1386 if (SCM_CELLP (* (SCM *) &x[m]))
0f2d19dd 1387 {
c67baafd 1388 ptr = (SCM_CELLPTR) SCM2PTR (* (SCM *) &x[m]);
0f2d19dd
JB
1389 i = 0;
1390 j = scm_n_heap_segs - 1;
1391 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1392 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1393 {
1394 while (i <= j)
1395 {
1396 int seg_id;
1397 seg_id = -1;
1398 if ( (i == j)
1399 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1400 seg_id = i;
1401 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1402 seg_id = j;
1403 else
1404 {
1405 int k;
1406 k = (i + j) / 2;
1407 if (k == i)
1408 break;
1409 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1410 {
1411 j = k;
1412 ++i;
1413 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1414 continue;
1415 else
1416 break;
1417 }
1418 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1419 {
1420 i = k;
1421 --j;
1422 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1423 continue;
1424 else
1425 break;
1426 }
1427 }
c67baafd 1428 if (!scm_heap_table[seg_id].valid
0f2d19dd
JB
1429 || scm_heap_table[seg_id].valid (ptr,
1430 &scm_heap_table[seg_id]))
c67baafd
MD
1431 if (scm_heap_table[seg_id].span == 1
1432 || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
1433 scm_gc_mark (* (SCM *) &x[m]);
0f2d19dd
JB
1434 break;
1435 }
1436
1437 }
1438 }
1439}
1440
1441
2e11a577
MD
1442/* The following is a C predicate which determines if an SCM value can be
1443 regarded as a pointer to a cell on the heap. The code is duplicated
1444 from scm_mark_locations. */
1445
1cc91f1b 1446
2e11a577 1447int
6e8d25a6 1448scm_cellp (SCM value)
2e11a577
MD
1449{
1450 register int i, j;
1451 register SCM_CELLPTR ptr;
a00c95d9 1452
c67baafd 1453 if (SCM_CELLP (value))
2e11a577 1454 {
c67baafd 1455 ptr = (SCM_CELLPTR) SCM2PTR (value);
2e11a577
MD
1456 i = 0;
1457 j = scm_n_heap_segs - 1;
1458 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1459 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1460 {
1461 while (i <= j)
1462 {
1463 int seg_id;
1464 seg_id = -1;
1465 if ( (i == j)
1466 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1467 seg_id = i;
1468 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1469 seg_id = j;
1470 else
1471 {
1472 int k;
1473 k = (i + j) / 2;
1474 if (k == i)
1475 break;
1476 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1477 {
1478 j = k;
1479 ++i;
1480 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1481 continue;
1482 else
1483 break;
1484 }
1485 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1486 {
1487 i = k;
1488 --j;
1489 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1490 continue;
1491 else
1492 break;
1493 }
1494 }
c67baafd 1495 if (!scm_heap_table[seg_id].valid
2e11a577
MD
1496 || scm_heap_table[seg_id].valid (ptr,
1497 &scm_heap_table[seg_id]))
c67baafd
MD
1498 if (scm_heap_table[seg_id].span == 1
1499 || SCM_DOUBLE_CELLP (value))
1500 scm_gc_mark (value);
2e11a577
MD
1501 break;
1502 }
1503
1504 }
1505 }
1506 return 0;
1507}
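
/* Note: since this body is duplicated from scm_mark_locations () (see the
 * comment above), a matching value is passed to scm_gc_mark () and control
 * always reaches the final `return 0', so the result does not actually
 * report whether VALUE looked like a heap cell. */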
1508
1509
3b2b8760 1510static void
0f2d19dd 1511scm_mark_weak_vector_spines ()
0f2d19dd 1512{
ab4bef85 1513 SCM w;
0f2d19dd 1514
54778cd3 1515 for (w = scm_weak_vectors; !SCM_NULLP (w); w = SCM_WVECT_GC_CHAIN (w))
0f2d19dd 1516 {
ab4bef85 1517 if (SCM_IS_WHVEC_ANY (w))
0f2d19dd
JB
1518 {
1519 SCM *ptr;
1520 SCM obj;
1521 int j;
1522 int n;
1523
ab4bef85
JB
1524 obj = w;
1525 ptr = SCM_VELTS (w);
1526 n = SCM_LENGTH (w);
0f2d19dd
JB
1527 for (j = 0; j < n; ++j)
1528 {
1529 SCM alist;
1530
1531 alist = ptr[j];
0b5f3f34 1532 while ( SCM_CONSP (alist)
a00c95d9 1533 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1534 && SCM_CONSP (SCM_CAR (alist)))
1535 {
1536 SCM_SETGCMARK (alist);
1537 SCM_SETGCMARK (SCM_CAR (alist));
1538 alist = SCM_GCCDR (alist);
1539 }
1540 }
1541 }
1542 }
1543}
1544
1545
4c48ba06
MD
1546#ifdef GUILE_NEW_GC_SCHEME
1547static void
1548gc_sweep_freelist_start (scm_freelist_t *freelist)
1549{
1550 freelist->cells = SCM_EOL;
1551 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1552 freelist->clusters_allocated = 0;
4c48ba06
MD
1553 freelist->clusters = SCM_EOL;
1554 freelist->clustertail = &freelist->clusters;
1811ebce 1555 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1556 freelist->collected = 0;
1557}
1558
1559static void
1560gc_sweep_freelist_finish (scm_freelist_t *freelist)
1561{
1811ebce 1562 int collected;
4c48ba06
MD
1563 *freelist->clustertail = freelist->cells;
1564 if (SCM_NNULLP (freelist->cells))
1565 {
1566 SCM c = freelist->cells;
1567 SCM_SETCAR (c, SCM_CDR (c));
1568 SCM_SETCDR (c, SCM_EOL);
1569 freelist->collected +=
1570 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1571 }
b37fe1c5 1572 scm_gc_cells_collected += freelist->collected;
a00c95d9 1573
8fef55a8 1574 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1575 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1576 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1577 * size. This means that a yield which is too low is compensated for by
1578 * more heap on the list which is currently doing most of the work, which
1579 * is just what we want.
1580 */
1811ebce 1581 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1582 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06
MD
1583}
1584#endif
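
/* For example (illustrative numbers only): if the last two sweeps of a
 * freelist yielded 8000 and 12000 cells while its min_yield is 15000,
 * grow_heap_p is set above, and the next time scm_gc_for_newcell () runs
 * out of clusters it will call alloc_some_heap () rather than trigger
 * another collection. */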
0f2d19dd 1585
a00c95d9 1586void
0f2d19dd 1587scm_gc_sweep ()
0f2d19dd
JB
1588{
1589 register SCM_CELLPTR ptr;
0f2d19dd 1590 register SCM nfreelist;
4c48ba06 1591 register scm_freelist_t *freelist;
0f2d19dd 1592 register long m;
0f2d19dd 1593 register int span;
15e9d186 1594 long i;
0f2d19dd
JB
1595 scm_sizet seg_size;
1596
0f2d19dd 1597 m = 0;
0f2d19dd 1598
4a4c9785 1599#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1600 gc_sweep_freelist_start (&scm_master_freelist);
1601 gc_sweep_freelist_start (&scm_master_freelist2);
4a4c9785 1602#else
cf2d30f6
JB
1603 /* Reset all free list pointers. We'll reconstruct them completely
1604 while scanning. */
1605 for (i = 0; i < scm_n_heap_segs; i++)
4c48ba06 1606 scm_heap_table[i].freelist->cells = SCM_EOL;
4a4c9785 1607#endif
a00c95d9 1608
cf2d30f6 1609 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1610 {
0df07278 1611#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1612 register unsigned int left_to_collect;
1613#else
1614 register scm_sizet n = 0;
0df07278 1615#endif
4c48ba06 1616 register scm_sizet j;
15e9d186 1617
cf2d30f6
JB
1618 /* Unmarked cells go onto the front of the freelist this heap
1619 segment points to. Rather than updating the real freelist
1620 pointer as we go along, we accumulate the new head in
1621 nfreelist. Then, if it turns out that the entire segment is
1622 free, we free (i.e., malloc's free) the whole segment, and
1623 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1624 freelist = scm_heap_table[i].freelist;
1625 nfreelist = freelist->cells;
4a4c9785 1626#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1627 left_to_collect = freelist->left_to_collect;
4a4c9785 1628#endif
945fec60 1629 span = scm_heap_table[i].span;
cf2d30f6 1630
a00c95d9
ML
1631 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1632 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
0f2d19dd
JB
1633 for (j = seg_size + span; j -= span; ptr += span)
1634 {
96f6f4ae
DH
1635 SCM scmptr = PTR2SCM (ptr);
1636
0f2d19dd
JB
1637 switch SCM_TYP7 (scmptr)
1638 {
1639 case scm_tcs_cons_gloc:
1640 if (SCM_GCMARKP (scmptr))
1641 {
54778cd3
DH
1642 if (SCM_CELL_WORD_1 ((SCM) SCM_STRUCT_VTABLE_DATA (scmptr))
1643 == 1)
1644 SCM_SET_CELL_WORD_1 ((SCM) SCM_STRUCT_VTABLE_DATA (scmptr),
1645 0);
0f2d19dd
JB
1646 goto cmrkcontinue;
1647 }
1648 {
1649 SCM vcell;
890b019c 1650 vcell = (SCM) SCM_STRUCT_VTABLE_DATA (scmptr);
0f2d19dd 1651
54778cd3
DH
1652 if ((SCM_CELL_WORD_1 (vcell) == 0)
1653 || (SCM_CELL_WORD_1 (vcell) == 1))
0f2d19dd 1654 {
f0cb1733
MD
1655 scm_struct_free_t free
1656 = (scm_struct_free_t) ((SCM*) vcell)[scm_struct_i_free];
1657 m += free ((SCM *) vcell, (SCM *) SCM_GCCDR (scmptr));
0f2d19dd
JB
1658 }
1659 }
1660 break;
1661 case scm_tcs_cons_imcar:
1662 case scm_tcs_cons_nimcar:
1663 case scm_tcs_closures:
e641afaf 1664 case scm_tc7_pws:
0f2d19dd
JB
1665 if (SCM_GCMARKP (scmptr))
1666 goto cmrkcontinue;
1667 break;
1668 case scm_tc7_wvect:
1669 if (SCM_GC8MARKP (scmptr))
1670 {
1671 goto c8mrkcontinue;
1672 }
1673 else
1674 {
ab4bef85
JB
1675 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1676 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
0f2d19dd
JB
1677 break;
1678 }
1679
1680 case scm_tc7_vector:
1681 case scm_tc7_lvector:
1682#ifdef CCLO
1683 case scm_tc7_cclo:
1684#endif
1685 if (SCM_GC8MARKP (scmptr))
1686 goto c8mrkcontinue;
1687
1688 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1689 freechars:
1690 scm_must_free (SCM_CHARS (scmptr));
1691 /* SCM_SETCHARS(scmptr, 0);*/
1692 break;
afe5177e 1693#ifdef HAVE_ARRAYS
0f2d19dd
JB
1694 case scm_tc7_bvect:
1695 if SCM_GC8MARKP (scmptr)
1696 goto c8mrkcontinue;
1697 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1698 goto freechars;
1699 case scm_tc7_byvect:
1700 if SCM_GC8MARKP (scmptr)
1701 goto c8mrkcontinue;
1702 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1703 goto freechars;
1704 case scm_tc7_ivect:
1705 case scm_tc7_uvect:
1706 if SCM_GC8MARKP (scmptr)
1707 goto c8mrkcontinue;
1708 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1709 goto freechars;
1710 case scm_tc7_svect:
1711 if SCM_GC8MARKP (scmptr)
1712 goto c8mrkcontinue;
1713 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1714 goto freechars;
5c11cc9d 1715#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1716 case scm_tc7_llvect:
1717 if SCM_GC8MARKP (scmptr)
1718 goto c8mrkcontinue;
1719 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1720 goto freechars;
1721#endif
1722 case scm_tc7_fvect:
1723 if SCM_GC8MARKP (scmptr)
1724 goto c8mrkcontinue;
1725 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1726 goto freechars;
1727 case scm_tc7_dvect:
1728 if SCM_GC8MARKP (scmptr)
1729 goto c8mrkcontinue;
1730 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1731 goto freechars;
1732 case scm_tc7_cvect:
1733 if SCM_GC8MARKP (scmptr)
1734 goto c8mrkcontinue;
1735 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1736 goto freechars;
afe5177e 1737#endif
0f2d19dd 1738 case scm_tc7_substring:
0f2d19dd
JB
1739 if (SCM_GC8MARKP (scmptr))
1740 goto c8mrkcontinue;
1741 break;
1742 case scm_tc7_string:
0f2d19dd
JB
1743 if (SCM_GC8MARKP (scmptr))
1744 goto c8mrkcontinue;
1745 m += SCM_HUGE_LENGTH (scmptr) + 1;
1746 goto freechars;
1747 case scm_tc7_msymbol:
1748 if (SCM_GC8MARKP (scmptr))
1749 goto c8mrkcontinue;
1750 m += ( SCM_LENGTH (scmptr)
1751 + 1
1752 + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
1753 scm_must_free ((char *)SCM_SLOTS (scmptr));
1754 break;
1755 case scm_tc7_contin:
1756 if SCM_GC8MARKP (scmptr)
1757 goto c8mrkcontinue;
0db18cf4 1758 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
c68296f8
MV
1759 if (SCM_VELTS (scmptr))
1760 goto freechars;
0f2d19dd
JB
1761 case scm_tc7_ssymbol:
1762 if SCM_GC8MARKP(scmptr)
1763 goto c8mrkcontinue;
1764 break;
1765 case scm_tcs_subrs:
1766 continue;
1767 case scm_tc7_port:
1768 if SCM_GC8MARKP (scmptr)
1769 goto c8mrkcontinue;
1770 if SCM_OPENP (scmptr)
1771 {
1772 int k = SCM_PTOBNUM (scmptr);
1773 if (!(k < scm_numptob))
1774 goto sweeperr;
1775 /* Keep "revealed" ports alive. */
945fec60 1776 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1777 continue;
1778 /* Yes, I really do mean scm_ptobs[k].free */
1779 /* rather than ftobs[k].close. .close */
1780 /* is for explicit CLOSE-PORT by user */
84af0382 1781 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1782 SCM_SETSTREAM (scmptr, 0);
1783 scm_remove_from_port_table (scmptr);
1784 scm_gc_ports_collected++;
24e68a57 1785 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
0f2d19dd
JB
1786 }
1787 break;
1788 case scm_tc7_smob:
1789 switch SCM_GCTYP16 (scmptr)
1790 {
1791 case scm_tc_free_cell:
acb0a19c 1792 case scm_tc16_real:
0f2d19dd
JB
1793 if SCM_GC8MARKP (scmptr)
1794 goto c8mrkcontinue;
1795 break;
1796#ifdef SCM_BIGDIG
acb0a19c 1797 case scm_tc16_big:
0f2d19dd
JB
1798 if SCM_GC8MARKP (scmptr)
1799 goto c8mrkcontinue;
1800 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1801 goto freechars;
1802#endif /* def SCM_BIGDIG */
acb0a19c 1803 case scm_tc16_complex:
0f2d19dd
JB
1804 if SCM_GC8MARKP (scmptr)
1805 goto c8mrkcontinue;
acb0a19c
MD
1806 m += 2 * sizeof (double);
1807 goto freechars;
0f2d19dd
JB
1808 default:
1809 if SCM_GC8MARKP (scmptr)
1810 goto c8mrkcontinue;
1811
1812 {
1813 int k;
1814 k = SCM_SMOBNUM (scmptr);
1815 if (!(k < scm_numsmob))
1816 goto sweeperr;
1817 m += (scm_smobs[k].free) ((SCM) scmptr);
1818 break;
1819 }
1820 }
1821 break;
1822 default:
1823 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1824 }
0f2d19dd
JB
1825#if 0
1826 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1827 exit (2);
1828#endif
4a4c9785
MD
1829#ifndef GUILE_NEW_GC_SCHEME
1830 n += span;
1831#else
4c48ba06 1832 if (!--left_to_collect)
4a4c9785
MD
1833 {
1834 SCM_SETCAR (scmptr, nfreelist);
4c48ba06
MD
1835 *freelist->clustertail = scmptr;
1836 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1837
4a4c9785 1838 nfreelist = SCM_EOL;
4c48ba06
MD
1839 freelist->collected += span * freelist->cluster_size;
1840 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1841 }
1842 else
1843#endif
1844 {
1845 /* Stick the new cell on the front of nfreelist. It's
1846 critical that we mark this cell as freed; otherwise, the
1847 conservative collector might trace it as some other type
1848 of object. */
54778cd3 1849 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
1850 SCM_SETCDR (scmptr, nfreelist);
1851 nfreelist = scmptr;
1852 }
a00c95d9 1853
0f2d19dd
JB
1854 continue;
1855 c8mrkcontinue:
1856 SCM_CLRGC8MARK (scmptr);
1857 continue;
1858 cmrkcontinue:
1859 SCM_CLRGCMARK (scmptr);
1860 }
1861#ifdef GC_FREE_SEGMENTS
1862 if (n == seg_size)
1863 {
15e9d186
JB
1864 register long j;
1865
4c48ba06 1866 freelist->heap_size -= seg_size;
cf2d30f6
JB
1867 free ((char *) scm_heap_table[i].bounds[0]);
1868 scm_heap_table[i].bounds[0] = 0;
1869 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1870 scm_heap_table[j - 1] = scm_heap_table[j];
1871 scm_n_heap_segs -= 1;
cf2d30f6 1872 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1873 }
1874 else
1875#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1876 {
1877 /* Update the real freelist pointer to point to the head of
1878 the list of free cells we've built for this segment. */
4c48ba06 1879 freelist->cells = nfreelist;
4a4c9785 1880#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1881 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1882#endif
1883 }
1884
4c48ba06
MD
1885#ifndef GUILE_NEW_GC_SCHEME
1886 freelist->collected += n;
4a4c9785 1887#endif
0f2d19dd 1888
fca7547b 1889#ifdef GUILE_DEBUG_FREELIST
8ded62a3 1890#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1891 scm_check_freelist (freelist == &scm_master_freelist
8ded62a3
MD
1892 ? scm_freelist
1893 : scm_freelist2);
1894#else
4c48ba06 1895 scm_check_freelist (freelist);
8ded62a3 1896#endif
cf2d30f6
JB
1897 scm_map_free_list ();
1898#endif
4a4c9785 1899 }
a00c95d9 1900
4a4c9785 1901#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1902 gc_sweep_freelist_finish (&scm_master_freelist);
1903 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1904
8ded62a3
MD
1905 /* When we move to POSIX threads private freelists should probably
1906 be GC-protected instead. */
1907 scm_freelist = SCM_EOL;
1908 scm_freelist2 = SCM_EOL;
4a4c9785 1909#endif
a00c95d9 1910
0f2d19dd
JB
1911 /* Scan weak vectors. */
1912 {
ab4bef85 1913 SCM *ptr, w;
54778cd3 1914 for (w = scm_weak_vectors; !SCM_NULLP (w); w = SCM_WVECT_GC_CHAIN (w))
0f2d19dd 1915 {
ab4bef85 1916 if (!SCM_IS_WHVEC_ANY (w))
0f2d19dd 1917 {
15e9d186
JB
1918 register long j, n;
1919
ab4bef85
JB
1920 ptr = SCM_VELTS (w);
1921 n = SCM_LENGTH (w);
0f2d19dd 1922 for (j = 0; j < n; ++j)
0c95b57d 1923 if (SCM_FREEP (ptr[j]))
0f2d19dd
JB
1924 ptr[j] = SCM_BOOL_F;
1925 }
1926 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1927 {
ab4bef85
JB
1928 SCM obj = w;
1929 register long n = SCM_LENGTH (w);
15e9d186
JB
1930 register long j;
1931
ab4bef85 1932 ptr = SCM_VELTS (w);
15e9d186 1933
0f2d19dd
JB
1934 for (j = 0; j < n; ++j)
1935 {
1936 SCM * fixup;
1937 SCM alist;
1938 int weak_keys;
1939 int weak_values;
a00c95d9 1940
0f2d19dd
JB
1941 weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
1942 weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
1943
1944 fixup = ptr + j;
1945 alist = *fixup;
1946
0b5f3f34 1947 while ( SCM_CONSP (alist)
0f2d19dd
JB
1948 && SCM_CONSP (SCM_CAR (alist)))
1949 {
1950 SCM key;
1951 SCM value;
1952
1953 key = SCM_CAAR (alist);
1954 value = SCM_CDAR (alist);
0c95b57d
GB
1955 if ( (weak_keys && SCM_FREEP (key))
1956 || (weak_values && SCM_FREEP (value)))
0f2d19dd
JB
1957 {
1958 *fixup = SCM_CDR (alist);
1959 }
1960 else
24e68a57 1961 fixup = SCM_CDRLOC (alist);
0f2d19dd
JB
1962 alist = SCM_CDR (alist);
1963 }
1964 }
1965 }
1966 }
1967 }
b37fe1c5 1968 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f
MD
1969#ifdef GUILE_NEW_GC_SCHEME
1970 scm_gc_yield -= scm_cells_allocated;
1971#endif
0f2d19dd
JB
1972 scm_mallocated -= m;
1973 scm_gc_malloc_collected = m;
1974}
1975
1976
1977\f
1978
1979/* {Front end to malloc}
1980 *
c68296f8 1981 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
0f2d19dd
JB
1982 *
 1983 * These functions provide services comparable to malloc, realloc, and
 1984 * free.  They are used for allocating the malloced parts of scheme objects.
 1985 * The primary purpose of the front end is to trigger gc when needed.
1986 */
1987
1988/* scm_must_malloc
1989 * Return newly malloced storage or throw an error.
1990 *
1991 * The parameter WHAT is a string for error reporting.
a00c95d9 1992 * If the threshold scm_mtrigger would be exceeded by this
0f2d19dd
JB
1993 * allocation, or if the first call to malloc fails,
1994 * garbage collect -- on the presumption that some objects
1995 * using malloced storage may be collected.
1996 *
1997 * The limit scm_mtrigger may be raised by this allocation.
1998 */
07806695 1999void *
e4ef2330 2000scm_must_malloc (scm_sizet size, const char *what)
0f2d19dd 2001{
07806695 2002 void *ptr;
15e9d186 2003 unsigned long nm = scm_mallocated + size;
e4ef2330
MD
2004
2005 if (nm <= scm_mtrigger)
0f2d19dd 2006 {
07806695 2007 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
2008 if (NULL != ptr)
2009 {
2010 scm_mallocated = nm;
2011 return ptr;
2012 }
2013 }
6064dcc6 2014
0f2d19dd 2015 scm_igc (what);
e4ef2330 2016
0f2d19dd 2017 nm = scm_mallocated + size;
07806695 2018 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
2019 if (NULL != ptr)
2020 {
2021 scm_mallocated = nm;
6064dcc6
MV
2022 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2023 if (nm > scm_mtrigger)
2024 scm_mtrigger = nm + nm / 2;
2025 else
2026 scm_mtrigger += scm_mtrigger / 2;
2027 }
0f2d19dd
JB
2028 return ptr;
2029 }
e4ef2330
MD
2030
2031 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
2032 return 0; /* never reached */
0f2d19dd
JB
2033}
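/* Illustrative example (editor's sketch, not part of the original
 * source; the helper name is hypothetical).  A typical caller obtains
 * the malloced part of an object via scm_must_malloc and passes a
 * string naming itself for error reporting:
 */
#if 0
static char *
make_scratch_buffer (scm_sizet len)
{
  /* Either the first malloc succeeds, or a gc is triggered and a
     second attempt is made; if that also fails, scm_must_malloc calls
     scm_wta and does not return.  scm_mallocated is updated for us. */
  return (char *) scm_must_malloc (len, "make_scratch_buffer");
}
#endif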
2034
2035
2036/* scm_must_realloc
2037 * is similar to scm_must_malloc.
2038 */
07806695
JB
2039void *
2040scm_must_realloc (void *where,
e4ef2330
MD
2041 scm_sizet old_size,
2042 scm_sizet size,
3eeba8d4 2043 const char *what)
0f2d19dd 2044{
07806695 2045 void *ptr;
e4ef2330
MD
2046 scm_sizet nm = scm_mallocated + size - old_size;
2047
2048 if (nm <= scm_mtrigger)
0f2d19dd 2049 {
07806695 2050 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
2051 if (NULL != ptr)
2052 {
2053 scm_mallocated = nm;
2054 return ptr;
2055 }
2056 }
e4ef2330 2057
0f2d19dd 2058 scm_igc (what);
e4ef2330
MD
2059
2060 nm = scm_mallocated + size - old_size;
07806695 2061 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
2062 if (NULL != ptr)
2063 {
2064 scm_mallocated = nm;
6064dcc6
MV
2065 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2066 if (nm > scm_mtrigger)
2067 scm_mtrigger = nm + nm / 2;
2068 else
2069 scm_mtrigger += scm_mtrigger / 2;
2070 }
0f2d19dd
JB
2071 return ptr;
2072 }
e4ef2330
MD
2073
2074 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
2075 return 0; /* never reached */
0f2d19dd
JB
2076}
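/* Illustrative example (editor's sketch; the helper name is
 * hypothetical).  scm_must_realloc needs both the old and the new
 * size so that the scm_mallocated byte count stays accurate:
 */
#if 0
static char *
grow_scratch_buffer (char *buf, scm_sizet old_len, scm_sizet new_len)
{
  return (char *) scm_must_realloc (buf, old_len, new_len,
                                    "grow_scratch_buffer");
}
#endif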
2077
a00c95d9 2078void
07806695 2079scm_must_free (void *obj)
0f2d19dd
JB
2080{
2081 if (obj)
2082 free (obj);
2083 else
2084 scm_wta (SCM_INUM0, "already free", "");
2085}
0f2d19dd 2086
c68296f8
MV
2087/* Announce that there has been some malloc done that will be freed
2088 * during gc. A typical use is for a smob that uses some malloced
 2089 * memory but cannot get it from scm_must_malloc (for whatever
 2090 * reason).  When a new object of this smob type is created you call
2091 * scm_done_malloc with the size of the object. When your smob free
2092 * function is called, be sure to include this size in the return
2093 * value. */
0f2d19dd 2094
c68296f8 2095void
6e8d25a6 2096scm_done_malloc (long size)
c68296f8
MV
2097{
2098 scm_mallocated += size;
2099
2100 if (scm_mallocated > scm_mtrigger)
2101 {
2102 scm_igc ("foreign mallocs");
2103 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2104 {
2105 if (scm_mallocated > scm_mtrigger)
2106 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2107 else
2108 scm_mtrigger += scm_mtrigger / 2;
2109 }
2110 }
2111}
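/* Illustrative sketch (editor's addition; the image smob and its
 * fields are hypothetical, and error checking is elided).  A smob that
 * grabs memory behind scm_must_malloc's back announces it with
 * scm_done_malloc, and its free function reports the same number of
 * bytes back to the sweep:
 */
#if 0
struct image { scm_sizet size; char *pixels; };

static struct image *
make_image (scm_sizet size)
{
  struct image *im
    = (struct image *) scm_must_malloc (sizeof (*im), "make-image");
  im->size = size;
  im->pixels = (char *) malloc (size); /* not routed through scm_must_malloc */
  scm_done_malloc (size);              /* ... so tell the gc about it here   */
  return im;                           /* (attaching IM to a smob is elided) */
}

static scm_sizet
free_image (SCM image_smob)
{
  struct image *im = (struct image *) SCM_CDR (image_smob);
  scm_sizet bytes = sizeof (*im) + im->size;
  free (im->pixels);
  scm_must_free (im);
  return bytes;    /* added to the sweep's malloc accounting (m above) */
}
#endif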
2112
2113
2114\f
0f2d19dd
JB
2115
2116/* {Heap Segments}
2117 *
2118 * Each heap segment is an array of objects of a particular size.
2119 * Every segment has an associated (possibly shared) freelist.
 2120 * A table of segment records is kept that records the upper and
 2121 * lower extents of each segment; this is used during the conservative
 2122 * phase of gc to identify probable gc roots (because they point
c68296f8 2123 * into valid segments at reasonable offsets). */
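/* Editor's sketch (not part of the original source) of the idea the
 * bounds table supports: a word found during the conservative scan is
 * treated as a possible root only if it lies within some recorded
 * segment.  The real predicate used by the marking phase is more
 * careful (alignment, span, valid flag) and lives elsewhere in this
 * file; SCM_PTR_LE/SCM_PTR_GT are assumed to be the pointer-comparison
 * macros from gc.h.
 */
#if 0
static int
points_into_some_segment (SCM_CELLPTR ptr)
{
  int i;
  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
      return 1;
  return 0;
}
#endif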
0f2d19dd
JB
2124
2125/* scm_expmem
2126 * is true if the first segment was smaller than INIT_HEAP_SEG.
2127 * If scm_expmem is set to one, subsequent segment allocations will
2128 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2129 */
2130int scm_expmem = 0;
2131
4c48ba06
MD
2132scm_sizet scm_max_segment_size;
2133
0f2d19dd
JB
2134/* scm_heap_org
2135 * is the lowest base address of any heap segment.
2136 */
2137SCM_CELLPTR scm_heap_org;
2138
a00c95d9 2139scm_heap_seg_data_t * scm_heap_table = 0;
0f2d19dd
JB
2140int scm_n_heap_segs = 0;
2141
0f2d19dd
JB
2142/* init_heap_seg
2143 * initializes a new heap segment and return the number of objects it contains.
2144 *
2145 * The segment origin, segment size in bytes, and the span of objects
2146 * in cells are input parameters. The freelist is both input and output.
2147 *
2148 * This function presume that the scm_heap_table has already been expanded
2149 * to accomodate a new segment record.
2150 */
2151
2152
a00c95d9 2153static scm_sizet
4c48ba06 2154init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
0f2d19dd
JB
2155{
2156 register SCM_CELLPTR ptr;
0f2d19dd 2157 SCM_CELLPTR seg_end;
15e9d186 2158 int new_seg_index;
acb0a19c 2159 int n_new_cells;
4c48ba06 2160 int span = freelist->span;
a00c95d9 2161
0f2d19dd
JB
2162 if (seg_org == NULL)
2163 return 0;
2164
a00c95d9 2165 ptr = CELL_UP (seg_org, span);
acb0a19c 2166
a00c95d9 2167 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 2168 */
a00c95d9 2169 seg_end = CELL_DN ((char *) seg_org + size, span);
0f2d19dd 2170
a00c95d9 2171 /* Find the right place and insert the segment record.
0f2d19dd
JB
2172 *
2173 */
2174 for (new_seg_index = 0;
2175 ( (new_seg_index < scm_n_heap_segs)
2176 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2177 new_seg_index++)
2178 ;
2179
2180 {
2181 int i;
2182 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2183 scm_heap_table[i] = scm_heap_table[i - 1];
2184 }
a00c95d9 2185
0f2d19dd
JB
2186 ++scm_n_heap_segs;
2187
2188 scm_heap_table[new_seg_index].valid = 0;
945fec60 2189 scm_heap_table[new_seg_index].span = span;
4c48ba06 2190 scm_heap_table[new_seg_index].freelist = freelist;
0f2d19dd
JB
2191 scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
2192 scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;
2193
2194
a00c95d9 2195 /* Compute the least valid object pointer w/in this segment
0f2d19dd 2196 */
a00c95d9 2197 ptr = CELL_UP (ptr, span);
0f2d19dd
JB
2198
2199
acb0a19c
MD
2200 /*n_new_cells*/
2201 n_new_cells = seg_end - ptr;
0f2d19dd 2202
4a4c9785
MD
2203#ifdef GUILE_NEW_GC_SCHEME
2204
4c48ba06 2205 freelist->heap_size += n_new_cells;
4a4c9785 2206
a00c95d9 2207 /* Partition objects in this segment into clusters */
4a4c9785
MD
2208 {
2209 SCM clusters;
2210 SCM *clusterp = &clusters;
4c48ba06 2211 int n_cluster_cells = span * freelist->cluster_size;
4a4c9785 2212
4c48ba06 2213 while (n_new_cells > span) /* at least one spine + one freecell */
4a4c9785 2214 {
4c48ba06
MD
2215 /* Determine end of cluster
2216 */
2217 if (n_new_cells >= n_cluster_cells)
2218 {
2219 seg_end = ptr + n_cluster_cells;
2220 n_new_cells -= n_cluster_cells;
2221 }
4a4c9785 2222 else
a00c95d9
ML
2223 /* [cmm] looks like the segment size doesn't divide cleanly by
2224 cluster size. bad cmm! */
2225 abort();
4a4c9785 2226
4c48ba06
MD
2227 /* Allocate cluster spine
2228 */
4a4c9785
MD
2229 *clusterp = PTR2SCM (ptr);
2230 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
2231 clusterp = SCM_CDRLOC (*clusterp);
4a4c9785 2232 ptr += span;
a00c95d9 2233
4a4c9785
MD
2234 while (ptr < seg_end)
2235 {
96f6f4ae
DH
2236 SCM scmptr = PTR2SCM (ptr);
2237
54778cd3 2238 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
4a4c9785
MD
2239 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2240 ptr += span;
2241 }
4c48ba06 2242
4a4c9785
MD
2243 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
2244 }
a00c95d9 2245
4a4c9785
MD
2246 /* Patch up the last cluster pointer in the segment
2247 * to join it to the input freelist.
2248 */
4c48ba06
MD
2249 *clusterp = freelist->clusters;
2250 freelist->clusters = clusters;
4a4c9785
MD
2251 }
2252
2253#else /* GUILE_NEW_GC_SCHEME */
2254
a00c95d9 2255 /* Prepend objects in this segment to the freelist.
0f2d19dd
JB
2256 */
2257 while (ptr < seg_end)
2258 {
96f6f4ae
DH
2259 SCM scmptr = PTR2SCM (ptr);
2260
24e68a57 2261 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
945fec60
MD
2262 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2263 ptr += span;
0f2d19dd
JB
2264 }
2265
945fec60 2266 ptr -= span;
0f2d19dd
JB
2267
2268 /* Patch up the last freelist pointer in the segment
2269 * to join it to the input freelist.
2270 */
4c48ba06 2271 SCM_SETCDR (PTR2SCM (ptr), freelist->cells);
a00c95d9 2272 freelist->cells = PTR2SCM (CELL_UP (seg_org, span));
4c48ba06
MD
2273
2274 freelist->heap_size += n_new_cells;
0f2d19dd 2275
4a4c9785 2276#endif /* GUILE_NEW_GC_SCHEME */
4c48ba06
MD
2277
2278#ifdef DEBUGINFO
2279 fprintf (stderr, "H");
2280#endif
0f2d19dd 2281 return size;
0f2d19dd
JB
2282}
2283
a00c95d9
ML
2284#ifndef GUILE_NEW_GC_SCHEME
2285#define round_to_cluster_size(freelist, len) len
2286#else
2287
2288static scm_sizet
2289round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2290{
2291 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2292
2293 return
2294 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2295 + ALIGNMENT_SLACK (freelist);
2296}
2297
2298#endif
0f2d19dd 2299
a00c95d9 2300static void
4c48ba06 2301alloc_some_heap (scm_freelist_t *freelist)
0f2d19dd 2302{
a00c95d9 2303 scm_heap_seg_data_t * tmptable;
0f2d19dd 2304 SCM_CELLPTR ptr;
b37fe1c5 2305 long len;
a00c95d9 2306
0f2d19dd
JB
2307 /* Critical code sections (such as the garbage collector)
2308 * aren't supposed to add heap segments.
2309 */
2310 if (scm_gc_heap_lock)
2311 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
2312
2313 /* Expand the heap tables to have room for the new segment.
2314 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
2315 * only if the allocation of the segment itself succeeds.
2316 */
a00c95d9 2317 len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
0f2d19dd 2318
a00c95d9 2319 SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
0f2d19dd
JB
2320 realloc ((char *)scm_heap_table, len)));
2321 if (!tmptable)
2322 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
2323 else
2324 scm_heap_table = tmptable;
2325
2326
2327 /* Pick a size for the new heap segment.
a00c95d9 2328 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
2329 * gc.h
2330 */
4c48ba06
MD
2331#ifdef GUILE_NEW_GC_SCHEME
2332 {
1811ebce
MD
2333 /* Assure that the new segment is predicted to be large enough.
2334 *
2335 * New yield should at least equal GC fraction of new heap size, i.e.
2336 *
2337 * y + dh > f * (h + dh)
2338 *
2339 * y : yield
8fef55a8 2340 * f : min yield fraction
1811ebce
MD
2341 * h : heap size
2342 * dh : size of new heap segment
2343 *
2344 * This gives dh > (f * h - y) / (1 - f)
bda1446c 2345 */
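    /* (Editor's note: min_yield_fraction expresses f as a percentage,
       so substituting f/100 for f above gives
       dh > (f * h - 100 * y) / (100 - f); the expression below divides
       by (99 - f) instead, which presumably just rounds the estimate
       up slightly.) */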
8fef55a8 2346 int f = freelist->min_yield_fraction;
1811ebce
MD
2347 long h = SCM_HEAP_SIZE;
2348 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2349 len = SCM_EXPHEAP (freelist->heap_size);
2350#ifdef DEBUGINFO
2351 fprintf (stderr, "(%d < %d)", len, min_cells);
2352#endif
2353 if (len < min_cells)
1811ebce 2354 len = min_cells + freelist->cluster_size;
4c48ba06 2355 len *= sizeof (scm_cell);
1811ebce
MD
2356 /* force new sampling */
2357 freelist->collected = LONG_MAX;
4c48ba06 2358 }
a00c95d9 2359
4c48ba06
MD
2360 if (len > scm_max_segment_size)
2361 len = scm_max_segment_size;
2362#else
0f2d19dd
JB
2363 if (scm_expmem)
2364 {
4c48ba06
MD
2365 len = (scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell));
2366 if ((scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell))
945fec60 2367 != len)
0f2d19dd
JB
2368 len = 0;
2369 }
2370 else
2371 len = SCM_HEAP_SEG_SIZE;
4c48ba06 2372#endif /* GUILE_NEW_GC_SCHEME */
0f2d19dd
JB
2373
2374 {
2375 scm_sizet smallest;
2376
a00c95d9 2377#ifndef GUILE_NEW_GC_SCHEME
4c48ba06 2378 smallest = (freelist->span * sizeof (scm_cell));
a00c95d9
ML
2379#else
2380 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2381#endif
2382
0f2d19dd 2383 if (len < smallest)
a00c95d9 2384 len = smallest;
0f2d19dd
JB
2385
2386 /* Allocate with decaying ambition. */
2387 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2388 && (len >= smallest))
2389 {
1811ebce 2390 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2391 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2392 if (ptr)
2393 {
a00c95d9 2394 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2395 return;
2396 }
2397 len /= 2;
2398 }
2399 }
2400
2401 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
2402}
2403
2404
2405
a00c95d9 2406SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
1bbd0b84 2407 (SCM name),
b380b885 2408 "")
1bbd0b84 2409#define FUNC_NAME s_scm_unhash_name
0f2d19dd
JB
2410{
2411 int x;
2412 int bound;
3b3b36dd 2413 SCM_VALIDATE_SYMBOL (1,name);
0f2d19dd
JB
2414 SCM_DEFER_INTS;
2415 bound = scm_n_heap_segs;
2416 for (x = 0; x < bound; ++x)
2417 {
2418 SCM_CELLPTR p;
2419 SCM_CELLPTR pbound;
2420 p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
2421 pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
2422 while (p < pbound)
2423 {
2424 SCM incar;
2425 incar = p->car;
2426 if (1 == (7 & (int)incar))
2427 {
2428 --incar;
2429 if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
2430 && (SCM_CDR (incar) != 0)
f1267706 2431 && (SCM_UNPACK (SCM_CDR (incar)) != 1))
0f2d19dd
JB
2432 {
2433 p->car = name;
2434 }
2435 }
2436 ++p;
2437 }
2438 }
2439 SCM_ALLOW_INTS;
2440 return name;
2441}
1bbd0b84 2442#undef FUNC_NAME
0f2d19dd
JB
2443
2444
2445\f
2446/* {GC Protection Helper Functions}
2447 */
2448
2449
0f2d19dd 2450void
6e8d25a6
GB
2451scm_remember (SCM *ptr)
2452{ /* empty */ }
0f2d19dd 2453
1cc91f1b 2454
c209c88e 2455/*
41b0806d
GB
 2456 These crazy functions prevent premature garbage
 2457 collection of the arguments after the first argument by
 2458 ensuring they remain live throughout the
 2459 calling function, because they are used in the last
 2460 line of its code block.
2461 It'd be better to have a nice compiler hint to
2462 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2463SCM
2464scm_return_first (SCM elt, ...)
0f2d19dd
JB
2465{
2466 return elt;
2467}
2468
41b0806d
GB
2469int
2470scm_return_first_int (int i, ...)
2471{
2472 return i;
2473}
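/* Illustrative use (editor's sketch; the helper is hypothetical and
 * assumes VEC is non-empty).  Routing the return value through
 * scm_return_first keeps the later arguments visible to the
 * conservative stack scan until the call returns:
 */
#if 0
static SCM
first_element (SCM vec)
{
  SCM elt = SCM_VELTS (vec)[0];
  return scm_return_first (elt, vec);   /* VEC stays live until here */
}
#endif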
2474
0f2d19dd 2475
0f2d19dd 2476SCM
6e8d25a6 2477scm_permanent_object (SCM obj)
0f2d19dd
JB
2478{
2479 SCM_REDEFER_INTS;
2480 scm_permobjs = scm_cons (obj, scm_permobjs);
2481 SCM_REALLOW_INTS;
2482 return obj;
2483}
2484
2485
ef290276
JB
2486/* Protect OBJ from the garbage collector. OBJ will not be freed,
2487 even if all other references are dropped, until someone applies
2488 scm_unprotect_object to it. This function returns OBJ.
2489
c209c88e
GB
2490 Calls to scm_protect_object nest. For every object OBJ, there is a
2491 counter which scm_protect_object(OBJ) increments and
2492 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
dab7f566
JB
2493 an object's counter is greater than zero, the garbage collector
2494 will not free it.
2495
2496 Of course, that's not how it's implemented. scm_protect_object and
2497 scm_unprotect_object just maintain a list of references to things.
2498 Since the GC knows about this list, all objects it mentions stay
2499 alive. scm_protect_object adds its argument to the list;
2500 scm_unprotect_object removes the first occurrence of its argument
2501 to the list. */
ef290276 2502SCM
6e8d25a6 2503scm_protect_object (SCM obj)
ef290276 2504{
ef290276
JB
2505 scm_protects = scm_cons (obj, scm_protects);
2506
2507 return obj;
2508}
2509
2510
2511/* Remove any protection for OBJ established by a prior call to
dab7f566 2512 scm_protect_object. This function returns OBJ.
ef290276 2513
dab7f566 2514 See scm_protect_object for more information. */
ef290276 2515SCM
6e8d25a6 2516scm_unprotect_object (SCM obj)
ef290276 2517{
dab7f566
JB
2518 SCM *tail_ptr = &scm_protects;
2519
0c95b57d 2520 while (SCM_CONSP (*tail_ptr))
dab7f566
JB
2521 if (SCM_CAR (*tail_ptr) == obj)
2522 {
2523 *tail_ptr = SCM_CDR (*tail_ptr);
2524 break;
2525 }
2526 else
2527 tail_ptr = SCM_CDRLOC (*tail_ptr);
ef290276
JB
2528
2529 return obj;
2530}
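/* Illustrative use (editor's sketch; the globals are hypothetical).
 * C code that stores an SCM where the conservative scan cannot see it
 * brackets the object's lifetime with these two calls:
 */
#if 0
static SCM callback_proc;       /* held only from C, e.g. in a library */

static void
set_callback (SCM proc)
{
  callback_proc = scm_protect_object (proc);
}

static void
clear_callback (void)
{
  scm_unprotect_object (callback_proc);
  callback_proc = SCM_BOOL_F;
}
#endif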
2531
c45acc34
JB
2532int terminating;
2533
2534/* called on process termination. */
e52ceaac
MD
2535#ifdef HAVE_ATEXIT
2536static void
2537cleanup (void)
2538#else
2539#ifdef HAVE_ON_EXIT
51157deb
MD
2540extern int on_exit (void (*procp) (), int arg);
2541
e52ceaac
MD
2542static void
2543cleanup (int status, void *arg)
2544#else
2545#error Dont know how to setup a cleanup handler on your system.
2546#endif
2547#endif
c45acc34
JB
2548{
2549 terminating = 1;
2550 scm_flush_all_ports ();
2551}
ef290276 2552
0f2d19dd 2553\f
acb0a19c 2554static int
4c48ba06 2555make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
acb0a19c 2556{
a00c95d9
ML
2557 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2558 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2559 rounded_size,
4c48ba06 2560 freelist))
acb0a19c 2561 {
a00c95d9
ML
2562 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2563 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2564 rounded_size,
4c48ba06 2565 freelist))
acb0a19c
MD
2566 return 1;
2567 }
2568 else
2569 scm_expmem = 1;
2570
b37fe1c5 2571#ifdef GUILE_NEW_GC_SCHEME
8fef55a8
MD
2572 if (freelist->min_yield_fraction)
2573 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
b37fe1c5 2574 / 100);
8fef55a8 2575 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
b37fe1c5 2576#endif
a00c95d9 2577
acb0a19c
MD
2578 return 0;
2579}
2580
2581\f
4a4c9785 2582#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
2583static void
2584init_freelist (scm_freelist_t *freelist,
2585 int span,
2586 int cluster_size,
8fef55a8 2587 int min_yield)
4c48ba06
MD
2588{
2589 freelist->clusters = SCM_EOL;
2590 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2591 freelist->left_to_collect = 0;
2592 freelist->clusters_allocated = 0;
8fef55a8
MD
2593 freelist->min_yield = 0;
2594 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2595 freelist->span = span;
2596 freelist->collected = 0;
1811ebce 2597 freelist->collected_1 = 0;
4c48ba06
MD
2598 freelist->heap_size = 0;
2599}
2600
4a4c9785 2601int
4c48ba06
MD
2602scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2603 scm_sizet init_heap_size_2, int gc_trigger_2,
2604 scm_sizet max_segment_size)
4a4c9785 2605#else
0f2d19dd 2606int
b37fe1c5 2607scm_init_storage (scm_sizet init_heap_size_1, scm_sizet init_heap_size_2)
4a4c9785 2608#endif
0f2d19dd
JB
2609{
2610 scm_sizet j;
2611
4c48ba06
MD
2612 if (!init_heap_size_1)
2613 init_heap_size_1 = SCM_INIT_HEAP_SIZE_1;
2614 if (!init_heap_size_2)
2615 init_heap_size_2 = SCM_INIT_HEAP_SIZE_2;
2616
0f2d19dd
JB
2617 j = SCM_NUM_PROTECTS;
2618 while (j)
2619 scm_sys_protects[--j] = SCM_BOOL_F;
2620 scm_block_gc = 1;
4a4c9785
MD
2621
2622#ifdef GUILE_NEW_GC_SCHEME
2623 scm_freelist = SCM_EOL;
4c48ba06
MD
2624 scm_freelist2 = SCM_EOL;
2625 init_freelist (&scm_master_freelist,
2626 1, SCM_CLUSTER_SIZE_1,
8fef55a8 2627 gc_trigger_1 ? gc_trigger_1 : SCM_MIN_YIELD_1);
4c48ba06
MD
2628 init_freelist (&scm_master_freelist2,
2629 2, SCM_CLUSTER_SIZE_2,
8fef55a8 2630 gc_trigger_2 ? gc_trigger_2 : SCM_MIN_YIELD_2);
4c48ba06
MD
2631 scm_max_segment_size
2632 = max_segment_size ? max_segment_size : SCM_MAX_SEGMENT_SIZE;
4a4c9785 2633#else
945fec60
MD
2634 scm_freelist.cells = SCM_EOL;
2635 scm_freelist.span = 1;
2636 scm_freelist.collected = 0;
2637 scm_freelist.heap_size = 0;
4a4c9785 2638
945fec60
MD
2639 scm_freelist2.cells = SCM_EOL;
2640 scm_freelist2.span = 2;
2641 scm_freelist2.collected = 0;
2642 scm_freelist2.heap_size = 0;
4a4c9785
MD
2643#endif
2644
0f2d19dd
JB
2645 scm_expmem = 0;
2646
2647 j = SCM_HEAP_SEG_SIZE;
2648 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
a00c95d9
ML
2649 scm_heap_table = ((scm_heap_seg_data_t *)
2650 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
acb0a19c 2651
4a4c9785 2652#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
2653 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2654 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
4a4c9785
MD
2655 return 1;
2656#else
4c48ba06
MD
2657 if (make_initial_segment (init_heap_size_1, &scm_freelist) ||
2658 make_initial_segment (init_heap_size_2, &scm_freelist2))
acb0a19c 2659 return 1;
4a4c9785 2660#endif
acb0a19c 2661
a00c95d9 2662 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
acb0a19c 2663
0f2d19dd 2664 /* scm_hplims[0] can change. do not remove scm_heap_org */
ab4bef85 2665 scm_weak_vectors = SCM_EOL;
0f2d19dd
JB
2666
2667 /* Initialise the list of ports. */
840ae05d
JB
2668 scm_port_table = (scm_port **)
2669 malloc (sizeof (scm_port *) * scm_port_table_room);
0f2d19dd
JB
2670 if (!scm_port_table)
2671 return 1;
2672
a18bcd0e 2673#ifdef HAVE_ATEXIT
c45acc34 2674 atexit (cleanup);
e52ceaac
MD
2675#else
2676#ifdef HAVE_ON_EXIT
2677 on_exit (cleanup, 0);
2678#endif
a18bcd0e 2679#endif
0f2d19dd
JB
2680
2681 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
24e68a57 2682 SCM_SETCDR (scm_undefineds, scm_undefineds);
0f2d19dd
JB
2683
2684 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2685 scm_nullstr = scm_makstr (0L, 0);
a8741caa 2686 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
54778cd3
DH
2687 scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2688 scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
2689 scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
8960e0a0 2690 scm_stand_in_procs = SCM_EOL;
0f2d19dd 2691 scm_permobjs = SCM_EOL;
ef290276 2692 scm_protects = SCM_EOL;
3b2b8760 2693 scm_asyncs = SCM_EOL;
54778cd3
DH
2694 scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2695 scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
0f2d19dd
JB
2696#ifdef SCM_BIGDIG
2697 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2698#endif
2699 return 0;
2700}
2701\f
2702
0f2d19dd
JB
2703void
2704scm_init_gc ()
0f2d19dd
JB
2705{
2706#include "gc.x"
2707}
89e00824
ML
2708
2709/*
2710 Local Variables:
2711 c-file-style: "gnu"
2712 End:
2713*/