Fix: Don't cast SCM values to pointers; use SCM2PTR instead.
libguile/gc.c
/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING. If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE. If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way. To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice. */

/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
   gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */

/* #define DEBUGINFO */

\f
#include <stdio.h>
#include "_scm.h"
#include "stime.h"
#include "stackchk.h"
#include "struct.h"
#include "weaks.h"
#include "guardians.h"
#include "smob.h"
#include "unif.h"
#include "async.h"
#include "ports.h"
#include "root.h"
#include "strings.h"
#include "vectors.h"

#include "validate.h"
#include "gc.h"

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f
/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation. The heap
 * is the area out of which cons pairs and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine. The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
 * allocated initially the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more heap
 * is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc. If less than this is
 * reclaimed, the trigger threshold is raised. [I don't know what a
 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * Heap size 45000 and 40% min yield gives quick startup and no extra
 * heap allocation. Having higher values on min yield may lead to
 * large heaps, especially if code behaviour is varying its
 * maximum consumption between different freelists.
 */
#define SCM_INIT_HEAP_SIZE_1 (45000L * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_1 2000L
#define SCM_MIN_YIELD_1 40

#define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_2 1000L
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application.
 */
#define SCM_MIN_YIELD_2 40

#define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)

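/* Illustrative sketch of the growth policy encoded by the macros above,
 * for exposition only: the helper names below are hypothetical and the
 * whole block is kept inside "#if 0" so it is never compiled.  SCM_EXPHEAP
 * returns half of the current heap size, so each expansion multiplies the
 * heap by roughly 1.5 (e.g. 90000 units grow by 45000 to 135000), and the
 * malloc trigger starts at SCM_INIT_MALLOC_LIMIT with a hysteresis of
 * SCM_MTRIGGER_HYSTERESIS bytes per collection. */
#if 0
static scm_sizet
example_next_heap_size (scm_sizet current_heap_size)
{
  /* grow by half the current size, i.e. by a factor of 1.5 */
  return current_heap_size + SCM_EXPHEAP (current_heap_size);
}

static unsigned long
example_initial_mtrigger (void)
{
  /* must_malloc bookkeeping first triggers GC after this many bytes;
     a GC must reclaim at least SCM_MTRIGGER_HYSTERESIS bytes or the
     trigger threshold is raised. */
  return SCM_INIT_MALLOC_LIMIT;
}
#endif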
/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
   bounds for allocated storage */

#ifdef PROT386
/* in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
#ifdef GUILE_NEW_GC_SCHEME
#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#else
#define SCM_HEAP_SIZE (scm_freelist.heap_size + scm_freelist2.heap_size)
#endif
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))


\f
/* scm_freelists
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
#ifdef GUILE_NEW_GC_SCHEME
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow the heap instead of GC'ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
#endif
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;

#ifdef GUILE_NEW_GC_SCHEME
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
#else
scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
#endif

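/* Illustrative sketch of how the cluster lists above hang together, for
 * exposition only: each element of `clusters' is itself a freelist of free
 * cells chained through their CDRs with CAR = scm_tc_free_cell, which is the
 * same walk free_list_length() performs further down in this file.  The
 * helper name is hypothetical and the block is kept inside "#if 0" so it is
 * never compiled. */
#if 0
static int
example_count_cluster_cells (scm_freelist_t *master)
{
  SCM clusters, cell;
  int n = 0;
  for (clusters = master->clusters; SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    /* walk one cluster's chain of free cells */
    for (cell = SCM_CAR (clusters); SCM_NNULLP (cell); cell = SCM_CDR (cell))
      ++n;
  return n;
}
#endif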
/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;


/* scm_gc_heap_lock
 * If set, don't expand the heap. Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */

int scm_block_gc = 1;

/* If fewer than MIN_GC_YIELD cells are recovered during a garbage
 * collection (GC) more space is allocated for the heap.
 */
#define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
#ifdef GUILE_NEW_GC_SCHEME
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
#endif
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_rt;
unsigned long scm_gc_time_taken = 0;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");

typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist. */
  scm_freelist_t *freelist;

  /* number of SCM words per object in this segment */
  int span;

  /* If SEG_DATA->valid is non-zero, the conservative marking
     functions will apply SEG_DATA->valid to the purported pointer and
     SEG_DATA, and mark the object iff the function returns non-zero.
     At the moment, I don't think anyone uses this. */
  int (*valid) ();
} scm_heap_seg_data_t;




static void scm_mark_weak_vector_spines (void);
static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
static void alloc_some_heap (scm_freelist_t *);


\f
/* Debugging functions. */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

/* Return the number of the heap segment containing CELL. */
static int
which_seg (SCM cell)
{
  int i;

  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
      return i;
  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
           SCM_UNPACK (cell));
  abort ();
}


#ifdef GUILE_NEW_GC_SCHEME
static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, " %5d %d-cells in segment %d\n",
                     count, master->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, " %5d %d-cells in segment %d\n",
             count, master->span, last_seg);
}
#else
static void
map_free_list (scm_freelist_t *freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, " %5d %d-cells in segment %d\n",
                     count, freelist->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, " %5d %d-cells in segment %d\n",
             count, freelist->span, last_seg);
}
#endif

SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;
  fprintf (stderr, "%d segments total (%d:%d",
           scm_n_heap_segs,
           scm_heap_table[0].span,
           scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
             scm_heap_table[i].span,
             scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
#ifdef GUILE_NEW_GC_SCHEME
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
#else
  map_free_list (&scm_freelist);
  map_free_list (&scm_freelist2);
#endif
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#ifdef GUILE_NEW_GC_SCHEME
static int last_cluster;
static int last_size;

static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
    if (SCM_UNPACK_CAR (ls) == scm_tc_free_cell)
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %d\n", title, n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%d\n", last_size);
          else
            fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %d", title, i);
      else
        fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}

static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}

SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
#endif

#endif

#ifdef GUILE_DEBUG_FREELIST

/* Number of calls to SCM_NEWCELL since startup. */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something. */
#ifdef GUILE_NEW_GC_SCHEME
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
    if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        fflush (stderr);
        abort ();
      }
}
#else
static void
scm_check_freelist (scm_freelist_t *freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
    if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        fflush (stderr);
        abort ();
      }
}
#endif

static int scm_debug_check_freelist = 0;

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
            "This procedure only exists because the GUILE_DEBUG_FREELIST\n"
            "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


#ifdef GUILE_NEW_GC_SCHEME

SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro. */
  if (SCM_IMP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_CDR (scm_freelist);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro. */
  if (SCM_IMP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_CDR (scm_freelist2);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}

#else /* GUILE_NEW_GC_SCHEME */

SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (&scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro. */
  if (SCM_IMP (scm_freelist.cells))
    new = scm_gc_for_newcell (&scm_freelist);
  else
    {
      new = scm_freelist.cells;
      scm_freelist.cells = SCM_CDR (scm_freelist.cells);
      SCM_SETCAR (new, scm_tc16_allocated);
      ++scm_cells_allocated;
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist) {
    scm_check_freelist (&scm_freelist2);
    scm_gc();
  }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL2
     macro. */
  if (SCM_IMP (scm_freelist2.cells))
    new = scm_gc_for_newcell (&scm_freelist2);
  else
    {
      new = scm_freelist2.cells;
      scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
      SCM_SETCAR (new, scm_tc16_allocated);
      scm_cells_allocated += 2;
    }

  return new;
}

#endif /* GUILE_NEW_GC_SCHEME */
#endif /* GUILE_DEBUG_FREELIST */

\f

#ifdef GUILE_NEW_GC_SCHEME
static unsigned long
master_cells_allocated (scm_freelist_t *master)
{
  int objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}

static unsigned long
freelist_length (SCM freelist)
{
  int n;
  for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
    ++n;
  return n;
}

static unsigned long
compute_cells_allocated ()
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}
#endif

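/* Illustrative usage sketch, for exposition only: the accounting above can be
 * read as "cells handed out by the masters, minus cells still sitting on the
 * per-interpreter freelists".  The helper name below is hypothetical and the
 * block is kept inside "#if 0" so it is never compiled. */
#if 0
static void
example_report_cells_allocated (void)
{
#ifdef GUILE_NEW_GC_SCHEME
  fprintf (stderr, "live cells (approx.): %lu\n", compute_cells_allocated ());
#else
  fprintf (stderr, "live cells: %lu\n", scm_cells_allocated);
#endif
}
#endif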
/* {Scheme Interface to GC}
 */

SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Returns an association list of statistics about Guile's current use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  SCM answer;

  SCM_DEFER_INTS;
  scm_block_gc = 1;
 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;
  scm_block_gc = 0;

  /* Below, we cons to produce the resulting list. We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
#ifdef GUILE_NEW_GC_SCHEME
  local_scm_cells_allocated = compute_cells_allocated ();
#else
  local_scm_cells_allocated = scm_cells_allocated;
#endif
  local_scm_gc_time_taken = scm_gc_time_taken;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                        scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                        scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                        scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                        scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                        scm_cons (sym_heap_segments, heap_segs),
                        SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME


void
scm_gc_start (const char *what)
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
  scm_gc_cells_collected = 0;
#ifdef GUILE_NEW_GC_SCHEME
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
#endif
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}

void
scm_gc_end ()
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
  scm_gc_time_taken += scm_gc_rt;
  scm_system_async_mark (scm_gc_async);
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) obj);
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f
/* {C Interface For When GC is Triggered}
 */

#ifdef GUILE_NEW_GC_SCHEME

static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size. Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GC:s.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
               scm_cells_allocated,
               delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}

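/* Illustrative worked example of the arithmetic above, for exposition only:
 * with a total heap of 100000 cells and min_yield_fraction = 40, the target
 * yield is 40000 cells; if the better of the last two total yields was only
 * 30000 cells, delta is 10000 and min_yield is raised by that amount, which
 * in turn makes gc_sweep_freelist_finish() more likely to set grow_heap_p.
 * The helper below is hypothetical and kept inside "#if 0" so it is never
 * compiled. */
#if 0
static long
example_min_yield_delta (long heap_size, int min_yield_fraction,
                         long best_recent_yield)
{
  /* mirrors the delta computed in adjust_min_yield() */
  return heap_size * min_yield_fraction / 100 - best_recent_yield;
}
#endif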
/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p)
            {
              master->grow_heap_p = 0;
              alloc_some_heap (master);
            }
          else
            {
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %d, ",
                       scm_cells_allocated
                       + master_cells_allocated (&scm_master_freelist)
                       + master_cells_allocated (&scm_master_freelist2));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
            }
        }
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));
  --scm_ints_disabled;
  *freelist = SCM_CDR (cell);
  SCM_SETCAR (cell, scm_tc16_allocated);
  return cell;
}

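/* Illustrative sketch, for exposition only, of how a caller typically falls
 * back to scm_gc_for_newcell() when the interpreter-local freelist is empty.
 * It mirrors the SCM_NEWCELL expansion used by scm_debug_newcell() earlier in
 * this file; the helper name is hypothetical and the block is kept inside
 * "#if 0" so it is never compiled. */
#if 0
static SCM
example_fresh_cell (void)
{
  SCM cell;
  if (SCM_IMP (scm_freelist))
    /* local freelist exhausted: pull a whole cluster from the master */
    cell = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      cell = scm_freelist;
      scm_freelist = SCM_CDR (scm_freelist);
      SCM_SETCAR (cell, scm_tc16_allocated);
    }
  return cell;
}
#endif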
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging. It won't be useful until
 * free cells are preserved between garbage collections.
 */

void
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif

#else /* GUILE_NEW_GC_SCHEME */

void
scm_gc_for_alloc (scm_freelist_t *freelist)
{
  SCM_REDEFER_INTS;
  scm_igc ("cells");
#ifdef GUILE_DEBUG_FREELIST
  fprintf (stderr, "Collected: %d, min_yield: %d\n",
           freelist->collected, MIN_GC_YIELD (freelist));
#endif
  if ((freelist->collected < MIN_GC_YIELD (freelist))
      || SCM_IMP (freelist->cells))
    alloc_some_heap (freelist);
  SCM_REALLOW_INTS;
}


SCM
scm_gc_for_newcell (scm_freelist_t *freelist)
{
  SCM fl;
  scm_gc_for_alloc (freelist);
  fl = freelist->cells;
  freelist->cells = SCM_CDR (fl);
  SCM_SETCAR (fl, scm_tc16_allocated);
  return fl;
}

#endif /* GUILE_NEW_GC_SCHEME */

void
scm_igc (const char *what)
{
  int j;

#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
#ifdef USE_THREADS
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  scm_gc_start (what);

  if (!scm_stack_base || scm_block_gc)
    {
      scm_gc_end ();
      return;
    }

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed. This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such. When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed. */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen. */
    abort ();

  ++scm_gc_heap_lock;

  scm_weak_vectors = SCM_EOL;

  scm_guardian_gc_init ();

  /* unprotect any struct types with no instances */
#if 0
  {
    SCM type_list;
    SCM * pos;

    pos = &scm_type_obj_list;
    type_list = scm_type_obj_list;
    while (type_list != SCM_EOL)
      if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
        {
          pos = SCM_CDRLOC (type_list);
          type_list = SCM_CDR (type_list);
        }
      else
        {
          *pos = SCM_CDR (type_list);
          type_list = SCM_CDR (type_list);
        }
  }
#endif

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

#ifndef USE_THREADS

  /* Protect from the C stack. This must be the first marking
   * done because it provides information about what objects
   * are "in-use" by the C code. "in-use" objects are those
   * for which the values from SCM_LENGTH and SCM_CHARS must remain
   * usable. This requirement is stricter than a liveness
   * requirement -- in particular, it constrains the implementation
   * of scm_vector_set_length_x.
   */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
                                     sizeof scm_save_regs_gc_mark)
                        / sizeof (SCM_STACKITEM)));

  {
    /* stack_len is long rather than scm_sizet in order to guarantee that
       &stack_len is long aligned */
#ifdef SCM_STACK_GROWS_UP
#ifdef nosve
    long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
#else
    long stack_len = scm_stack_size (scm_stack_base);
#endif
    scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
#else
#ifdef nosve
    long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
#else
    long stack_len = scm_stack_size (scm_stack_base);
#endif
    scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* FIXME: insert a phase to un-protect string-data preserved
   * in scm_vector_set_length_x.
   */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  scm_mark_weak_vector_spines ();

  scm_guardian_zombify ();

  scm_gc_sweep ();

  --scm_gc_heap_lock;
  scm_gc_end ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
}

\f
/* {Mark/Sweep}
 */



/* Mark an object precisely.
 */
void
scm_gc_mark (SCM p)
{
  register long i;
  register SCM ptr;

  ptr = p;

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:
  if (SCM_NCELLP (ptr))
    scm_wta (ptr, "rogue pointer in heap", NULL);

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
        {
          ptr = SCM_CAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      scm_gc_mark (SCM_CELL_WORD (ptr, 2));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      {
        SCM vcell;
        vcell = SCM_CAR (ptr) - 1L;
        switch (SCM_UNPACK (SCM_CDR (vcell)))
          {
          default:
            scm_gc_mark (vcell);
            ptr = SCM_GCCDR (ptr);
            goto gc_mark_loop;
          case 1: /* ! */
          case 0: /* ! */
            {
              SCM layout;
              SCM * vtable_data;
              int len;
              char * fields_desc;
              register SCM * mem;
              register int x;

              vtable_data = (SCM *)vcell;
              layout = vtable_data[scm_vtable_index_layout];
              len = SCM_LENGTH (layout);
              fields_desc = SCM_CHARS (layout);
              /* We're using SCM_GCCDR here like STRUCT_DATA, except
                 that it removes the mark */
              mem = (SCM *)SCM_GCCDR (ptr);

              if (SCM_UNPACK (vtable_data[scm_struct_i_flags]) & SCM_STRUCTF_ENTITY)
                {
                  scm_gc_mark (mem[scm_struct_i_procedure]);
                  scm_gc_mark (mem[scm_struct_i_setter]);
                }
              if (len)
                {
                  for (x = 0; x < len - 2; x += 2, ++mem)
                    if (fields_desc[x] == 'p')
                      scm_gc_mark (*mem);
                  if (fields_desc[x] == 'p')
                    {
                      int j;
                      if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                        for (j = (long int) *mem; x; --x)
                          scm_gc_mark (*++mem);
                      else
                        scm_gc_mark (*mem);
                    }
                }
              if (!SCM_CDR (vcell))
                {
                  SCM_SETGCMARK (vcell);
                  ptr = vtable_data[scm_vtable_index_vtable];
                  goto gc_mark_loop;
                }
            }
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CLOSCAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
    case scm_tc7_lvector:
#ifdef CCLO
    case scm_tc7_cclo:
#endif
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      i = SCM_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          scm_gc_mark (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
    case scm_tc7_contin:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      if (SCM_VELTS (ptr))
        scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
                            (scm_sizet)
                            (SCM_LENGTH (ptr) +
                             (sizeof (SCM_STACKITEM) + -1 +
                              sizeof (scm_contregs)) /
                             sizeof (SCM_STACKITEM)));
      break;
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      SCM_SETGC8MARK (ptr);
      break;

    case scm_tc7_substring:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      SCM_SETGC8MARK (ptr);
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys. */
              while (SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    scm_gc_mark (SCM_CAR (kvpair));
                  if (!weak_values)
                    scm_gc_mark (SCM_GCCDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                scm_gc_mark (alist);
            }
        }
      break;

    case scm_tc7_msymbol:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
      ptr = SCM_SYMBOL_PROPS (ptr);
      goto gc_mark_loop;
    case scm_tc7_ssymbol:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      break;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
        goto def;
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      if (SCM_PTAB_ENTRY (ptr))
        scm_gc_mark (SCM_PTAB_ENTRY (ptr)->file_name);
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      switch (SCM_GCTYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_allocated:
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
          if (!(i < scm_numsmob))
            goto def;
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
    def:
      scm_wta (ptr, "unknown type in ", "gc_mark");
    }
}


/* Mark a Region Conservatively
 */

void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  register long m = n;
  register int i, j;
  register SCM_CELLPTR ptr;

  while (0 <= --m)
    if (SCM_CELLP (*(SCM **) (& x[m])))
      {
        ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
        i = 0;
        j = scm_n_heap_segs - 1;
        if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
            && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
          {
            while (i <= j)
              {
                int seg_id;
                seg_id = -1;
                if ((i == j)
                    || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                  seg_id = i;
                else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                  seg_id = j;
                else
                  {
                    int k;
                    k = (i + j) / 2;
                    if (k == i)
                      break;
                    if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                      {
                        j = k;
                        ++i;
                        if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                          continue;
                        else
                          break;
                      }
                    else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                      {
                        i = k;
                        --j;
                        if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                          continue;
                        else
                          break;
                      }
                  }
                if (!scm_heap_table[seg_id].valid
                    || scm_heap_table[seg_id].valid (ptr,
                                                     &scm_heap_table[seg_id]))
                  if (scm_heap_table[seg_id].span == 1
                      || SCM_DOUBLE_CELLP (*(SCM **) (& x[m])))
                    scm_gc_mark (*(SCM *) & x[m]);
                break;
              }

          }
      }
}


/* The following is a C predicate which determines if an SCM value can be
   regarded as a pointer to a cell on the heap.  The code is duplicated
   from scm_mark_locations. */


int
scm_cellp (SCM value)
{
  register int i, j;
  register SCM_CELLPTR ptr;

  if (SCM_CELLP (*(SCM **) (& value)))
    {
      ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
      i = 0;
      j = scm_n_heap_segs - 1;
      if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
          && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
        {
          while (i <= j)
            {
              int seg_id;
              seg_id = -1;
              if ((i == j)
                  || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                seg_id = i;
              else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                seg_id = j;
              else
                {
                  int k;
                  k = (i + j) / 2;
                  if (k == i)
                    break;
                  if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                    {
                      j = k;
                      ++i;
                      if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                        continue;
                      else
                        break;
                    }
                  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                    {
                      i = k;
                      --j;
                      if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                        continue;
                      else
                        break;
                    }
                }
              if (!scm_heap_table[seg_id].valid
                  || scm_heap_table[seg_id].valid (ptr,
                                                   &scm_heap_table[seg_id]))
                return 1;
              break;
            }

        }
    }
  return 0;
}


3b2b8760 1509static void
0f2d19dd 1510scm_mark_weak_vector_spines ()
0f2d19dd 1511{
ab4bef85 1512 SCM w;
0f2d19dd 1513
ab4bef85 1514 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
0f2d19dd 1515 {
ab4bef85 1516 if (SCM_IS_WHVEC_ANY (w))
0f2d19dd
JB
1517 {
1518 SCM *ptr;
1519 SCM obj;
1520 int j;
1521 int n;
1522
ab4bef85
JB
1523 obj = w;
1524 ptr = SCM_VELTS (w);
1525 n = SCM_LENGTH (w);
0f2d19dd
JB
1526 for (j = 0; j < n; ++j)
1527 {
1528 SCM alist;
1529
1530 alist = ptr[j];
0b5f3f34 1531 while ( SCM_CONSP (alist)
a00c95d9 1532 && !SCM_GCMARKP (alist)
0f2d19dd
JB
1533 && SCM_CONSP (SCM_CAR (alist)))
1534 {
1535 SCM_SETGCMARK (alist);
1536 SCM_SETGCMARK (SCM_CAR (alist));
1537 alist = SCM_GCCDR (alist);
1538 }
1539 }
1540 }
1541 }
1542}
1543
1544
4c48ba06
MD
1545#ifdef GUILE_NEW_GC_SCHEME
1546static void
1547gc_sweep_freelist_start (scm_freelist_t *freelist)
1548{
1549 freelist->cells = SCM_EOL;
1550 freelist->left_to_collect = freelist->cluster_size;
b37fe1c5 1551 freelist->clusters_allocated = 0;
4c48ba06
MD
1552 freelist->clusters = SCM_EOL;
1553 freelist->clustertail = &freelist->clusters;
1811ebce 1554 freelist->collected_1 = freelist->collected;
4c48ba06
MD
1555 freelist->collected = 0;
1556}
1557
1558static void
1559gc_sweep_freelist_finish (scm_freelist_t *freelist)
1560{
1811ebce 1561 int collected;
4c48ba06
MD
1562 *freelist->clustertail = freelist->cells;
1563 if (SCM_NNULLP (freelist->cells))
1564 {
1565 SCM c = freelist->cells;
1566 SCM_SETCAR (c, SCM_CDR (c));
1567 SCM_SETCDR (c, SCM_EOL);
1568 freelist->collected +=
1569 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1570 }
b37fe1c5 1571 scm_gc_cells_collected += freelist->collected;
a00c95d9 1572
8fef55a8 1573 /* Although freelist->min_yield is used to test freelist->collected
7dbff8b1 1574 * (which is the local GC yield for freelist), it is adjusted so
8fef55a8 1575 * that *total* yield is freelist->min_yield_fraction of total heap
7dbff8b1
MD
1576 * size. This means that a too low yield is compensated by more
1577 * heap on the list which is currently doing most work, which is
1578 * just what we want.
1579 */
1811ebce 1580 collected = SCM_MAX (freelist->collected_1, freelist->collected);
8fef55a8 1581 freelist->grow_heap_p = (collected < freelist->min_yield);
4c48ba06
MD
1582}
1583#endif
0f2d19dd 1584
a00c95d9 1585void
0f2d19dd 1586scm_gc_sweep ()
0f2d19dd
JB
1587{
1588 register SCM_CELLPTR ptr;
1589#ifdef SCM_POINTERS_MUNGED
1590 register SCM scmptr;
1591#else
1592#undef scmptr
1593#define scmptr (SCM)ptr
1594#endif
1595 register SCM nfreelist;
4c48ba06 1596 register scm_freelist_t *freelist;
0f2d19dd 1597 register long m;
0f2d19dd 1598 register int span;
15e9d186 1599 long i;
0f2d19dd
JB
1600 scm_sizet seg_size;
1601
0f2d19dd 1602 m = 0;
0f2d19dd 1603
4a4c9785 1604#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1605 gc_sweep_freelist_start (&scm_master_freelist);
1606 gc_sweep_freelist_start (&scm_master_freelist2);
4a4c9785 1607#else
cf2d30f6
JB
1608 /* Reset all free list pointers. We'll reconstruct them completely
1609 while scanning. */
1610 for (i = 0; i < scm_n_heap_segs; i++)
4c48ba06 1611 scm_heap_table[i].freelist->cells = SCM_EOL;
4a4c9785 1612#endif
a00c95d9 1613
cf2d30f6 1614 for (i = 0; i < scm_n_heap_segs; i++)
0f2d19dd 1615 {
0df07278 1616#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1617 register unsigned int left_to_collect;
1618#else
1619 register scm_sizet n = 0;
0df07278 1620#endif
4c48ba06 1621 register scm_sizet j;
15e9d186 1622
cf2d30f6
JB
1623 /* Unmarked cells go onto the front of the freelist this heap
1624 segment points to. Rather than updating the real freelist
1625 pointer as we go along, we accumulate the new head in
1626 nfreelist. Then, if it turns out that the entire segment is
1627 free, we free (i.e., malloc's free) the whole segment, and
1628 simply don't assign nfreelist back into the real freelist. */
4c48ba06
MD
1629 freelist = scm_heap_table[i].freelist;
1630 nfreelist = freelist->cells;
4a4c9785 1631#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1632 left_to_collect = freelist->left_to_collect;
4a4c9785 1633#endif
945fec60 1634 span = scm_heap_table[i].span;
cf2d30f6 1635
a00c95d9
ML
1636 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1637 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
0f2d19dd
JB
1638 for (j = seg_size + span; j -= span; ptr += span)
1639 {
1640#ifdef SCM_POINTERS_MUNGED
1641 scmptr = PTR2SCM (ptr);
1642#endif
1643 switch SCM_TYP7 (scmptr)
1644 {
1645 case scm_tcs_cons_gloc:
1646 if (SCM_GCMARKP (scmptr))
1647 {
1648 if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
24e68a57 1649 SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
0f2d19dd
JB
1650 goto cmrkcontinue;
1651 }
1652 {
1653 SCM vcell;
1654 vcell = SCM_CAR (scmptr) - 1L;
1655
f1267706 1656 if ((SCM_CDR (vcell) == 0) || (SCM_UNPACK (SCM_CDR (vcell)) == 1))
0f2d19dd 1657 {
f0cb1733
MD
1658 scm_struct_free_t free
1659 = (scm_struct_free_t) ((SCM*) vcell)[scm_struct_i_free];
1660 m += free ((SCM *) vcell, (SCM *) SCM_GCCDR (scmptr));
0f2d19dd
JB
1661 }
1662 }
1663 break;
1664 case scm_tcs_cons_imcar:
1665 case scm_tcs_cons_nimcar:
1666 case scm_tcs_closures:
e641afaf 1667 case scm_tc7_pws:
0f2d19dd
JB
1668 if (SCM_GCMARKP (scmptr))
1669 goto cmrkcontinue;
1670 break;
1671 case scm_tc7_wvect:
1672 if (SCM_GC8MARKP (scmptr))
1673 {
1674 goto c8mrkcontinue;
1675 }
1676 else
1677 {
ab4bef85
JB
1678 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1679 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
0f2d19dd
JB
1680 break;
1681 }
1682
1683 case scm_tc7_vector:
1684 case scm_tc7_lvector:
1685#ifdef CCLO
1686 case scm_tc7_cclo:
1687#endif
1688 if (SCM_GC8MARKP (scmptr))
1689 goto c8mrkcontinue;
1690
1691 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1692 freechars:
1693 scm_must_free (SCM_CHARS (scmptr));
1694 /* SCM_SETCHARS(scmptr, 0);*/
1695 break;
afe5177e 1696#ifdef HAVE_ARRAYS
0f2d19dd
JB
1697 case scm_tc7_bvect:
1698 if SCM_GC8MARKP (scmptr)
1699 goto c8mrkcontinue;
1700 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1701 goto freechars;
1702 case scm_tc7_byvect:
1703 if SCM_GC8MARKP (scmptr)
1704 goto c8mrkcontinue;
1705 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1706 goto freechars;
1707 case scm_tc7_ivect:
1708 case scm_tc7_uvect:
1709 if SCM_GC8MARKP (scmptr)
1710 goto c8mrkcontinue;
1711 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1712 goto freechars;
1713 case scm_tc7_svect:
1714 if SCM_GC8MARKP (scmptr)
1715 goto c8mrkcontinue;
1716 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1717 goto freechars;
5c11cc9d 1718#ifdef HAVE_LONG_LONGS
0f2d19dd
JB
1719 case scm_tc7_llvect:
1720 if SCM_GC8MARKP (scmptr)
1721 goto c8mrkcontinue;
1722 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1723 goto freechars;
1724#endif
1725 case scm_tc7_fvect:
1726 if SCM_GC8MARKP (scmptr)
1727 goto c8mrkcontinue;
1728 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1729 goto freechars;
1730 case scm_tc7_dvect:
1731 if SCM_GC8MARKP (scmptr)
1732 goto c8mrkcontinue;
1733 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1734 goto freechars;
1735 case scm_tc7_cvect:
1736 if SCM_GC8MARKP (scmptr)
1737 goto c8mrkcontinue;
1738 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1739 goto freechars;
afe5177e 1740#endif
0f2d19dd 1741 case scm_tc7_substring:
0f2d19dd
JB
1742 if (SCM_GC8MARKP (scmptr))
1743 goto c8mrkcontinue;
1744 break;
1745 case scm_tc7_string:
0f2d19dd
JB
1746 if (SCM_GC8MARKP (scmptr))
1747 goto c8mrkcontinue;
1748 m += SCM_HUGE_LENGTH (scmptr) + 1;
1749 goto freechars;
1750 case scm_tc7_msymbol:
1751 if (SCM_GC8MARKP (scmptr))
1752 goto c8mrkcontinue;
1753 m += ( SCM_LENGTH (scmptr)
1754 + 1
1755 + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
1756 scm_must_free ((char *)SCM_SLOTS (scmptr));
1757 break;
1758 case scm_tc7_contin:
1759 if SCM_GC8MARKP (scmptr)
1760 goto c8mrkcontinue;
0db18cf4 1761 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
c68296f8
MV
1762 if (SCM_VELTS (scmptr))
1763 goto freechars;
0f2d19dd
JB
1764 case scm_tc7_ssymbol:
1765 if SCM_GC8MARKP(scmptr)
1766 goto c8mrkcontinue;
1767 break;
1768 case scm_tcs_subrs:
1769 continue;
1770 case scm_tc7_port:
1771 if SCM_GC8MARKP (scmptr)
1772 goto c8mrkcontinue;
1773 if SCM_OPENP (scmptr)
1774 {
1775 int k = SCM_PTOBNUM (scmptr);
1776 if (!(k < scm_numptob))
1777 goto sweeperr;
1778 /* Keep "revealed" ports alive. */
945fec60 1779 if (scm_revealed_count (scmptr) > 0)
0f2d19dd
JB
1780 continue;
1781 /* Yes, I really do mean scm_ptobs[k].free */
1782 /* rather than ftobs[k].close. .close */
1783 /* is for explicit CLOSE-PORT by user */
84af0382 1784 m += (scm_ptobs[k].free) (scmptr);
0f2d19dd
JB
1785 SCM_SETSTREAM (scmptr, 0);
1786 scm_remove_from_port_table (scmptr);
1787 scm_gc_ports_collected++;
24e68a57 1788 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
0f2d19dd
JB
1789 }
1790 break;
1791 case scm_tc7_smob:
1792 switch SCM_GCTYP16 (scmptr)
1793 {
1794 case scm_tc_free_cell:
acb0a19c 1795 case scm_tc16_real:
0f2d19dd
JB
1796 if SCM_GC8MARKP (scmptr)
1797 goto c8mrkcontinue;
1798 break;
1799#ifdef SCM_BIGDIG
acb0a19c 1800 case scm_tc16_big:
0f2d19dd
JB
1801 if SCM_GC8MARKP (scmptr)
1802 goto c8mrkcontinue;
1803 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1804 goto freechars;
1805#endif /* def SCM_BIGDIG */
acb0a19c 1806 case scm_tc16_complex:
0f2d19dd
JB
1807 if SCM_GC8MARKP (scmptr)
1808 goto c8mrkcontinue;
acb0a19c
MD
1809 m += 2 * sizeof (double);
1810 goto freechars;
0f2d19dd
JB
1811 default:
1812 if SCM_GC8MARKP (scmptr)
1813 goto c8mrkcontinue;
1814
1815 {
1816 int k;
1817 k = SCM_SMOBNUM (scmptr);
1818 if (!(k < scm_numsmob))
1819 goto sweeperr;
1820 m += (scm_smobs[k].free) ((SCM) scmptr);
1821 break;
1822 }
1823 }
1824 break;
1825 default:
1826 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1827 }
0f2d19dd
JB
1828#if 0
1829 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1830 exit (2);
1831#endif
4a4c9785
MD
1832#ifndef GUILE_NEW_GC_SCHEME
1833 n += span;
1834#else
4c48ba06 1835 if (!--left_to_collect)
4a4c9785
MD
1836 {
1837 SCM_SETCAR (scmptr, nfreelist);
4c48ba06
MD
1838 *freelist->clustertail = scmptr;
1839 freelist->clustertail = SCM_CDRLOC (scmptr);
a00c95d9 1840
4a4c9785 1841 nfreelist = SCM_EOL;
4c48ba06
MD
1842 freelist->collected += span * freelist->cluster_size;
1843 left_to_collect = freelist->cluster_size;
4a4c9785
MD
1844 }
1845 else
1846#endif
1847 {
1848 /* Stick the new cell on the front of nfreelist. It's
1849 critical that we mark this cell as freed; otherwise, the
1850 conservative collector might trace it as some other type
1851 of object. */
1852 SCM_SETCAR (scmptr, scm_tc_free_cell);
1853 SCM_SETCDR (scmptr, nfreelist);
1854 nfreelist = scmptr;
1855 }
a00c95d9 1856
0f2d19dd
JB
1857 continue;
1858 c8mrkcontinue:
1859 SCM_CLRGC8MARK (scmptr);
1860 continue;
1861 cmrkcontinue:
1862 SCM_CLRGCMARK (scmptr);
1863 }
1864#ifdef GC_FREE_SEGMENTS
1865 if (n == seg_size)
1866 {
15e9d186
JB
1867 register long j;
1868
4c48ba06 1869 freelist->heap_size -= seg_size;
cf2d30f6
JB
1870 free ((char *) scm_heap_table[i].bounds[0]);
1871 scm_heap_table[i].bounds[0] = 0;
1872 for (j = i + 1; j < scm_n_heap_segs; j++)
0f2d19dd
JB
1873 scm_heap_table[j - 1] = scm_heap_table[j];
1874 scm_n_heap_segs -= 1;
cf2d30f6 1875 i--; /* We need to scan the segment just moved. */
0f2d19dd
JB
1876 }
1877 else
1878#endif /* ifdef GC_FREE_SEGMENTS */
4a4c9785
MD
1879 {
1880 /* Update the real freelist pointer to point to the head of
1881 the list of free cells we've built for this segment. */
4c48ba06 1882 freelist->cells = nfreelist;
4a4c9785 1883#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1884 freelist->left_to_collect = left_to_collect;
4a4c9785
MD
1885#endif
1886 }
1887
4c48ba06
MD
1888#ifndef GUILE_NEW_GC_SCHEME
1889 freelist->collected += n;
4a4c9785 1890#endif
0f2d19dd 1891
fca7547b 1892#ifdef GUILE_DEBUG_FREELIST
8ded62a3 1893#ifdef GUILE_NEW_GC_SCHEME
4c48ba06 1894 scm_check_freelist (freelist == &scm_master_freelist
8ded62a3
MD
1895 ? scm_freelist
1896 : scm_freelist2);
1897#else
4c48ba06 1898 scm_check_freelist (freelist);
8ded62a3 1899#endif
cf2d30f6
JB
1900 scm_map_free_list ();
1901#endif
4a4c9785 1902 }
a00c95d9 1903
4a4c9785 1904#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
1905 gc_sweep_freelist_finish (&scm_master_freelist);
1906 gc_sweep_freelist_finish (&scm_master_freelist2);
a00c95d9 1907
8ded62a3
MD
 1908 /* When we move to POSIX threads, private freelists should probably
1909 be GC-protected instead. */
1910 scm_freelist = SCM_EOL;
1911 scm_freelist2 = SCM_EOL;
4a4c9785 1912#endif
a00c95d9 1913
0f2d19dd
JB
1914 /* Scan weak vectors. */
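  /* In plain weak vectors, entries that now point to freed cells are
     replaced with #f; in weak hash tables, alist entries whose weak
     key or weak value has been freed are spliced out instead. */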
1915 {
ab4bef85
JB
1916 SCM *ptr, w;
1917 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
0f2d19dd 1918 {
ab4bef85 1919 if (!SCM_IS_WHVEC_ANY (w))
0f2d19dd 1920 {
15e9d186
JB
1921 register long j, n;
1922
ab4bef85
JB
1923 ptr = SCM_VELTS (w);
1924 n = SCM_LENGTH (w);
0f2d19dd 1925 for (j = 0; j < n; ++j)
0c95b57d 1926 if (SCM_FREEP (ptr[j]))
0f2d19dd
JB
1927 ptr[j] = SCM_BOOL_F;
1928 }
1929 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1930 {
ab4bef85
JB
1931 SCM obj = w;
1932 register long n = SCM_LENGTH (w);
15e9d186
JB
1933 register long j;
1934
ab4bef85 1935 ptr = SCM_VELTS (w);
15e9d186 1936
0f2d19dd
JB
1937 for (j = 0; j < n; ++j)
1938 {
1939 SCM * fixup;
1940 SCM alist;
1941 int weak_keys;
1942 int weak_values;
a00c95d9 1943
0f2d19dd
JB
1944 weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
1945 weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
1946
1947 fixup = ptr + j;
1948 alist = *fixup;
1949
0b5f3f34 1950 while ( SCM_CONSP (alist)
0f2d19dd
JB
1951 && SCM_CONSP (SCM_CAR (alist)))
1952 {
1953 SCM key;
1954 SCM value;
1955
1956 key = SCM_CAAR (alist);
1957 value = SCM_CDAR (alist);
0c95b57d
GB
1958 if ( (weak_keys && SCM_FREEP (key))
1959 || (weak_values && SCM_FREEP (value)))
0f2d19dd
JB
1960 {
1961 *fixup = SCM_CDR (alist);
1962 }
1963 else
24e68a57 1964 fixup = SCM_CDRLOC (alist);
0f2d19dd
JB
1965 alist = SCM_CDR (alist);
1966 }
1967 }
1968 }
1969 }
1970 }
b37fe1c5 1971 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
8b0d194f
MD
1972#ifdef GUILE_NEW_GC_SCHEME
1973 scm_gc_yield -= scm_cells_allocated;
1974#endif
0f2d19dd
JB
1975 scm_mallocated -= m;
1976 scm_gc_malloc_collected = m;
1977}
1978
1979
1980\f
1981
1982/* {Front end to malloc}
1983 *
c68296f8 1984 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
0f2d19dd
JB
1985 *
 1986 * These functions provide services comparable to malloc, realloc, and
 1987 * free. They are for allocating the malloced parts of Scheme objects.
 1988 * The primary purpose of the front end is to trigger gc when the malloc threshold is passed.
1989 */
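/* A minimal usage sketch (the names below are made up for illustration):
 * the malloced part of an object is obtained and released through this
 * front end so that scm_mallocated stays accurate:
 *
 *     char *buf = (char *) scm_must_malloc (len + 1, "my-buffer");
 *     ...
 *     scm_must_free (buf);
 *
 * "my-buffer" is only the WHAT string used in error reports.
 */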
1990
1991/* scm_must_malloc
1992 * Return newly malloced storage or throw an error.
1993 *
1994 * The parameter WHAT is a string for error reporting.
a00c95d9 1995 * If this allocation would push scm_mallocated past the
0f2d19dd
JB
 1996 * threshold scm_mtrigger, or if the first call to malloc fails,
1997 * garbage collect -- on the presumption that some objects
1998 * using malloced storage may be collected.
1999 *
2000 * The limit scm_mtrigger may be raised by this allocation.
2001 */
07806695 2002void *
e4ef2330 2003scm_must_malloc (scm_sizet size, const char *what)
0f2d19dd 2004{
07806695 2005 void *ptr;
15e9d186 2006 unsigned long nm = scm_mallocated + size;
e4ef2330
MD
2007
2008 if (nm <= scm_mtrigger)
0f2d19dd 2009 {
07806695 2010 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
2011 if (NULL != ptr)
2012 {
2013 scm_mallocated = nm;
2014 return ptr;
2015 }
2016 }
6064dcc6 2017
0f2d19dd 2018 scm_igc (what);
e4ef2330 2019
0f2d19dd 2020 nm = scm_mallocated + size;
07806695 2021 SCM_SYSCALL (ptr = malloc (size));
0f2d19dd
JB
2022 if (NULL != ptr)
2023 {
2024 scm_mallocated = nm;
6064dcc6
MV
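	  /* Still within SCM_MTRIGGER_HYSTERESIS of the trigger even after
	     collecting, so raise scm_mtrigger; otherwise every allocation
	     near the limit would provoke another gc. */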
2025 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2026 if (nm > scm_mtrigger)
2027 scm_mtrigger = nm + nm / 2;
2028 else
2029 scm_mtrigger += scm_mtrigger / 2;
2030 }
0f2d19dd
JB
2031 return ptr;
2032 }
e4ef2330
MD
2033
2034 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
2035 return 0; /* never reached */
0f2d19dd
JB
2036}
2037
2038
2039/* scm_must_realloc
2040 * is similar to scm_must_malloc.
2041 */
07806695
JB
2042void *
2043scm_must_realloc (void *where,
e4ef2330
MD
2044 scm_sizet old_size,
2045 scm_sizet size,
3eeba8d4 2046 const char *what)
0f2d19dd 2047{
07806695 2048 void *ptr;
e4ef2330
MD
2049 scm_sizet nm = scm_mallocated + size - old_size;
2050
2051 if (nm <= scm_mtrigger)
0f2d19dd 2052 {
07806695 2053 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
2054 if (NULL != ptr)
2055 {
2056 scm_mallocated = nm;
2057 return ptr;
2058 }
2059 }
e4ef2330 2060
0f2d19dd 2061 scm_igc (what);
e4ef2330
MD
2062
2063 nm = scm_mallocated + size - old_size;
07806695 2064 SCM_SYSCALL (ptr = realloc (where, size));
0f2d19dd
JB
2065 if (NULL != ptr)
2066 {
2067 scm_mallocated = nm;
6064dcc6
MV
2068 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2069 if (nm > scm_mtrigger)
2070 scm_mtrigger = nm + nm / 2;
2071 else
2072 scm_mtrigger += scm_mtrigger / 2;
2073 }
0f2d19dd
JB
2074 return ptr;
2075 }
e4ef2330
MD
2076
2077 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
2078 return 0; /* never reached */
0f2d19dd
JB
2079}
2080
a00c95d9 2081void
07806695 2082scm_must_free (void *obj)
0f2d19dd
JB
2083{
2084 if (obj)
2085 free (obj);
2086 else
2087 scm_wta (SCM_INUM0, "already free", "");
2088}
0f2d19dd 2089
c68296f8
MV
2090/* Announce that there has been some malloc done that will be freed
2091 * during gc. A typical use is for a smob that uses some malloced
 2092 * memory but cannot get it from scm_must_malloc (for whatever
 2093 * reason). When a new object of this smob is created, call
2094 * scm_done_malloc with the size of the object. When your smob free
2095 * function is called, be sure to include this size in the return
2096 * value. */
0f2d19dd 2097
c68296f8 2098void
6e8d25a6 2099scm_done_malloc (long size)
c68296f8
MV
2100{
2101 scm_mallocated += size;
2102
2103 if (scm_mallocated > scm_mtrigger)
2104 {
2105 scm_igc ("foreign mallocs");
2106 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2107 {
2108 if (scm_mallocated > scm_mtrigger)
2109 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2110 else
2111 scm_mtrigger += scm_mtrigger / 2;
2112 }
2113 }
2114}
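/* A hedged sketch of the pattern described above (the "image" smob,
 * its struct, and the IMAGE_DATA accessor are hypothetical):
 *
 *     struct image *im = (struct image *) malloc (sizeof (struct image));
 *     scm_done_malloc (sizeof (struct image));
 *
 *     static scm_sizet
 *     free_image (SCM obj)
 *     {
 *       free (IMAGE_DATA (obj));           -- IMAGE_DATA is made up here
 *       return sizeof (struct image);      -- balances the earlier report
 *     }
 */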
2115
2116
2117\f
0f2d19dd
JB
2118
2119/* {Heap Segments}
2120 *
2121 * Each heap segment is an array of objects of a particular size.
2122 * Every segment has an associated (possibly shared) freelist.
2123 * A table of segment records is kept that records the upper and
2124 * lower extents of the segment; this is used during the conservative
 2125 * phase of gc to identify probable gc roots (because they point
c68296f8 2126 * into valid segments at reasonable offsets). */
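/* For orientation only (see the definition of scm_heap_seg_data_t for
 * the authoritative layout), the segment record fields used in this
 * file are roughly:
 *
 *     bounds[0], bounds[1]   lower and upper extent of the segment
 *     span                   cells per object served by the segment
 *     freelist               the (possibly shared) freelist it feeds
 *     valid                  flag, initialized to 0 when the segment
 *                            is created
 */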
0f2d19dd
JB
2127
2128/* scm_expmem
2129 * is true if the first segment was smaller than INIT_HEAP_SEG.
2130 * If scm_expmem is set to one, subsequent segment allocations will
2131 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2132 */
2133int scm_expmem = 0;
2134
4c48ba06
MD
2135scm_sizet scm_max_segment_size;
2136
0f2d19dd
JB
2137/* scm_heap_org
2138 * is the lowest base address of any heap segment.
2139 */
2140SCM_CELLPTR scm_heap_org;
2141
a00c95d9 2142scm_heap_seg_data_t * scm_heap_table = 0;
0f2d19dd
JB
2143int scm_n_heap_segs = 0;
2144
0f2d19dd
JB
2145/* init_heap_seg
 2146 * initializes a new heap segment and returns the number of objects it contains.
2147 *
2148 * The segment origin, segment size in bytes, and the span of objects
2149 * in cells are input parameters. The freelist is both input and output.
2150 *
 2151 * This function presumes that the scm_heap_table has already been expanded
 2152 * to accommodate a new segment record.
2153 */
2154
2155
a00c95d9 2156static scm_sizet
4c48ba06 2157init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
0f2d19dd
JB
2158{
2159 register SCM_CELLPTR ptr;
2160#ifdef SCM_POINTERS_MUNGED
2161 register SCM scmptr;
2162#else
2163#undef scmptr
2164#define scmptr ptr
2165#endif
2166 SCM_CELLPTR seg_end;
15e9d186 2167 int new_seg_index;
acb0a19c 2168 int n_new_cells;
4c48ba06 2169 int span = freelist->span;
a00c95d9 2170
0f2d19dd
JB
2171 if (seg_org == NULL)
2172 return 0;
2173
a00c95d9 2174 ptr = CELL_UP (seg_org, span);
acb0a19c 2175
a00c95d9 2176 /* Compute the ceiling on valid object pointers w/in this segment.
0f2d19dd 2177 */
a00c95d9 2178 seg_end = CELL_DN ((char *) seg_org + size, span);
0f2d19dd 2179
a00c95d9 2180 /* Find the right place and insert the segment record.
0f2d19dd
JB
2181 *
2182 */
2183 for (new_seg_index = 0;
2184 ( (new_seg_index < scm_n_heap_segs)
2185 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2186 new_seg_index++)
2187 ;
2188
2189 {
2190 int i;
2191 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2192 scm_heap_table[i] = scm_heap_table[i - 1];
2193 }
a00c95d9 2194
0f2d19dd
JB
2195 ++scm_n_heap_segs;
2196
2197 scm_heap_table[new_seg_index].valid = 0;
945fec60 2198 scm_heap_table[new_seg_index].span = span;
4c48ba06 2199 scm_heap_table[new_seg_index].freelist = freelist;
0f2d19dd
JB
2200 scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
2201 scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;
2202
2203
a00c95d9 2204 /* Compute the least valid object pointer w/in this segment
0f2d19dd 2205 */
a00c95d9 2206 ptr = CELL_UP (ptr, span);
0f2d19dd
JB
2207
2208
acb0a19c
MD
2209 /*n_new_cells*/
2210 n_new_cells = seg_end - ptr;
0f2d19dd 2211
4a4c9785
MD
2212#ifdef GUILE_NEW_GC_SCHEME
2213
4c48ba06 2214 freelist->heap_size += n_new_cells;
4a4c9785 2215
a00c95d9 2216 /* Partition objects in this segment into clusters */
4a4c9785
MD
2217 {
2218 SCM clusters;
2219 SCM *clusterp = &clusters;
4c48ba06 2220 int n_cluster_cells = span * freelist->cluster_size;
4a4c9785 2221
4c48ba06 2222 while (n_new_cells > span) /* at least one spine + one freecell */
4a4c9785 2223 {
4c48ba06
MD
2224 /* Determine end of cluster
2225 */
2226 if (n_new_cells >= n_cluster_cells)
2227 {
2228 seg_end = ptr + n_cluster_cells;
2229 n_new_cells -= n_cluster_cells;
2230 }
4a4c9785 2231 else
a00c95d9
ML
2232 /* [cmm] looks like the segment size doesn't divide cleanly by
2233 cluster size. bad cmm! */
2234 abort();
4a4c9785 2235
4c48ba06
MD
2236 /* Allocate cluster spine
2237 */
4a4c9785
MD
2238 *clusterp = PTR2SCM (ptr);
2239 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
2240 clusterp = SCM_CDRLOC (*clusterp);
4a4c9785 2241 ptr += span;
a00c95d9 2242
4a4c9785
MD
2243 while (ptr < seg_end)
2244 {
2245#ifdef SCM_POINTERS_MUNGED
2246 scmptr = PTR2SCM (ptr);
2247#endif
2248 SCM_SETCAR (scmptr, scm_tc_free_cell);
2249 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2250 ptr += span;
2251 }
4c48ba06 2252
4a4c9785
MD
2253 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
2254 }
a00c95d9 2255
4a4c9785
MD
2256 /* Patch up the last cluster pointer in the segment
2257 * to join it to the input freelist.
2258 */
4c48ba06
MD
2259 *clusterp = freelist->clusters;
2260 freelist->clusters = clusters;
4a4c9785
MD
2261 }
2262
2263#else /* GUILE_NEW_GC_SCHEME */
2264
a00c95d9 2265 /* Prepend objects in this segment to the freelist.
0f2d19dd
JB
2266 */
2267 while (ptr < seg_end)
2268 {
2269#ifdef SCM_POINTERS_MUNGED
2270 scmptr = PTR2SCM (ptr);
2271#endif
24e68a57 2272 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
945fec60
MD
2273 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2274 ptr += span;
0f2d19dd
JB
2275 }
2276
945fec60 2277 ptr -= span;
0f2d19dd
JB
2278
2279 /* Patch up the last freelist pointer in the segment
2280 * to join it to the input freelist.
2281 */
4c48ba06 2282 SCM_SETCDR (PTR2SCM (ptr), freelist->cells);
a00c95d9 2283 freelist->cells = PTR2SCM (CELL_UP (seg_org, span));
4c48ba06
MD
2284
2285 freelist->heap_size += n_new_cells;
0f2d19dd 2286
4a4c9785 2287#endif /* GUILE_NEW_GC_SCHEME */
4c48ba06
MD
2288
2289#ifdef DEBUGINFO
2290 fprintf (stderr, "H");
2291#endif
0f2d19dd
JB
2292 return size;
2293#ifdef scmptr
2294#undef scmptr
2295#endif
2296}
2297
a00c95d9
ML
2298#ifndef GUILE_NEW_GC_SCHEME
2299#define round_to_cluster_size(freelist, len) len
2300#else
2301
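/* Round LEN up to a whole number of clusters (plus alignment slack) so
   that init_heap_seg can partition the new segment into clusters with
   no remainder. */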
2302static scm_sizet
2303round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2304{
2305 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2306
2307 return
2308 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2309 + ALIGNMENT_SLACK (freelist);
2310}
2311
2312#endif
0f2d19dd 2313
a00c95d9 2314static void
4c48ba06 2315alloc_some_heap (scm_freelist_t *freelist)
0f2d19dd 2316{
a00c95d9 2317 scm_heap_seg_data_t * tmptable;
0f2d19dd 2318 SCM_CELLPTR ptr;
b37fe1c5 2319 long len;
a00c95d9 2320
0f2d19dd
JB
2321 /* Critical code sections (such as the garbage collector)
2322 * aren't supposed to add heap segments.
2323 */
2324 if (scm_gc_heap_lock)
2325 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
2326
2327 /* Expand the heap tables to have room for the new segment.
2328 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
2329 * only if the allocation of the segment itself succeeds.
2330 */
a00c95d9 2331 len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
0f2d19dd 2332
a00c95d9 2333 SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
0f2d19dd
JB
2334 realloc ((char *)scm_heap_table, len)));
2335 if (!tmptable)
2336 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
2337 else
2338 scm_heap_table = tmptable;
2339
2340
2341 /* Pick a size for the new heap segment.
a00c95d9 2342 * The rule for picking the size of a segment is explained in
0f2d19dd
JB
2343 * gc.h
2344 */
4c48ba06
MD
2345#ifdef GUILE_NEW_GC_SCHEME
2346 {
1811ebce
MD
 2347 /* Ensure that the new segment is predicted to be large enough.
2348 *
2349 * New yield should at least equal GC fraction of new heap size, i.e.
2350 *
2351 * y + dh > f * (h + dh)
2352 *
2353 * y : yield
8fef55a8 2354 * f : min yield fraction
1811ebce
MD
2355 * h : heap size
2356 * dh : size of new heap segment
2357 *
2358 * This gives dh > (f * h - y) / (1 - f)
bda1446c 2359 */
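    /* min_yield_fraction is a percentage, so with F = 100 * f the
     * inequality above reads dh > (F * h - 100 * y) / (100 - F); the
     * expression below divides by (99 - F) instead, which only nudges
     * the estimate upward. */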
8fef55a8 2360 int f = freelist->min_yield_fraction;
1811ebce
MD
2361 long h = SCM_HEAP_SIZE;
2362 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
4c48ba06
MD
2363 len = SCM_EXPHEAP (freelist->heap_size);
2364#ifdef DEBUGINFO
2365 fprintf (stderr, "(%d < %d)", len, min_cells);
2366#endif
2367 if (len < min_cells)
1811ebce 2368 len = min_cells + freelist->cluster_size;
4c48ba06 2369 len *= sizeof (scm_cell);
1811ebce
MD
2370 /* force new sampling */
2371 freelist->collected = LONG_MAX;
4c48ba06 2372 }
a00c95d9 2373
4c48ba06
MD
2374 if (len > scm_max_segment_size)
2375 len = scm_max_segment_size;
2376#else
0f2d19dd
JB
2377 if (scm_expmem)
2378 {
4c48ba06
MD
2379 len = (scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell));
2380 if ((scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell))
945fec60 2381 != len)
0f2d19dd
JB
2382 len = 0;
2383 }
2384 else
2385 len = SCM_HEAP_SEG_SIZE;
4c48ba06 2386#endif /* GUILE_NEW_GC_SCHEME */
0f2d19dd
JB
2387
2388 {
2389 scm_sizet smallest;
2390
a00c95d9 2391#ifndef GUILE_NEW_GC_SCHEME
4c48ba06 2392 smallest = (freelist->span * sizeof (scm_cell));
a00c95d9
ML
2393#else
2394 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2395#endif
2396
0f2d19dd 2397 if (len < smallest)
a00c95d9 2398 len = smallest;
0f2d19dd
JB
2399
2400 /* Allocate with decaying ambition. */
2401 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2402 && (len >= smallest))
2403 {
1811ebce 2404 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
a00c95d9 2405 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
0f2d19dd
JB
2406 if (ptr)
2407 {
a00c95d9 2408 init_heap_seg (ptr, rounded_len, freelist);
0f2d19dd
JB
2409 return;
2410 }
2411 len /= 2;
2412 }
2413 }
2414
2415 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
2416}
2417
2418
2419
a00c95d9 2420SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
1bbd0b84 2421 (SCM name),
b380b885 2422 "")
1bbd0b84 2423#define FUNC_NAME s_scm_unhash_name
0f2d19dd
JB
2424{
2425 int x;
2426 int bound;
3b3b36dd 2427 SCM_VALIDATE_SYMBOL (1,name);
0f2d19dd
JB
2428 SCM_DEFER_INTS;
2429 bound = scm_n_heap_segs;
2430 for (x = 0; x < bound; ++x)
2431 {
2432 SCM_CELLPTR p;
2433 SCM_CELLPTR pbound;
2434 p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
2435 pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
2436 while (p < pbound)
2437 {
2438 SCM incar;
2439 incar = p->car;
2440 if (1 == (7 & (int)incar))
2441 {
2442 --incar;
2443 if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
2444 && (SCM_CDR (incar) != 0)
f1267706 2445 && (SCM_UNPACK (SCM_CDR (incar)) != 1))
0f2d19dd
JB
2446 {
2447 p->car = name;
2448 }
2449 }
2450 ++p;
2451 }
2452 }
2453 SCM_ALLOW_INTS;
2454 return name;
2455}
1bbd0b84 2456#undef FUNC_NAME
0f2d19dd
JB
2457
2458
2459\f
2460/* {GC Protection Helper Functions}
2461 */
2462
2463
0f2d19dd 2464void
6e8d25a6
GB
2465scm_remember (SCM *ptr)
2466{ /* empty */ }
0f2d19dd 2467
1cc91f1b 2468
c209c88e 2469/*
41b0806d
GB
2470 These crazy functions prevent garbage collection
2471 of arguments after the first argument by
2472 ensuring they remain live throughout the
2473 function because they are used in the last
2474 line of the code block.
2475 It'd be better to have a nice compiler hint to
2476 aid the conservative stack-scanning GC. --03/09/00 gjb */
0f2d19dd
JB
2477SCM
2478scm_return_first (SCM elt, ...)
0f2d19dd
JB
2479{
2480 return elt;
2481}
2482
41b0806d
GB
2483int
2484scm_return_first_int (int i, ...)
2485{
2486 return i;
2487}
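/* A hedged usage sketch (RESULT and OBJ are placeholder names): a
 * caller that extracts a pointer from OBJ and must keep OBJ alive until
 * RESULT has been built writes
 *
 *     return scm_return_first (result, obj);
 *
 * so that OBJ is still referenced in the function's final expression.
 */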
2488
0f2d19dd 2489
0f2d19dd 2490SCM
6e8d25a6 2491scm_permanent_object (SCM obj)
0f2d19dd
JB
2492{
2493 SCM_REDEFER_INTS;
2494 scm_permobjs = scm_cons (obj, scm_permobjs);
2495 SCM_REALLOW_INTS;
2496 return obj;
2497}
2498
2499
ef290276
JB
2500/* Protect OBJ from the garbage collector. OBJ will not be freed,
2501 even if all other references are dropped, until someone applies
2502 scm_unprotect_object to it. This function returns OBJ.
2503
c209c88e
GB
2504 Calls to scm_protect_object nest. For every object OBJ, there is a
2505 counter which scm_protect_object(OBJ) increments and
2506 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
dab7f566
JB
2507 an object's counter is greater than zero, the garbage collector
2508 will not free it.
2509
2510 Of course, that's not how it's implemented. scm_protect_object and
2511 scm_unprotect_object just maintain a list of references to things.
2512 Since the GC knows about this list, all objects it mentions stay
2513 alive. scm_protect_object adds its argument to the list;
2514 scm_unprotect_object removes the first occurrence of its argument
 2515 from the list. */
ef290276 2516SCM
6e8d25a6 2517scm_protect_object (SCM obj)
ef290276 2518{
ef290276
JB
2519 scm_protects = scm_cons (obj, scm_protects);
2520
2521 return obj;
2522}
2523
2524
2525/* Remove any protection for OBJ established by a prior call to
dab7f566 2526 scm_protect_object. This function returns OBJ.
ef290276 2527
dab7f566 2528 See scm_protect_object for more information. */
ef290276 2529SCM
6e8d25a6 2530scm_unprotect_object (SCM obj)
ef290276 2531{
dab7f566
JB
2532 SCM *tail_ptr = &scm_protects;
2533
0c95b57d 2534 while (SCM_CONSP (*tail_ptr))
dab7f566
JB
2535 if (SCM_CAR (*tail_ptr) == obj)
2536 {
2537 *tail_ptr = SCM_CDR (*tail_ptr);
2538 break;
2539 }
2540 else
2541 tail_ptr = SCM_CDRLOC (*tail_ptr);
ef290276
JB
2542
2543 return obj;
2544}
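/* A hedged usage sketch (HANDLER is a placeholder name): C code that
 * stores an SCM where the conservative stack scan cannot see it, e.g.
 * in a malloced C structure, brackets that period with
 *
 *     scm_protect_object (handler);
 *     ...
 *     scm_unprotect_object (handler);
 */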
2545
c45acc34
JB
2546int terminating;
2547
2548/* called on process termination. */
e52ceaac
MD
2549#ifdef HAVE_ATEXIT
2550static void
2551cleanup (void)
2552#else
2553#ifdef HAVE_ON_EXIT
51157deb
MD
2554extern int on_exit (void (*procp) (), int arg);
2555
e52ceaac
MD
2556static void
2557cleanup (int status, void *arg)
2558#else
 2559#error Dont know how to set up a cleanup handler on your system.
2560#endif
2561#endif
c45acc34
JB
2562{
2563 terminating = 1;
2564 scm_flush_all_ports ();
2565}
ef290276 2566
0f2d19dd 2567\f
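/* Allocate and install the first heap segment for FREELIST; if a
   segment of INIT_HEAP_SIZE bytes cannot be obtained, fall back to
   SCM_HEAP_SEG_SIZE.  Returns nonzero if neither allocation succeeds. */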
acb0a19c 2568static int
4c48ba06 2569make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
acb0a19c 2570{
a00c95d9
ML
2571 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2572 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2573 rounded_size,
4c48ba06 2574 freelist))
acb0a19c 2575 {
a00c95d9
ML
2576 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2577 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2578 rounded_size,
4c48ba06 2579 freelist))
acb0a19c
MD
2580 return 1;
2581 }
2582 else
2583 scm_expmem = 1;
2584
b37fe1c5 2585#ifdef GUILE_NEW_GC_SCHEME
8fef55a8
MD
2586 if (freelist->min_yield_fraction)
2587 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
b37fe1c5 2588 / 100);
8fef55a8 2589 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
b37fe1c5 2590#endif
a00c95d9 2591
acb0a19c
MD
2592 return 0;
2593}
2594
2595\f
4a4c9785 2596#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
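/* Set up FREELIST for objects occupying SPAN cells, grouped into
   clusters of CLUSTER_SIZE free objects plus one spine, with MIN_YIELD
   interpreted as a percentage of the heap size. */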
2597static void
2598init_freelist (scm_freelist_t *freelist,
2599 int span,
2600 int cluster_size,
8fef55a8 2601 int min_yield)
4c48ba06
MD
2602{
2603 freelist->clusters = SCM_EOL;
2604 freelist->cluster_size = cluster_size + 1;
b37fe1c5
MD
2605 freelist->left_to_collect = 0;
2606 freelist->clusters_allocated = 0;
8fef55a8
MD
2607 freelist->min_yield = 0;
2608 freelist->min_yield_fraction = min_yield;
4c48ba06
MD
2609 freelist->span = span;
2610 freelist->collected = 0;
1811ebce 2611 freelist->collected_1 = 0;
4c48ba06
MD
2612 freelist->heap_size = 0;
2613}
2614
4a4c9785 2615int
4c48ba06
MD
2616scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2617 scm_sizet init_heap_size_2, int gc_trigger_2,
2618 scm_sizet max_segment_size)
4a4c9785 2619#else
0f2d19dd 2620int
b37fe1c5 2621scm_init_storage (scm_sizet init_heap_size_1, scm_sizet init_heap_size_2)
4a4c9785 2622#endif
0f2d19dd
JB
2623{
2624 scm_sizet j;
2625
4c48ba06
MD
2626 if (!init_heap_size_1)
2627 init_heap_size_1 = SCM_INIT_HEAP_SIZE_1;
2628 if (!init_heap_size_2)
2629 init_heap_size_2 = SCM_INIT_HEAP_SIZE_2;
2630
0f2d19dd
JB
2631 j = SCM_NUM_PROTECTS;
2632 while (j)
2633 scm_sys_protects[--j] = SCM_BOOL_F;
2634 scm_block_gc = 1;
4a4c9785
MD
2635
2636#ifdef GUILE_NEW_GC_SCHEME
2637 scm_freelist = SCM_EOL;
4c48ba06
MD
2638 scm_freelist2 = SCM_EOL;
2639 init_freelist (&scm_master_freelist,
2640 1, SCM_CLUSTER_SIZE_1,
8fef55a8 2641 gc_trigger_1 ? gc_trigger_1 : SCM_MIN_YIELD_1);
4c48ba06
MD
2642 init_freelist (&scm_master_freelist2,
2643 2, SCM_CLUSTER_SIZE_2,
8fef55a8 2644 gc_trigger_2 ? gc_trigger_2 : SCM_MIN_YIELD_2);
4c48ba06
MD
2645 scm_max_segment_size
2646 = max_segment_size ? max_segment_size : SCM_MAX_SEGMENT_SIZE;
4a4c9785 2647#else
945fec60
MD
2648 scm_freelist.cells = SCM_EOL;
2649 scm_freelist.span = 1;
2650 scm_freelist.collected = 0;
2651 scm_freelist.heap_size = 0;
4a4c9785 2652
945fec60
MD
2653 scm_freelist2.cells = SCM_EOL;
2654 scm_freelist2.span = 2;
2655 scm_freelist2.collected = 0;
2656 scm_freelist2.heap_size = 0;
4a4c9785
MD
2657#endif
2658
0f2d19dd
JB
2659 scm_expmem = 0;
2660
2661 j = SCM_HEAP_SEG_SIZE;
2662 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
a00c95d9
ML
2663 scm_heap_table = ((scm_heap_seg_data_t *)
2664 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
acb0a19c 2665
4a4c9785 2666#ifdef GUILE_NEW_GC_SCHEME
4c48ba06
MD
2667 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2668 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
4a4c9785
MD
2669 return 1;
2670#else
4c48ba06
MD
2671 if (make_initial_segment (init_heap_size_1, &scm_freelist) ||
2672 make_initial_segment (init_heap_size_2, &scm_freelist2))
acb0a19c 2673 return 1;
4a4c9785 2674#endif
acb0a19c 2675
a00c95d9 2676 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
acb0a19c 2677
0f2d19dd 2678 /* scm_hplims[0] can change. do not remove scm_heap_org */
ab4bef85 2679 scm_weak_vectors = SCM_EOL;
0f2d19dd
JB
2680
2681 /* Initialise the list of ports. */
840ae05d
JB
2682 scm_port_table = (scm_port **)
2683 malloc (sizeof (scm_port *) * scm_port_table_room);
0f2d19dd
JB
2684 if (!scm_port_table)
2685 return 1;
2686
a18bcd0e 2687#ifdef HAVE_ATEXIT
c45acc34 2688 atexit (cleanup);
e52ceaac
MD
2689#else
2690#ifdef HAVE_ON_EXIT
2691 on_exit (cleanup, 0);
2692#endif
a18bcd0e 2693#endif
0f2d19dd
JB
2694
2695 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
24e68a57 2696 SCM_SETCDR (scm_undefineds, scm_undefineds);
0f2d19dd
JB
2697
2698 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2699 scm_nullstr = scm_makstr (0L, 0);
a8741caa
MD
2700 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
2701 scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
4037ac5f 2702 scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
a8741caa 2703 scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
8960e0a0 2704 scm_stand_in_procs = SCM_EOL;
0f2d19dd 2705 scm_permobjs = SCM_EOL;
ef290276 2706 scm_protects = SCM_EOL;
3b2b8760 2707 scm_asyncs = SCM_EOL;
0f2d19dd
JB
2708 scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2709 scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
2710#ifdef SCM_BIGDIG
2711 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2712#endif
2713 return 0;
2714}
2715\f
2716
0f2d19dd
JB
2717void
2718scm_init_gc ()
0f2d19dd
JB
2719{
2720#include "gc.x"
2721}
89e00824
ML
2722
2723/*
2724 Local Variables:
2725 c-file-style: "gnu"
2726 End:
2727*/