* Deprecated some unused SCM_NxxxP macros.
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 \f
48 #include <stdio.h>
49 #include "libguile/_scm.h"
50 #include "libguile/stime.h"
51 #include "libguile/stackchk.h"
52 #include "libguile/struct.h"
53 #include "libguile/smob.h"
54 #include "libguile/unif.h"
55 #include "libguile/async.h"
56 #include "libguile/ports.h"
57 #include "libguile/root.h"
58 #include "libguile/strings.h"
59 #include "libguile/vectors.h"
60 #include "libguile/weaks.h"
61 #include "libguile/hashtab.h"
62
63 #include "libguile/validate.h"
64 #include "libguile/gc.h"
65
66 #ifdef GUILE_DEBUG_MALLOC
67 #include "libguile/debug-malloc.h"
68 #endif
69
70 #ifdef HAVE_MALLOC_H
71 #include <malloc.h>
72 #endif
73
74 #ifdef HAVE_UNISTD_H
75 #include <unistd.h>
76 #endif
77
78 #ifdef __STDC__
79 #include <stdarg.h>
80 #define var_start(x, y) va_start(x, y)
81 #else
82 #include <varargs.h>
83 #define var_start(x, y) va_start(x)
84 #endif
85
86 \f
87 /* {heap tuning parameters}
88 *
89 * These are parameters for controlling memory allocation. The heap
90 * is the area out of which scm_cons, and object headers are allocated.
91 *
92 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
93 * 64 bit machine. The units of the _SIZE parameters are bytes.
94 * Cons pairs and object headers occupy one heap cell.
95 *
96 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
97 * allocated initially the heap will grow by half its current size
98 * each subsequent time more heap is needed.
99 *
100 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
101 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
102 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
103 * is in scm_init_storage() and alloc_some_heap() in sys.c
104 *
105 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
106 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
107 *
108 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
109 * is needed.
110 *
111 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
112 * trigger a GC.
113 *
114 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
115 * reclaimed by a GC triggered by must_malloc. If less than this is
116 * reclaimed, the trigger threshold is raised. [I don't know what a
117 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
118 * work around a oscillation that caused almost constant GC.]
119 */
120
121 /*
122 * Heap size 45000 and 40% min yield gives quick startup and no extra
123 * heap allocation. Having higher values on min yield may lead to
124 * large heaps, especially if code behaviour is varying its
125 * maximum consumption between different freelists.
126 */
127 #define SCM_INIT_HEAP_SIZE_1 (45000L * sizeof (scm_cell))
128 #define SCM_CLUSTER_SIZE_1 2000L
129 #define SCM_MIN_YIELD_1 40
130
131 #define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
132 #define SCM_CLUSTER_SIZE_2 1000L
133 /* The following value may seem large, but note that if we get to GC at
134 * all, this means that we have a numerically intensive application
135 */
136 #define SCM_MIN_YIELD_2 40
137
138 #define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */
139
140 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
141 #ifdef _QC
142 # define SCM_HEAP_SEG_SIZE 32768L
143 #else
144 # ifdef sequent
145 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
146 # else
147 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
148 # endif
149 #endif
150 /* Make heap grow with factor 1.5 */
151 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
152 #define SCM_INIT_MALLOC_LIMIT 100000
153 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
154
155 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
156 bounds for allocated storage */
157
158 #ifdef PROT386
159 /*in 386 protected mode we must only adjust the offset */
160 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
161 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
162 #else
163 # ifdef _UNICOS
164 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
165 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
166 # else
167 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
168 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
169 # endif /* UNICOS */
170 #endif /* PROT386 */
171 #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
172 #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
173 #define SCM_HEAP_SIZE \
174 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
175 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
176
177
178 \f
179 /* scm_freelists
180 */
181
/* Per-size-class freelist bookkeeping.  One "master" freelist exists for
 * each cell span (1-cell and 2-cell objects); cells are handed out in
 * clusters whose spine links them together.  */
typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  /* tail pointer of the clusters list, for appending during sweep */
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;
216
217 SCM scm_freelist = SCM_EOL;
218 scm_freelist_t scm_master_freelist = {
219 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
220 };
221 SCM scm_freelist2 = SCM_EOL;
222 scm_freelist_t scm_master_freelist2 = {
223 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
224 };
225
226 /* scm_mtrigger
227 * is the number of bytes of must_malloc allocation needed to trigger gc.
228 */
229 unsigned long scm_mtrigger;
230
231
232 /* scm_gc_heap_lock
233 * If set, don't expand the heap. Set only during gc, during which no allocation
234 * is supposed to take place anyway.
235 */
236 int scm_gc_heap_lock = 0;
237
238 /* GC Blocking
239 * Don't pause for collection if this is set -- just
240 * expand the heap.
241 */
242
243 int scm_block_gc = 1;
244
245 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
246 * collection (GC) more space is allocated for the heap.
247 */
248 #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
249
250 /* During collection, this accumulates objects holding
251 * weak references.
252 */
253 SCM scm_weak_vectors;
254
255 /* GC Statistics Keeping
256 */
257 unsigned long scm_cells_allocated = 0;
258 long scm_mallocated = 0;
259 unsigned long scm_gc_cells_collected;
260 unsigned long scm_gc_yield;
261 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
262 unsigned long scm_gc_malloc_collected;
263 unsigned long scm_gc_ports_collected;
264 unsigned long scm_gc_rt;
265 unsigned long scm_gc_time_taken = 0;
266
267 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
268 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
269 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
270 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
271 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
272 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
273
/* Descriptor for one contiguous heap segment; scm_heap_table holds one
 * of these per segment.  */
typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;

  /* If SEG_DATA->valid is non-zero, the conservative marking
     functions will apply SEG_DATA->valid to the purported pointer and
     SEG_DATA, and mark the object iff the function returns non-zero.
     At the moment, I don't think anyone uses this.  */
  int (*valid) ();
} scm_heap_seg_data_t;
292
293
294
295 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
296 static void alloc_some_heap (scm_freelist_t *);
297
298
299 \f
300 /* Debugging functions. */
301
302 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
303
304 /* Return the number of the heap segment containing CELL. */
305 static int
306 which_seg (SCM cell)
307 {
308 int i;
309
310 for (i = 0; i < scm_n_heap_segs; i++)
311 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
312 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
313 return i;
314 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
315 SCM_UNPACK (cell));
316 abort ();
317 }
318
319
320 static void
321 map_free_list (scm_freelist_t *master, SCM freelist)
322 {
323 int last_seg = -1, count = 0;
324 SCM f;
325
326 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
327 {
328 int this_seg = which_seg (f);
329
330 if (this_seg != last_seg)
331 {
332 if (last_seg != -1)
333 fprintf (stderr, " %5d %d-cells in segment %d\n",
334 count, master->span, last_seg);
335 last_seg = this_seg;
336 count = 0;
337 }
338 count++;
339 }
340 if (last_seg != -1)
341 fprintf (stderr, " %5d %d-cells in segment %d\n",
342 count, master->span, last_seg);
343 }
344
/* Scheme-level debugging aid: print a per-segment summary of both
   freelists.  Only built under --enable-guile-debug.  */
SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;
  /* First a one-line overview: segment count, then span:size pairs.  */
  fprintf (stderr, "%d segments total (%d:%d",
           scm_n_heap_segs,
           scm_heap_table[0].span,
           scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
             scm_heap_table[i].span,
             scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
  /* Then the per-segment breakdown for each freelist.  */
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
368
369 static int last_cluster;
370 static int last_size;
371
/* Count the free cells on FREELIST (cluster number I, or -1 for the
   loose freelist), aborting if a non-free cell is found.  Uses the file
   statics last_cluster/last_size to compress runs of equal-sized
   clusters in the report: output for a run is only emitted when the
   size changes, so consecutive clusters of the same length print as a
   range.  Returns the number of cells counted.  */
static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
    if (SCM_UNPACK_CAR (ls) == scm_tc_free_cell)
      ++n;
    else
      {
	/* Anything but a free cell here means heap corruption.  */
	fprintf (stderr, "bad cell in %s at position %d\n", title, n);
	abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
	{
	  /* Close out the previous run: single cluster or a range.  */
	  if (last_cluster == i - 1)
	    fprintf (stderr, "\t%d\n", last_size);
	  else
	    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
	}
      if (i >= 0)
	fprintf (stderr, "%s %d", title, i);
      else
	fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}
403
404 static void
405 free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
406 {
407 SCM clusters;
408 int i = 0, len, n = 0;
409 fprintf (stderr, "%s\n\n", title);
410 n += free_list_length ("free list", -1, freelist);
411 for (clusters = master->clusters;
412 SCM_NNULLP (clusters);
413 clusters = SCM_CDR (clusters))
414 {
415 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
416 n += len;
417 }
418 if (last_cluster == i - 1)
419 fprintf (stderr, "\t%d\n", last_size);
420 else
421 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
422 fprintf (stderr, "\ntotal %d objects\n\n", n);
423 }
424
/* Scheme-level debugging aid: report freelist/cluster lengths for both
   size classes.  Only built under --enable-guile-debug.  */
SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
436
437 #endif
438
439 #ifdef GUILE_DEBUG_FREELIST
440
441 /* Number of calls to SCM_NEWCELL since startup. */
442 static unsigned long scm_newcell_count;
443 static unsigned long scm_newcell2_count;
444
445 /* Search freelist for anything that isn't marked as a free cell.
446 Abort if we find something. */
447 static void
448 scm_check_freelist (SCM freelist)
449 {
450 SCM f;
451 int i = 0;
452
453 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
454 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
455 {
456 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
457 scm_newcell_count, i);
458 fflush (stderr);
459 abort ();
460 }
461 }
462
463 static int scm_debug_check_freelist = 0;
464
/* Toggle the per-allocation freelist consistency check used by
   scm_debug_newcell / scm_debug_newcell2.  */
SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
            "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
            "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* Validate FLAG as a boolean and copy it into the C flag variable.  */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
476
477
/* Debugging replacement for the SCM_NEWCELL macro: allocate one 1-cell
   object, optionally verifying the freelist (and forcing a GC to shake
   out bugs) on every allocation.  */
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      /* Collect on every allocation to stress the GC.  */
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      /* Pop the head of the freelist and mark it allocated.  */
      new = scm_freelist;
      scm_freelist = SCM_CDR (scm_freelist);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}
503
/* Debugging replacement for the SCM_NEWCELL2 macro: same as
   scm_debug_newcell but for 2-cell (double) objects drawn from the
   second freelist.  */
SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      /* Collect on every allocation to stress the GC.  */
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      /* Pop the head of the freelist and mark it allocated.  */
      new = scm_freelist2;
      scm_freelist2 = SCM_CDR (scm_freelist2);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}
529
530 #endif /* GUILE_DEBUG_FREELIST */
531
532 \f
533
534 static unsigned long
535 master_cells_allocated (scm_freelist_t *master)
536 {
537 int objects = master->clusters_allocated * (master->cluster_size - 1);
538 if (SCM_NULLP (master->clusters))
539 objects -= master->left_to_collect;
540 return master->span * objects;
541 }
542
543 static unsigned long
544 freelist_length (SCM freelist)
545 {
546 int n;
547 for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
548 ++n;
549 return n;
550 }
551
552 static unsigned long
553 compute_cells_allocated ()
554 {
555 return (scm_cells_allocated
556 + master_cells_allocated (&scm_master_freelist)
557 + master_cells_allocated (&scm_master_freelist2)
558 - scm_master_freelist.span * freelist_length (scm_freelist)
559 - scm_master_freelist2.span * freelist_length (scm_freelist2));
560 }
561
562 /* {Scheme Interface to GC}
563 */
564
/* Build an alist of GC statistics.  GC is blocked while the heap
   segment table is snapshotted; the retry loop guards against the
   table changing size underneath us (NOTE(review): with GC blocked
   this presumably cannot happen — belt and braces).  */
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Returns an association list of statistics about Guile's current use of storage.  ")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  SCM answer;

  SCM_DEFER_INTS;
  scm_block_gc = 1;
 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  /* Cons up (upper . lower) bound pairs, last segment first so the
     resulting list is in segment order.  */
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
				    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
			  heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;
  scm_block_gc = 0;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
			scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
			scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
			scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
			scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
			scm_cons (sym_heap_segments, heap_segs),
			SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
613
614
/* Begin a collection pass: record the start time and reset the
   per-collection counters.  WHAT names the trigger (unused here).  */
void
scm_gc_start (const char *what)
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
  scm_gc_cells_collected = 0;
  /* Save the previous yield before recomputing; adjust_min_yield
     looks at the larger of the two.  */
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
		  + master_cells_allocated (&scm_master_freelist)
		  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
627
/* Finish a collection pass: accumulate elapsed time and queue the
   after-gc async for the Scheme-level gc hook.  */
void
scm_gc_end ()
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
  scm_gc_time_taken += scm_gc_rt;
  scm_system_async_mark (scm_gc_async);
}
635
636
/* Expose an object's raw bits (its address, for heap objects) as an
   unsigned integer — stable for the object's lifetime.  */
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME
646
647
/* Scheme-level entry point: run a full collection with interrupts
   deferred.  */
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
           (),
            "Scans all of SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
660
661
662 \f
663 /* {C Interface For When GC is Triggered}
664 */
665
666 static void
667 adjust_min_yield (scm_freelist_t *freelist)
668 {
669 /* min yield is adjusted upwards so that next predicted total yield
670 * (allocated cells actually freed by GC) becomes
671 * `min_yield_fraction' of total heap size. Note, however, that
672 * the absolute value of min_yield will correspond to `collected'
673 * on one master (the one which currently is triggering GC).
674 *
675 * The reason why we look at total yield instead of cells collected
676 * on one list is that we want to take other freelists into account.
677 * On this freelist, we know that (local) yield = collected cells,
678 * but that's probably not the case on the other lists.
679 *
680 * (We might consider computing a better prediction, for example
681 * by computing an average over multiple GC:s.)
682 */
683 if (freelist->min_yield_fraction)
684 {
685 /* Pick largest of last two yields. */
686 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
687 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
688 #ifdef DEBUGINFO
689 fprintf (stderr, " after GC = %d, delta = %d\n",
690 scm_cells_allocated,
691 delta);
692 #endif
693 if (delta > 0)
694 freelist->min_yield += delta;
695 }
696 }
697
698 /* When we get POSIX threads support, the master will be global and
699 * common while the freelist will be individual for each thread.
700 */
701
702 SCM
703 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
704 {
705 SCM cell;
706 ++scm_ints_disabled;
707 do
708 {
709 if (SCM_NULLP (master->clusters))
710 {
711 if (master->grow_heap_p)
712 {
713 master->grow_heap_p = 0;
714 alloc_some_heap (master);
715 }
716 else
717 {
718 #ifdef DEBUGINFO
719 fprintf (stderr, "allocated = %d, ",
720 scm_cells_allocated
721 + master_cells_allocated (&scm_master_freelist)
722 + master_cells_allocated (&scm_master_freelist2));
723 #endif
724 scm_igc ("cells");
725 adjust_min_yield (master);
726 }
727 }
728 cell = SCM_CAR (master->clusters);
729 master->clusters = SCM_CDR (master->clusters);
730 ++master->clusters_allocated;
731 }
732 while (SCM_NULLP (cell));
733 --scm_ints_disabled;
734 *freelist = SCM_CDR (cell);
735 SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
736 return cell;
737 }
738
739 #if 0
740 /* This is a support routine which can be used to reserve a cluster
741 * for some special use, such as debugging. It won't be useful until
742 * free cells are preserved between garbage collections.
743 */
744
745 void
746 scm_alloc_cluster (scm_freelist_t *master)
747 {
748 SCM freelist, cell;
749 cell = scm_gc_for_newcell (master, &freelist);
750 SCM_SETCDR (cell, freelist);
751 return cell;
752 }
753 #endif
754
755 SCM scm_after_gc_hook;
756
757 scm_c_hook_t scm_before_gc_c_hook;
758 scm_c_hook_t scm_before_mark_c_hook;
759 scm_c_hook_t scm_before_sweep_c_hook;
760 scm_c_hook_t scm_after_sweep_c_hook;
761 scm_c_hook_t scm_after_gc_c_hook;
762
/* Run one full mark/sweep collection.  WHAT names the trigger, for
   debugging.  Order matters throughout: hooks, root marking, sweep.
   Returns early (after closing the timing bracket) if GC is blocked
   or the stack base is not yet known.  */
void
scm_igc (const char *what)
{
  int j;

  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  /* One character per GC: which freelist(s) were exhausted.  */
  fprintf (stderr,
	   SCM_NULLP (scm_freelist)
	   ? "*"
	   : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
#ifdef USE_THREADS
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  scm_gc_start (what);

  if (!scm_stack_base || scm_block_gc)
    {
      scm_gc_end ();
      return;
    }

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  /* Lock out heap growth for the duration of the collection.  */
  ++scm_gc_heap_lock;

  /* unprotect any struct types with no instances */
#if 0
  {
    SCM type_list;
    SCM * pos;

    pos = &scm_type_obj_list;
    type_list = scm_type_obj_list;
    while (type_list != SCM_EOL)
      if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
	{
	  pos = SCM_CDRLOC (type_list);
	  type_list = SCM_CDR (type_list);
	}
      else
	{
	  *pos = SCM_CDR (type_list);
	  type_list = SCM_CDR (type_list);
	}
  }
#endif

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    /* Clear everything above the stack pointer so dead continuations
       are not kept alive by stale references.  */
    while (x < bound)
      {
	elts[x] = SCM_BOOL_F;
	++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

#ifndef USE_THREADS

  /* Protect from the C stack.  This must be the first marking
   * done because it provides information about what objects
   * are "in-use" by the C code.  "in-use" objects are those
   * for which the values from SCM_LENGTH and SCM_CHARS must remain
   * usable.  This requirement is stricter than a liveness
   * requirement -- in particular, it constrains the implementation
   * of scm_vector_set_length_x.
   */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
		      (   (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
				       sizeof scm_save_regs_gc_mark)
			  / sizeof (SCM_STACKITEM)));

  {
    scm_sizet stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* FIXME: insert a phase to un-protect string-data preserved
   * in scm_vector_set_length_x.
   */

  /* Mark the table of permanently protected roots.  */
  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  scm_gc_end ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
}
909
910 \f
911 /* {Mark/Sweep}
912 */
913
914
915
916 /* Mark an object precisely.
917 */
918 void
919 scm_gc_mark (SCM p)
920 {
921 register long i;
922 register SCM ptr;
923
924 ptr = p;
925
926 gc_mark_loop:
927 if (SCM_IMP (ptr))
928 return;
929
930 gc_mark_nimp:
931 if (SCM_NCELLP (ptr))
932 scm_wta (ptr, "rogue pointer in heap", NULL);
933
934 switch (SCM_TYP7 (ptr))
935 {
936 case scm_tcs_cons_nimcar:
937 if (SCM_GCMARKP (ptr))
938 break;
939 SCM_SETGCMARK (ptr);
940 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
941 {
942 ptr = SCM_CAR (ptr);
943 goto gc_mark_nimp;
944 }
945 scm_gc_mark (SCM_CAR (ptr));
946 ptr = SCM_GCCDR (ptr);
947 goto gc_mark_nimp;
948 case scm_tcs_cons_imcar:
949 if (SCM_GCMARKP (ptr))
950 break;
951 SCM_SETGCMARK (ptr);
952 ptr = SCM_GCCDR (ptr);
953 goto gc_mark_loop;
954 case scm_tc7_pws:
955 if (SCM_GCMARKP (ptr))
956 break;
957 SCM_SETGCMARK (ptr);
958 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
959 ptr = SCM_GCCDR (ptr);
960 goto gc_mark_loop;
961 case scm_tcs_cons_gloc:
962 if (SCM_GCMARKP (ptr))
963 break;
964 SCM_SETGCMARK (ptr);
965 {
966 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
967 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
968 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
969 * pointer to a struct vtable data region. The fact that these are
970 * accessed in the same way restricts the possibilites to change the
971 * data layout of structs or heap cells.
972 */
973 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
974 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
975 switch (vtable_data [scm_vtable_index_vcell])
976 {
977 default:
978 {
979 /* ptr is a gloc */
980 SCM gloc_car = SCM_PACK (word0);
981 scm_gc_mark (gloc_car);
982 ptr = SCM_GCCDR (ptr);
983 goto gc_mark_loop;
984 }
985 case 1: /* ! */
986 case 0: /* ! */
987 {
988 /* ptr is a struct */
989 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
990 int len = SCM_LENGTH (layout);
991 char * fields_desc = SCM_CHARS (layout);
992 /* We're using SCM_GCCDR here like STRUCT_DATA, except
993 that it removes the mark */
994 scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));
995
996 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
997 {
998 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
999 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
1000 }
1001 if (len)
1002 {
1003 int x;
1004
1005 for (x = 0; x < len - 2; x += 2, ++struct_data)
1006 if (fields_desc[x] == 'p')
1007 scm_gc_mark (SCM_PACK (*struct_data));
1008 if (fields_desc[x] == 'p')
1009 {
1010 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
1011 for (x = *struct_data; x; --x)
1012 scm_gc_mark (SCM_PACK (*++struct_data));
1013 else
1014 scm_gc_mark (SCM_PACK (*struct_data));
1015 }
1016 }
1017 if (vtable_data [scm_vtable_index_vcell] == 0)
1018 {
1019 vtable_data [scm_vtable_index_vcell] = 1;
1020 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1021 goto gc_mark_loop;
1022 }
1023 }
1024 }
1025 }
1026 break;
1027 case scm_tcs_closures:
1028 if (SCM_GCMARKP (ptr))
1029 break;
1030 SCM_SETGCMARK (ptr);
1031 if (SCM_IMP (SCM_CDR (ptr)))
1032 {
1033 ptr = SCM_CLOSCAR (ptr);
1034 goto gc_mark_nimp;
1035 }
1036 scm_gc_mark (SCM_CLOSCAR (ptr));
1037 ptr = SCM_GCCDR (ptr);
1038 goto gc_mark_nimp;
1039 case scm_tc7_vector:
1040 case scm_tc7_lvector:
1041 #ifdef CCLO
1042 case scm_tc7_cclo:
1043 #endif
1044 if (SCM_GC8MARKP (ptr))
1045 break;
1046 SCM_SETGC8MARK (ptr);
1047 i = SCM_LENGTH (ptr);
1048 if (i == 0)
1049 break;
1050 while (--i > 0)
1051 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1052 scm_gc_mark (SCM_VELTS (ptr)[i]);
1053 ptr = SCM_VELTS (ptr)[0];
1054 goto gc_mark_loop;
1055 case scm_tc7_contin:
1056 if SCM_GC8MARKP
1057 (ptr) break;
1058 SCM_SETGC8MARK (ptr);
1059 if (SCM_VELTS (ptr))
1060 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
1061 (scm_sizet)
1062 (SCM_LENGTH (ptr) +
1063 (sizeof (SCM_STACKITEM) + -1 +
1064 sizeof (scm_contregs)) /
1065 sizeof (SCM_STACKITEM)));
1066 break;
1067 #ifdef HAVE_ARRAYS
1068 case scm_tc7_bvect:
1069 case scm_tc7_byvect:
1070 case scm_tc7_ivect:
1071 case scm_tc7_uvect:
1072 case scm_tc7_fvect:
1073 case scm_tc7_dvect:
1074 case scm_tc7_cvect:
1075 case scm_tc7_svect:
1076 #ifdef HAVE_LONG_LONGS
1077 case scm_tc7_llvect:
1078 #endif
1079 #endif
1080 case scm_tc7_string:
1081 SCM_SETGC8MARK (ptr);
1082 break;
1083
1084 case scm_tc7_substring:
1085 if (SCM_GC8MARKP(ptr))
1086 break;
1087 SCM_SETGC8MARK (ptr);
1088 ptr = SCM_CDR (ptr);
1089 goto gc_mark_loop;
1090
1091 case scm_tc7_wvect:
1092 if (SCM_GC8MARKP(ptr))
1093 break;
1094 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1095 scm_weak_vectors = ptr;
1096 SCM_SETGC8MARK (ptr);
1097 if (SCM_IS_WHVEC_ANY (ptr))
1098 {
1099 int x;
1100 int len;
1101 int weak_keys;
1102 int weak_values;
1103
1104 len = SCM_LENGTH (ptr);
1105 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1106 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1107
1108 for (x = 0; x < len; ++x)
1109 {
1110 SCM alist;
1111 alist = SCM_VELTS (ptr)[x];
1112
1113 /* mark everything on the alist except the keys or
1114 * values, according to weak_values and weak_keys. */
1115 while ( SCM_CONSP (alist)
1116 && !SCM_GCMARKP (alist)
1117 && SCM_CONSP (SCM_CAR (alist)))
1118 {
1119 SCM kvpair;
1120 SCM next_alist;
1121
1122 kvpair = SCM_CAR (alist);
1123 next_alist = SCM_CDR (alist);
1124 /*
1125 * Do not do this:
1126 * SCM_SETGCMARK (alist);
1127 * SCM_SETGCMARK (kvpair);
1128 *
1129 * It may be that either the key or value is protected by
1130 * an escaped reference to part of the spine of this alist.
1131 * If we mark the spine here, and only mark one or neither of the
1132 * key and value, they may never be properly marked.
1133 * This leads to a horrible situation in which an alist containing
1134 * freelist cells is exported.
1135 *
1136 * So only mark the spines of these arrays last of all marking.
1137 * If somebody confuses us by constructing a weak vector
1138 * with a circular alist then we are hosed, but at least we
1139 * won't prematurely drop table entries.
1140 */
1141 if (!weak_keys)
1142 scm_gc_mark (SCM_CAR (kvpair));
1143 if (!weak_values)
1144 scm_gc_mark (SCM_GCCDR (kvpair));
1145 alist = next_alist;
1146 }
1147 if (SCM_NIMP (alist))
1148 scm_gc_mark (alist);
1149 }
1150 }
1151 break;
1152
1153 case scm_tc7_msymbol:
1154 if (SCM_GC8MARKP(ptr))
1155 break;
1156 SCM_SETGC8MARK (ptr);
1157 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1158 ptr = SCM_SYMBOL_PROPS (ptr);
1159 goto gc_mark_loop;
1160 case scm_tc7_ssymbol:
1161 if (SCM_GC8MARKP(ptr))
1162 break;
1163 SCM_SETGC8MARK (ptr);
1164 break;
1165 case scm_tcs_subrs:
1166 break;
1167 case scm_tc7_port:
1168 i = SCM_PTOBNUM (ptr);
1169 if (!(i < scm_numptob))
1170 goto def;
1171 if (SCM_GC8MARKP (ptr))
1172 break;
1173 SCM_SETGC8MARK (ptr);
1174 if (SCM_PTAB_ENTRY(ptr))
1175 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
1176 if (scm_ptobs[i].mark)
1177 {
1178 ptr = (scm_ptobs[i].mark) (ptr);
1179 goto gc_mark_loop;
1180 }
1181 else
1182 return;
1183 break;
1184 case scm_tc7_smob:
1185 if (SCM_GC8MARKP (ptr))
1186 break;
1187 SCM_SETGC8MARK (ptr);
1188 switch (SCM_GCTYP16 (ptr))
1189 { /* should be faster than going through scm_smobs */
1190 case scm_tc_free_cell:
1191 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1192 case scm_tc16_allocated:
1193 case scm_tc16_big:
1194 case scm_tc16_real:
1195 case scm_tc16_complex:
1196 break;
1197 default:
1198 i = SCM_SMOBNUM (ptr);
1199 if (!(i < scm_numsmob))
1200 goto def;
1201 if (scm_smobs[i].mark)
1202 {
1203 ptr = (scm_smobs[i].mark) (ptr);
1204 goto gc_mark_loop;
1205 }
1206 else
1207 return;
1208 }
1209 break;
1210 default:
1211 def:scm_wta (ptr, "unknown type in ", "gc_mark");
1212 }
1213 }
1214
1215
1216 /* Mark a Region Conservatively
1217 */
1218
void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  register long m = n;
  register int i, j;
  register SCM_CELLPTR ptr;

  /* Walk the n stack items in x[] from the top down.  Any item whose
   * bit pattern looks like a cell pointer (SCM_CELLP) and that falls
   * inside a known heap segment is treated as a GC root and marked.
   * A binary search over scm_heap_table locates the segment.  */
  while (0 <= --m)
    if (SCM_CELLP (* (SCM *) &x[m]))
      {
	ptr = SCM2PTR (* (SCM *) &x[m]);
	i = 0;
	j = scm_n_heap_segs - 1;
	/* Quick reject: skip values outside the overall heap range.  */
	if (   SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
	    && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
	  {
	    while (i <= j)
	      {
		int seg_id;
		seg_id = -1;
		/* Check the two endpoints first; otherwise bisect.  */
		if (   (i == j)
		    || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
		  seg_id = i;
		else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
		  seg_id = j;
		else
		  {
		    int k;
		    k = (i + j) / 2;
		    if (k == i)
		      break;
		    if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
		      {
			j = k;
			++i;
			if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
			  continue;
			else
			  break;
		      }
		    else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
		      {
			i = k;
			--j;
			if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
			  continue;
			else
			  break;
		      }
		    /* NOTE(review): falling out of this else-branch with
		     * seg_id == -1 appears unreachable, since bounds[0]
		     * <= bounds[1] for every segment -- confirm.  */
		  }
		/* Only mark if the segment accepts the pointer (its
		 * optional `valid' predicate) and the pointer's span
		 * matches (double cells must be double-cell aligned).  */
		if (!scm_heap_table[seg_id].valid
		    || scm_heap_table[seg_id].valid (ptr,
						     &scm_heap_table[seg_id]))
		  if (scm_heap_table[seg_id].span == 1
		      || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
		    scm_gc_mark (* (SCM *) &x[m]);
		break;
	      }

	  }
      }
}
1281
1282
1283 /* The function scm_cellp determines whether an SCM value can be regarded as a
1284 * pointer to a cell on the heap. Binary search is used in order to determine
1285 * the heap segment that contains the cell.
1286 */
1287 int
1288 scm_cellp (SCM value)
1289 {
1290 if (SCM_CELLP (value)) {
1291 scm_cell * ptr = SCM2PTR (value);
1292 unsigned int i = 0;
1293 unsigned int j = scm_n_heap_segs - 1;
1294
1295 while (i < j) {
1296 int k = (i + j) / 2;
1297 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1298 j = k;
1299 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1300 i = k + 1;
1301 }
1302 }
1303
1304 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1305 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1306 && (!scm_heap_table[i].valid || scm_heap_table[i].valid (ptr, &scm_heap_table[i]))
1307 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
1308 return 1;
1309 } else {
1310 return 0;
1311 }
1312 } else {
1313 return 0;
1314 }
1315 }
1316
1317
1318 static void
1319 gc_sweep_freelist_start (scm_freelist_t *freelist)
1320 {
1321 freelist->cells = SCM_EOL;
1322 freelist->left_to_collect = freelist->cluster_size;
1323 freelist->clusters_allocated = 0;
1324 freelist->clusters = SCM_EOL;
1325 freelist->clustertail = &freelist->clusters;
1326 freelist->collected_1 = freelist->collected;
1327 freelist->collected = 0;
1328 }
1329
/* Finish a sweep pass over FREELIST: close off the trailing partial
 * cluster, account the cells collected, and decide whether the heap
 * should grow before the next collection.
 */
static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  int collected;
  *freelist->clustertail = freelist->cells;
  if (SCM_NNULLP (freelist->cells))
    {
      /* Turn the head cell of the pending chain into a cluster spine:
       * its car points at the first free cell, its cdr terminates the
       * cluster list.  */
      SCM c = freelist->cells;
      SCM_SETCAR (c, SCM_CDR (c));
      SCM_SETCDR (c, SCM_EOL);
      freelist->collected +=
	freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}
1355
/* Sweep phase of the collector.  Walk every heap segment cell by cell:
 * marked cells have their mark bit cleared; unmarked cells have any
 * associated malloc'd storage released (accumulated in `m' bytes) and
 * are threaded onto their segment's freelist, grouped into clusters of
 * freelist->cluster_size cells.
 */
void
scm_gc_sweep ()
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;              /* total malloc'd bytes reclaimed */
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
	 segment points to.  Rather than updating the real freelist
	 pointer as we go along, we accumulate the new head in
	 nfreelist.  Then, if it turns out that the entire segment is
	 free, we free (i.e., malloc's free) the whole segment, and
	 simply don't assign nfreelist back into the real freelist. */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
      /* Counts down by span; the loop test is the decrement itself, so
       * the body runs seg_size/span times.  */
      for (j = seg_size + span; j -= span; ptr += span)
	{
	  SCM scmptr = PTR2SCM (ptr);

	  switch SCM_TYP7 (scmptr)
	    {
	    case scm_tcs_cons_gloc:
	      {
		/* Dirk:FIXME:: Again, super ugly code:  scmptr may be a
		 * struct or a gloc.  See the corresponding comment in
		 * scm_gc_mark.
		 */
		scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
		scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
		if (SCM_GCMARKP (scmptr))
		  {
		    /* Live: reset the vcell flag set during marking.  */
		    if (vtable_data [scm_vtable_index_vcell] == 1)
		      vtable_data [scm_vtable_index_vcell] = 0;
		    goto cmrkcontinue;
		  }
		else
		  {
		    if (vtable_data [scm_vtable_index_vcell] == 0
			|| vtable_data [scm_vtable_index_vcell] == 1)
		      {
			/* Dead struct: invoke its free function; note
			 * that `free' here shadows stdlib free().  */
			scm_struct_free_t free
			  = (scm_struct_free_t) vtable_data[scm_struct_i_free];
			m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
		      }
		  }
	      }
	      break;
	    case scm_tcs_cons_imcar:
	    case scm_tcs_cons_nimcar:
	    case scm_tcs_closures:
	    case scm_tc7_pws:
	      if (SCM_GCMARKP (scmptr))
		goto cmrkcontinue;
	      break;
	    case scm_tc7_wvect:
	      if (SCM_GC8MARKP (scmptr))
		{
		  goto c8mrkcontinue;
		}
	      else
		{
		  /* Element block was allocated 2 words before VELTS.  */
		  m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
		  scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
		  break;
		}

	    case scm_tc7_vector:
	    case scm_tc7_lvector:
#ifdef CCLO
	    case scm_tc7_cclo:
#endif
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;

	      m += (SCM_LENGTH (scmptr) * sizeof (SCM));
	    freechars:
	      /* Shared tail: release the cell's malloc'd body.  */
	      scm_must_free (SCM_CHARS (scmptr));
	      /*	SCM_SETCHARS(scmptr, 0);*/
	      break;
#ifdef HAVE_ARRAYS
	    case scm_tc7_bvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
	      goto freechars;
	    case scm_tc7_byvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
	      goto freechars;
	    case scm_tc7_ivect:
	    case scm_tc7_uvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
	      goto freechars;
	    case scm_tc7_svect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
	      goto freechars;
#ifdef HAVE_LONG_LONGS
	    case scm_tc7_llvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
	      goto freechars;
#endif
	    case scm_tc7_fvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
	      goto freechars;
	    case scm_tc7_dvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
	      goto freechars;
	    case scm_tc7_cvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
	      goto freechars;
#endif
	    case scm_tc7_substring:
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      break;
	    case scm_tc7_string:
	      /* +1 for the terminating NUL byte.  */
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) + 1;
	      goto freechars;
	    case scm_tc7_msymbol:
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      m += (SCM_LENGTH (scmptr) + 1
		    + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
	      scm_must_free ((char *)SCM_SLOTS (scmptr));
	      break;
	    case scm_tc7_contin:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
	      if (SCM_VELTS (scmptr))
		goto freechars;
	      /* fall through (no saved stack to free) */
	    case scm_tc7_ssymbol:
	      if SCM_GC8MARKP(scmptr)
		goto c8mrkcontinue;
	      break;
	    case scm_tcs_subrs:
	      /* Subrs are statically allocated; never freed.  */
	      continue;
	    case scm_tc7_port:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      if SCM_OPENP (scmptr)
		{
		  int k = SCM_PTOBNUM (scmptr);
		  if (!(k < scm_numptob))
		    goto sweeperr;
		  /* Keep "revealed" ports alive.  */
		  if (scm_revealed_count (scmptr) > 0)
		    continue;
		  /* Yes, I really do mean scm_ptobs[k].free */
		  /* rather than ftobs[k].close.  .close */
		  /* is for explicit CLOSE-PORT by user */
		  m += (scm_ptobs[k].free) (scmptr);
		  SCM_SETSTREAM (scmptr, 0);
		  scm_remove_from_port_table (scmptr);
		  scm_gc_ports_collected++;
		  SCM_SETAND_CAR (scmptr, ~SCM_OPN);
		}
	      break;
	    case scm_tc7_smob:
	      switch SCM_GCTYP16 (scmptr)
		{
		case scm_tc_free_cell:
		case scm_tc16_real:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  break;
#ifdef SCM_BIGDIG
		case scm_tc16_big:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
		  goto freechars;
#endif /* def SCM_BIGDIG */
		case scm_tc16_complex:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  m += 2 * sizeof (double);
		  goto freechars;
		default:
		  /* User-defined smob: dispatch to its free function.  */
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;

		  {
		    int k;
		    k = SCM_SMOBNUM (scmptr);
		    if (!(k < scm_numsmob))
		      goto sweeperr;
		    m += (scm_smobs[k].free) (scmptr);
		    break;
		  }
		}
	      break;
	    default:
	    sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
	    }
#if 0
	  if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
	    exit (2);
#endif
	  /* The cell is dead: add it to the cluster being built.  */
	  if (!--left_to_collect)
	    {
	      /* Cluster full: this cell becomes the cluster spine.  */
	      SCM_SETCAR (scmptr, nfreelist);
	      *freelist->clustertail = scmptr;
	      freelist->clustertail = SCM_CDRLOC (scmptr);

	      nfreelist = SCM_EOL;
	      freelist->collected += span * freelist->cluster_size;
	      left_to_collect = freelist->cluster_size;
	    }
	  else
	    {
	      /* Stick the new cell on the front of nfreelist.  It's
		 critical that we mark this cell as freed; otherwise, the
		 conservative collector might trace it as some other type
		 of object.  */
	      SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	      SCM_SETCDR (scmptr, nfreelist);
	      nfreelist = scmptr;
	    }

	  continue;
	c8mrkcontinue:
	  /* Live 8-bit-marked cell: clear the mark for the next GC.  */
	  SCM_CLRGC8MARK (scmptr);
	  continue;
	cmrkcontinue:
	  /* Live cons-marked cell: clear the mark for the next GC.  */
	  SCM_CLRGCMARK (scmptr);
	}
#ifdef GC_FREE_SEGMENTS
      /* NOTE(review): `n' is not declared anywhere in this function;
       * this conditionally-compiled branch cannot compile as-is --
       * it presumably predates the left_to_collect rework.  Confirm
       * before enabling GC_FREE_SEGMENTS.  */
      if (n == seg_size)
	{
	  register long j;

	  freelist->heap_size -= seg_size;
	  free ((char *) scm_heap_table[i].bounds[0]);
	  scm_heap_table[i].bounds[0] = 0;
	  for (j = i + 1; j < scm_n_heap_segs; j++)
	    scm_heap_table[j - 1] = scm_heap_table[j];
	  scm_n_heap_segs -= 1;
	  i--;		/* We need to scan the segment just moved.  */
	}
      else
#endif /* ifdef GC_FREE_SEGMENTS */
	{
	  /* Update the real freelist pointer to point to the head of
	     the list of free cells we've built for this segment. */
	  freelist->cells = nfreelist;
	  freelist->left_to_collect = left_to_collect;
	}

#ifdef GUILE_DEBUG_FREELIST
      scm_check_freelist (freelist == &scm_master_freelist
			  ? scm_freelist
			  : scm_freelist2);
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
1660
1661
1662 \f
1663
1664 /* {Front end to malloc}
1665 *
1666 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1667 *
 * These functions provide services comparable to malloc, realloc, and
1669 * free. They are for allocating malloced parts of scheme objects.
1670 * The primary purpose of the front end is to impose calls to gc.
1671 */
1672
1673
1674 /* scm_must_malloc
1675 * Return newly malloced storage or throw an error.
1676 *
1677 * The parameter WHAT is a string for error reporting.
1678 * If the threshold scm_mtrigger will be passed by this
1679 * allocation, or if the first call to malloc fails,
1680 * garbage collect -- on the presumption that some objects
1681 * using malloced storage may be collected.
1682 *
1683 * The limit scm_mtrigger may be raised by this allocation.
1684 */
1685 void *
1686 scm_must_malloc (scm_sizet size, const char *what)
1687 {
1688 void *ptr;
1689 unsigned long nm = scm_mallocated + size;
1690
1691 if (nm <= scm_mtrigger)
1692 {
1693 SCM_SYSCALL (ptr = malloc (size));
1694 if (NULL != ptr)
1695 {
1696 scm_mallocated = nm;
1697 #ifdef GUILE_DEBUG_MALLOC
1698 scm_malloc_register (ptr, what);
1699 #endif
1700 return ptr;
1701 }
1702 }
1703
1704 scm_igc (what);
1705
1706 nm = scm_mallocated + size;
1707 SCM_SYSCALL (ptr = malloc (size));
1708 if (NULL != ptr)
1709 {
1710 scm_mallocated = nm;
1711 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1712 if (nm > scm_mtrigger)
1713 scm_mtrigger = nm + nm / 2;
1714 else
1715 scm_mtrigger += scm_mtrigger / 2;
1716 }
1717 #ifdef GUILE_DEBUG_MALLOC
1718 scm_malloc_register (ptr, what);
1719 #endif
1720
1721 return ptr;
1722 }
1723
1724 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1725 return 0; /* never reached */
1726 }
1727
1728
1729 /* scm_must_realloc
1730 * is similar to scm_must_malloc.
1731 */
1732 void *
1733 scm_must_realloc (void *where,
1734 scm_sizet old_size,
1735 scm_sizet size,
1736 const char *what)
1737 {
1738 void *ptr;
1739 scm_sizet nm = scm_mallocated + size - old_size;
1740
1741 if (nm <= scm_mtrigger)
1742 {
1743 SCM_SYSCALL (ptr = realloc (where, size));
1744 if (NULL != ptr)
1745 {
1746 scm_mallocated = nm;
1747 #ifdef GUILE_DEBUG_MALLOC
1748 scm_malloc_reregister (where, ptr, what);
1749 #endif
1750 return ptr;
1751 }
1752 }
1753
1754 scm_igc (what);
1755
1756 nm = scm_mallocated + size - old_size;
1757 SCM_SYSCALL (ptr = realloc (where, size));
1758 if (NULL != ptr)
1759 {
1760 scm_mallocated = nm;
1761 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1762 if (nm > scm_mtrigger)
1763 scm_mtrigger = nm + nm / 2;
1764 else
1765 scm_mtrigger += scm_mtrigger / 2;
1766 }
1767 #ifdef GUILE_DEBUG_MALLOC
1768 scm_malloc_reregister (where, ptr, what);
1769 #endif
1770 return ptr;
1771 }
1772
1773 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1774 return 0; /* never reached */
1775 }
1776
1777 void
1778 scm_must_free (void *obj)
1779 {
1780 #ifdef GUILE_DEBUG_MALLOC
1781 scm_malloc_unregister (obj);
1782 #endif
1783 if (obj)
1784 free (obj);
1785 else
1786 scm_wta (SCM_INUM0, "already free", "");
1787 }
1788
1789 /* Announce that there has been some malloc done that will be freed
1790 * during gc. A typical use is for a smob that uses some malloced
1791 * memory but can not get it from scm_must_malloc (for whatever
1792 * reason). When a new object of this smob is created you call
1793 * scm_done_malloc with the size of the object. When your smob free
1794 * function is called, be sure to include this size in the return
1795 * value. */
1796
1797 void
1798 scm_done_malloc (long size)
1799 {
1800 scm_mallocated += size;
1801
1802 if (scm_mallocated > scm_mtrigger)
1803 {
1804 scm_igc ("foreign mallocs");
1805 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1806 {
1807 if (scm_mallocated > scm_mtrigger)
1808 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1809 else
1810 scm_mtrigger += scm_mtrigger / 2;
1811 }
1812 }
1813 }
1814
1815
1816 \f
1817
1818 /* {Heap Segments}
1819 *
1820 * Each heap segment is an array of objects of a particular size.
1821 * Every segment has an associated (possibly shared) freelist.
1822 * A table of segment records is kept that records the upper and
1823 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
1825 * into valid segments at reasonable offsets). */
1826
1827 /* scm_expmem
1828 * is true if the first segment was smaller than INIT_HEAP_SEG.
1829 * If scm_expmem is set to one, subsequent segment allocations will
1830 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1831 */
1832 int scm_expmem = 0;
1833
1834 scm_sizet scm_max_segment_size;
1835
1836 /* scm_heap_org
1837 * is the lowest base address of any heap segment.
1838 */
1839 SCM_CELLPTR scm_heap_org;
1840
1841 scm_heap_seg_data_t * scm_heap_table = 0;
1842 int scm_n_heap_segs = 0;
1843
1844 /* init_heap_seg
1845 * initializes a new heap segment and return the number of objects it contains.
1846 *
1847 * The segment origin, segment size in bytes, and the span of objects
1848 * in cells are input parameters. The freelist is both input and output.
1849 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record.
1852 */
1853
1854
static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  /* A failed malloc upstream shows up here as a null origin.  */
  if (seg_org == NULL)
    return 0;

  ptr = CELL_UP (seg_org, span);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = CELL_DN ((char *) seg_org + size, span);

  /* Find the right place and insert the segment record.  The table is
   * kept sorted by segment base address so the conservative scan can
   * binary-search it.
   */
  for (new_seg_index = 0;
       (   (new_seg_index < scm_n_heap_segs)
	&& SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    /* Shift later records up one slot to open a gap.  */
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].valid = 0;
  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;


  /* Compute the least valid object pointer w/in this segment
   */
  ptr = CELL_UP (ptr, span);


  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters.  Each cluster is
   * a spine cell whose car points to a chain of free cells and whose
   * cdr links to the next cluster.  */
  {
    SCM clusters;
    SCM *clusterp = &clusters;
    int n_cluster_cells = span * freelist->cluster_size;

    while (n_new_cells > span) /* at least one spine + one freecell */
      {
	/* Determine end of cluster
	 */
	if (n_new_cells >= n_cluster_cells)
	  {
	    seg_end = ptr + n_cluster_cells;
	    n_new_cells -= n_cluster_cells;
	  }
	else
	  /* [cmm] looks like the segment size doesn't divide cleanly by
	     cluster size.  bad cmm! */
	  abort();

	/* Allocate cluster spine
	 */
	*clusterp = PTR2SCM (ptr);
	SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
	clusterp = SCM_CDRLOC (*clusterp);
	ptr += span;

	/* Chain the remaining cells of this cluster as free cells.  */
	while (ptr < seg_end)
	  {
	    SCM scmptr = PTR2SCM (ptr);

	    SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	    SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
	    ptr += span;
	  }

	/* Terminate the free-cell chain of this cluster.  */
	SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
      }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
1958
1959 static scm_sizet
1960 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
1961 {
1962 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
1963
1964 return
1965 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
1966 + ALIGNMENT_SLACK (freelist);
1967 }
1968
/* Grow the heap by allocating one new segment for FREELIST, sized by
 * the yield heuristic below.  Aborts via scm_wta if the heap table
 * cannot grow or no segment of acceptable size can be malloc'd.
 */
static void
alloc_some_heap (scm_freelist_t *freelist)
{
  scm_heap_seg_data_t * tmptable;
  SCM_CELLPTR ptr;
  long len;

  /* Critical code sections (such as the garbage collector)
   * aren't supposed to add heap segments.
   */
  if (scm_gc_heap_lock)
    scm_wta (SCM_UNDEFINED, "need larger initial", "heap");

  /* Expand the heap tables to have room for the new segment.
   * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
   * only if the allocation of the segment itself succeeds.
   */
  len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);

  SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
			   realloc ((char *)scm_heap_table, len)));
  if (!tmptable)
    scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
  else
    scm_heap_table = tmptable;


  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  {
    /* Assure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f)
     */
    int f = freelist->min_yield_fraction;
    long h = SCM_HEAP_SIZE;
    /* f is a percentage, hence the 100 and 99 scale factors.  */
    long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%d < %d)", len, min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    scm_sizet smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition: halve the request on each
     * malloc failure until it drops below the minimum useful size. */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
	   && (len >= smallest))
      {
	scm_sizet rounded_len = round_to_cluster_size (freelist, len);
	SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
	if (ptr)
	  {
	    init_heap_seg (ptr, rounded_len, freelist);
	    return;
	  }
	len /= 2;
      }
  }

  scm_wta (SCM_UNDEFINED, "could not grow", "heap");
}
2056
2057
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
	    "")
#define FUNC_NAME s_scm_unhash_name
{
  /* Scan every cell of every heap segment for gloc cells whose variable
   * name matches NAME (or all glocs, when NAME is #t), and reset their
   * word 0 to NAME, forcing re-memoization on next use.  */
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM cell = PTR2SCM (p);
	  if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
	    {
	      /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
	       * struct cell.  See the corresponding comment in scm_gc_mark.
	       */
	      scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
	      SCM gloc_car = SCM_PACK (word0); /* access as gloc */
	      SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
	      /* vcell values 0 and 1 mark struct vtables, not glocs, so
	       * skip those to avoid clobbering structs.  */
	      if ((SCM_TRUE_P (name) || SCM_EQ_P (SCM_CAR (gloc_car), name))
		  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
		{
		  SCM_SET_CELL_OBJECT_0 (cell, name);
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
2098
2099
2100 \f
2101 /* {GC Protection Helper Functions}
2102 */
2103
2104
2105 void
2106 scm_remember (SCM *ptr)
2107 { /* empty */ }
2108
2109
2110 /*
2111 These crazy functions prevent garbage collection
2112 of arguments after the first argument by
2113 ensuring they remain live throughout the
2114 function because they are used in the last
2115 line of the code block.
2116 It'd be better to have a nice compiler hint to
2117 aid the conservative stack-scanning GC. --03/09/00 gjb */
2118 SCM
2119 scm_return_first (SCM elt, ...)
2120 {
2121 return elt;
2122 }
2123
int
scm_return_first_int (int i, ...)
{
  /* Integer variant of scm_return_first; trailing args are only kept
     alive, never read.  */
  int first = i;
  return first;
}
2129
2130
/* Protect OBJ for the lifetime of the process by consing it onto the
 * scm_permobjs list (presumably itself a GC root -- confirm against
 * scm_sys_protects).  Unlike scm_protect_object this cannot be undone.
 * Returns OBJ.
 */
SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2139
2140
2141 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2142 even if all other references are dropped, until someone applies
2143 scm_unprotect_object to it. This function returns OBJ.
2144
2145 Calls to scm_protect_object nest. For every object OBJ, there is a
2146 counter which scm_protect_object(OBJ) increments and
2147 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
2148 an object's counter is greater than zero, the garbage collector
2149 will not free it. */
2150
SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_DEFER_INTS;

  /* The protection count for OBJ lives in the cdr of its handle in
     the scm_protects hash table.  */
  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    /* Not yet protected: create an (obj . 1) entry.  */
    scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (1));
  else
    SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_ALLOW_INTS;

  return obj;
}
2170
2171
2172 /* Remove any protection for OBJ established by a prior call to
2173 scm_protect_object. This function returns OBJ.
2174
2175 See scm_protect_object for more information. */
2176 SCM
2177 scm_unprotect_object (SCM obj)
2178 {
2179 SCM handle;
2180
2181 /* This critical section barrier will be replaced by a mutex. */
2182 SCM_DEFER_INTS;
2183
2184 handle = scm_hashq_get_handle (scm_protects, obj);
2185
2186 if (SCM_NIMP (handle))
2187 {
2188 int count = SCM_INUM (SCM_CAR (handle)) - 1;
2189 if (count <= 0)
2190 scm_hashq_remove_x (scm_protects, obj);
2191 else
2192 SCM_SETCDR (handle, SCM_MAKINUM (count));
2193 }
2194
2195 SCM_ALLOW_INTS;
2196
2197 return obj;
2198 }
2199
2200 int terminating;
2201
/* called on process termination. */
/* Flush all output ports before exit.  Registered either via atexit()
 * or on_exit() depending on what the platform provides; the two
 * registration mechanisms impose different signatures, hence the
 * #ifdef'd parameter lists below (the body is shared).  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}
2220
2221 \f
/* Allocate the first heap segment for FREELIST, trying INIT_HEAP_SIZE
 * first and falling back to SCM_HEAP_SEG_SIZE.  Returns 0 on success,
 * 1 if no segment could be allocated at all.  Sets scm_expmem when the
 * requested (larger) size succeeded, enabling exponential growth.  */
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
  /* init_heap_seg returns 0 when malloc yielded a null pointer.  */
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    scm_expmem = 1;

  /* min_yield_fraction is a percentage of the heap size.  */
  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
			   / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
2246
2247 \f
2248 static void
2249 init_freelist (scm_freelist_t *freelist,
2250 int span,
2251 int cluster_size,
2252 int min_yield)
2253 {
2254 freelist->clusters = SCM_EOL;
2255 freelist->cluster_size = cluster_size + 1;
2256 freelist->left_to_collect = 0;
2257 freelist->clusters_allocated = 0;
2258 freelist->min_yield = 0;
2259 freelist->min_yield_fraction = min_yield;
2260 freelist->span = span;
2261 freelist->collected = 0;
2262 freelist->collected_1 = 0;
2263 freelist->heap_size = 0;
2264 }
2265
/* One-time initialization of Guile's storage subsystem: the protect
   table, the two master freelists (single- and double-cell), the
   initial heap segments, GC hooks, the port table, the exit handler,
   and a number of permanently-protected root objects.  Zero heap
   sizes / triggers select the compiled-in defaults.  Returns 0 on
   success, 1 on failure (segment or port-table allocation failed). */
int
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
		  scm_sizet init_heap_size_2, int gc_trigger_2,
		  scm_sizet max_segment_size)
{
  scm_sizet j;

  /* Substitute defaults for unspecified (zero) sizes. */
  if (!init_heap_size_1)
    init_heap_size_1 = SCM_INIT_HEAP_SIZE_1;
  if (!init_heap_size_2)
    init_heap_size_2 = SCM_INIT_HEAP_SIZE_2;

  /* Clear every GC-protected root slot, and keep GC blocked until
     initialization is complete. */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  /* Freelist 1 holds single cells, freelist 2 holds double cells. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  init_freelist (&scm_master_freelist,
		 1, SCM_CLUSTER_SIZE_1,
		 gc_trigger_1 ? gc_trigger_1 : SCM_MIN_YIELD_1);
  init_freelist (&scm_master_freelist2,
		 2, SCM_CLUSTER_SIZE_2,
		 gc_trigger_2 ? gc_trigger_2 : SCM_MIN_YIELD_2);
  scm_max_segment_size
    = max_segment_size ? max_segment_size : SCM_MAX_SEGMENT_SIZE;

  scm_expmem = 0;

  /* NOTE(review): this assignment of j appears to be dead -- j is not
     read again in this function. */
  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Room for two segment descriptors: one per initial segment created
     just below. */
  scm_heap_table = ((scm_heap_seg_data_t *)
		    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));

  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  /* C-level GC hooks, fired at the corresponding GC phases. */
  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports. */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

  /* Register cleanup() so buffered port output is flushed on exit;
     use whichever registration mechanism configure detected. */
#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  /* A circular list of SCM_UNDEFINED (its cdr points to itself). */
  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  /* Create the permanently-protected root objects: canonical empty
     string/vector, the symbol hash tables, and the protect table. */
  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
  scm_asyncs = SCM_EOL;
  /* Expose fixnum limits to Scheme. */
  scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif
  return 0;
}
2348 \f
2349
/* Final GC-module initialization: create the Scheme-visible
   after-gc-hook, then run the generated registration code.
   (gc.x is machine-generated -- presumably by the Guile snarfer from
   this file's primitive definitions; confirm in the build rules.) */
void
scm_init_gc ()
{
  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
#include "libguile/gc.x"
}
2356
2357 /*
2358 Local Variables:
2359 c-file-style: "gnu"
2360 End:
2361 */