1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 \f
46 #include <stdio.h>
47 #include "_scm.h"
48 #include "stime.h"
49 #include "stackchk.h"
50 #include "struct.h"
51 #include "weaks.h"
52 #include "guardians.h"
53 #include "smob.h"
54 #include "unif.h"
55 #include "async.h"
56 #include "ports.h"
57 #include "root.h"
58 #include "strings.h"
59 #include "vectors.h"
60
61 #include "validate.h"
62 #include "gc.h"
63
64 #ifdef HAVE_MALLOC_H
65 #include <malloc.h>
66 #endif
67
68 #ifdef HAVE_UNISTD_H
69 #include <unistd.h>
70 #endif
71
72 #ifdef __STDC__
73 #include <stdarg.h>
74 #define var_start(x, y) va_start(x, y)
75 #else
76 #include <varargs.h>
77 #define var_start(x, y) va_start(x)
78 #endif
79
80 \f
81 /* {heap tuning parameters}
82 *
83 * These are parameters for controlling memory allocation. The heap
 84  * is the area out of which cons pairs and object headers are allocated.
85 *
86 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
87 * 64 bit machine. The units of the _SIZE parameters are bytes.
88 * Cons pairs and object headers occupy one heap cell.
89 *
90 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
91 * allocated initially the heap will grow by half its current size
92 * each subsequent time more heap is needed.
93 *
94 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
95 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
96 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
97 * is in scm_init_storage() and alloc_some_heap() in sys.c
98 *
99 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
100 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
101 *
102 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
103 * is needed.
104 *
105 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
106 * trigger a GC.
107 *
108 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
109 * reclaimed by a GC triggered by must_malloc. If less than this is
110 * reclaimed, the trigger threshold is raised. [I don't know what a
111 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
112  * work around an oscillation that caused almost constant GC.]
113 */
114
115 #define SCM_INIT_HEAP_SIZE_1 (40000L * sizeof (scm_cell))
116 #define SCM_CLUSTER_SIZE_1 2000L
117 #define SCM_GC_TRIGGER_1 -50
118
119 #define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
120 #define SCM_CLUSTER_SIZE_2 1000L
121 /* The following value may seem large, but note that if we get to GC at
122 * all, this means that we have a numerically intensive application
123 */
124 #define SCM_GC_TRIGGER_2 -50
125
126 #define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */
127
128 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
129 #ifdef _QC
130 # define SCM_HEAP_SEG_SIZE 32768L
131 #else
132 # ifdef sequent
133 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
134 # else
135 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
136 # endif
137 #endif
138 /* Make heap grow with factor 1.5 */
139 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
140 #define SCM_INIT_MALLOC_LIMIT 100000
141 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
142
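/* Illustrative sketch (disabled with #if 0, as elsewhere in this file):
 * how the growth parameters above combine.  Starting from
 * SCM_INIT_HEAP_SIZE_1 (40000 cells' worth of bytes), each later
 * expansion adds SCM_EXPHEAP of the current heap size, i.e. the heap
 * grows by a factor of 1.5.  The helper name is hypothetical.
 */
#if 0
static scm_sizet
next_heap_size_example (scm_sizet current_heap_bytes)
{
  /* e.g. 320000 bytes -> 320000 + 160000 = 480000 bytes */
  return current_heap_bytes + SCM_EXPHEAP (current_heap_bytes);
}
#endif
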
143 /* CELL_UP and CELL_DN are used by init_heap_seg to find scm_cell-aligned inner
144 bounds for allocated storage */
145
146 #ifdef PROT386
147 /*in 386 protected mode we must only adjust the offset */
148 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
149 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
150 #else
151 # ifdef _UNICOS
152 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
153 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
154 # else
155 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
156 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
157 # endif /* UNICOS */
158 #endif /* PROT386 */
159 #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
160 #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
161
162
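/* Worked example of the cell-alignment macros above (assuming 8-byte
 * single cells, span == 1): a raw pointer value of 0x1003 is rounded up
 * by CELL_UP to the next cell boundary, 0x1008, and down by CELL_DN to
 * 0x1000.  With span == 2 (16-byte double cells) the same value rounds
 * to 0x1010 and 0x1000 respectively.  init_heap_seg uses this to trim a
 * freshly malloc'd block to cell-aligned inner bounds.
 */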
163 \f
164 /* scm_freelists
165 */
166
167 typedef struct scm_freelist_t {
168 /* collected cells */
169 SCM cells;
170 #ifdef GUILE_NEW_GC_SCHEME
171 /* number of cells left to collect before cluster is full */
172 unsigned int left_to_collect;
173   /* a list of freelists, each of size cluster_size,
174      except the last one, which may be shorter */
175 SCM clusters;
176 SCM *clustertail;
177 /* this is the number of cells in each cluster, including the spine cell */
178 int cluster_size;
179 /* set to grow the heap when we run out of clusters
180 */
181 int grow_heap_p;
182 /* minimum number of objects allocated before GC is triggered
183 */
184 int gc_trigger;
185 /* defines gc_trigger as percent of heap size
186 * 0 => constant trigger
187 */
188 int gc_trigger_fraction;
189 #endif
190 /* number of cells per object on this list */
191 int span;
192 /* number of collected cells during last GC */
193 int collected;
194 /* total number of cells in heap segments
195 * belonging to this list.
196 */
197 int heap_size;
198 } scm_freelist_t;
199
200 #ifdef GUILE_NEW_GC_SCHEME
201 SCM scm_freelist = SCM_EOL;
202 scm_freelist_t scm_master_freelist = {
203 SCM_EOL, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
204 };
205 SCM scm_freelist2 = SCM_EOL;
206 scm_freelist_t scm_master_freelist2 = {
207 SCM_EOL, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
208 };
209 #else
210 scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
211 scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
212 #endif
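
/* Illustrative layout (new GC scheme) of one cluster as built by
 * init_heap_seg below.  The first cell of each cluster is the "spine":
 * its CAR points at the cluster's first free cell and its CDR links to
 * the next cluster (or to the rest of the cluster list).  The remaining
 * cluster_size - 1 objects form a CDR-linked chain of scm_tc_free_cell
 * cells terminated by SCM_EOL:
 *
 *   spine:  [ CAR -> cell 1    | CDR -> next cluster ]
 *   cell 1: [ scm_tc_free_cell | CDR -> cell 2       ]
 *   ...
 *   cell N: [ scm_tc_free_cell | SCM_EOL             ]
 */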
213
214 /* scm_mtrigger
215 * is the number of bytes of must_malloc allocation needed to trigger gc.
216 */
217 unsigned long scm_mtrigger;
218
219
220 /* scm_gc_heap_lock
221 * If set, don't expand the heap. Set only during gc, during which no allocation
222 * is supposed to take place anyway.
223 */
224 int scm_gc_heap_lock = 0;
225
226 /* GC Blocking
227 * Don't pause for collection if this is set -- just
228 * expand the heap.
229 */
230
231 int scm_block_gc = 1;
232
233 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
234  * collection (GC), more space is allocated for the heap.
235 */
236 #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
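
/* Example: with a 16384-cell heap, MIN_GC_YIELD is 4096 cells; if a
 * collection frees fewer than that, scm_gc_for_alloc grows the heap
 * (this applies to the pre-GUILE_NEW_GC_SCHEME code path below). */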
237
238 /* During collection, this accumulates objects holding
239 * weak references.
240 */
241 SCM scm_weak_vectors;
242
243 /* GC Statistics Keeping
244 */
245 unsigned long scm_cells_allocated = 0;
246 long scm_mallocated = 0;
247 /* unsigned long scm_gc_cells_collected; */
248 unsigned long scm_gc_malloc_collected;
249 unsigned long scm_gc_ports_collected;
250 unsigned long scm_gc_rt;
251 unsigned long scm_gc_time_taken = 0;
252
253 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
254 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
255 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
256 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
257 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
258 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
259
260 typedef struct scm_heap_seg_data_t
261 {
262 /* lower and upper bounds of the segment */
263 SCM_CELLPTR bounds[2];
264
265 /* address of the head-of-freelist pointer for this segment's cells.
266 All segments usually point to the same one, scm_freelist. */
267 scm_freelist_t *freelist;
268
269 /* number of SCM words per object in this segment */
270 int span;
271
272 /* If SEG_DATA->valid is non-zero, the conservative marking
273 functions will apply SEG_DATA->valid to the purported pointer and
274 SEG_DATA, and mark the object iff the function returns non-zero.
275 At the moment, I don't think anyone uses this. */
276 int (*valid) ();
277 } scm_heap_seg_data_t;
278
279
280
281
282 static void scm_mark_weak_vector_spines (void);
283 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
284 static void alloc_some_heap (scm_freelist_t *);
285
286
287 \f
288 /* Debugging functions. */
289
290 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
291
292 /* Return the number of the heap segment containing CELL. */
293 static int
294 which_seg (SCM cell)
295 {
296 int i;
297
298 for (i = 0; i < scm_n_heap_segs; i++)
299 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
300 && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
301 return i;
302 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
303 SCM_UNPACK (cell));
304 abort ();
305 }
306
307
308 #ifdef GUILE_NEW_GC_SCHEME
309 static void
310 map_free_list (scm_freelist_t *master, SCM freelist)
311 {
312 int last_seg = -1, count = 0;
313 SCM f;
314
315 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
316 {
317 int this_seg = which_seg (f);
318
319 if (this_seg != last_seg)
320 {
321 if (last_seg != -1)
322 fprintf (stderr, " %5d %d-cells in segment %d\n",
323 count, master->span, last_seg);
324 last_seg = this_seg;
325 count = 0;
326 }
327 count++;
328 }
329 if (last_seg != -1)
330 fprintf (stderr, " %5d %d-cells in segment %d\n",
331 count, master->span, last_seg);
332 }
333 #else
334 static void
335 map_free_list (scm_freelist_t *freelist)
336 {
337 int last_seg = -1, count = 0;
338 SCM f;
339
340 for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f))
341 {
342 int this_seg = which_seg (f);
343
344 if (this_seg != last_seg)
345 {
346 if (last_seg != -1)
347 fprintf (stderr, " %5d %d-cells in segment %d\n",
348 count, freelist->span, last_seg);
349 last_seg = this_seg;
350 count = 0;
351 }
352 count++;
353 }
354 if (last_seg != -1)
355 fprintf (stderr, " %5d %d-cells in segment %d\n",
356 count, freelist->span, last_seg);
357 }
358 #endif
359
360 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
361 (),
362 "Print debugging information about the free-list.\n"
363 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
364 #define FUNC_NAME s_scm_map_free_list
365 {
366 int i;
367 fprintf (stderr, "%d segments total (%d:%d",
368 scm_n_heap_segs,
369 scm_heap_table[0].span,
370 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
371 for (i = 1; i < scm_n_heap_segs; i++)
372 fprintf (stderr, ", %d:%d",
373 scm_heap_table[i].span,
374 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
375 fprintf (stderr, ")\n");
376 #ifdef GUILE_NEW_GC_SCHEME
377 map_free_list (&scm_master_freelist, scm_freelist);
378 map_free_list (&scm_master_freelist2, scm_freelist2);
379 #else
380 map_free_list (&scm_freelist);
381 map_free_list (&scm_freelist2);
382 #endif
383 fflush (stderr);
384
385 return SCM_UNSPECIFIED;
386 }
387 #undef FUNC_NAME
388
389 #ifdef GUILE_NEW_GC_SCHEME
390 static int last_cluster;
391 static int last_size;
392
393 static int
394 free_list_length (char *title, int i, SCM freelist)
395 {
396 SCM ls;
397 int n = 0;
398 for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
399 if (SCM_UNPACK_CAR (ls) == scm_tc_free_cell)
400 ++n;
401 else
402 {
403 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
404 abort ();
405 }
406 if (n != last_size)
407 {
408 if (i > 0)
409 {
410 if (last_cluster == i - 1)
411 fprintf (stderr, "\t%d\n", last_size);
412 else
413 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
414 }
415 if (i >= 0)
416 fprintf (stderr, "%s %d", title, i);
417 else
418 fprintf (stderr, "%s\t%d\n", title, n);
419 last_cluster = i;
420 last_size = n;
421 }
422 return n;
423 }
424
425 static void
426 free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
427 {
428 SCM clusters;
429 int i = 0, len, n = 0;
430 fprintf (stderr, "%s\n\n", title);
431 n += free_list_length ("free list", -1, freelist);
432 for (clusters = master->clusters;
433 SCM_NNULLP (clusters);
434 clusters = SCM_CDR (clusters))
435 {
436 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
437 n += len;
438 }
439 if (last_cluster == i - 1)
440 fprintf (stderr, "\t%d\n", last_size);
441 else
442 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
443 fprintf (stderr, "\ntotal %d objects\n\n", n);
444 }
445
446 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
447 (),
448 "Print debugging information about the free-list.\n"
449 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
450 #define FUNC_NAME s_scm_free_list_length
451 {
452 free_list_lengths ("1-words", &scm_master_freelist, scm_freelist);
453 free_list_lengths ("2-words", &scm_master_freelist2, scm_freelist2);
454 return SCM_UNSPECIFIED;
455 }
456 #undef FUNC_NAME
457 #endif
458
459 #endif
460
461 #ifdef GUILE_DEBUG_FREELIST
462
463 /* Number of calls to SCM_NEWCELL since startup. */
464 static unsigned long scm_newcell_count;
465 static unsigned long scm_newcell2_count;
466
467 /* Search freelist for anything that isn't marked as a free cell.
468 Abort if we find something. */
469 #ifdef GUILE_NEW_GC_SCHEME
470 static void
471 scm_check_freelist (SCM freelist)
472 {
473 SCM f;
474 int i = 0;
475
476 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
477 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
478 {
479 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
480 scm_newcell_count, i);
481 fflush (stderr);
482 abort ();
483 }
484 }
485 #else
486 static void
487 scm_check_freelist (scm_freelist_t *freelist)
488 {
489 SCM f;
490 int i = 0;
491
492 for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
493 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
494 {
495 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
496 scm_newcell_count, i);
497 fflush (stderr);
498 abort ();
499 }
500 }
501 #endif
502
503 static int scm_debug_check_freelist = 0;
504
505 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
506 (SCM flag),
507 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
508 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
509 "compile-time flag was selected.\n")
510 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
511 {
512 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
513 return SCM_UNSPECIFIED;
514 }
515 #undef FUNC_NAME
516
517
518 #ifdef GUILE_NEW_GC_SCHEME
519
520 SCM
521 scm_debug_newcell (void)
522 {
523 SCM new;
524
525 scm_newcell_count++;
526 if (scm_debug_check_freelist)
527 {
528 scm_check_freelist (scm_freelist);
529 scm_gc();
530 }
531
532 /* The rest of this is supposed to be identical to the SCM_NEWCELL
533 macro. */
534 if (SCM_IMP (scm_freelist))
535 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
536 else
537 {
538 new = scm_freelist;
539 scm_freelist = SCM_CDR (scm_freelist);
540 SCM_SETCAR (new, scm_tc16_allocated);
541 }
542
543 return new;
544 }
545
546 SCM
547 scm_debug_newcell2 (void)
548 {
549 SCM new;
550
551 scm_newcell2_count++;
552 if (scm_debug_check_freelist)
553 {
554 scm_check_freelist (scm_freelist2);
555 scm_gc ();
556 }
557
558 /* The rest of this is supposed to be identical to the SCM_NEWCELL
559 macro. */
560 if (SCM_IMP (scm_freelist2))
561 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
562 else
563 {
564 new = scm_freelist2;
565 scm_freelist2 = SCM_CDR (scm_freelist2);
566 SCM_SETCAR (new, scm_tc16_allocated);
567 }
568
569 return new;
570 }
571
572 #else /* GUILE_NEW_GC_SCHEME */
573
574 SCM
575 scm_debug_newcell (void)
576 {
577 SCM new;
578
579 scm_newcell_count++;
580 if (scm_debug_check_freelist)
581 {
582 scm_check_freelist (&scm_freelist);
583 scm_gc();
584 }
585
586 /* The rest of this is supposed to be identical to the SCM_NEWCELL
587 macro. */
588 if (SCM_IMP (scm_freelist.cells))
589 new = scm_gc_for_newcell (&scm_freelist);
590 else
591 {
592 new = scm_freelist.cells;
593 scm_freelist.cells = SCM_CDR (scm_freelist.cells);
594 SCM_SETCAR (new, scm_tc16_allocated);
595 ++scm_cells_allocated;
596 }
597
598 return new;
599 }
600
601 SCM
602 scm_debug_newcell2 (void)
603 {
604 SCM new;
605
606 scm_newcell2_count++;
607 if (scm_debug_check_freelist) {
608 scm_check_freelist (&scm_freelist2);
609 scm_gc();
610 }
611
612 /* The rest of this is supposed to be identical to the SCM_NEWCELL2
613 macro. */
614 if (SCM_IMP (scm_freelist2.cells))
615 new = scm_gc_for_newcell (&scm_freelist2);
616 else
617 {
618 new = scm_freelist2.cells;
619 scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
620 SCM_SETCAR (new, scm_tc16_allocated);
621 scm_cells_allocated += 2;
622 }
623
624 return new;
625 }
626
627 #endif /* GUILE_NEW_GC_SCHEME */
628 #endif /* GUILE_DEBUG_FREELIST */
629
630 \f
631
632 /* {Scheme Interface to GC}
633 */
634
635 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
636 (),
637 "Returns an association list of statistics about Guile's current use of storage. ")
638 #define FUNC_NAME s_scm_gc_stats
639 {
640 int i;
641 int n;
642 SCM heap_segs;
643 long int local_scm_mtrigger;
644 long int local_scm_mallocated;
645 long int local_scm_heap_size;
646 long int local_scm_cells_allocated;
647 long int local_scm_gc_time_taken;
648 SCM answer;
649
650 SCM_DEFER_INTS;
651 scm_block_gc = 1;
652 retry:
653 heap_segs = SCM_EOL;
654 n = scm_n_heap_segs;
655 for (i = scm_n_heap_segs; i--; )
656 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
657 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
658 heap_segs);
659 if (scm_n_heap_segs != n)
660 goto retry;
661 scm_block_gc = 0;
662
664 local_scm_mtrigger = scm_mtrigger;
665 local_scm_mallocated = scm_mallocated;
666 #ifdef GUILE_NEW_GC_SCHEME
667 local_scm_heap_size = scm_master_freelist.heap_size; /*fixme*/
668 #else
669 local_scm_heap_size = scm_freelist.heap_size; /*fixme*/
670 #endif
671 local_scm_cells_allocated = scm_cells_allocated;
672 local_scm_gc_time_taken = scm_gc_time_taken;
673
674 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
675 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
676 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
677 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
678 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
679 scm_cons (sym_heap_segments, heap_segs),
680 SCM_UNDEFINED);
681 SCM_ALLOW_INTS;
682 return answer;
683 }
684 #undef FUNC_NAME
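
/* Illustrative sketch (disabled with #if 0): pulling a single entry out
 * of the association list returned by scm_gc_stats from C.  The helper
 * name is made up for the example; scm_assq is assumed to have its
 * usual (key, alist) signature.
 */
#if 0
static SCM
gc_stats_entry_example (void)
{
  /* Returns e.g. (cells-allocated . 12345), or #f if absent. */
  return scm_assq (sym_cells_allocated, scm_gc_stats ());
}
#endif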
685
686
687 void
688 scm_gc_start (const char *what)
689 {
690 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
691 /* scm_gc_cells_collected = 0; */
692 scm_gc_malloc_collected = 0;
693 scm_gc_ports_collected = 0;
694 }
695
696 void
697 scm_gc_end ()
698 {
699 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
700 scm_gc_time_taken += scm_gc_rt;
701 scm_system_async_mark (scm_gc_async);
702 }
703
704
705 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
706 (SCM obj),
707 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
708 "returned by this function for @var{obj}")
709 #define FUNC_NAME s_scm_object_address
710 {
711 return scm_ulong2num ((unsigned long) obj);
712 }
713 #undef FUNC_NAME
714
715
716 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
717 (),
718 "Scans all of SCM objects and reclaims for further use those that are\n"
719 "no longer accessible.")
720 #define FUNC_NAME s_scm_gc
721 {
722 SCM_DEFER_INTS;
723 scm_igc ("call");
724 SCM_ALLOW_INTS;
725 return SCM_UNSPECIFIED;
726 }
727 #undef FUNC_NAME
728
729
730 \f
731 /* {C Interface For When GC is Triggered}
732 */
733
734 #ifdef GUILE_NEW_GC_SCHEME
735
736 /* When we get POSIX threads support, the master will be global and
737 * common while the freelist will be individual for each thread.
738 */
739
740 SCM
741 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
742 {
743 SCM cell;
744 ++scm_ints_disabled;
745 do
746 {
747 if (SCM_NULLP (master->clusters))
748 {
749 if (master->grow_heap_p)
750 {
751 master->grow_heap_p = 0;
752 alloc_some_heap (master);
753 }
754 else
755 scm_igc ("cells");
756 }
757 cell = SCM_CAR (master->clusters);
758 master->clusters = SCM_CDR (master->clusters);
759 }
760 while (SCM_NULLP (cell));
761 --scm_ints_disabled;
762 *freelist = SCM_CDR (cell);
763 SCM_SETCAR (cell, scm_tc16_allocated);
764 return cell;
765 }
766
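/* Illustrative sketch (disabled with #if 0): the allocation fast path
 * that scm_gc_for_newcell backs up.  Judging from scm_debug_newcell
 * above, the SCM_NEWCELL macro presumably expands to something
 * equivalent to this hypothetical helper.
 */
#if 0
static SCM
newcell_example (void)
{
  SCM z;
  if (SCM_IMP (scm_freelist))
    z = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      z = scm_freelist;
      scm_freelist = SCM_CDR (scm_freelist);
      SCM_SETCAR (z, scm_tc16_allocated);
    }
  return z;
}
#endif
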
767 #if 0
768 /* This is a support routine which can be used to reserve a cluster
769 * for some special use, such as debugging. It won't be useful until
770 * free cells are preserved between garbage collections.
771 */
772
773 SCM
774 scm_alloc_cluster (scm_freelist_t *master)
775 {
776 SCM freelist, cell;
777 cell = scm_gc_for_newcell (master, &freelist);
778 SCM_SETCDR (cell, freelist);
779 return cell;
780 }
781 #endif
782
783 #else /* GUILE_NEW_GC_SCHEME */
784
785 void
786 scm_gc_for_alloc (scm_freelist_t *freelist)
787 {
788 SCM_REDEFER_INTS;
789 scm_igc ("cells");
790 #ifdef GUILE_DEBUG_FREELIST
791 fprintf (stderr, "Collected: %d, min_yield: %d\n",
792 freelist->collected, MIN_GC_YIELD (freelist));
793 #endif
794 if ((freelist->collected < MIN_GC_YIELD (freelist))
795 || SCM_IMP (freelist->cells))
796 alloc_some_heap (freelist);
797 SCM_REALLOW_INTS;
798 }
799
800
801 SCM
802 scm_gc_for_newcell (scm_freelist_t *freelist)
803 {
804 SCM fl;
805 scm_gc_for_alloc (freelist);
806 fl = freelist->cells;
807 freelist->cells = SCM_CDR (fl);
808 SCM_SETCAR (fl, scm_tc16_allocated);
809 return fl;
810 }
811
812 #endif /* GUILE_NEW_GC_SCHEME */
813
814 void
815 scm_igc (const char *what)
816 {
817 int j;
818
819 #ifdef DEBUGINFO
820 fprintf (stderr,
821 SCM_NULLP (scm_freelist)
822 ? "*"
823 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
824 #endif
825 #ifdef USE_THREADS
826 /* During the critical section, only the current thread may run. */
827 SCM_THREAD_CRITICAL_SECTION_START;
828 #endif
829
830 /* fprintf (stderr, "gc: %s\n", what); */
831
832 scm_gc_start (what);
833
834 if (!scm_stack_base || scm_block_gc)
835 {
836 scm_gc_end ();
837 return;
838 }
839
840 if (scm_mallocated < 0)
841 /* The byte count of allocated objects has underflowed. This is
842 probably because you forgot to report the sizes of objects you
843 have allocated, by calling scm_done_malloc or some such. When
844 the GC freed them, it subtracted their size from
845 scm_mallocated, which underflowed. */
846 abort ();
847
848 if (scm_gc_heap_lock)
849 /* We've invoked the collector while a GC is already in progress.
850 That should never happen. */
851 abort ();
852
853 ++scm_gc_heap_lock;
854
855 scm_weak_vectors = SCM_EOL;
856
857 scm_guardian_gc_init ();
858
859 /* unprotect any struct types with no instances */
860 #if 0
861 {
862 SCM type_list;
863 SCM * pos;
864
865 pos = &scm_type_obj_list;
866 type_list = scm_type_obj_list;
867 while (type_list != SCM_EOL)
868 if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
869 {
870 pos = SCM_CDRLOC (type_list);
871 type_list = SCM_CDR (type_list);
872 }
873 else
874 {
875 *pos = SCM_CDR (type_list);
876 type_list = SCM_CDR (type_list);
877 }
878 }
879 #endif
880
881 /* flush dead entries from the continuation stack */
882 {
883 int x;
884 int bound;
885 SCM * elts;
886 elts = SCM_VELTS (scm_continuation_stack);
887 bound = SCM_LENGTH (scm_continuation_stack);
888 x = SCM_INUM (scm_continuation_stack_ptr);
889 while (x < bound)
890 {
891 elts[x] = SCM_BOOL_F;
892 ++x;
893 }
894 }
895
896 #ifndef USE_THREADS
897
898 /* Protect from the C stack. This must be the first marking
899 * done because it provides information about what objects
900 * are "in-use" by the C code. "in-use" objects are those
901 * for which the values from SCM_LENGTH and SCM_CHARS must remain
902 * usable. This requirement is stricter than a liveness
903 * requirement -- in particular, it constrains the implementation
904 * of scm_vector_set_length_x.
905 */
906 SCM_FLUSH_REGISTER_WINDOWS;
907 /* This assumes that all registers are saved into the jmp_buf */
908 setjmp (scm_save_regs_gc_mark);
909 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
910 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
911 sizeof scm_save_regs_gc_mark)
912 / sizeof (SCM_STACKITEM)));
913
914 {
915 /* stack_len is long rather than scm_sizet in order to guarantee that
916 &stack_len is long aligned */
917 #ifdef SCM_STACK_GROWS_UP
918 #ifdef nosve
919 long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
920 #else
921 long stack_len = scm_stack_size (scm_stack_base);
922 #endif
923 scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
924 #else
925 #ifdef nosve
926 long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
927 #else
928 long stack_len = scm_stack_size (scm_stack_base);
929 #endif
930 scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
931 #endif
932 }
933
934 #else /* USE_THREADS */
935
936 /* Mark every thread's stack and registers */
937 scm_threads_mark_stacks ();
938
939 #endif /* USE_THREADS */
940
941 /* FIXME: insert a phase to un-protect string-data preserved
942 * in scm_vector_set_length_x.
943 */
944
945 j = SCM_NUM_PROTECTS;
946 while (j--)
947 scm_gc_mark (scm_sys_protects[j]);
948
949 /* FIXME: we should have a means to register C functions to be run
950 * in different phases of GC
951 */
952 scm_mark_subr_table ();
953
954 #ifndef USE_THREADS
955 scm_gc_mark (scm_root->handle);
956 #endif
957
958 scm_mark_weak_vector_spines ();
959
960 scm_guardian_zombify ();
961
962 scm_gc_sweep ();
963
964 --scm_gc_heap_lock;
965 scm_gc_end ();
966
967 #ifdef USE_THREADS
968 SCM_THREAD_CRITICAL_SECTION_END;
969 #endif
970 }
971
972 \f
973 /* {Mark/Sweep}
974 */
975
976
977
978 /* Mark an object precisely.
979 */
980 void
981 scm_gc_mark (SCM p)
982 {
983 register long i;
984 register SCM ptr;
985
986 ptr = p;
987
988 gc_mark_loop:
989 if (SCM_IMP (ptr))
990 return;
991
992 gc_mark_nimp:
993 if (SCM_NCELLP (ptr))
994 scm_wta (ptr, "rogue pointer in heap", NULL);
995
996 switch (SCM_TYP7 (ptr))
997 {
998 case scm_tcs_cons_nimcar:
999 if (SCM_GCMARKP (ptr))
1000 break;
1001 SCM_SETGCMARK (ptr);
1002 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
1003 {
1004 ptr = SCM_CAR (ptr);
1005 goto gc_mark_nimp;
1006 }
1007 scm_gc_mark (SCM_CAR (ptr));
1008 ptr = SCM_GCCDR (ptr);
1009 goto gc_mark_nimp;
1010 case scm_tcs_cons_imcar:
1011 if (SCM_GCMARKP (ptr))
1012 break;
1013 SCM_SETGCMARK (ptr);
1014 ptr = SCM_GCCDR (ptr);
1015 goto gc_mark_loop;
1016 case scm_tc7_pws:
1017 if (SCM_GCMARKP (ptr))
1018 break;
1019 SCM_SETGCMARK (ptr);
1020 scm_gc_mark (SCM_CELL_WORD (ptr, 2));
1021 ptr = SCM_GCCDR (ptr);
1022 goto gc_mark_loop;
1023 case scm_tcs_cons_gloc:
1024 if (SCM_GCMARKP (ptr))
1025 break;
1026 SCM_SETGCMARK (ptr);
1027 {
1028 SCM vcell;
1029 vcell = SCM_CAR (ptr) - 1L;
1030 switch (SCM_UNPACK (SCM_CDR (vcell)))
1031 {
1032 default:
1033 scm_gc_mark (vcell);
1034 ptr = SCM_GCCDR (ptr);
1035 goto gc_mark_loop;
1036 case 1: /* ! */
1037 case 0: /* ! */
1038 {
1039 SCM layout;
1040 SCM * vtable_data;
1041 int len;
1042 char * fields_desc;
1043 register SCM * mem;
1044 register int x;
1045
1046 vtable_data = (SCM *)vcell;
1047 layout = vtable_data[scm_vtable_index_layout];
1048 len = SCM_LENGTH (layout);
1049 fields_desc = SCM_CHARS (layout);
1050 /* We're using SCM_GCCDR here like STRUCT_DATA, except
1051 that it removes the mark */
1052 mem = (SCM *)SCM_GCCDR (ptr);
1053
1054 if (SCM_UNPACK (vtable_data[scm_struct_i_flags]) & SCM_STRUCTF_ENTITY)
1055 {
1056 scm_gc_mark (mem[scm_struct_i_procedure]);
1057 scm_gc_mark (mem[scm_struct_i_setter]);
1058 }
1059 if (len)
1060 {
1061 for (x = 0; x < len - 2; x += 2, ++mem)
1062 if (fields_desc[x] == 'p')
1063 scm_gc_mark (*mem);
1064 if (fields_desc[x] == 'p')
1065 {
1066 int j;
1067 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
1068 for (j = (long int) *mem; x; --x)
1069 scm_gc_mark (*++mem);
1070 else
1071 scm_gc_mark (*mem);
1072 }
1073 }
1074 if (!SCM_CDR (vcell))
1075 {
1076 SCM_SETGCMARK (vcell);
1077 ptr = vtable_data[scm_vtable_index_vtable];
1078 goto gc_mark_loop;
1079 }
1080 }
1081 }
1082 }
1083 break;
1084 case scm_tcs_closures:
1085 if (SCM_GCMARKP (ptr))
1086 break;
1087 SCM_SETGCMARK (ptr);
1088 if (SCM_IMP (SCM_CDR (ptr)))
1089 {
1090 ptr = SCM_CLOSCAR (ptr);
1091 goto gc_mark_nimp;
1092 }
1093 scm_gc_mark (SCM_CLOSCAR (ptr));
1094 ptr = SCM_GCCDR (ptr);
1095 goto gc_mark_nimp;
1096 case scm_tc7_vector:
1097 case scm_tc7_lvector:
1098 #ifdef CCLO
1099 case scm_tc7_cclo:
1100 #endif
1101 if (SCM_GC8MARKP (ptr))
1102 break;
1103 SCM_SETGC8MARK (ptr);
1104 i = SCM_LENGTH (ptr);
1105 if (i == 0)
1106 break;
1107 while (--i > 0)
1108 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1109 scm_gc_mark (SCM_VELTS (ptr)[i]);
1110 ptr = SCM_VELTS (ptr)[0];
1111 goto gc_mark_loop;
1112 case scm_tc7_contin:
1113       if (SCM_GC8MARKP (ptr))
1114         break;
1115 SCM_SETGC8MARK (ptr);
1116 if (SCM_VELTS (ptr))
1117 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
1118 (scm_sizet)
1119 (SCM_LENGTH (ptr) +
1120 (sizeof (SCM_STACKITEM) + -1 +
1121 sizeof (scm_contregs)) /
1122 sizeof (SCM_STACKITEM)));
1123 break;
1124 #ifdef HAVE_ARRAYS
1125 case scm_tc7_bvect:
1126 case scm_tc7_byvect:
1127 case scm_tc7_ivect:
1128 case scm_tc7_uvect:
1129 case scm_tc7_fvect:
1130 case scm_tc7_dvect:
1131 case scm_tc7_cvect:
1132 case scm_tc7_svect:
1133 #ifdef HAVE_LONG_LONGS
1134 case scm_tc7_llvect:
1135 #endif
1136 #endif
1137 case scm_tc7_string:
1138 SCM_SETGC8MARK (ptr);
1139 break;
1140
1141 case scm_tc7_substring:
1142 if (SCM_GC8MARKP(ptr))
1143 break;
1144 SCM_SETGC8MARK (ptr);
1145 ptr = SCM_CDR (ptr);
1146 goto gc_mark_loop;
1147
1148 case scm_tc7_wvect:
1149 if (SCM_GC8MARKP(ptr))
1150 break;
1151 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1152 scm_weak_vectors = ptr;
1153 SCM_SETGC8MARK (ptr);
1154 if (SCM_IS_WHVEC_ANY (ptr))
1155 {
1156 int x;
1157 int len;
1158 int weak_keys;
1159 int weak_values;
1160
1161 len = SCM_LENGTH (ptr);
1162 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1163 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1164
1165 for (x = 0; x < len; ++x)
1166 {
1167 SCM alist;
1168 alist = SCM_VELTS (ptr)[x];
1169
1170 /* mark everything on the alist except the keys or
1171 * values, according to weak_values and weak_keys. */
1172 while ( SCM_CONSP (alist)
1173 && !SCM_GCMARKP (alist)
1174 && SCM_CONSP (SCM_CAR (alist)))
1175 {
1176 SCM kvpair;
1177 SCM next_alist;
1178
1179 kvpair = SCM_CAR (alist);
1180 next_alist = SCM_CDR (alist);
1181 /*
1182 * Do not do this:
1183 * SCM_SETGCMARK (alist);
1184 * SCM_SETGCMARK (kvpair);
1185 *
1186 * It may be that either the key or value is protected by
1187 * an escaped reference to part of the spine of this alist.
1188 * If we mark the spine here, and only mark one or neither of the
1189 * key and value, they may never be properly marked.
1190 * This leads to a horrible situation in which an alist containing
1191 * freelist cells is exported.
1192 *
1193 * So only mark the spines of these arrays last of all marking.
1194 * If somebody confuses us by constructing a weak vector
1195 * with a circular alist then we are hosed, but at least we
1196 * won't prematurely drop table entries.
1197 */
1198 if (!weak_keys)
1199 scm_gc_mark (SCM_CAR (kvpair));
1200 if (!weak_values)
1201 scm_gc_mark (SCM_GCCDR (kvpair));
1202 alist = next_alist;
1203 }
1204 if (SCM_NIMP (alist))
1205 scm_gc_mark (alist);
1206 }
1207 }
1208 break;
1209
1210 case scm_tc7_msymbol:
1211 if (SCM_GC8MARKP(ptr))
1212 break;
1213 SCM_SETGC8MARK (ptr);
1214 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1215 ptr = SCM_SYMBOL_PROPS (ptr);
1216 goto gc_mark_loop;
1217 case scm_tc7_ssymbol:
1218 if (SCM_GC8MARKP(ptr))
1219 break;
1220 SCM_SETGC8MARK (ptr);
1221 break;
1222 case scm_tcs_subrs:
1223 break;
1224 case scm_tc7_port:
1225 i = SCM_PTOBNUM (ptr);
1226 if (!(i < scm_numptob))
1227 goto def;
1228 if (SCM_GC8MARKP (ptr))
1229 break;
1230 SCM_SETGC8MARK (ptr);
1231 if (SCM_PTAB_ENTRY(ptr))
1232 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
1233 if (scm_ptobs[i].mark)
1234 {
1235 ptr = (scm_ptobs[i].mark) (ptr);
1236 goto gc_mark_loop;
1237 }
1238 else
1239 return;
1240 break;
1241 case scm_tc7_smob:
1242 if (SCM_GC8MARKP (ptr))
1243 break;
1244 SCM_SETGC8MARK (ptr);
1245 switch (SCM_GCTYP16 (ptr))
1246 { /* should be faster than going through scm_smobs */
1247 case scm_tc_free_cell:
1248 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1249 case scm_tc16_allocated:
1250 case scm_tc16_big:
1251 case scm_tc16_real:
1252 case scm_tc16_complex:
1253 break;
1254 default:
1255 i = SCM_SMOBNUM (ptr);
1256 if (!(i < scm_numsmob))
1257 goto def;
1258 if (scm_smobs[i].mark)
1259 {
1260 ptr = (scm_smobs[i].mark) (ptr);
1261 goto gc_mark_loop;
1262 }
1263 else
1264 return;
1265 }
1266 break;
1267 default:
1268 def:scm_wta (ptr, "unknown type in ", "gc_mark");
1269 }
1270 }
1271
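/* Illustrative sketch (disabled with #if 0): the shape of a smob mark
 * function as consulted in the scm_tc7_smob case above.  It marks the
 * SCM values reachable from the smob's malloc'd data and returns one
 * further object for scm_gc_mark's loop to follow (or an immediate such
 * as SCM_BOOL_F to stop).  The struct and field names are hypothetical.
 */
#if 0
struct example_data { SCM first_field; SCM second_field; };

static SCM
mark_example_smob (SCM obj)
{
  struct example_data *e = (struct example_data *) SCM_CDR (obj);
  scm_gc_mark (e->first_field);
  return e->second_field;
}
#endif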
1272
1273 /* Mark a Region Conservatively
1274 */
1275
1276 void
1277 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1278 {
1279 register long m = n;
1280 register int i, j;
1281 register SCM_CELLPTR ptr;
1282
1283 while (0 <= --m)
1284 if (SCM_CELLP (*(SCM **) (& x[m])))
1285 {
1286 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
1287 i = 0;
1288 j = scm_n_heap_segs - 1;
1289 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1290 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1291 {
1292 while (i <= j)
1293 {
1294 int seg_id;
1295 seg_id = -1;
1296 if ( (i == j)
1297 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1298 seg_id = i;
1299 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1300 seg_id = j;
1301 else
1302 {
1303 int k;
1304 k = (i + j) / 2;
1305 if (k == i)
1306 break;
1307 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1308 {
1309 j = k;
1310 ++i;
1311 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1312 continue;
1313 else
1314 break;
1315 }
1316 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1317 {
1318 i = k;
1319 --j;
1320 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1321 continue;
1322 else
1323 break;
1324 }
1325 }
1326 if ( !scm_heap_table[seg_id].valid
1327 || scm_heap_table[seg_id].valid (ptr,
1328 &scm_heap_table[seg_id]))
1329 if ( scm_heap_table[seg_id].span == 1
1330 || SCM_DOUBLE_CELLP (*(SCM **) (& x[m])))
1331 scm_gc_mark (*(SCM *) & x[m]);
1332 break;
1333 }
1334
1335 }
1336 }
1337 }
1338
1339
1340 /* The following is a C predicate which determines if an SCM value can be
1341 regarded as a pointer to a cell on the heap. The code is duplicated
1342 from scm_mark_locations. */
1343
1344
1345 int
1346 scm_cellp (SCM value)
1347 {
1348 register int i, j;
1349 register SCM_CELLPTR ptr;
1350
1351 if SCM_CELLP (*(SCM **) (& value))
1352 {
1353 ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
1354 i = 0;
1355 j = scm_n_heap_segs - 1;
1356 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1357 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1358 {
1359 while (i <= j)
1360 {
1361 int seg_id;
1362 seg_id = -1;
1363 if ( (i == j)
1364 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1365 seg_id = i;
1366 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1367 seg_id = j;
1368 else
1369 {
1370 int k;
1371 k = (i + j) / 2;
1372 if (k == i)
1373 break;
1374 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1375 {
1376 j = k;
1377 ++i;
1378 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1379 continue;
1380 else
1381 break;
1382 }
1383 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1384 {
1385 i = k;
1386 --j;
1387 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1388 continue;
1389 else
1390 break;
1391 }
1392 }
1393 if ( !scm_heap_table[seg_id].valid
1394 || scm_heap_table[seg_id].valid (ptr,
1395 &scm_heap_table[seg_id]))
1396 return 1;
1397 break;
1398 }
1399
1400 }
1401 }
1402 return 0;
1403 }
1404
1405
1406 static void
1407 scm_mark_weak_vector_spines ()
1408 {
1409 SCM w;
1410
1411 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1412 {
1413 if (SCM_IS_WHVEC_ANY (w))
1414 {
1415 SCM *ptr;
1416 SCM obj;
1417 int j;
1418 int n;
1419
1420 obj = w;
1421 ptr = SCM_VELTS (w);
1422 n = SCM_LENGTH (w);
1423 for (j = 0; j < n; ++j)
1424 {
1425 SCM alist;
1426
1427 alist = ptr[j];
1428 while ( SCM_CONSP (alist)
1429 && !SCM_GCMARKP (alist)
1430 && SCM_CONSP (SCM_CAR (alist)))
1431 {
1432 SCM_SETGCMARK (alist);
1433 SCM_SETGCMARK (SCM_CAR (alist));
1434 alist = SCM_GCCDR (alist);
1435 }
1436 }
1437 }
1438 }
1439 }
1440
1441
1442 #ifdef GUILE_NEW_GC_SCHEME
1443 static void
1444 gc_sweep_freelist_start (scm_freelist_t *freelist)
1445 {
1446 freelist->cells = SCM_EOL;
1447 freelist->left_to_collect = freelist->cluster_size;
1448 freelist->clusters = SCM_EOL;
1449 freelist->clustertail = &freelist->clusters;
1450 freelist->collected = 0;
1451 }
1452
1453 static void
1454 gc_sweep_freelist_finish (scm_freelist_t *freelist)
1455 {
1456 *freelist->clustertail = freelist->cells;
1457 if (SCM_NNULLP (freelist->cells))
1458 {
1459 SCM c = freelist->cells;
1460 SCM_SETCAR (c, SCM_CDR (c));
1461 SCM_SETCDR (c, SCM_EOL);
1462 freelist->collected +=
1463 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1464 }
1465
1466 freelist->grow_heap_p = (freelist->collected < freelist->gc_trigger);
1467 }
1468 #endif
1469
1470 void
1471 scm_gc_sweep ()
1472 {
1473 register SCM_CELLPTR ptr;
1474 #ifdef SCM_POINTERS_MUNGED
1475 register SCM scmptr;
1476 #else
1477 #undef scmptr
1478 #define scmptr (SCM)ptr
1479 #endif
1480 register SCM nfreelist;
1481 register scm_freelist_t *freelist;
1482 register long m;
1483 register int span;
1484 long i;
1485 scm_sizet seg_size;
1486
1487 m = 0;
1488
1489 #ifdef GUILE_NEW_GC_SCHEME
1490 gc_sweep_freelist_start (&scm_master_freelist);
1491 gc_sweep_freelist_start (&scm_master_freelist2);
1492 #else
1493 /* Reset all free list pointers. We'll reconstruct them completely
1494 while scanning. */
1495 for (i = 0; i < scm_n_heap_segs; i++)
1496 scm_heap_table[i].freelist->cells = SCM_EOL;
1497 #endif
1498
1499 for (i = 0; i < scm_n_heap_segs; i++)
1500 {
1501 #ifdef GUILE_NEW_GC_SCHEME
1502 register unsigned int left_to_collect;
1503 #else
1504 register scm_sizet n = 0;
1505 #endif
1506 register scm_sizet j;
1507
1508 /* Unmarked cells go onto the front of the freelist this heap
1509 segment points to. Rather than updating the real freelist
1510 pointer as we go along, we accumulate the new head in
1511 nfreelist. Then, if it turns out that the entire segment is
1512 free, we free (i.e., malloc's free) the whole segment, and
1513 simply don't assign nfreelist back into the real freelist. */
1514 freelist = scm_heap_table[i].freelist;
1515 nfreelist = freelist->cells;
1516 #ifdef GUILE_NEW_GC_SCHEME
1517 left_to_collect = freelist->left_to_collect;
1518 #endif
1519 span = scm_heap_table[i].span;
1520
1521 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1522 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1523 for (j = seg_size + span; j -= span; ptr += span)
1524 {
1525 #ifdef SCM_POINTERS_MUNGED
1526 scmptr = PTR2SCM (ptr);
1527 #endif
1528 switch SCM_TYP7 (scmptr)
1529 {
1530 case scm_tcs_cons_gloc:
1531 if (SCM_GCMARKP (scmptr))
1532 {
1533 if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
1534 SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
1535 goto cmrkcontinue;
1536 }
1537 {
1538 SCM vcell;
1539 vcell = SCM_CAR (scmptr) - 1L;
1540
1541 if ((SCM_CDR (vcell) == 0) || (SCM_UNPACK (SCM_CDR (vcell)) == 1))
1542 {
1543 scm_struct_free_t free
1544 = (scm_struct_free_t) ((SCM*) vcell)[scm_struct_i_free];
1545 m += free ((SCM *) vcell, (SCM *) SCM_GCCDR (scmptr));
1546 }
1547 }
1548 break;
1549 case scm_tcs_cons_imcar:
1550 case scm_tcs_cons_nimcar:
1551 case scm_tcs_closures:
1552 case scm_tc7_pws:
1553 if (SCM_GCMARKP (scmptr))
1554 goto cmrkcontinue;
1555 break;
1556 case scm_tc7_wvect:
1557 if (SCM_GC8MARKP (scmptr))
1558 {
1559 goto c8mrkcontinue;
1560 }
1561 else
1562 {
1563 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1564 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
1565 break;
1566 }
1567
1568 case scm_tc7_vector:
1569 case scm_tc7_lvector:
1570 #ifdef CCLO
1571 case scm_tc7_cclo:
1572 #endif
1573 if (SCM_GC8MARKP (scmptr))
1574 goto c8mrkcontinue;
1575
1576 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1577 freechars:
1578 scm_must_free (SCM_CHARS (scmptr));
1579 /* SCM_SETCHARS(scmptr, 0);*/
1580 break;
1581 #ifdef HAVE_ARRAYS
1582 case scm_tc7_bvect:
1583 if SCM_GC8MARKP (scmptr)
1584 goto c8mrkcontinue;
1585 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1586 goto freechars;
1587 case scm_tc7_byvect:
1588 if SCM_GC8MARKP (scmptr)
1589 goto c8mrkcontinue;
1590 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1591 goto freechars;
1592 case scm_tc7_ivect:
1593 case scm_tc7_uvect:
1594 if SCM_GC8MARKP (scmptr)
1595 goto c8mrkcontinue;
1596 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1597 goto freechars;
1598 case scm_tc7_svect:
1599 if SCM_GC8MARKP (scmptr)
1600 goto c8mrkcontinue;
1601 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1602 goto freechars;
1603 #ifdef HAVE_LONG_LONGS
1604 case scm_tc7_llvect:
1605 if SCM_GC8MARKP (scmptr)
1606 goto c8mrkcontinue;
1607 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1608 goto freechars;
1609 #endif
1610 case scm_tc7_fvect:
1611 if SCM_GC8MARKP (scmptr)
1612 goto c8mrkcontinue;
1613 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1614 goto freechars;
1615 case scm_tc7_dvect:
1616 if SCM_GC8MARKP (scmptr)
1617 goto c8mrkcontinue;
1618 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1619 goto freechars;
1620 case scm_tc7_cvect:
1621 if SCM_GC8MARKP (scmptr)
1622 goto c8mrkcontinue;
1623 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1624 goto freechars;
1625 #endif
1626 case scm_tc7_substring:
1627 if (SCM_GC8MARKP (scmptr))
1628 goto c8mrkcontinue;
1629 break;
1630 case scm_tc7_string:
1631 if (SCM_GC8MARKP (scmptr))
1632 goto c8mrkcontinue;
1633 m += SCM_HUGE_LENGTH (scmptr) + 1;
1634 goto freechars;
1635 case scm_tc7_msymbol:
1636 if (SCM_GC8MARKP (scmptr))
1637 goto c8mrkcontinue;
1638 m += ( SCM_LENGTH (scmptr)
1639 + 1
1640 + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
1641 scm_must_free ((char *)SCM_SLOTS (scmptr));
1642 break;
1643 case scm_tc7_contin:
1644 if SCM_GC8MARKP (scmptr)
1645 goto c8mrkcontinue;
1646 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
1647 if (SCM_VELTS (scmptr))
1648 goto freechars;
1649 case scm_tc7_ssymbol:
1650 if SCM_GC8MARKP(scmptr)
1651 goto c8mrkcontinue;
1652 break;
1653 case scm_tcs_subrs:
1654 continue;
1655 case scm_tc7_port:
1656 if SCM_GC8MARKP (scmptr)
1657 goto c8mrkcontinue;
1658 if SCM_OPENP (scmptr)
1659 {
1660 int k = SCM_PTOBNUM (scmptr);
1661 if (!(k < scm_numptob))
1662 goto sweeperr;
1663 /* Keep "revealed" ports alive. */
1664 if (scm_revealed_count (scmptr) > 0)
1665 continue;
1666 	      /* Yes, I really do mean scm_ptobs[k].free rather than
1667 	       * scm_ptobs[k].close: .close is for an explicit
1668 	       * CLOSE-PORT by the user. */
1669 m += (scm_ptobs[k].free) (scmptr);
1670 SCM_SETSTREAM (scmptr, 0);
1671 scm_remove_from_port_table (scmptr);
1672 scm_gc_ports_collected++;
1673 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1674 }
1675 break;
1676 case scm_tc7_smob:
1677 switch SCM_GCTYP16 (scmptr)
1678 {
1679 case scm_tc_free_cell:
1680 case scm_tc16_real:
1681 if SCM_GC8MARKP (scmptr)
1682 goto c8mrkcontinue;
1683 break;
1684 #ifdef SCM_BIGDIG
1685 case scm_tc16_big:
1686 if SCM_GC8MARKP (scmptr)
1687 goto c8mrkcontinue;
1688 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1689 goto freechars;
1690 #endif /* def SCM_BIGDIG */
1691 case scm_tc16_complex:
1692 if SCM_GC8MARKP (scmptr)
1693 goto c8mrkcontinue;
1694 m += 2 * sizeof (double);
1695 goto freechars;
1696 default:
1697 if SCM_GC8MARKP (scmptr)
1698 goto c8mrkcontinue;
1699
1700 {
1701 int k;
1702 k = SCM_SMOBNUM (scmptr);
1703 if (!(k < scm_numsmob))
1704 goto sweeperr;
1705 m += (scm_smobs[k].free) ((SCM) scmptr);
1706 break;
1707 }
1708 }
1709 break;
1710 default:
1711 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1712 }
1713 #if 0
1714 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1715 exit (2);
1716 #endif
1717 #ifndef GUILE_NEW_GC_SCHEME
1718 n += span;
1719 #else
1720 if (!--left_to_collect)
1721 {
1722 SCM_SETCAR (scmptr, nfreelist);
1723 *freelist->clustertail = scmptr;
1724 freelist->clustertail = SCM_CDRLOC (scmptr);
1725
1726 nfreelist = SCM_EOL;
1727 freelist->collected += span * freelist->cluster_size;
1728 left_to_collect = freelist->cluster_size;
1729 }
1730 else
1731 #endif
1732 {
1733 /* Stick the new cell on the front of nfreelist. It's
1734 critical that we mark this cell as freed; otherwise, the
1735 conservative collector might trace it as some other type
1736 of object. */
1737 SCM_SETCAR (scmptr, scm_tc_free_cell);
1738 SCM_SETCDR (scmptr, nfreelist);
1739 nfreelist = scmptr;
1740 }
1741
1742 continue;
1743 c8mrkcontinue:
1744 SCM_CLRGC8MARK (scmptr);
1745 continue;
1746 cmrkcontinue:
1747 SCM_CLRGCMARK (scmptr);
1748 }
1749 #ifdef GC_FREE_SEGMENTS
1750 if (n == seg_size)
1751 {
1752 register long j;
1753
1754 freelist->heap_size -= seg_size;
1755 free ((char *) scm_heap_table[i].bounds[0]);
1756 scm_heap_table[i].bounds[0] = 0;
1757 for (j = i + 1; j < scm_n_heap_segs; j++)
1758 scm_heap_table[j - 1] = scm_heap_table[j];
1759 scm_n_heap_segs -= 1;
1760 i--; /* We need to scan the segment just moved. */
1761 }
1762 else
1763 #endif /* ifdef GC_FREE_SEGMENTS */
1764 {
1765 /* Update the real freelist pointer to point to the head of
1766 the list of free cells we've built for this segment. */
1767 freelist->cells = nfreelist;
1768 #ifdef GUILE_NEW_GC_SCHEME
1769 freelist->left_to_collect = left_to_collect;
1770 #endif
1771 }
1772
1773 #ifndef GUILE_NEW_GC_SCHEME
1774 freelist->collected += n;
1775 scm_cells_allocated += freelist->heap_size - freelist->collected;
1776 #endif
1777
1778 #ifdef GUILE_DEBUG_FREELIST
1779 #ifdef GUILE_NEW_GC_SCHEME
1780 scm_check_freelist (freelist == &scm_master_freelist
1781 ? scm_freelist
1782 : scm_freelist2);
1783 #else
1784 scm_check_freelist (freelist);
1785 #endif
1786 scm_map_free_list ();
1787 #endif
1788 }
1789
1790 #ifdef GUILE_NEW_GC_SCHEME
1791 gc_sweep_freelist_finish (&scm_master_freelist);
1792 gc_sweep_freelist_finish (&scm_master_freelist2);
1793
1794 /* When we move to POSIX threads private freelists should probably
1795 be GC-protected instead. */
1796 scm_freelist = SCM_EOL;
1797 scm_freelist2 = SCM_EOL;
1798 #endif
1799
1800 /* Scan weak vectors. */
1801 {
1802 SCM *ptr, w;
1803 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1804 {
1805 if (!SCM_IS_WHVEC_ANY (w))
1806 {
1807 register long j, n;
1808
1809 ptr = SCM_VELTS (w);
1810 n = SCM_LENGTH (w);
1811 for (j = 0; j < n; ++j)
1812 if (SCM_FREEP (ptr[j]))
1813 ptr[j] = SCM_BOOL_F;
1814 }
1815 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1816 {
1817 SCM obj = w;
1818 register long n = SCM_LENGTH (w);
1819 register long j;
1820
1821 ptr = SCM_VELTS (w);
1822
1823 for (j = 0; j < n; ++j)
1824 {
1825 SCM * fixup;
1826 SCM alist;
1827 int weak_keys;
1828 int weak_values;
1829
1830 weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
1831 weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);
1832
1833 fixup = ptr + j;
1834 alist = *fixup;
1835
1836 while ( SCM_CONSP (alist)
1837 && SCM_CONSP (SCM_CAR (alist)))
1838 {
1839 SCM key;
1840 SCM value;
1841
1842 key = SCM_CAAR (alist);
1843 value = SCM_CDAR (alist);
1844 if ( (weak_keys && SCM_FREEP (key))
1845 || (weak_values && SCM_FREEP (value)))
1846 {
1847 *fixup = SCM_CDR (alist);
1848 }
1849 else
1850 fixup = SCM_CDRLOC (alist);
1851 alist = SCM_CDR (alist);
1852 }
1853 }
1854 }
1855 }
1856 }
1857 scm_mallocated -= m;
1858 scm_gc_malloc_collected = m;
1859 }
1860
1861
1862 \f
1863
1864 /* {Front end to malloc}
1865 *
1866 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1867 *
1868  * These functions provide services comparable to malloc, realloc, and
1869 * free. They are for allocating malloced parts of scheme objects.
1870 * The primary purpose of the front end is to impose calls to gc.
1871 */
1872
1873 /* scm_must_malloc
1874 * Return newly malloced storage or throw an error.
1875 *
1876 * The parameter WHAT is a string for error reporting.
1877  * If the threshold scm_mtrigger would be exceeded by this
1878 * allocation, or if the first call to malloc fails,
1879 * garbage collect -- on the presumption that some objects
1880 * using malloced storage may be collected.
1881 *
1882 * The limit scm_mtrigger may be raised by this allocation.
1883 */
1884 void *
1885 scm_must_malloc (scm_sizet size, const char *what)
1886 {
1887 void *ptr;
1888 unsigned long nm = scm_mallocated + size;
1889
1890 if (nm <= scm_mtrigger)
1891 {
1892 SCM_SYSCALL (ptr = malloc (size));
1893 if (NULL != ptr)
1894 {
1895 scm_mallocated = nm;
1896 return ptr;
1897 }
1898 }
1899
1900 scm_igc (what);
1901
1902 nm = scm_mallocated + size;
1903 SCM_SYSCALL (ptr = malloc (size));
1904 if (NULL != ptr)
1905 {
1906 scm_mallocated = nm;
1907 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1908 if (nm > scm_mtrigger)
1909 scm_mtrigger = nm + nm / 2;
1910 else
1911 scm_mtrigger += scm_mtrigger / 2;
1912 }
1913 return ptr;
1914 }
1915
1916 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1917 return 0; /* never reached */
1918 }
1919
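/* Example of the trigger adjustment above, starting from the initial
 * scm_mtrigger of 100000 bytes with a hysteresis of 10000: if, after
 * the GC, the new total nm is 120000, the trigger is raised to
 * nm + nm/2 = 180000; if nm is 95000 -- inside the hysteresis band but
 * under the trigger -- it is only bumped to 150000; below 90000 it is
 * left alone. */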
1920
1921 /* scm_must_realloc
1922 * is similar to scm_must_malloc.
1923 */
1924 void *
1925 scm_must_realloc (void *where,
1926 scm_sizet old_size,
1927 scm_sizet size,
1928 const char *what)
1929 {
1930 void *ptr;
1931 scm_sizet nm = scm_mallocated + size - old_size;
1932
1933 if (nm <= scm_mtrigger)
1934 {
1935 SCM_SYSCALL (ptr = realloc (where, size));
1936 if (NULL != ptr)
1937 {
1938 scm_mallocated = nm;
1939 return ptr;
1940 }
1941 }
1942
1943 scm_igc (what);
1944
1945 nm = scm_mallocated + size - old_size;
1946 SCM_SYSCALL (ptr = realloc (where, size));
1947 if (NULL != ptr)
1948 {
1949 scm_mallocated = nm;
1950 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1951 if (nm > scm_mtrigger)
1952 scm_mtrigger = nm + nm / 2;
1953 else
1954 scm_mtrigger += scm_mtrigger / 2;
1955 }
1956 return ptr;
1957 }
1958
1959 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1960 return 0; /* never reached */
1961 }
1962
1963 void
1964 scm_must_free (void *obj)
1965 {
1966 if (obj)
1967 free (obj);
1968 else
1969 scm_wta (SCM_INUM0, "already free", "");
1970 }
1971
1972 /* Announce that there has been some malloc done that will be freed
1973 * during gc. A typical use is for a smob that uses some malloced
1974 * memory but can not get it from scm_must_malloc (for whatever
1975 * reason). When a new object of this smob is created you call
1976 * scm_done_malloc with the size of the object. When your smob free
1977 * function is called, be sure to include this size in the return
1978 * value. */
1979
1980 void
1981 scm_done_malloc (long size)
1982 {
1983 scm_mallocated += size;
1984
1985 if (scm_mallocated > scm_mtrigger)
1986 {
1987 scm_igc ("foreign mallocs");
1988 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1989 {
1990 if (scm_mallocated > scm_mtrigger)
1991 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1992 else
1993 scm_mtrigger += scm_mtrigger / 2;
1994 }
1995 }
1996 }
1997
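/* Illustrative sketch (disabled with #if 0) of the accounting pattern
 * described above, for a hypothetical smob that holds a buffer obtained
 * from plain malloc (e.g. by a third-party library) rather than from
 * scm_must_malloc.  scm_done_malloc reports the allocation, and the
 * smob's free function returns the same byte count so the collector can
 * credit it back.  All names here are made up for the example.
 */
#if 0
#define EXAMPLE_BUFFER_SIZE 1024

static SCM
make_example_smob (void)
{
  SCM z;
  char *buf = (char *) malloc (EXAMPLE_BUFFER_SIZE);
  if (buf == NULL)
    scm_wta (SCM_UNDEFINED, (char *) SCM_NALLOC, "example-smob");
  SCM_NEWCELL (z);
  SCM_SETCDR (z, (SCM) buf);
  SCM_SETCAR (z, tc16_example);              /* hypothetical smob tag */
  scm_done_malloc (EXAMPLE_BUFFER_SIZE);
  return z;
}

static scm_sizet
free_example_smob (SCM obj)
{
  free ((char *) SCM_CDR (obj));
  return EXAMPLE_BUFFER_SIZE;                /* include this size, as noted above */
}
#endif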
1998
1999 #ifdef GUILE_NEW_GC_SCHEME
2000 static void
2001 adjust_gc_trigger (scm_freelist_t *freelist)
2002 {
2003 /* Adjust GC trigger based on total heap size */
2004 if (freelist->gc_trigger_fraction)
2005 freelist->gc_trigger = ((scm_master_freelist.heap_size
2006 + scm_master_freelist2.heap_size)
2007 * freelist->gc_trigger_fraction
2008 / 100);
2009 }
2010 #endif
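
/* Example: with gc_trigger_fraction == 10 and a combined master heap of
 * 45000 cells, gc_trigger becomes 4500 cells; a sweep that reclaims
 * fewer than that sets grow_heap_p (see gc_sweep_freelist_finish). */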
2011
2012
2013 \f
2014
2015 /* {Heap Segments}
2016 *
2017 * Each heap segment is an array of objects of a particular size.
2018 * Every segment has an associated (possibly shared) freelist.
2019 * A table of segment records is kept that records the upper and
2020 * lower extents of the segment; this is used during the conservative
2021  * phase of gc to identify probable gc roots (because they point
2022 * into valid segments at reasonable offsets). */
2023
2024 /* scm_expmem
2025 * is true if the first segment was smaller than INIT_HEAP_SEG.
2026 * If scm_expmem is set to one, subsequent segment allocations will
2027 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2028 */
2029 int scm_expmem = 0;
2030
2031 scm_sizet scm_max_segment_size;
2032
2033 /* scm_heap_org
2034 * is the lowest base address of any heap segment.
2035 */
2036 SCM_CELLPTR scm_heap_org;
2037
2038 scm_heap_seg_data_t * scm_heap_table = 0;
2039 int scm_n_heap_segs = 0;
2040
2041 /* init_heap_seg
2042  * initializes a new heap segment and returns the number of objects it contains.
2043 *
2044 * The segment origin, segment size in bytes, and the span of objects
2045 * in cells are input parameters. The freelist is both input and output.
2046 *
2047  * This function presumes that the scm_heap_table has already been expanded
2048  * to accommodate a new segment record.
2049 */
2050
2051
2052 static scm_sizet
2053 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
2054 {
2055 register SCM_CELLPTR ptr;
2056 #ifdef SCM_POINTERS_MUNGED
2057 register SCM scmptr;
2058 #else
2059 #undef scmptr
2060 #define scmptr ptr
2061 #endif
2062 SCM_CELLPTR seg_end;
2063 int new_seg_index;
2064 int n_new_cells;
2065 int span = freelist->span;
2066
2067 if (seg_org == NULL)
2068 return 0;
2069
2070 ptr = CELL_UP (seg_org, span);
2071
2072 /* Compute the ceiling on valid object pointers w/in this segment.
2073 */
2074 seg_end = CELL_DN ((char *) seg_org + size, span);
2075
2076 /* Find the right place and insert the segment record.
2077 *
2078 */
2079 for (new_seg_index = 0;
2080 ( (new_seg_index < scm_n_heap_segs)
2081 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2082 new_seg_index++)
2083 ;
2084
2085 {
2086 int i;
2087 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2088 scm_heap_table[i] = scm_heap_table[i - 1];
2089 }
2090
2091 ++scm_n_heap_segs;
2092
2093 scm_heap_table[new_seg_index].valid = 0;
2094 scm_heap_table[new_seg_index].span = span;
2095 scm_heap_table[new_seg_index].freelist = freelist;
2096 scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
2097 scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;
2098
2099
2100 /* Compute the least valid object pointer w/in this segment
2101 */
2102 ptr = CELL_UP (ptr, span);
2103
2104
2105 /*n_new_cells*/
2106 n_new_cells = seg_end - ptr;
2107
2108 #ifdef GUILE_NEW_GC_SCHEME
2109
2110 freelist->heap_size += n_new_cells;
2111
2112 /* Partition objects in this segment into clusters */
2113 {
2114 SCM clusters;
2115 SCM *clusterp = &clusters;
2116 int n_cluster_cells = span * freelist->cluster_size;
2117
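    /* Each cluster occupies n_cluster_cells cells: one spine cell whose
       CAR points to the cluster's first free cell and whose CDR links to
       the next cluster spine, followed by a chain of free cells linked
       through their CDRs and terminated with SCM_EOL.  */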
2118 while (n_new_cells > span) /* at least one spine + one freecell */
2119 {
2120 /* Determine end of cluster
2121 */
2122 if (n_new_cells >= n_cluster_cells)
2123 {
2124 seg_end = ptr + n_cluster_cells;
2125 n_new_cells -= n_cluster_cells;
2126 }
2127 else
2128 /* [cmm] looks like the segment size doesn't divide cleanly by
2129 cluster size. bad cmm! */
2130 abort();
2131
2132 /* Allocate cluster spine
2133 */
2134 *clusterp = PTR2SCM (ptr);
2135 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
2136 clusterp = SCM_CDRLOC (*clusterp);
2137 ptr += span;
2138
2139 while (ptr < seg_end)
2140 {
2141 #ifdef SCM_POINTERS_MUNGED
2142 scmptr = PTR2SCM (ptr);
2143 #endif
2144 SCM_SETCAR (scmptr, scm_tc_free_cell);
2145 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2146 ptr += span;
2147 }
2148
2149 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
2150 }
2151
2152 /* Patch up the last cluster pointer in the segment
2153 * to join it to the input freelist.
2154 */
2155 *clusterp = freelist->clusters;
2156 freelist->clusters = clusters;
2157 }
2158
2159 adjust_gc_trigger (&scm_master_freelist);
2160 adjust_gc_trigger (&scm_master_freelist2);
2161
2162 #else /* GUILE_NEW_GC_SCHEME */
2163
2164 /* Prepend objects in this segment to the freelist.
2165 */
2166 while (ptr < seg_end)
2167 {
2168 #ifdef SCM_POINTERS_MUNGED
2169 scmptr = PTR2SCM (ptr);
2170 #endif
2171 SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
2172 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
2173 ptr += span;
2174 }
2175
2176 ptr -= span;
2177
2178 /* Patch up the last freelist pointer in the segment
2179 * to join it to the input freelist.
2180 */
2181 SCM_SETCDR (PTR2SCM (ptr), freelist->cells);
2182 freelist->cells = PTR2SCM (CELL_UP (seg_org, span));
2183
2184 freelist->heap_size += n_new_cells;
2185
2186 #endif /* GUILE_NEW_GC_SCHEME */
2187
2188 #ifdef DEBUGINFO
2189 fprintf (stderr, "H");
2190 #endif
2191 return size;
2192 #ifdef scmptr
2193 #undef scmptr
2194 #endif
2195 }
2196
2197 #ifndef GUILE_NEW_GC_SCHEME
2198 #define round_to_cluster_size(freelist, len) len
2199 #else
2200
2201 static scm_sizet
2202 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2203 {
2204 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2205
2206 return
2207 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2208 + ALIGNMENT_SLACK (freelist);
2209 }
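/* Illustrative arithmetic (made-up figures): if CLUSTER_SIZE_IN_BYTES
   (freelist) came to 8192 bytes and len were 20000, the quotient rounds
   up to 3 clusters, giving 3 * 8192 = 24576 bytes plus ALIGNMENT_SLACK
   (freelist).  */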
2210
2211 #endif
2212
2213 static void
2214 alloc_some_heap (scm_freelist_t *freelist)
2215 {
2216 scm_heap_seg_data_t * tmptable;
2217 SCM_CELLPTR ptr;
2218 scm_sizet len;
2219
2220 /* Critical code sections (such as the garbage collector)
2221 * aren't supposed to add heap segments.
2222 */
2223 if (scm_gc_heap_lock)
2224 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
2225
2226 /* Expand the heap tables to have room for the new segment.
2227 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
2228 * only if the allocation of the segment itself succeeds.
2229 */
2230 len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
2231
2232 SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
2233 realloc ((char *)scm_heap_table, len)));
2234 if (!tmptable)
2235 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
2236 else
2237 scm_heap_table = tmptable;
2238
2239
2240 /* Pick a size for the new heap segment.
2241 * The rule for picking the size of a segment is explained in
2242 * gc.h
2243 */
2244 #ifdef GUILE_NEW_GC_SCHEME
2245 {
2246     /* Ensure that the new segment is large enough to satisfy the new trigger */
2247 int slack = freelist->gc_trigger - freelist->collected;
2248 int min_cells = 100 * slack / (99 - freelist->gc_trigger_fraction);
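    /* One reading of this formula (an interpretation, not taken from the
       original author): adding n cells raises the trigger by roughly
       gc_trigger_fraction * n / 100, so covering a shortfall of `slack'
       cells needs n >= 100 * slack / (100 - fraction); dividing by 99
       instead leaves a little headroom against integer truncation.  */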
2249 len = SCM_EXPHEAP (freelist->heap_size);
2250 #ifdef DEBUGINFO
2251 fprintf (stderr, "(%d < %d)", len, min_cells);
2252 #endif
2253 if (len < min_cells)
2254 len = min_cells + 1;
2255 len *= sizeof (scm_cell);
2256 }
2257
2258 if (len > scm_max_segment_size)
2259 len = scm_max_segment_size;
2260 #else
2261 if (scm_expmem)
2262 {
2263 len = (scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell));
2264 if ((scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell))
2265 != len)
2266 len = 0;
2267 }
2268 else
2269 len = SCM_HEAP_SEG_SIZE;
2270 #endif /* GUILE_NEW_GC_SCHEME */
2271
2272 {
2273 scm_sizet smallest;
2274
2275 #ifndef GUILE_NEW_GC_SCHEME
2276 smallest = (freelist->span * sizeof (scm_cell));
2277 #else
2278 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2279 #endif
2280
2281 if (len < smallest)
2282 len = smallest;
2283
2284     /* Allocate with decaying ambition: halve the request until malloc succeeds or the size falls below the minimum. */
2285 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2286 && (len >= smallest))
2287 {
2288 scm_sizet rounded_len = round_to_cluster_size(freelist, len);
2289 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2290 if (ptr)
2291 {
2292 init_heap_seg (ptr, rounded_len, freelist);
2293 return;
2294 }
2295 len /= 2;
2296 }
2297 }
2298
2299 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
2300 }
2301
2302
2303
2304 SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
2305 (SCM name),
2306 "")
2307 #define FUNC_NAME s_scm_unhash_name
2308 {
2309 int x;
2310 int bound;
2311 SCM_VALIDATE_SYMBOL (1,name);
2312 SCM_DEFER_INTS;
2313 bound = scm_n_heap_segs;
2314 for (x = 0; x < bound; ++x)
2315 {
2316 SCM_CELLPTR p;
2317 SCM_CELLPTR pbound;
2318 p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
2319 pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
2320 while (p < pbound)
2321 {
2322 SCM incar;
2323 incar = p->car;
2324 if (1 == (7 & (int)incar))
2325 {
2326 --incar;
2327 if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
2328 && (SCM_CDR (incar) != 0)
2329 && (SCM_UNPACK (SCM_CDR (incar)) != 1))
2330 {
2331 p->car = name;
2332 }
2333 }
2334 ++p;
2335 }
2336 }
2337 SCM_ALLOW_INTS;
2338 return name;
2339 }
2340 #undef FUNC_NAME
2341
2342
2343 \f
2344 /* {GC Protection Helper Functions}
2345 */
2346
2347
2348 void
2349 scm_remember (SCM *ptr)
2350 { /* empty */ }
2351
2352
2353 /*
2354   These functions prevent garbage collection of the arguments that
2355   follow the first one: because the call appears on the last line
2356   of the caller's body, the compiler must keep those arguments
2357   live, and therefore visible to the conservative stack scan,
2358   until the end of the calling function.
2359   It'd be better to have a nice compiler hint to
2360   aid the conservative stack-scanning GC. --03/09/00 gjb */
2361 SCM
2362 scm_return_first (SCM elt, ...)
2363 {
2364 return elt;
2365 }
2366
2367 int
2368 scm_return_first_int (int i, ...)
2369 {
2370 return i;
2371 }
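/* A caller would typically use these on its final line, e.g. (a sketch
   with hypothetical variables):

       return scm_return_first (result, tmp_string);

   so that tmp_string stays referenced, and hence conservatively marked,
   until the call returns.  */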
2372
2373
2374 SCM
2375 scm_permanent_object (SCM obj)
2376 {
2377 SCM_REDEFER_INTS;
2378 scm_permobjs = scm_cons (obj, scm_permobjs);
2379 SCM_REALLOW_INTS;
2380 return obj;
2381 }
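/* Note the contrast with scm_protect_object below: scm_permobjs only
   ever grows in this file, so protection via scm_permanent_object is
   not meant to be undone.  */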
2382
2383
2384 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2385 even if all other references are dropped, until someone applies
2386 scm_unprotect_object to it. This function returns OBJ.
2387
2388 Calls to scm_protect_object nest. For every object OBJ, there is a
2389 counter which scm_protect_object(OBJ) increments and
2390    scm_unprotect_object(OBJ) decrements (but never below zero).  While
2391    an object's counter is greater than zero, the garbage collector
2392    will not free it.
2393
2394 Of course, that's not how it's implemented. scm_protect_object and
2395 scm_unprotect_object just maintain a list of references to things.
2396 Since the GC knows about this list, all objects it mentions stay
2397 alive. scm_protect_object adds its argument to the list;
2398 scm_unprotect_object removes the first occurrence of its argument
2399    from the list. */
2400 SCM
2401 scm_protect_object (SCM obj)
2402 {
2403 scm_protects = scm_cons (obj, scm_protects);
2404
2405 return obj;
2406 }
2407
2408
2409 /* Remove any protection for OBJ established by a prior call to
2410 scm_protect_object. This function returns OBJ.
2411
2412 See scm_protect_object for more information. */
2413 SCM
2414 scm_unprotect_object (SCM obj)
2415 {
2416 SCM *tail_ptr = &scm_protects;
2417
2418 while (SCM_CONSP (*tail_ptr))
2419 if (SCM_CAR (*tail_ptr) == obj)
2420 {
2421 *tail_ptr = SCM_CDR (*tail_ptr);
2422 break;
2423 }
2424 else
2425 tail_ptr = SCM_CDRLOC (*tail_ptr);
2426
2427 return obj;
2428 }
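/* Illustrative sketch (hypothetical type and functions, not part of the
   library): C code that stashes an SCM somewhere the conservative scan
   cannot see, such as a malloc'd struct, would bracket the object's
   lifetime there with this pair.  */
#if 0
struct my_handle { SCM obj; };          /* hypothetical container */

static void
my_handle_set (struct my_handle *h, SCM val)
{
  scm_protect_object (val);             /* keep VAL alive while stored */
  h->obj = val;
}

static void
my_handle_clear (struct my_handle *h)
{
  scm_unprotect_object (h->obj);        /* drop that protection again */
  h->obj = SCM_BOOL_F;
}
#endif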
2429
2430 int terminating;
2431
2432 /* called on process termination. */
2433 #ifdef HAVE_ATEXIT
2434 static void
2435 cleanup (void)
2436 #else
2437 #ifdef HAVE_ON_EXIT
2438 extern int on_exit (void (*procp) (), int arg);
2439
2440 static void
2441 cleanup (int status, void *arg)
2442 #else
2443 #error Do not know how to set up a cleanup handler on your system.
2444 #endif
2445 #endif
2446 {
2447 terminating = 1;
2448 scm_flush_all_ports ();
2449 }
2450
2451 \f
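/* make_initial_segment
 * tries to give FREELIST a first segment of roughly INIT_HEAP_SIZE bytes
 * (rounded to the cluster size under GUILE_NEW_GC_SCHEME), falling back
 * to SCM_HEAP_SEG_SIZE if the larger request fails.  Returns non-zero if
 * even the fallback cannot be allocated.  scm_expmem is set when the
 * full-size request succeeds.
 */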
2452 static int
2453 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
2454 {
2455 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2456 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2457 rounded_size,
2458 freelist))
2459 {
2460 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2461 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2462 rounded_size,
2463 freelist))
2464 return 1;
2465 }
2466 else
2467 scm_expmem = 1;
2468
2469 freelist->grow_heap_p = (freelist->heap_size < freelist->gc_trigger);
2470
2471 return 0;
2472 }
2473
2474 \f
2475 #ifdef GUILE_NEW_GC_SCHEME
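/* init_freelist
 * fills in a master freelist.  CLUSTER_SIZE is the number of usable cells
 * per cluster (one extra cell is reserved for the spine).  A negative
 * GC_TRIGGER is interpreted as a percentage of the total heap size (see
 * adjust_gc_trigger above); a positive value is taken as an absolute
 * trigger and leaves gc_trigger_fraction at zero.
 */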
2476 static void
2477 init_freelist (scm_freelist_t *freelist,
2478 int span,
2479 int cluster_size,
2480 int gc_trigger)
2481 {
2482 freelist->clusters = SCM_EOL;
2483 freelist->cluster_size = cluster_size + 1;
2484 if (gc_trigger < 0)
2485 freelist->gc_trigger_fraction = - gc_trigger;
2486 else
2487 {
2488 freelist->gc_trigger = gc_trigger;
2489 freelist->gc_trigger_fraction = 0;
2490 }
2491 freelist->span = span;
2492 freelist->collected = 0;
2493 freelist->heap_size = 0;
2494 }
2495
2496 int
2497 scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2498 scm_sizet init_heap_size_2, int gc_trigger_2,
2499 scm_sizet max_segment_size)
2500 #else
2501 int
2502 scm_init_storage (scm_sizet init_heap_size_1, scm_sizet init_heap_size_2)
2503 #endif
2504 {
2505 scm_sizet j;
2506
2507 if (!init_heap_size_1)
2508 init_heap_size_1 = SCM_INIT_HEAP_SIZE_1;
2509 if (!init_heap_size_2)
2510 init_heap_size_2 = SCM_INIT_HEAP_SIZE_2;
2511
2512 j = SCM_NUM_PROTECTS;
2513 while (j)
2514 scm_sys_protects[--j] = SCM_BOOL_F;
2515 scm_block_gc = 1;
2516
2517 #ifdef GUILE_NEW_GC_SCHEME
2518 scm_freelist = SCM_EOL;
2519 scm_freelist2 = SCM_EOL;
2520 init_freelist (&scm_master_freelist,
2521 1, SCM_CLUSTER_SIZE_1,
2522 gc_trigger_1 ? gc_trigger_1 : SCM_GC_TRIGGER_1);
2523 init_freelist (&scm_master_freelist2,
2524 2, SCM_CLUSTER_SIZE_2,
2525 gc_trigger_2 ? gc_trigger_2 : SCM_GC_TRIGGER_2);
2526 scm_max_segment_size
2527 = max_segment_size ? max_segment_size : SCM_MAX_SEGMENT_SIZE;
2528 #else
2529 scm_freelist.cells = SCM_EOL;
2530 scm_freelist.span = 1;
2531 scm_freelist.collected = 0;
2532 scm_freelist.heap_size = 0;
2533
2534 scm_freelist2.cells = SCM_EOL;
2535 scm_freelist2.span = 2;
2536 scm_freelist2.collected = 0;
2537 scm_freelist2.heap_size = 0;
2538 #endif
2539
2540 scm_expmem = 0;
2541
2542 j = SCM_HEAP_SEG_SIZE;
2543 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2544 scm_heap_table = ((scm_heap_seg_data_t *)
2545 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
2546
2547 #ifdef GUILE_NEW_GC_SCHEME
2548 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2549 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2550 return 1;
2551 #else
2552 if (make_initial_segment (init_heap_size_1, &scm_freelist) ||
2553 make_initial_segment (init_heap_size_2, &scm_freelist2))
2554 return 1;
2555 #endif
2556
2557 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2558
2559 /* scm_hplims[0] can change. do not remove scm_heap_org */
2560 scm_weak_vectors = SCM_EOL;
2561
2562   /* Initialise the port table.  */
2563 scm_port_table = (scm_port **)
2564 malloc (sizeof (scm_port *) * scm_port_table_room);
2565 if (!scm_port_table)
2566 return 1;
2567
2568 #ifdef HAVE_ATEXIT
2569 atexit (cleanup);
2570 #else
2571 #ifdef HAVE_ON_EXIT
2572 on_exit (cleanup, 0);
2573 #endif
2574 #endif
2575
2576 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2577 SCM_SETCDR (scm_undefineds, scm_undefineds);
2578
2579 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2580 scm_nullstr = scm_makstr (0L, 0);
2581 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
2582 scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2583 scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
2584 scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2585 scm_stand_in_procs = SCM_EOL;
2586 scm_permobjs = SCM_EOL;
2587 scm_protects = SCM_EOL;
2588 scm_asyncs = SCM_EOL;
2589 scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2590 scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
2591 #ifdef SCM_BIGDIG
2592 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2593 #endif
2594 return 0;
2595 }
2596 \f
2597
2598 void
2599 scm_init_gc ()
2600 {
2601 #include "gc.x"
2602 }