* gc.c (scm_gc_stats): Inserted explanation of local_scm_mtrigger
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 \f
48 #include <stdio.h>
49 #include "_scm.h"
50 #include "stime.h"
51 #include "stackchk.h"
52 #include "struct.h"
53 #include "weaks.h"
54 #include "guardians.h"
55 #include "smob.h"
56 #include "unif.h"
57 #include "async.h"
58 #include "ports.h"
59 #include "root.h"
60 #include "strings.h"
61 #include "vectors.h"
62
63 #include "validate.h"
64 #include "gc.h"
65
66 #ifdef HAVE_MALLOC_H
67 #include <malloc.h>
68 #endif
69
70 #ifdef HAVE_UNISTD_H
71 #include <unistd.h>
72 #endif
73
74 #ifdef __STDC__
75 #include <stdarg.h>
76 #define var_start(x, y) va_start(x, y)
77 #else
78 #include <varargs.h>
79 #define var_start(x, y) va_start(x)
80 #endif
81
82 \f
83 /* {heap tuning parameters}
84 *
85 * These are parameters for controlling memory allocation. The heap
86 * is the area out of which scm_cons, and object headers are allocated.
87 *
88 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
89 * 64 bit machine. The units of the _SIZE parameters are bytes.
90 * Cons pairs and object headers occupy one heap cell.
91 *
92 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
93 * allocated initially the heap will grow by half its current size
94 * each subsequent time more heap is needed.
95 *
96 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
97 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
98 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
99 * is in scm_init_storage() and alloc_some_heap() in sys.c
100 *
101 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
102 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
103 *
104 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
105 * is needed.
106 *
107 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
108 * trigger a GC.
109 *
110 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
111 * reclaimed by a GC triggered by must_malloc. If less than this is
112 * reclaimed, the trigger threshold is raised. [I don't know what a
113 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
114 * work around a oscillation that caused almost constant GC.]
115 */
116
/* Parameters for the 1-cell (cons) freelist.  Sizes are in bytes;
   cluster sizes are in objects.  See the block comment above. */
#define SCM_INIT_HEAP_SIZE_1 (50000L * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_1 2000L
#define SCM_GC_TRIGGER_1 -45

/* Parameters for the 2-cell (double-cell) freelist. */
#define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_2 1000L
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application
 */
#define SCM_GC_TRIGGER_2 -45

#define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
/* Platform-specific choice of the default heap segment size. */
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
   bounds for allocated storage */

#ifdef PROT386
/*in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
/* Number of bytes spanned by one cluster of FREELIST. */
#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
/* Extra bytes requested so a segment can be aligned to a span boundary. */
#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
/* Total heap size in cells, summed over both freelists. */
#ifdef GUILE_NEW_GC_SCHEME
#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#else
#define SCM_HEAP_SIZE (scm_freelist.heap_size + scm_freelist2.heap_size)
#endif
169
170
171 \f
172 /* scm_freelists
173 */
174
/* A scm_freelist_t describes one free list of heap cells: the chain of
 * available cells, bookkeeping for cluster allocation (new GC scheme
 * only), and statistics about the heap segments that feed this list.
 */
typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
#ifdef GUILE_NEW_GC_SCHEME
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size gc_trigger,
     except the last one which may be shorter */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* set to grow the heap when we run out of clusters
   */
  int grow_heap_p;
  /* minimum number of objects allocated before GC is triggered
   */
  int gc_trigger;
  /* defines gc_trigger as percent of heap size
   * 0 => constant trigger
   */
  int gc_trigger_fraction;
#endif
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  int collected;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  int heap_size;
} scm_freelist_t;

/* The global freelists.  Under the new GC scheme, scm_freelist and
 * scm_freelist2 are plain SCM cell chains (per-thread in the future)
 * while the master structures hold the cluster bookkeeping; under the
 * old scheme the scm_freelist_t structures are used directly.
 */
#ifdef GUILE_NEW_GC_SCHEME
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
#else
scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
#endif
223
/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;


/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */

int scm_block_gc = 1;

/* If fewer than MIN_GC_YIELD cells are recovered during a garbage
 * collection (GC) more space is allocated for the heap.
 */
#define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;   /* cells handed out since startup */
long scm_mallocated = 0;                 /* bytes reported via must_malloc */
unsigned long scm_gc_cells_collected;    /* cells reclaimed by the last GC */
#ifdef GUILE_NEW_GC_SCHEME
unsigned long scm_gc_yield;              /* cells allocated at start of current GC */
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
#endif
unsigned long scm_gc_malloc_collected;   /* malloc bytes reclaimed by last GC */
unsigned long scm_gc_ports_collected;    /* ports reclaimed by last GC */
unsigned long scm_gc_rt;                 /* run time of the GC in progress */
unsigned long scm_gc_time_taken = 0;     /* cumulative GC run time */

/* Scheme-level keys used in the gc-stats association list. */
SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");

/* Per-segment bookkeeping: one entry in scm_heap_table per heap segment. */
typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of SCM words per object in this segment */
  int span;

  /* If SEG_DATA->valid is non-zero, the conservative marking
     functions will apply SEG_DATA->valid to the purported pointer and
     SEG_DATA, and mark the object iff the function returns non-zero.
     At the moment, I don't think anyone uses this. */
  int (*valid) ();
} scm_heap_seg_data_t;




/* Forward declarations for helpers defined later in this file. */
static void scm_mark_weak_vector_spines (void);
static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
static void alloc_some_heap (scm_freelist_t *);
299
300
301 \f
302 /* Debugging functions. */
303
304 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
305
306 /* Return the number of the heap segment containing CELL. */
307 static int
308 which_seg (SCM cell)
309 {
310 int i;
311
312 for (i = 0; i < scm_n_heap_segs; i++)
313 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
314 && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
315 return i;
316 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
317 SCM_UNPACK (cell));
318 abort ();
319 }
320
321
322 #ifdef GUILE_NEW_GC_SCHEME
/* Walk FREELIST and report to stderr, per heap segment, how many free
 * cells of MASTER's span lie in that segment.  Relies on consecutive
 * freelist entries tending to share a segment: a line is emitted each
 * time the walk crosses into a different segment.
 */
static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, " %5d %d-cells in segment %d\n",
                     count, master->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  /* Flush the count for the final segment (if the list was non-empty). */
  if (last_seg != -1)
    fprintf (stderr, " %5d %d-cells in segment %d\n",
             count, master->span, last_seg);
}
347 #else
/* Old-GC-scheme variant: walk FREELIST->cells and report to stderr,
 * per heap segment, how many free cells of FREELIST's span lie in
 * that segment.  A line is emitted each time the walk crosses into a
 * different segment.
 */
static void
map_free_list (scm_freelist_t *freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, " %5d %d-cells in segment %d\n",
                     count, freelist->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  /* Flush the count for the final segment (if the list was non-empty). */
  if (last_seg != -1)
    fprintf (stderr, " %5d %d-cells in segment %d\n",
             count, freelist->span, last_seg);
}
372 #endif
373
/* Scheme procedure (map-free-list): print a segment summary line
 * followed by a per-segment breakdown of both freelists.  Debug-build
 * only; output goes to stderr.
 */
SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;
  /* Header: segment count, then each segment as span:size-in-cells. */
  fprintf (stderr, "%d segments total (%d:%d",
           scm_n_heap_segs,
           scm_heap_table[0].span,
           scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
             scm_heap_table[i].span,
             scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
#ifdef GUILE_NEW_GC_SCHEME
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
#else
  map_free_list (&scm_freelist);
  map_free_list (&scm_freelist2);
#endif
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
402
403 #ifdef GUILE_NEW_GC_SCHEME
/* State shared between successive free_list_length calls so that runs
   of equal-sized clusters can be printed as a single "i-j size" range. */
static int last_cluster;
static int last_size;

/* Count the free cells on FREELIST, aborting if any entry is not
 * tagged as a free cell.  I is the cluster index (-1 for the plain
 * freelist, used as a formatting sentinel).  Prints a line whenever
 * the size differs from the previous cluster's, compressing runs of
 * equal sizes into ranges.  Returns the number of cells found.
 */
static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
    if (SCM_UNPACK_CAR (ls) == scm_tc_free_cell)
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %d\n", title, n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          /* Close out the previous run: single cluster or a range. */
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%d\n", last_size);
          else
            fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %d", title, i);
      else
        fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}
438
/* Print TITLE, the length of the local FREELIST, and the length of
 * each cluster on MASTER->clusters (runs of equal sizes compressed),
 * followed by the grand total of free objects.
 */
static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  /* Close out the final run started inside free_list_length. */
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}
459
/* Scheme procedure (free-list-length): dump cluster-by-cluster length
 * statistics for both freelists to stderr.  Debug-build only.
 */
SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
471 #endif
472
473 #endif
474
475 #ifdef GUILE_DEBUG_FREELIST
476
/* Number of calls to SCM_NEWCELL since startup. */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  The diagnostic reports both the
   allocation count (scm_newcell_count) and the list position, to help
   localize the corruption.  */
#ifdef GUILE_NEW_GC_SCHEME
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
    if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        fflush (stderr);
        abort ();
      }
}
#else
/* Old-GC-scheme variant: walk the freelist structure's cell chain. */
static void
scm_check_freelist (scm_freelist_t *freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
    if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        fflush (stderr);
        abort ();
      }
}
#endif
516
/* When non-zero, scm_debug_newcell/scm_debug_newcell2 verify the
   freelist and force a GC on every allocation.  Toggled from Scheme. */
static int scm_debug_check_freelist = 0;

/* Scheme procedure (gc-set-debug-check-freelist! flag): enable or
 * disable per-allocation freelist checking.  Debug-build only.
 */
SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
            "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
            "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
530
531
532 #ifdef GUILE_NEW_GC_SCHEME
533
/* Debugging replacement for SCM_NEWCELL (new GC scheme): optionally
 * verify the freelist and force a GC first, then allocate one cell
 * exactly as the macro would.  Returns the fresh cell, tagged
 * scm_tc16_allocated.
 */
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_CDR (scm_freelist);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}
559
/* Debugging replacement for SCM_NEWCELL2 (new GC scheme): same as
 * scm_debug_newcell but allocates from the double-cell freelist.
 */
SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_CDR (scm_freelist2);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}
585
586 #else /* GUILE_NEW_GC_SCHEME */
587
/* Debugging replacement for SCM_NEWCELL (old GC scheme): optionally
 * verify the freelist and force a GC first, then allocate one cell
 * exactly as the macro would, bumping scm_cells_allocated.
 */
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (&scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist.cells))
    new = scm_gc_for_newcell (&scm_freelist);
  else
    {
      new = scm_freelist.cells;
      scm_freelist.cells = SCM_CDR (scm_freelist.cells);
      SCM_SETCAR (new, scm_tc16_allocated);
      ++scm_cells_allocated;
    }

  return new;
}
614
/* Debugging replacement for SCM_NEWCELL2 (old GC scheme): same as the
 * old-scheme scm_debug_newcell but for double cells; counts 2 cells
 * per allocation.
 */
SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist) {
    scm_check_freelist (&scm_freelist2);
    scm_gc();
  }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL2
     macro.  */
  if (SCM_IMP (scm_freelist2.cells))
    new = scm_gc_for_newcell (&scm_freelist2);
  else
    {
      new = scm_freelist2.cells;
      scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
      SCM_SETCAR (new, scm_tc16_allocated);
      scm_cells_allocated += 2;
    }

  return new;
}
640
641 #endif /* GUILE_NEW_GC_SCHEME */
642 #endif /* GUILE_DEBUG_FREELIST */
643
644 \f
645
646 #ifdef GUILE_NEW_GC_SCHEME
647 static unsigned long
648 master_cells_allocated (scm_freelist_t *master)
649 {
650 int objects = master->clusters_allocated * (master->cluster_size - 1);
651 if (SCM_NULLP (master->clusters))
652 objects -= master->left_to_collect;
653 return master->span * objects;
654 }
655
656 static unsigned long
657 freelist_length (SCM freelist)
658 {
659 int n;
660 for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
661 ++n;
662 return n;
663 }
664
665 static unsigned long
666 compute_cells_allocated ()
667 {
668 return (scm_cells_allocated
669 + master_cells_allocated (&scm_master_freelist)
670 + master_cells_allocated (&scm_master_freelist2)
671 - scm_master_freelist.span * freelist_length (scm_freelist)
672 - scm_master_freelist2.span * freelist_length (scm_freelist2));
673 }
674 #endif
675
676 /* {Scheme Interface to GC}
677 */
678
/* Scheme procedure (gc-stats): return an association list describing
 * current storage use (GC time, cells allocated, heap size, malloc
 * bytes, malloc GC threshold, and the heap segment bounds).
 */
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Returns an association list of statistics about Guile's current use of storage. ")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  SCM answer;

  SCM_DEFER_INTS;
  /* Block GC while we walk the segment table; with scm_block_gc set,
     allocation during the consing below only expands the heap. */
  scm_block_gc = 1;
 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  /* Build ((upper . lower) ...) for each segment.  Consing can add a
     new heap segment; if scm_n_heap_segs changed under us, start over
     so the list is consistent. */
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;
  scm_block_gc = 0;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
#ifdef GUILE_NEW_GC_SCHEME
  local_scm_cells_allocated = compute_cells_allocated ();
#else
  local_scm_cells_allocated = scm_cells_allocated;
#endif
  local_scm_gc_time_taken = scm_gc_time_taken;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                        scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                        scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                        scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                        scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                        scm_cons (sym_heap_segments, heap_segs),
                        SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
731
732
/* Begin a collection: record the start time and reset the per-GC
 * counters.  WHAT names the reason for the collection; it is not used
 * here (scm_igc logs it).  Under the new GC scheme, snapshot the
 * allocation level so scm_gc_yield reflects cells allocated at GC
 * start, saving the previous snapshot in scm_gc_yield_1.
 */
void
scm_gc_start (const char *what)
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
  scm_gc_cells_collected = 0;
#ifdef GUILE_NEW_GC_SCHEME
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
#endif
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
747
748 void
749 scm_gc_end ()
750 {
751 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
752 scm_gc_time_taken += scm_gc_rt;
753 scm_system_async_mark (scm_gc_async);
754 }
755
756
/* Scheme procedure (object-address obj): return OBJ's machine word
 * (its address for heap objects) as an unsigned integer.
 */
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) obj);
}
#undef FUNC_NAME
766
767
/* Scheme procedure (gc): run a full collection immediately, with
 * interrupts deferred for the duration.
 */
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all of SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
780
781
782 \f
783 /* {C Interface For When GC is Triggered}
784 */
785
786 #ifdef GUILE_NEW_GC_SCHEME
787
static void
adjust_gc_trigger (scm_freelist_t *freelist)
{
  /* GC trigger is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `gc_trigger_fraction' of total heap size.  Note, however, that
   * the absolute value of gc_trigger will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GC:s.)
   */
  if (freelist->gc_trigger_fraction)
    {
      /* Pick largest of last two yields. */
      /* NOTE(review): scm_gc_yield/scm_gc_yield_1 are unsigned long;
         storing into int could truncate on very large heaps -- confirm. */
      int yield = (scm_gc_yield_1 > scm_gc_yield
                   ? scm_gc_yield_1
                   : scm_gc_yield);
      int delta = ((SCM_HEAP_SIZE * freelist->gc_trigger_fraction / 100)
                   - yield);
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
               scm_cells_allocated,
               delta);
#endif
      /* Only ever raise the trigger; a negative delta means the last
         GC already yielded more than the target fraction. */
      if (delta > 0)
        freelist->gc_trigger += delta;
    }
}
822
823 /* When we get POSIX threads support, the master will be global and
824 * common while the freelist will be individual for each thread.
825 */
826
/* New-GC-scheme allocation slow path: take the next cluster from
 * MASTER, install its tail as the new local FREELIST, and return its
 * head cell (tagged scm_tc16_allocated).  If MASTER has no clusters,
 * either grow the heap (when grow_heap_p is set) or collect garbage
 * and adjust the GC trigger, repeating until a non-empty cluster is
 * obtained.  Interrupts are disabled around the whole operation.
 */
SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p)
            {
              master->grow_heap_p = 0;
              alloc_some_heap (master);
            }
          else
            {
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %d, ",
                       scm_cells_allocated
                       + master_cells_allocated (&scm_master_freelist)
                       + master_cells_allocated (&scm_master_freelist2));
#endif
              scm_igc ("cells");
              adjust_gc_trigger (master);
            }
        }
      /* Pop the next cluster; its spine cell becomes the result. */
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));
  --scm_ints_disabled;
  *freelist = SCM_CDR (cell);
  SCM_SETCAR (cell, scm_tc16_allocated);
  return cell;
}
863
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 *
 * Returns the reserved cluster's head cell with the rest of the
 * cluster re-attached as its cdr.
 *
 * FIX: was declared `void' while ending in `return cell;' -- it would
 * not compile if this #if 0 were ever enabled.  Return type corrected
 * to SCM to match scm_gc_for_newcell's result.
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
879
880 #else /* GUILE_NEW_GC_SCHEME */
881
/* Old-GC-scheme slow path helper: collect garbage, then grow the heap
 * if the collection yielded too little (fewer than MIN_GC_YIELD cells)
 * or left FREELIST empty.  Runs with interrupts re-deferred.
 */
void
scm_gc_for_alloc (scm_freelist_t *freelist)
{
  SCM_REDEFER_INTS;
  scm_igc ("cells");
#ifdef GUILE_DEBUG_FREELIST
  fprintf (stderr, "Collected: %d, min_yield: %d\n",
           freelist->collected, MIN_GC_YIELD (freelist));
#endif
  if ((freelist->collected < MIN_GC_YIELD (freelist))
      || SCM_IMP (freelist->cells))
    alloc_some_heap (freelist);
  SCM_REALLOW_INTS;
}
896
897
/* Old-GC-scheme allocation slow path: run a GC (growing the heap if
 * necessary) via scm_gc_for_alloc, then pop and return the first cell
 * of FREELIST, tagged scm_tc16_allocated.
 */
SCM
scm_gc_for_newcell (scm_freelist_t *freelist)
{
  SCM fl;
  scm_gc_for_alloc (freelist);
  fl = freelist->cells;
  freelist->cells = SCM_CDR (fl);
  SCM_SETCAR (fl, scm_tc16_allocated);
  return fl;
}
908
909 #endif /* GUILE_NEW_GC_SCHEME */
910
/* Run one complete garbage collection.  WHAT names the trigger (for
 * the commented-out trace below).  Phases: start timing; bail out if
 * GC is blocked or the stack base is unknown; sanity-check counters;
 * lock the heap; mark roots (C stacks/registers, protected objects,
 * subr table, root handle, weak vector spines); zombify guardians;
 * sweep; unlock and stop timing.  Under USE_THREADS the whole
 * collection runs inside a critical section.
 */
void
scm_igc (const char *what)
{
  int j;

#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
#ifdef USE_THREADS
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  scm_gc_start (what);

  /* If GC is blocked, or we don't know where the stack starts, give
     up immediately (still balancing start/end for the statistics). */
  if (!scm_stack_base || scm_block_gc)
    {
      scm_gc_end ();
      return;
    }

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  scm_weak_vectors = SCM_EOL;

  scm_guardian_gc_init ();

  /* unprotect any struct types with no instances */
#if 0
  {
    SCM type_list;
    SCM * pos;

    pos = &scm_type_obj_list;
    type_list = scm_type_obj_list;
    while (type_list != SCM_EOL)
      if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
        {
          pos = SCM_CDRLOC (type_list);
          type_list = SCM_CDR (type_list);
        }
      else
        {
          *pos = SCM_CDR (type_list);
          type_list = SCM_CDR (type_list);
        }
  }
#endif

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

#ifndef USE_THREADS

  /* Protect from the C stack.  This must be the first marking
   * done because it provides information about what objects
   * are "in-use" by the C code.   "in-use" objects are those
   * for which the values from SCM_LENGTH and SCM_CHARS must remain
   * usable.  This requirement is stricter than a liveness
   * requirement -- in particular, it constrains the implementation
   * of scm_vector_set_length_x.
   */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      (   (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
                                       sizeof scm_save_regs_gc_mark)
                          / sizeof (SCM_STACKITEM)));

  {
    /* stack_len is long rather than scm_sizet in order to guarantee that
       &stack_len is long aligned */
#ifdef SCM_STACK_GROWS_UP
#ifdef nosve
    long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
#else
    long stack_len = scm_stack_size (scm_stack_base);
#endif
    scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
#else
#ifdef nosve
    long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
#else
    long stack_len = scm_stack_size (scm_stack_base);
#endif
    scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* FIXME: insert a phase to un-protect string-data preserved
   * in scm_vector_set_length_x.
   */

  /* Mark everything registered in the protects table. */
  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  scm_mark_weak_vector_spines ();

  scm_guardian_zombify ();

  scm_gc_sweep ();

  --scm_gc_heap_lock;
  scm_gc_end ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
}
1068
1069 \f
1070 /* {Mark/Sweep}
1071 */
1072
1073
1074
1075 /* Mark an object precisely.
1076 */
/* Mark the object P and everything reachable from it.
 *
 * The marker is mostly iterative: whenever exactly one outgoing
 * reference remains to be processed, `ptr' is overwritten and control
 * jumps back to gc_mark_loop / gc_mark_nimp instead of recursing, so
 * long cdr-chains do not consume C stack.  Genuine fan-out (vector
 * elements, struct fields, ...) still uses recursive calls.
 *
 * gc_mark_loop accepts any SCM (immediates are simply ignored);
 * gc_mark_nimp must only be entered with a non-immediate value.
 */
void
scm_gc_mark (SCM p)
{
  register long i;
  register SCM ptr;

  ptr = p;

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:
  /* A non-immediate that is not a cell pointer means a corrupt heap
     or a bad root; bail out loudly.  */
  if (SCM_NCELLP (ptr))
    scm_wta (ptr, "rogue pointer in heap", NULL);

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
	{
	  ptr = SCM_CAR (ptr);
	  goto gc_mark_nimp;
	}
      /* Recurse on the car, iterate on the cdr.  */
      scm_gc_mark (SCM_CAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      /* Procedure-with-setter: also mark the setter in cell word 2.  */
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      scm_gc_mark (SCM_CELL_WORD (ptr, 2));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      {
	SCM vcell;
	/* The car holds the vcell address plus a tag of 1.  */
	vcell = SCM_CAR (ptr) - 1L;
	switch (SCM_UNPACK (SCM_CDR (vcell)))
	  {
	  default:
	    /* Ordinary gloc: mark the variable cell, then the cdr.  */
	    scm_gc_mark (vcell);
	    ptr = SCM_GCCDR (ptr);
	    goto gc_mark_loop;
	  case 1:		/* ! */
	  case 0:		/* ! */
	    /* CDR of 0 or 1 marks this as a struct; walk its layout
	       descriptor and mark the pointer-valued ('p') fields.  */
	    {
	      SCM layout;
	      SCM * vtable_data;
	      int len;
	      char * fields_desc;
	      register SCM * mem;
	      register int x;

	      vtable_data = (SCM *)vcell;
	      layout = vtable_data[scm_vtable_index_layout];
	      len = SCM_LENGTH (layout);
	      fields_desc = SCM_CHARS (layout);
	      /* We're using SCM_GCCDR here like STRUCT_DATA, except
		 that it removes the mark */
	      mem = (SCM *)SCM_GCCDR (ptr);

	      if (SCM_UNPACK (vtable_data[scm_struct_i_flags]) & SCM_STRUCTF_ENTITY)
		{
		  scm_gc_mark (mem[scm_struct_i_procedure]);
		  scm_gc_mark (mem[scm_struct_i_setter]);
		}
	      if (len)
		{
		  /* Layout chars come in pairs; mark each 'p' field.  */
		  for (x = 0; x < len - 2; x += 2, ++mem)
		    if (fields_desc[x] == 'p')
		      scm_gc_mark (*mem);
		  /* Last field: may be a tail array ('p' + tail flag).  */
		  if (fields_desc[x] == 'p')
		    {
		      int j;
		      if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
			for (j = (long int) *mem; x; --x)
			  scm_gc_mark (*++mem);
		      else
			scm_gc_mark (*mem);
		    }
		}
	      if (!SCM_CDR (vcell))
		{
		  /* CDR == 0: also mark the vtable chain.  */
		  SCM_SETGCMARK (vcell);
		  ptr = vtable_data[scm_vtable_index_vtable];
		  goto gc_mark_loop;
		}
	    }
	  }
      }
      break;
    case scm_tcs_closures:
      if (SCM_GCMARKP (ptr))
	break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr)))
	{
	  ptr = SCM_CLOSCAR (ptr);
	  goto gc_mark_nimp;
	}
      scm_gc_mark (SCM_CLOSCAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
    case scm_tc7_lvector:
#ifdef CCLO
    case scm_tc7_cclo:
#endif
      if (SCM_GC8MARKP (ptr))
	break;
      SCM_SETGC8MARK (ptr);
      i = SCM_LENGTH (ptr);
      if (i == 0)
	break;
      /* Recurse on elements 1..n-1, iterate on element 0.  */
      while (--i > 0)
	if (SCM_NIMP (SCM_VELTS (ptr)[i]))
	  scm_gc_mark (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
    case scm_tc7_contin:
      if SCM_GC8MARKP
	(ptr) break;
      SCM_SETGC8MARK (ptr);
      /* Conservatively scan the saved stack copy (plus the register
	 block appended after it).  */
      if (SCM_VELTS (ptr))
	scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
			    (scm_sizet)
			    (SCM_LENGTH (ptr) +
			     (sizeof (SCM_STACKITEM) + -1 +
			      sizeof (scm_contregs)) /
			     sizeof (SCM_STACKITEM)));
      break;
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      /* Uniform vectors and strings contain no SCM references; just
	 mark the header.  */
      SCM_SETGC8MARK (ptr);
      break;

    case scm_tc7_substring:
      if (SCM_GC8MARKP(ptr))
	break;
      SCM_SETGC8MARK (ptr);
      /* Keep the underlying string alive.  */
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      if (SCM_GC8MARKP(ptr))
	break;
      /* Chain this weak vector so post-mark phases (spine marking,
	 sweep-time entry dropping) can find it.  */
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      SCM_SETGC8MARK (ptr);
      if (SCM_IS_WHVEC_ANY (ptr))
	{
	  int x;
	  int len;
	  int weak_keys;
	  int weak_values;

	  len = SCM_LENGTH (ptr);
	  weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
	  weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

	  for (x = 0; x < len; ++x)
	    {
	      SCM alist;
	      alist = SCM_VELTS (ptr)[x];

	      /* mark everything on the alist except the keys or
	       * values, according to weak_values and weak_keys.  */
	      while (   SCM_CONSP (alist)
		     && !SCM_GCMARKP (alist)
		     && SCM_CONSP (SCM_CAR (alist)))
		{
		  SCM kvpair;
		  SCM next_alist;

		  kvpair = SCM_CAR (alist);
		  next_alist = SCM_CDR (alist);
		  /*
		   * Do not do this:
		   * SCM_SETGCMARK (alist);
		   * SCM_SETGCMARK (kvpair);
		   *
		   * It may be that either the key or value is protected by
		   * an escaped reference to part of the spine of this alist.
		   * If we mark the spine here, and only mark one or neither of the
		   * key and value, they may never be properly marked.
		   * This leads to a horrible situation in which an alist containing
		   * freelist cells is exported.
		   *
		   * So only mark the spines of these arrays last of all marking.
		   * If somebody confuses us by constructing a weak vector
		   * with a circular alist then we are hosed, but at least we
		   * won't prematurely drop table entries.
		   */
		  if (!weak_keys)
		    scm_gc_mark (SCM_CAR (kvpair));
		  if (!weak_values)
		    scm_gc_mark (SCM_GCCDR (kvpair));
		  alist = next_alist;
		}
	      if (SCM_NIMP (alist))
		scm_gc_mark (alist);
	    }
	}
      break;

    case scm_tc7_msymbol:
      if (SCM_GC8MARKP(ptr))
	break;
      SCM_SETGC8MARK (ptr);
      scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
      ptr = SCM_SYMBOL_PROPS (ptr);
      goto gc_mark_loop;
    case scm_tc7_ssymbol:
      if (SCM_GC8MARKP(ptr))
	break;
      SCM_SETGC8MARK (ptr);
      break;
    case scm_tcs_subrs:
      /* Subrs live in the static subr table; nothing to mark here.  */
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
	goto def;
      if (SCM_GC8MARKP (ptr))
	break;
      SCM_SETGC8MARK (ptr);
      if (SCM_PTAB_ENTRY(ptr))
	scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
      /* Delegate to the port type's own mark function, if any.  */
      if (scm_ptobs[i].mark)
	{
	  ptr = (scm_ptobs[i].mark) (ptr);
	  goto gc_mark_loop;
	}
      else
	return;
      break;
    case scm_tc7_smob:
      if (SCM_GC8MARKP (ptr))
	break;
      SCM_SETGC8MARK (ptr);
      switch (SCM_GCTYP16 (ptr))
	{ /* should be faster than going through scm_smobs */
	case scm_tc_free_cell:
	  /* printf("found free_cell %X ", ptr); fflush(stdout); */
	case scm_tc16_allocated:
	case scm_tc16_big:
	case scm_tc16_real:
	case scm_tc16_complex:
	  /* These smobs contain no SCM references.  */
	  break;
	default:
	  i = SCM_SMOBNUM (ptr);
	  if (!(i < scm_numsmob))
	    goto def;
	  if (scm_smobs[i].mark)
	    {
	      ptr = (scm_smobs[i].mark) (ptr);
	      goto gc_mark_loop;
	    }
	  else
	    return;
	}
      break;
    default:
    def:scm_wta (ptr, "unknown type in ", "gc_mark");
    }
}
1368
1369
1370 /* Mark a Region Conservatively
1371 */
1372
/* Conservatively mark the N stack items (machine words) in X.
 *
 * Each word that looks like a cell pointer is located in the sorted
 * scm_heap_table via a hand-rolled binary search over segment bounds;
 * only words that fall inside a valid segment (and, for multi-cell
 * segments, point at a double-cell boundary) are passed to
 * scm_gc_mark.  False positives merely retain garbage; they cannot
 * corrupt the heap.
 */
void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  register long m = n;
  register int i, j;
  register SCM_CELLPTR ptr;

  while (0 <= --m)
    if (SCM_CELLP (*(SCM **) (& x[m])))
      {
	ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
	i = 0;
	j = scm_n_heap_segs - 1;
	/* Quick rejection: outside the overall heap range.  */
	if (   SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
	    && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
	  {
	    /* Binary search for the segment containing ptr.  */
	    while (i <= j)
	      {
		int seg_id;
		seg_id = -1;
		if (   (i == j)
		    || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
		  seg_id = i;
		else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
		  seg_id = j;
		else
		  {
		    int k;
		    k = (i + j) / 2;
		    if (k == i)
		      break;
		    if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
		      {
			j = k;
			++i;
			if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
			  continue;
			else
			  break;
		      }
		    else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
		      {
			i = k;
			--j;
			if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
			  continue;
			else
			  break;
		      }
		  }
		/* Segment found: consult its optional validity hook,
		   then mark, honoring the segment's cell span.  */
		if (   !scm_heap_table[seg_id].valid
		    || scm_heap_table[seg_id].valid (ptr,
						     &scm_heap_table[seg_id]))
		  if (   scm_heap_table[seg_id].span == 1
		      || SCM_DOUBLE_CELLP (*(SCM **) (& x[m])))
		    scm_gc_mark (*(SCM *) & x[m]);
		break;
	      }

	  }
      }
}
1435
1436
1437 /* The following is a C predicate which determines if an SCM value can be
1438 regarded as a pointer to a cell on the heap. The code is duplicated
1439 from scm_mark_locations. */
1440
1441
/* Return nonzero iff VALUE can be regarded as a pointer to a live
 * cell on the heap.  The segment lookup deliberately mirrors
 * scm_mark_locations (same binary search over scm_heap_table); keep
 * the two in sync when changing either.
 */
int
scm_cellp (SCM value)
{
  register int i, j;
  register SCM_CELLPTR ptr;

  if SCM_CELLP (*(SCM **) (& value))
    {
      ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
      i = 0;
      j = scm_n_heap_segs - 1;
      /* Quick rejection: outside the overall heap range.  */
      if (   SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
	  && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
	{
	  /* Binary search for the segment containing ptr.  */
	  while (i <= j)
	    {
	      int seg_id;
	      seg_id = -1;
	      if (   (i == j)
		  || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
		seg_id = i;
	      else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
		seg_id = j;
	      else
		{
		  int k;
		  k = (i + j) / 2;
		  if (k == i)
		    break;
		  if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
		    {
		      j = k;
		      ++i;
		      if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
			continue;
		      else
			break;
		    }
		  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
		    {
		      i = k;
		      --j;
		      if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
			continue;
		      else
			break;
		    }
		}
	      /* Segment found: honor its optional validity hook.  */
	      if (   !scm_heap_table[seg_id].valid
		  || scm_heap_table[seg_id].valid (ptr,
						   &scm_heap_table[seg_id]))
		return 1;
	      break;
	    }

	}
    }
  return 0;
}
1501
1502
1503 static void
1504 scm_mark_weak_vector_spines ()
1505 {
1506 SCM w;
1507
1508 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1509 {
1510 if (SCM_IS_WHVEC_ANY (w))
1511 {
1512 SCM *ptr;
1513 SCM obj;
1514 int j;
1515 int n;
1516
1517 obj = w;
1518 ptr = SCM_VELTS (w);
1519 n = SCM_LENGTH (w);
1520 for (j = 0; j < n; ++j)
1521 {
1522 SCM alist;
1523
1524 alist = ptr[j];
1525 while ( SCM_CONSP (alist)
1526 && !SCM_GCMARKP (alist)
1527 && SCM_CONSP (SCM_CAR (alist)))
1528 {
1529 SCM_SETGCMARK (alist);
1530 SCM_SETGCMARK (SCM_CAR (alist));
1531 alist = SCM_GCCDR (alist);
1532 }
1533 }
1534 }
1535 }
1536 }
1537
1538
1539 #ifdef GUILE_NEW_GC_SCHEME
1540 static void
1541 gc_sweep_freelist_start (scm_freelist_t *freelist)
1542 {
1543 freelist->cells = SCM_EOL;
1544 freelist->left_to_collect = freelist->cluster_size;
1545 freelist->clusters_allocated = 0;
1546 freelist->clusters = SCM_EOL;
1547 freelist->clustertail = &freelist->clusters;
1548 freelist->collected = 0;
1549 }
1550
/* Finish the sweep for FREELIST: splice the cells gathered into the
 * final (possibly partial) cluster onto the cluster chain, fold this
 * freelist's tally into the global count, and decide whether the heap
 * should grow before the next collection.
 */
static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  *freelist->clustertail = freelist->cells;
  if (SCM_NNULLP (freelist->cells))
    {
      /* Turn the head cell of the partial cluster into a cluster
	 spine: its car becomes the cell list, its cdr terminates the
	 cluster chain.  */
      SCM c = freelist->cells;
      SCM_SETCAR (c, SCM_CDR (c));
      SCM_SETCDR (c, SCM_EOL);
      /* Account for the cells in this partial cluster.  */
      freelist->collected +=
	freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Too little was reclaimed relative to the trigger: request that
     the next allocation grows the heap instead of re-collecting.  */
  freelist->grow_heap_p = (freelist->collected < freelist->gc_trigger);
}
1567 #endif
1568
/* Sweep every heap segment: free the malloced storage of unmarked
 * objects, thread unmarked cells back onto their segment's freelist
 * (as clusters under GUILE_NEW_GC_SCHEME), clear the marks on
 * surviving objects, and finally drop dead entries from weak vectors
 * and weak hash tables.  `m' accumulates the number of malloced bytes
 * reclaimed.
 */
void
scm_gc_sweep ()
{
  register SCM_CELLPTR ptr;
#ifdef SCM_POINTERS_MUNGED
  register SCM scmptr;
#else
#undef scmptr
#define scmptr (SCM)ptr
#endif
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

#ifdef GUILE_NEW_GC_SCHEME
  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);
#else
  /* Reset all free list pointers.  We'll reconstruct them completely
     while scanning.  */
  for (i = 0; i < scm_n_heap_segs; i++)
    scm_heap_table[i].freelist->cells = SCM_EOL;
#endif

  for (i = 0; i < scm_n_heap_segs; i++)
    {
#ifdef GUILE_NEW_GC_SCHEME
      register unsigned int left_to_collect;
#else
      register scm_sizet n = 0;
#endif
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
	 segment points to.  Rather than updating the real freelist
	 pointer as we go along, we accumulate the new head in
	 nfreelist.  Then, if it turns out that the entire segment is
	 free, we free (i.e., malloc's free) the whole segment, and
	 simply don't assign nfreelist back into the real freelist.  */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
#ifdef GUILE_NEW_GC_SCHEME
      left_to_collect = freelist->left_to_collect;
#endif
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
      /* Walk the segment cell by cell (stepping `span' cells at a
	 time).  Marked objects jump to cmrkcontinue/c8mrkcontinue,
	 which clear the mark; unmarked ones fall through to the
	 freelist-threading code at the bottom of the loop.  */
      for (j = seg_size + span; j -= span; ptr += span)
	{
#ifdef SCM_POINTERS_MUNGED
	  scmptr = PTR2SCM (ptr);
#endif
	  switch SCM_TYP7 (scmptr)
	    {
	    case scm_tcs_cons_gloc:
	      if (SCM_GCMARKP (scmptr))
		{
		  /* Reset a struct's "mark the vtable chain" flag
		     (CDR 1) back to 0 for the next collection.  */
		  if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
		    SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
		  goto cmrkcontinue;
		}
	      {
		SCM vcell;
		vcell = SCM_CAR (scmptr) - 1L;

		/* A dead struct: invoke its free function to release
		   the instance data.  */
		if ((SCM_CDR (vcell) == 0) || (SCM_UNPACK (SCM_CDR (vcell)) == 1))
		  {
		    scm_struct_free_t free
		      = (scm_struct_free_t) ((SCM*) vcell)[scm_struct_i_free];
		    m += free ((SCM *) vcell, (SCM *) SCM_GCCDR (scmptr));
		  }
	      }
	      break;
	    case scm_tcs_cons_imcar:
	    case scm_tcs_cons_nimcar:
	    case scm_tcs_closures:
	    case scm_tc7_pws:
	      if (SCM_GCMARKP (scmptr))
		goto cmrkcontinue;
	      break;
	    case scm_tc7_wvect:
	      if (SCM_GC8MARKP (scmptr))
		{
		  goto c8mrkcontinue;
		}
	      else
		{
		  /* Weak vector data is allocated with two extra
		     leading words; free from the true base.  */
		  m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
		  scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
		  break;
		}

	    case scm_tc7_vector:
	    case scm_tc7_lvector:
#ifdef CCLO
	    case scm_tc7_cclo:
#endif
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;

	      m += (SCM_LENGTH (scmptr) * sizeof (SCM));
	    freechars:
	      scm_must_free (SCM_CHARS (scmptr));
	      /*	SCM_SETCHARS(scmptr, 0);*/
	      break;
#ifdef HAVE_ARRAYS
	    case scm_tc7_bvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
	      goto freechars;
	    case scm_tc7_byvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
	      goto freechars;
	    case scm_tc7_ivect:
	    case scm_tc7_uvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
	      goto freechars;
	    case scm_tc7_svect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
	      goto freechars;
#ifdef HAVE_LONG_LONGS
	    case scm_tc7_llvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
	      goto freechars;
#endif
	    case scm_tc7_fvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
	      goto freechars;
	    case scm_tc7_dvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
	      goto freechars;
	    case scm_tc7_cvect:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
	      goto freechars;
#endif
	    case scm_tc7_substring:
	      /* Substrings share their characters with the parent
		 string; nothing extra to free.  */
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      break;
	    case scm_tc7_string:
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      m += SCM_HUGE_LENGTH (scmptr) + 1;
	      goto freechars;
	    case scm_tc7_msymbol:
	      if (SCM_GC8MARKP (scmptr))
		goto c8mrkcontinue;
	      m += (  SCM_LENGTH (scmptr)
		    + 1
		    + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
	      scm_must_free ((char *)SCM_SLOTS (scmptr));
	      break;
	    case scm_tc7_contin:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
	      if (SCM_VELTS (scmptr))
		goto freechars;
	      /* fall through when there is no stack copy to free */
	    case scm_tc7_ssymbol:
	      if SCM_GC8MARKP(scmptr)
		goto c8mrkcontinue;
	      break;
	    case scm_tcs_subrs:
	      /* Subr cells are never reclaimed.  */
	      continue;
	    case scm_tc7_port:
	      if SCM_GC8MARKP (scmptr)
		goto c8mrkcontinue;
	      if SCM_OPENP (scmptr)
		{
		  int k = SCM_PTOBNUM (scmptr);
		  if (!(k < scm_numptob))
		    goto sweeperr;
		  /* Keep "revealed" ports alive.  */
		  if (scm_revealed_count (scmptr) > 0)
		    continue;
		  /* Yes, I really do mean scm_ptobs[k].free */
		  /* rather than ftobs[k].close.  .close */
		  /* is for explicit CLOSE-PORT by user */
		  m += (scm_ptobs[k].free) (scmptr);
		  SCM_SETSTREAM (scmptr, 0);
		  scm_remove_from_port_table (scmptr);
		  scm_gc_ports_collected++;
		  SCM_SETAND_CAR (scmptr, ~SCM_OPN);
		}
	      break;
	    case scm_tc7_smob:
	      switch SCM_GCTYP16 (scmptr)
		{
		case scm_tc_free_cell:
		case scm_tc16_real:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  break;
#ifdef SCM_BIGDIG
		case scm_tc16_big:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
		  goto freechars;
#endif /* def SCM_BIGDIG */
		case scm_tc16_complex:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;
		  m += 2 * sizeof (double);
		  goto freechars;
		default:
		  if SCM_GC8MARKP (scmptr)
		    goto c8mrkcontinue;

		  {
		    /* User-defined smob: delegate to its free hook.  */
		    int k;
		    k = SCM_SMOBNUM (scmptr);
		    if (!(k < scm_numsmob))
		      goto sweeperr;
		    m += (scm_smobs[k].free) ((SCM) scmptr);
		    break;
		  }
		}
	      break;
	    default:
	    sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
	    }
#if 0
	  if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
	    exit (2);
#endif
#ifndef GUILE_NEW_GC_SCHEME
	  n += span;
#else
	  /* When a full cluster has been gathered, close it off: the
	     current cell becomes the cluster spine and is appended to
	     the freelist's cluster chain.  */
	  if (!--left_to_collect)
	    {
	      SCM_SETCAR (scmptr, nfreelist);
	      *freelist->clustertail = scmptr;
	      freelist->clustertail = SCM_CDRLOC (scmptr);

	      nfreelist = SCM_EOL;
	      freelist->collected += span * freelist->cluster_size;
	      left_to_collect = freelist->cluster_size;
	    }
	  else
#endif
	    {
	      /* Stick the new cell on the front of nfreelist.  It's
		 critical that we mark this cell as freed; otherwise, the
		 conservative collector might trace it as some other type
		 of object.  */
	      SCM_SETCAR (scmptr, scm_tc_free_cell);
	      SCM_SETCDR (scmptr, nfreelist);
	      nfreelist = scmptr;
	    }

	  continue;
	c8mrkcontinue:
	  SCM_CLRGC8MARK (scmptr);
	  continue;
	cmrkcontinue:
	  SCM_CLRGCMARK (scmptr);
	}
#ifdef GC_FREE_SEGMENTS
      /* The entire segment turned out to be garbage: hand it back to
	 malloc and compact the segment table.  */
      if (n == seg_size)
	{
	  register long j;

	  freelist->heap_size -= seg_size;
	  free ((char *) scm_heap_table[i].bounds[0]);
	  scm_heap_table[i].bounds[0] = 0;
	  for (j = i + 1; j < scm_n_heap_segs; j++)
	    scm_heap_table[j - 1] = scm_heap_table[j];
	  scm_n_heap_segs -= 1;
	  i--; /* We need to scan the segment just moved.  */
	}
      else
#endif /* ifdef GC_FREE_SEGMENTS */
	{
	  /* Update the real freelist pointer to point to the head of
	     the list of free cells we've built for this segment.  */
	  freelist->cells = nfreelist;
#ifdef GUILE_NEW_GC_SCHEME
	  freelist->left_to_collect = left_to_collect;
#endif
	}

#ifndef GUILE_NEW_GC_SCHEME
      freelist->collected += n;
#endif

#ifdef GUILE_DEBUG_FREELIST
#ifdef GUILE_NEW_GC_SCHEME
      scm_check_freelist (freelist == &scm_master_freelist
			  ? scm_freelist
			  : scm_freelist2);
#else
      scm_check_freelist (freelist);
#endif
      scm_map_free_list ();
#endif
    }

#ifdef GUILE_NEW_GC_SCHEME
  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
#endif

  /* Scan weak vectors. */
  {
    SCM *ptr, w;
    for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
      {
	if (!SCM_IS_WHVEC_ANY (w))
	  {
	    /* Plain weak vector: blank out slots whose referent was
	       just swept (now a free cell).  */
	    register long j, n;

	    ptr = SCM_VELTS (w);
	    n = SCM_LENGTH (w);
	    for (j = 0; j < n; ++j)
	      if (SCM_FREEP (ptr[j]))
		ptr[j] = SCM_BOOL_F;
	  }
	else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
	  {
	    /* Weak hash table: unlink alist entries whose weak key
	       and/or value died in this collection.  */
	    SCM obj = w;
	    register long n = SCM_LENGTH (w);
	    register long j;

	    ptr = SCM_VELTS (w);

	    for (j = 0; j < n; ++j)
	      {
		SCM * fixup;
		SCM alist;
		int weak_keys;
		int weak_values;

		weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
		weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);

		fixup = ptr + j;
		alist = *fixup;

		while (   SCM_CONSP (alist)
		       && SCM_CONSP (SCM_CAR (alist)))
		  {
		    SCM key;
		    SCM value;

		    key = SCM_CAAR (alist);
		    value = SCM_CDAR (alist);
		    if (   (weak_keys && SCM_FREEP (key))
			|| (weak_values && SCM_FREEP (value)))
		      {
			/* Drop this entry by splicing it out.  */
			*fixup = SCM_CDR (alist);
		      }
		    else
		      fixup = SCM_CDRLOC (alist);
		    alist = SCM_CDR (alist);
		  }
	      }
	  }
      }
  }
  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
#ifdef GUILE_NEW_GC_SCHEME
  scm_gc_yield -= scm_cells_allocated;
#endif
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
1962
1963
1964 \f
1965
1966 /* {Front end to malloc}
1967 *
1968 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1969 *
 * These functions provide services comparable to malloc, realloc, and
1971 * free. They are for allocating malloced parts of scheme objects.
1972 * The primary purpose of the front end is to impose calls to gc.
1973 */
1974
1975 /* scm_must_malloc
1976 * Return newly malloced storage or throw an error.
1977 *
1978 * The parameter WHAT is a string for error reporting.
1979 * If the threshold scm_mtrigger will be passed by this
1980 * allocation, or if the first call to malloc fails,
1981 * garbage collect -- on the presumption that some objects
1982 * using malloced storage may be collected.
1983 *
1984 * The limit scm_mtrigger may be raised by this allocation.
1985 */
1986 void *
1987 scm_must_malloc (scm_sizet size, const char *what)
1988 {
1989 void *ptr;
1990 unsigned long nm = scm_mallocated + size;
1991
1992 if (nm <= scm_mtrigger)
1993 {
1994 SCM_SYSCALL (ptr = malloc (size));
1995 if (NULL != ptr)
1996 {
1997 scm_mallocated = nm;
1998 return ptr;
1999 }
2000 }
2001
2002 scm_igc (what);
2003
2004 nm = scm_mallocated + size;
2005 SCM_SYSCALL (ptr = malloc (size));
2006 if (NULL != ptr)
2007 {
2008 scm_mallocated = nm;
2009 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2010 if (nm > scm_mtrigger)
2011 scm_mtrigger = nm + nm / 2;
2012 else
2013 scm_mtrigger += scm_mtrigger / 2;
2014 }
2015 return ptr;
2016 }
2017
2018 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
2019 return 0; /* never reached */
2020 }
2021
2022
2023 /* scm_must_realloc
2024 * is similar to scm_must_malloc.
2025 */
2026 void *
2027 scm_must_realloc (void *where,
2028 scm_sizet old_size,
2029 scm_sizet size,
2030 const char *what)
2031 {
2032 void *ptr;
2033 scm_sizet nm = scm_mallocated + size - old_size;
2034
2035 if (nm <= scm_mtrigger)
2036 {
2037 SCM_SYSCALL (ptr = realloc (where, size));
2038 if (NULL != ptr)
2039 {
2040 scm_mallocated = nm;
2041 return ptr;
2042 }
2043 }
2044
2045 scm_igc (what);
2046
2047 nm = scm_mallocated + size - old_size;
2048 SCM_SYSCALL (ptr = realloc (where, size));
2049 if (NULL != ptr)
2050 {
2051 scm_mallocated = nm;
2052 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2053 if (nm > scm_mtrigger)
2054 scm_mtrigger = nm + nm / 2;
2055 else
2056 scm_mtrigger += scm_mtrigger / 2;
2057 }
2058 return ptr;
2059 }
2060
2061 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
2062 return 0; /* never reached */
2063 }
2064
2065 void
2066 scm_must_free (void *obj)
2067 {
2068 if (obj)
2069 free (obj);
2070 else
2071 scm_wta (SCM_INUM0, "already free", "");
2072 }
2073
2074 /* Announce that there has been some malloc done that will be freed
2075 * during gc. A typical use is for a smob that uses some malloced
2076 * memory but can not get it from scm_must_malloc (for whatever
2077 * reason). When a new object of this smob is created you call
2078 * scm_done_malloc with the size of the object. When your smob free
2079 * function is called, be sure to include this size in the return
2080 * value. */
2081
2082 void
2083 scm_done_malloc (long size)
2084 {
2085 scm_mallocated += size;
2086
2087 if (scm_mallocated > scm_mtrigger)
2088 {
2089 scm_igc ("foreign mallocs");
2090 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2091 {
2092 if (scm_mallocated > scm_mtrigger)
2093 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2094 else
2095 scm_mtrigger += scm_mtrigger / 2;
2096 }
2097 }
2098 }
2099
2100
2101 \f
2102
2103 /* {Heap Segments}
2104 *
2105 * Each heap segment is an array of objects of a particular size.
2106 * Every segment has an associated (possibly shared) freelist.
2107 * A table of segment records is kept that records the upper and
2108 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
2110 * into valid segments at reasonable offsets). */
2111
/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* Upper bound, in bytes, on the size of any single heap segment
 * (enforced in alloc_some_heap under GUILE_NEW_GC_SCHEME).  */
scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Table of heap segment records, kept sorted by base address (see
 * init_heap_seg), and the number of entries currently in it.  The
 * binary searches in scm_mark_locations and scm_cellp rely on the
 * sort order.  */
scm_heap_seg_data_t * scm_heap_table = 0;
int scm_n_heap_segs = 0;
2128
2129 /* init_heap_seg
 * initializes a new heap segment and returns the number of objects it contains.
2131 *
2132 * The segment origin, segment size in bytes, and the span of objects
2133 * in cells are input parameters. The freelist is both input and output.
2134 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record.
2137 */
2138
2139
/* Initialize the new heap segment [SEG_ORG, SEG_ORG+SIZE) for
 * FREELIST, inserting its record into scm_heap_table in address order
 * and threading its cells onto the freelist (as clusters under
 * GUILE_NEW_GC_SCHEME).  Returns SIZE, or 0 if SEG_ORG is NULL.
 */
static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
#ifdef SCM_POINTERS_MUNGED
  register SCM scmptr;
#else
#undef scmptr
#define scmptr ptr
#endif
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  ptr = CELL_UP (seg_org, span);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = CELL_DN ((char *) seg_org + size, span);

  /* Find the right place and insert the segment record.
   * (The table is kept sorted by base address.)
   */
  for (new_seg_index = 0;
       (   (new_seg_index < scm_n_heap_segs)
	&& SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    /* Shift later records up to make room.  */
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].valid = 0;
  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
  scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;


  /* Compute the least valid object pointer w/in this segment
   */
  ptr = CELL_UP (ptr, span);


  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

#ifdef GUILE_NEW_GC_SCHEME

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;
    int n_cluster_cells = span * freelist->cluster_size;

    while (n_new_cells > span) /* at least one spine + one freecell */
      {
	/* Determine end of cluster
	 */
	if (n_new_cells >= n_cluster_cells)
	  {
	    seg_end = ptr + n_cluster_cells;
	    n_new_cells -= n_cluster_cells;
	  }
	else
	  /* [cmm] looks like the segment size doesn't divide cleanly by
	     cluster size.  bad cmm! */
	  abort();

	/* Allocate cluster spine
	 */
	*clusterp = PTR2SCM (ptr);
	SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
	clusterp = SCM_CDRLOC (*clusterp);
	ptr += span;

	/* Thread the remaining cells of the cluster into a list of
	   free cells.  */
	while (ptr < seg_end)
	  {
#ifdef SCM_POINTERS_MUNGED
	    scmptr = PTR2SCM (ptr);
#endif
	    SCM_SETCAR (scmptr, scm_tc_free_cell);
	    SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
	    ptr += span;
	  }

	SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
      }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#else /* GUILE_NEW_GC_SCHEME */

  /* Prepend objects in this segment to the freelist.
   */
  while (ptr < seg_end)
    {
#ifdef SCM_POINTERS_MUNGED
      scmptr = PTR2SCM (ptr);
#endif
      SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
      SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
      ptr += span;
    }

  ptr -= span;

  /* Patch up the last freelist pointer in the segment
   * to join it to the input freelist.
   */
  SCM_SETCDR (PTR2SCM (ptr), freelist->cells);
  freelist->cells = PTR2SCM (CELL_UP (seg_org, span));

  freelist->heap_size += n_new_cells;

#endif /* GUILE_NEW_GC_SCHEME */

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
#ifdef scmptr
#undef scmptr
#endif
}
2281
2282 #ifndef GUILE_NEW_GC_SCHEME
2283 #define round_to_cluster_size(freelist, len) len
2284 #else
2285
2286 static scm_sizet
2287 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2288 {
2289 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2290
2291 return
2292 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2293 + ALIGNMENT_SLACK (freelist);
2294 }
2295
2296 #endif
2297
/* Grow the heap by one new segment attached to FREELIST, expanding
 * scm_heap_table first, then picking a segment size (per the rule in
 * gc.h) and allocating "with decaying ambition" -- halving the
 * request until malloc succeeds or the minimum segment size is
 * reached.  Errors out via scm_wta if the heap is locked or nothing
 * could be allocated.
 */
static void
alloc_some_heap (scm_freelist_t *freelist)
{
  scm_heap_seg_data_t * tmptable;
  SCM_CELLPTR ptr;
  long len;

  /* Critical code sections (such as the garbage collector)
   * aren't supposed to add heap segments.
   */
  if (scm_gc_heap_lock)
    scm_wta (SCM_UNDEFINED, "need larger initial", "heap");

  /* Expand the heap tables to have room for the new segment.
   * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
   * only if the allocation of the segment itself succeeds.
   */
  len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);

  SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
			   realloc ((char *)scm_heap_table, len)));
  if (!tmptable)
    scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
  else
    scm_heap_table = tmptable;


  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
#ifdef GUILE_NEW_GC_SCHEME
  {
    /* Assure that the new segment is predicted to be large enough for
     * the new trigger.
     */
    int slack = freelist->gc_trigger - freelist->collected;
    int min_cells = 100 * slack / (99 - freelist->gc_trigger_fraction);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%d < %d)", len, min_cells);
#endif
    if (len < min_cells)
      len = min_cells + 1;
    /* `len' was a cell count up to here; convert to bytes.  */
    len *= sizeof (scm_cell);
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;
#else
  if (scm_expmem)
    {
      len = (scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell));
      /* Guard against overflow when SCM_EXPHEAP's result does not fit
	 in scm_sizet.  */
      if ((scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell))
	  != len)
	len = 0;
    }
  else
    len = SCM_HEAP_SEG_SIZE;
#endif /* GUILE_NEW_GC_SCHEME */

  {
    scm_sizet smallest;

#ifndef GUILE_NEW_GC_SCHEME
    smallest = (freelist->span * sizeof (scm_cell));
#else
    smallest = CLUSTER_SIZE_IN_BYTES (freelist);
#endif

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition. */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
	   && (len >= smallest))
      {
	scm_sizet rounded_len = round_to_cluster_size(freelist, len);
	SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
	if (ptr)
	  {
	    init_heap_seg (ptr, rounded_len, freelist);
	    return;
	  }
	len /= 2;
      }
  }

  scm_wta (SCM_UNDEFINED, "could not grow", "heap");
}
2388
2389
2390
/* Walk every cell of every heap segment and, for cells whose car
   carries a particular tag, overwrite the car with NAME.  Passing
   #t for NAME appears to select all such cells.  */
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
	    "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  /* Snapshot the segment count; segments are not added while
     interrupts are deferred.  */
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
      pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM incar;
	  incar = p->car;
	  /* Low three bits == 1: presumably the tag of the cell kind
	     being unhashed -- verify against tags.h.  */
	  if (1 == (7 & (int)incar))
	    {
	      /* Strip the tag bit to recover the untagged pointer.  */
	      --incar;
	      if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
		   && (SCM_CDR (incar) != 0)
		   && (SCM_UNPACK (SCM_CDR (incar)) != 1))
		{
		  p->car = name;
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
2428
2429
2430 \f
2431 /* {GC Protection Helper Functions}
2432 */
2433
2434
/* Deliberately empty: calling this with &obj presumably forces OBJ to
   live on the caller's stack through the call, where the conservative
   stack-scanning GC can see it (cf. scm_return_first below).  */
void
scm_remember (SCM *ptr)
{ /* empty */ }
2438
2439
2440 /*
2441 These crazy functions prevent garbage collection
2442 of arguments after the first argument by
2443 ensuring they remain live throughout the
2444 function because they are used in the last
2445 line of the code block.
2446 It'd be better to have a nice compiler hint to
2447 aid the conservative stack-scanning GC. --03/09/00 gjb */
/* Return ELT unchanged.  The trailing varargs exist only so that
   callers can keep extra objects live on the stack across the call --
   see the comment above.  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
2453
/* Identity on the first argument.  Any further arguments are ignored,
   but passing them keeps their values live on the stack for the
   conservative GC -- see the comment above scm_return_first.  */
int
scm_return_first_int (int i, ...)
{
  int first = i;
  return first;
}
2459
2460
/* Protect OBJ from garbage collection permanently by consing it onto
   scm_permobjs (presumably a GC root -- it is initialized in
   scm_init_storage).  There is no way to undo this; for revocable
   protection use scm_protect_object.  Returns OBJ.  */
SCM
scm_permanent_object (SCM obj)
{
  /* Defer interrupts so the cons and the list update are atomic with
     respect to async handlers.  */
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2469
2470
2471 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2472 even if all other references are dropped, until someone applies
2473 scm_unprotect_object to it. This function returns OBJ.
2474
2475 Calls to scm_protect_object nest. For every object OBJ, there is a
2476 counter which scm_protect_object(OBJ) increments and
2477 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
2478 an object's counter is greater than zero, the garbage collector
2479 will not free it.
2480
2481 Of course, that's not how it's implemented. scm_protect_object and
2482 scm_unprotect_object just maintain a list of references to things.
2483 Since the GC knows about this list, all objects it mentions stay
2484 alive. scm_protect_object adds its argument to the list;
2485 scm_unprotect_object removes the first occurrence of its argument
   from the list. */
SCM
scm_protect_object (SCM obj)
{
  /* See the long comment above: the GC knows about scm_protects, so
     consing OBJ onto it keeps OBJ alive until scm_unprotect_object
     removes that entry.  */
  scm_protects = scm_cons (obj, scm_protects);

  return obj;
}
2494
2495
2496 /* Remove any protection for OBJ established by a prior call to
2497 scm_protect_object. This function returns OBJ.
2498
2499 See scm_protect_object for more information. */
2500 SCM
2501 scm_unprotect_object (SCM obj)
2502 {
2503 SCM *tail_ptr = &scm_protects;
2504
2505 while (SCM_CONSP (*tail_ptr))
2506 if (SCM_CAR (*tail_ptr) == obj)
2507 {
2508 *tail_ptr = SCM_CDR (*tail_ptr);
2509 break;
2510 }
2511 else
2512 tail_ptr = SCM_CDRLOC (*tail_ptr);
2513
2514 return obj;
2515 }
2516
2517 int terminating;
2518
/* Process-termination hook, registered via atexit () or on_exit ()
   in scm_init_storage: record that we are terminating and flush all
   ports so buffered output is not lost.  The signature depends on
   which registration mechanism the platform provides.  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}
2537
2538 \f
/* Allocate the very first heap segment for FREELIST, of roughly
   INIT_HEAP_SIZE bytes, falling back to SCM_HEAP_SEG_SIZE if that
   fails.  Returns 0 on success, 1 if even the fallback failed.  */
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
  /* init_heap_seg presumably tolerates a NULL pointer from malloc by
     returning 0 -- TODO confirm; the fallback below relies on it.  */
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    /* The requested size worked, so the heap may be grown later.  */
    scm_expmem = 1;

#ifdef GUILE_NEW_GC_SCHEME
  /* A percentage-based trigger is converted to an absolute value now
     that the actual heap size is known.  */
  if (freelist->gc_trigger_fraction)
    freelist->gc_trigger = (freelist->heap_size * freelist->gc_trigger_fraction
			    / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->gc_trigger);
#endif

  return 0;
}
2565
2566 \f
2567 #ifdef GUILE_NEW_GC_SCHEME
2568 static void
2569 init_freelist (scm_freelist_t *freelist,
2570 int span,
2571 int cluster_size,
2572 int gc_trigger)
2573 {
2574 freelist->clusters = SCM_EOL;
2575 freelist->cluster_size = cluster_size + 1;
2576 freelist->left_to_collect = 0;
2577 freelist->clusters_allocated = 0;
2578 if (gc_trigger < 0)
2579 freelist->gc_trigger_fraction = - gc_trigger;
2580 else
2581 {
2582 freelist->gc_trigger = gc_trigger;
2583 freelist->gc_trigger_fraction = 0;
2584 }
2585 freelist->span = span;
2586 freelist->collected = 0;
2587 freelist->heap_size = 0;
2588 }
2589
int
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
		  scm_sizet init_heap_size_2, int gc_trigger_2,
		  scm_sizet max_segment_size)
#else
int
scm_init_storage (scm_sizet init_heap_size_1, scm_sizet init_heap_size_2)
#endif
/* Initialize the storage subsystem: the protects table, the
   freelists, the initial heap segments, the port table, the
   termination handler, and a number of permanent Scheme objects.
   Zero-valued arguments select the compiled-in defaults.  Returns 0
   on success, 1 if an initial allocation failed.  */
{
  scm_sizet j;

  if (!init_heap_size_1)
    init_heap_size_1 = SCM_INIT_HEAP_SIZE_1;
  if (!init_heap_size_2)
    init_heap_size_2 = SCM_INIT_HEAP_SIZE_2;

  /* Make every entry of the protects table a safe value before the
     GC can possibly run.  */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

#ifdef GUILE_NEW_GC_SCHEME
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  init_freelist (&scm_master_freelist,
		 1, SCM_CLUSTER_SIZE_1,
		 gc_trigger_1 ? gc_trigger_1 : SCM_GC_TRIGGER_1);
  init_freelist (&scm_master_freelist2,
		 2, SCM_CLUSTER_SIZE_2,
		 gc_trigger_2 ? gc_trigger_2 : SCM_GC_TRIGGER_2);
  scm_max_segment_size
    = max_segment_size ? max_segment_size : SCM_MAX_SEGMENT_SIZE;
#else
  scm_freelist.cells = SCM_EOL;
  scm_freelist.span = 1;
  scm_freelist.collected = 0;
  scm_freelist.heap_size = 0;

  scm_freelist2.cells = SCM_EOL;
  scm_freelist2.span = 2;
  scm_freelist2.collected = 0;
  scm_freelist2.heap_size = 0;
#endif

  scm_expmem = 0;

  /* NOTE(review): this assignment to j looks dead -- j is not read
     again below; verify before removing.  */
  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  scm_heap_table = ((scm_heap_seg_data_t *)
		    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));

#ifdef GUILE_NEW_GC_SCHEME
  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;
#else
  if (make_initial_segment (init_heap_size_1, &scm_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_freelist2))
    return 1;
#endif

  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_weak_vectors = SCM_EOL;

  /* Initialise the list of ports. */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

  /* Register the flush-on-exit handler (see cleanup above).  */
#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  /* scm_undefineds is made circular: its cdr points back to itself.  */
  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  /* Permanent shared objects and the symbol hash tables.  */
  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = SCM_EOL;
  scm_asyncs = SCM_EOL;
  scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif
  return 0;
}
2690 \f
2691
/* Register this file's primitives with the interpreter; gc.x is
   presumably generated from the SCM_DEFINE forms in this file by the
   snarfing step of the build.  */
void
scm_init_gc ()
{
#include "gc.x"
}
2697
2698 /*
2699 Local Variables:
2700 c-file-style: "gnu"
2701 End:
2702 */