* print.h, stacks.h, options.c, options.h: Changed C++
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 \f
46 #include <stdio.h>
47 #include "_scm.h"
48 #include "stime.h"
49 #include "stackchk.h"
50 #include "struct.h"
51 #include "weaks.h"
52 #include "guardians.h"
53 #include "smob.h"
54 #include "unif.h"
55 #include "async.h"
56 #include "ports.h"
57 #include "root.h"
58 #include "strings.h"
59 #include "vectors.h"
60
61 #include "validate.h"
62 #include "gc.h"
63
64 #ifdef HAVE_MALLOC_H
65 #include <malloc.h>
66 #endif
67
68 #ifdef HAVE_UNISTD_H
69 #include <unistd.h>
70 #endif
71
72 #ifdef __STDC__
73 #include <stdarg.h>
74 #define var_start(x, y) va_start(x, y)
75 #else
76 #include <varargs.h>
77 #define var_start(x, y) va_start(x)
78 #endif
79
80 \f
81 /* {heap tuning parameters}
82 *
83 * These are parameters for controlling memory allocation. The heap
84 * is the area out of which scm_cons, and object headers are allocated.
85 *
86 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
87 * 64 bit machine. The units of the _SIZE parameters are bytes.
88 * Cons pairs and object headers occupy one heap cell.
89 *
90 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
91 * allocated initially the heap will grow by half its current size
92 * each subsequent time more heap is needed.
93 *
94 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
95 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
96 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
97 * is in scm_init_storage() and alloc_some_heap() in sys.c
98 *
99 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
100 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
101 *
102 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
103 * is needed.
104 *
105 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
106 * trigger a GC.
107 *
108 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
109 * reclaimed by a GC triggered by must_malloc. If less than this is
110 * reclaimed, the trigger threshold is raised. [I don't know what a
111 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
112 * work around a oscillation that caused almost constant GC.]
113 */
114
/* Initial heap size and cluster size for the 1-cell (cons-sized) freelist. */
#define SCM_INIT_HEAP_SIZE_1 (50000L * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_1 2000L
/* NOTE(review): the trigger is negative here; presumably a negative value
   selects a percentage-of-heap trigger (cf. gc_trigger_fraction in
   scm_freelist_t) -- confirm against the freelist initialization code. */
#define SCM_GC_TRIGGER_1 -45

/* Initial heap size and cluster size for the 2-cell (double-cell) freelist. */
#define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
#define SCM_CLUSTER_SIZE_2 1000L
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application
 */
#define SCM_GC_TRIGGER_2 -45

/* Upper bound on the size of a single heap segment allocation. */
#define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
# define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
# define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
   bounds for allocated storage */

#ifdef PROT386
/*in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
# define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
# define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
# define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
# define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
/* Number of bytes occupied by one full cluster of FREELIST. */
#define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
/* Extra bytes requested so a segment can be aligned to a cell boundary. */
#define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
#ifdef GUILE_NEW_GC_SCHEME
#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#else
#define SCM_HEAP_SIZE (scm_freelist.heap_size + scm_freelist2.heap_size)
#endif
167
168
169 \f
170 /* scm_freelists
171 */
172
/* Per-freelist bookkeeping.  Under GUILE_NEW_GC_SCHEME a "master"
 * freelist owns clusters of free cells that are handed out to the
 * (per-thread, eventually) cell freelists; without it, this struct is
 * the freelist itself.  Field order matters: the global instances
 * below use positional initializers. */
typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
#ifdef GUILE_NEW_GC_SCHEME
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size gc_trigger,
     except the last one which may be shorter */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* set to grow the heap when we run out of clusters
   */
  int grow_heap_p;
  /* minimum number of objects allocated before GC is triggered
   */
  int gc_trigger;
  /* defines gc_trigger as percent of heap size
   * 0 => constant trigger
   */
  int gc_trigger_fraction;
#endif
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  int collected;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  int heap_size;
} scm_freelist_t;
207
#ifdef GUILE_NEW_GC_SCHEME
/* The cell freelists handed to SCM_NEWCELL, plus their masters which
 * own the clusters.  NOTE: the initializers are positional and must
 * track the field order of scm_freelist_t exactly. */
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
#else
/* Old scheme: the structs themselves are the freelists (span 1 and 2). */
scm_freelist_t scm_freelist = { SCM_EOL, 1, 0, 0 };
scm_freelist_t scm_freelist2 = { SCM_EOL, 2, 0, 0 };
#endif
221
/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;


/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */

int scm_block_gc = 1;

/* If fewer than MIN_GC_YIELD cells are recovered during a garbage
 * collection (GC) more space is allocated for the heap.
 */
#define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
/* Signed on purpose: scm_igc treats a negative value as evidence of an
 * unreported-allocation underflow and aborts. */
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
#ifdef GUILE_NEW_GC_SCHEME
/* Total cells freed by the most recent GC, across all freelists. */
unsigned long scm_gc_yield;
#endif
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
/* Start time (internal run time units) of the current GC; on GC end it
 * holds the elapsed time of that GC. */
unsigned long scm_gc_rt;
unsigned long scm_gc_time_taken = 0;
263
264 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
265 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
266 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
267 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
268 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
269 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
270
/* Descriptor for one contiguous heap segment; the global heap table
 * (scm_heap_table) holds one of these per segment. */
typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of SCM words per object in this segment */
  int span;

  /* If SEG_DATA->valid is non-zero, the conservative marking
     functions will apply SEG_DATA->valid to the purported pointer and
     SEG_DATA, and mark the object iff the function returns non-zero.
     At the moment, I don't think anyone uses this. */
  int (*valid) ();
} scm_heap_seg_data_t;
289
290
291
292
293 static void scm_mark_weak_vector_spines (void);
294 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
295 static void alloc_some_heap (scm_freelist_t *);
296
297
298 \f
299 /* Debugging functions. */
300
301 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
302
303 /* Return the number of the heap segment containing CELL. */
304 static int
305 which_seg (SCM cell)
306 {
307 int i;
308
309 for (i = 0; i < scm_n_heap_segs; i++)
310 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) cell)
311 && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) cell))
312 return i;
313 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
314 SCM_UNPACK (cell));
315 abort ();
316 }
317
318
319 #ifdef GUILE_NEW_GC_SCHEME
320 static void
321 map_free_list (scm_freelist_t *master, SCM freelist)
322 {
323 int last_seg = -1, count = 0;
324 SCM f;
325
326 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
327 {
328 int this_seg = which_seg (f);
329
330 if (this_seg != last_seg)
331 {
332 if (last_seg != -1)
333 fprintf (stderr, " %5d %d-cells in segment %d\n",
334 count, master->span, last_seg);
335 last_seg = this_seg;
336 count = 0;
337 }
338 count++;
339 }
340 if (last_seg != -1)
341 fprintf (stderr, " %5d %d-cells in segment %d\n",
342 count, master->span, last_seg);
343 }
344 #else
345 static void
346 map_free_list (scm_freelist_t *freelist)
347 {
348 int last_seg = -1, count = 0;
349 SCM f;
350
351 for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f))
352 {
353 int this_seg = which_seg (f);
354
355 if (this_seg != last_seg)
356 {
357 if (last_seg != -1)
358 fprintf (stderr, " %5d %d-cells in segment %d\n",
359 count, freelist->span, last_seg);
360 last_seg = this_seg;
361 count = 0;
362 }
363 count++;
364 }
365 if (last_seg != -1)
366 fprintf (stderr, " %5d %d-cells in segment %d\n",
367 count, freelist->span, last_seg);
368 }
369 #endif
370
/* Scheme procedure `map-free-list': print one line of per-segment
 * geometry (span and cell count), then a per-segment breakdown of the
 * free cells on each freelist.  Debug builds only. */
SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
	    "Print debugging information about the free-list.\n"
	    "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;
  fprintf (stderr, "%d segments total (%d:%d",
	   scm_n_heap_segs,
	   scm_heap_table[0].span,
	   scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
	     scm_heap_table[i].span,
	     scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
#ifdef GUILE_NEW_GC_SCHEME
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
#else
  map_free_list (&scm_freelist);
  map_free_list (&scm_freelist2);
#endif
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
399
400 #ifdef GUILE_NEW_GC_SCHEME
/* State shared across successive free_list_length calls so that runs of
 * equal-sized clusters can be printed as a range rather than one line
 * each.  Reset implicitly by the i == -1 (plain freelist) call. */
static int last_cluster;
static int last_size;

/* Count the cells on FREELIST, aborting if any cell is not marked free.
 * I is the cluster index (-1 for the plain freelist); TITLE labels the
 * output.  Printing is deferred while consecutive clusters have the
 * same size, producing "cluster M-N  size" style ranges. */
static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
    if (SCM_UNPACK_CAR (ls) == scm_tc_free_cell)
      ++n;
    else
      {
	fprintf (stderr, "bad cell in %s at position %d\n", title, n);
	abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
	{
	  /* Close off the previous run: single cluster or a range. */
	  if (last_cluster == i - 1)
	    fprintf (stderr, "\t%d\n", last_size);
	  else
	    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
	}
      if (i >= 0)
	fprintf (stderr, "%s %d", title, i);
      else
	fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}
435
/* Print a report of the plain freelist plus every cluster of MASTER,
 * followed by the grand total of free objects.  TITLE heads the report. */
static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  /* -1 selects the "plain freelist" formatting in free_list_length. */
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  /* Flush the last pending run (free_list_length defers printing). */
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}
456
/* Scheme procedure `free-list-length': report free-cell statistics for
 * both the 1-cell and 2-cell freelists.  Debug builds only. */
SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
	    "Print debugging information about the free-list.\n"
	    "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
468 #endif
469
470 #endif
471
472 #ifdef GUILE_DEBUG_FREELIST
473
474 /* Number of calls to SCM_NEWCELL since startup. */
475 static unsigned long scm_newcell_count;
476 static unsigned long scm_newcell2_count;
477
478 /* Search freelist for anything that isn't marked as a free cell.
479 Abort if we find something. */
480 #ifdef GUILE_NEW_GC_SCHEME
481 static void
482 scm_check_freelist (SCM freelist)
483 {
484 SCM f;
485 int i = 0;
486
487 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
488 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
489 {
490 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
491 scm_newcell_count, i);
492 fflush (stderr);
493 abort ();
494 }
495 }
496 #else
497 static void
498 scm_check_freelist (scm_freelist_t *freelist)
499 {
500 SCM f;
501 int i = 0;
502
503 for (f = freelist->cells; SCM_NIMP (f); f = SCM_CDR (f), i++)
504 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
505 {
506 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
507 scm_newcell_count, i);
508 fflush (stderr);
509 abort ();
510 }
511 }
512 #endif
513
/* When non-zero, scm_debug_newcell/scm_debug_newcell2 verify the whole
 * freelist (and force a GC) on every allocation.  Toggled from Scheme. */
static int scm_debug_check_freelist = 0;

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
	    "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
	    "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
	    "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* Validates FLAG as a boolean and copies it into the C flag. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
527
528
529 #ifdef GUILE_NEW_GC_SCHEME
530
/* Debug replacement for the SCM_NEWCELL macro: optionally verifies the
 * freelist and forces a GC first, then allocates a 1-cell exactly as
 * the macro would.  Must be kept in sync with SCM_NEWCELL. */
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_CDR (scm_freelist);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}
556
/* Debug replacement for the SCM_NEWCELL2 macro: optionally verifies the
 * 2-cell freelist and forces a GC first, then allocates a double cell
 * exactly as the macro would.  Must be kept in sync with SCM_NEWCELL2. */
SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_CDR (scm_freelist2);
      SCM_SETCAR (new, scm_tc16_allocated);
    }

  return new;
}
582
583 #else /* GUILE_NEW_GC_SCHEME */
584
/* Debug replacement for SCM_NEWCELL, old GC scheme: the freelist struct
 * itself carries the cells and the allocation counter is bumped here. */
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (&scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_IMP (scm_freelist.cells))
    new = scm_gc_for_newcell (&scm_freelist);
  else
    {
      new = scm_freelist.cells;
      scm_freelist.cells = SCM_CDR (scm_freelist.cells);
      SCM_SETCAR (new, scm_tc16_allocated);
      ++scm_cells_allocated;
    }

  return new;
}
611
/* Debug replacement for SCM_NEWCELL2, old GC scheme: allocates a double
 * cell from scm_freelist2 and accounts for 2 cells. */
SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist) {
    scm_check_freelist (&scm_freelist2);
    scm_gc();
  }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL2
     macro.  */
  if (SCM_IMP (scm_freelist2.cells))
    new = scm_gc_for_newcell (&scm_freelist2);
  else
    {
      new = scm_freelist2.cells;
      scm_freelist2.cells = SCM_CDR (scm_freelist2.cells);
      SCM_SETCAR (new, scm_tc16_allocated);
      scm_cells_allocated += 2;
    }

  return new;
}
637
638 #endif /* GUILE_NEW_GC_SCHEME */
639 #endif /* GUILE_DEBUG_FREELIST */
640
641 \f
642
643 #ifdef GUILE_NEW_GC_SCHEME
644 static unsigned long
645 master_cells_allocated (scm_freelist_t *master)
646 {
647 int objects = master->clusters_allocated * (master->cluster_size - 1);
648 if (SCM_NULLP (master->clusters))
649 objects -= master->left_to_collect;
650 return master->span * objects;
651 }
652
653 static unsigned long
654 freelist_length (SCM freelist)
655 {
656 int n;
657 for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
658 ++n;
659 return n;
660 }
661
662 static unsigned long
663 compute_cells_allocated ()
664 {
665 return (scm_cells_allocated
666 + master_cells_allocated (&scm_master_freelist)
667 + master_cells_allocated (&scm_master_freelist2)
668 - scm_master_freelist.span * freelist_length (scm_freelist)
669 - scm_master_freelist2.span * freelist_length (scm_freelist2));
670 }
671 #endif
672
673 /* {Scheme Interface to GC}
674 */
675
/* Scheme procedure `gc-stats': build an alist of storage statistics.
 * The segment list is built with GC blocked, and rebuilt from scratch
 * if the segment count changes underneath us (the consing itself can
 * grow the heap). */
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
	    "Returns an association list of statistics about Guile's current use of storage.  ")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  SCM answer;

  SCM_DEFER_INTS;
  scm_block_gc = 1;
 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  /* Walk the table backwards so the resulting list is in segment order. */
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
				    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
			  heap_segs);
  /* Consing may have added a heap segment; if so, start over. */
  if (scm_n_heap_segs != n)
    goto retry;
  scm_block_gc = 0;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
#ifdef GUILE_NEW_GC_SCHEME
  local_scm_cells_allocated = compute_cells_allocated ();
#else
  local_scm_cells_allocated = scm_cells_allocated;
#endif
  local_scm_gc_time_taken = scm_gc_time_taken;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
			scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
			scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
			scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
			scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
			scm_cons (sym_heap_segments, heap_segs),
			SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
728
729
/* Begin a collection: record the start time and zero the per-GC
 * counters.  WHAT names the trigger (e.g. "cells", "call") but is
 * currently unused here beyond documentation. */
void
scm_gc_start (const char *what)
{
  /* Remember the start time; scm_gc_end converts this to elapsed time. */
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
  scm_gc_cells_collected = 0;
#ifdef GUILE_NEW_GC_SCHEME
  /* Snapshot of allocated cells before the sweep; the sweep's frees
   * against this baseline constitute the GC yield. */
  scm_gc_yield = (scm_cells_allocated
		  + master_cells_allocated (&scm_master_freelist)
		  + master_cells_allocated (&scm_master_freelist2));
#endif
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
743
744 void
745 scm_gc_end ()
746 {
747 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
748 scm_gc_time_taken += scm_gc_rt;
749 scm_system_async_mark (scm_gc_async);
750 }
751
752
/* Scheme procedure `object-address': expose the raw machine address of
 * OBJ as an unsigned integer.  Stable for the lifetime of OBJ because
 * this collector does not move objects. */
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
	    "Return an integer that for the lifetime of @var{obj} is uniquely\n"
	    "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) obj);
}
#undef FUNC_NAME
762
763
/* Scheme procedure `gc': run a full collection immediately, with
 * interrupts deferred for the duration. */
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
           (),
	    "Scans all of SCM objects and reclaims for further use those that are\n"
	    "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
776
777
778 \f
779 /* {C Interface For When GC is Triggered}
780 */
781
782 #ifdef GUILE_NEW_GC_SCHEME
783
/* Raise FREELIST's gc_trigger after a GC whose yield fell short of the
 * configured fraction of the heap, so that the next GC is predicted to
 * reclaim at least that fraction.  No-op for constant-trigger lists
 * (gc_trigger_fraction == 0). */
static void
adjust_gc_trigger (scm_freelist_t *freelist)
{
  /* GC trigger is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `gc_trigger_fraction' of total heap size.  Note, however, that
   * the absolute value of gc_trigger will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   *  by computing an average over multiple GC:s.)
   */
  if (freelist->gc_trigger_fraction)
    {
      /* Shortfall between the desired yield and what this GC achieved. */
      int delta = ((SCM_HEAP_SIZE * freelist->gc_trigger_fraction / 100)
		   - scm_gc_yield);
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
	       scm_cells_allocated,
	       delta);
#endif
      if (delta > 0)
	freelist->gc_trigger += delta;
    }
}
814
/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

/* Called when the cell freelist in *FREELIST is exhausted: take the
 * next cluster from MASTER, collecting or growing the heap first if no
 * clusters remain.  Returns the cluster's spine cell (freshly marked
 * allocated) and stores the rest of the cluster in *FREELIST.  Loops
 * because a GC can produce an empty cluster (spine only). */
SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
	{
	  /* Alternate between growing the heap and collecting, as
	   * requested by the previous pass. */
	  if (master->grow_heap_p)
	    {
	      master->grow_heap_p = 0;
	      alloc_some_heap (master);
	    }
	  else
	    {
	      scm_igc ("cells");
	      adjust_gc_trigger (master);
	    }
	}
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));
  --scm_ints_disabled;
  *freelist = SCM_CDR (cell);
  SCM_SETCAR (cell, scm_tc16_allocated);
  return cell;
}
849
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 *
 * Fix: the function was declared `void' yet ended with `return cell;',
 * which would not compile if this block were ever enabled; the return
 * type is now SCM. */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
865
866 #else /* GUILE_NEW_GC_SCHEME */
867
/* Old GC scheme: run a collection on behalf of an exhausted FREELIST,
 * then grow the heap if the GC did not recover enough cells (or none
 * at all).  Interrupts are re-deferred for the duration. */
void
scm_gc_for_alloc (scm_freelist_t *freelist)
{
  SCM_REDEFER_INTS;
  scm_igc ("cells");
#ifdef GUILE_DEBUG_FREELIST
  fprintf (stderr, "Collected: %d, min_yield: %d\n",
	   freelist->collected, MIN_GC_YIELD (freelist));
#endif
  /* Grow the heap when yield was too small or the list is still empty. */
  if ((freelist->collected < MIN_GC_YIELD (freelist))
      || SCM_IMP (freelist->cells))
    alloc_some_heap (freelist);
  SCM_REALLOW_INTS;
}
882
883
884 SCM
885 scm_gc_for_newcell (scm_freelist_t *freelist)
886 {
887 SCM fl;
888 scm_gc_for_alloc (freelist);
889 fl = freelist->cells;
890 freelist->cells = SCM_CDR (fl);
891 SCM_SETCAR (fl, scm_tc16_allocated);
892 return fl;
893 }
894
895 #endif /* GUILE_NEW_GC_SCHEME */
896
/* The internal entry point for a full garbage collection.  WHAT names
 * the reason for the GC (used only for diagnostics).  Phases, in
 * order: start timing, bail out if GC is blocked or not yet possible,
 * sanity-check bookkeeping, clear per-GC state, mark all roots (C
 * stack(s) and registers, protected objects, subr table, root object),
 * handle weak vectors and guardians, then sweep.  The phase order is
 * load-bearing -- do not reorder. */
void
scm_igc (const char *what)
{
  int j;

#ifdef DEBUGINFO
  /* NOTE(review): this treats scm_freelist/scm_freelist2 as SCM, which
     only matches their declarations under GUILE_NEW_GC_SCHEME --
     presumably DEBUGINFO is only ever used with that scheme; confirm. */
  fprintf (stderr,
	   SCM_NULLP (scm_freelist)
	   ? "*"
	   : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
#ifdef USE_THREADS
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  scm_gc_start (what);

  /* GC may be blocked, or the stack base may not be known yet (very
   * early in boot); either way we cannot collect now. */
  if (!scm_stack_base || scm_block_gc)
    {
      scm_gc_end ();
      return;
    }

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  /* Prevent heap expansion (and flag reentry) for the duration. */
  ++scm_gc_heap_lock;

  /* Weak vectors encountered during marking accumulate here. */
  scm_weak_vectors = SCM_EOL;

  scm_guardian_gc_init ();

  /* unprotect any struct types with no instances */
#if 0
  {
    SCM type_list;
    SCM * pos;

    pos = &scm_type_obj_list;
    type_list = scm_type_obj_list;
    while (type_list != SCM_EOL)
      if (SCM_VELTS (SCM_CAR (type_list))[scm_struct_i_refcnt])
	{
	  pos = SCM_CDRLOC (type_list);
	  type_list = SCM_CDR (type_list);
	}
      else
	{
	  *pos = SCM_CDR (type_list);
	  type_list = SCM_CDR (type_list);
	}
  }
#endif

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    /* Everything above the stack pointer is dead; clear it so the
     * marker below does not keep it alive. */
    while (x < bound)
      {
	elts[x] = SCM_BOOL_F;
	++x;
      }
  }

#ifndef USE_THREADS

  /* Protect from the C stack.  This must be the first marking
   * done because it provides information about what objects
   * are "in-use" by the C code.   "in-use" objects are those
   * for which the values from SCM_LENGTH and SCM_CHARS must remain
   * usable.   This requirement is stricter than a liveness
   * requirement -- in particular, it constrains the implementation
   * of scm_vector_set_length_x.
   */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
		      (   (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
				       sizeof scm_save_regs_gc_mark)
			  / sizeof (SCM_STACKITEM)));

  {
    /* stack_len is long rather than scm_sizet in order to guarantee that
       &stack_len is long aligned */
#ifdef SCM_STACK_GROWS_UP
#ifdef nosve
    long stack_len = (SCM_STACKITEM *) (&stack_len) - scm_stack_base;
#else
    long stack_len = scm_stack_size (scm_stack_base);
#endif
    scm_mark_locations (scm_stack_base, (scm_sizet) stack_len);
#else
#ifdef nosve
    long stack_len = scm_stack_base - (SCM_STACKITEM *) (&stack_len);
#else
    long stack_len = scm_stack_size (scm_stack_base);
#endif
    scm_mark_locations ((scm_stack_base - stack_len), (scm_sizet) stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* FIXME: insert a phase to un-protect string-data preserved
   * in scm_vector_set_length_x.
   */

  /* Mark the table of objects protected from GC (scm_sys_protects). */
  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  /* Weak-vector spines must stay alive even when their contents die. */
  scm_mark_weak_vector_spines ();

  scm_guardian_zombify ();

  scm_gc_sweep ();

  --scm_gc_heap_lock;
  scm_gc_end ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
}
1054
1055 \f
1056 /* {Mark/Sweep}
1057 */
1058
1059
1060
1061 /* Mark an object precisely.
1062 */
/* Mark the object P precisely: set the GC mark bit on P and on every
   object reachable from it.  Immediate values are ignored.  Tail
   references are followed by looping back to gc_mark_loop /
   gc_mark_nimp instead of recursing, so only non-tail references
   consume C stack.  */
void
scm_gc_mark (SCM p)
{
  register long i;
  register SCM ptr;

  ptr = p;

 gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

 gc_mark_nimp:
  /* A non-immediate that is not a cell pointer means heap corruption.  */
  if (SCM_NCELLP (ptr))
    scm_wta (ptr, "rogue pointer in heap", NULL);

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
        {
          ptr = SCM_CAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      /* Mark the extra word (cell word 2), then loop on the cdr.  */
      scm_gc_mark (SCM_CELL_WORD (ptr, 2));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      {
        SCM vcell;
        vcell = SCM_CAR (ptr) - 1L;  /* strip the gloc tag to reach the vcell */
        switch (SCM_UNPACK (SCM_CDR (vcell)))
          {
          default:
            /* An ordinary gloc: mark the variable cell, loop on the cdr.  */
            scm_gc_mark (vcell);
            ptr = SCM_GCCDR (ptr);
            goto gc_mark_loop;
          case 1:  /* ! */
          case 0:  /* ! */
            {
              /* A struct: mark fields according to the layout string
                 of its vtable.  */
              SCM layout;
              SCM * vtable_data;
              int len;
              char * fields_desc;
              register SCM * mem;
              register int x;

              vtable_data = (SCM *)vcell;
              layout = vtable_data[scm_vtable_index_layout];
              len = SCM_LENGTH (layout);
              fields_desc = SCM_CHARS (layout);
              /* We're using SCM_GCCDR here like STRUCT_DATA, except
                 that it removes the mark */
              mem = (SCM *)SCM_GCCDR (ptr);

              if (SCM_UNPACK (vtable_data[scm_struct_i_flags]) & SCM_STRUCTF_ENTITY)
                {
                  scm_gc_mark (mem[scm_struct_i_procedure]);
                  scm_gc_mark (mem[scm_struct_i_setter]);
                }
              if (len)
                {
                  /* Each field is described by two layout characters;
                     'p' marks a protected (SCM) field.  */
                  for (x = 0; x < len - 2; x += 2, ++mem)
                    if (fields_desc[x] == 'p')
                      scm_gc_mark (*mem);
                  if (fields_desc[x] == 'p')
                    {
                      int j;
                      if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                        /* Tail array of SCM values following this slot.  */
                        for (j = (long int) *mem; x; --x)
                          scm_gc_mark (*++mem);
                      else
                        scm_gc_mark (*mem);
                    }
                }
              if (!SCM_CDR (vcell))
                {
                  SCM_SETGCMARK (vcell);
                  ptr = vtable_data[scm_vtable_index_vtable];
                  goto gc_mark_loop;
                }
            }
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CLOSCAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
    case scm_tc7_lvector:
#ifdef CCLO
    case scm_tc7_cclo:
#endif
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      i = SCM_LENGTH (ptr);
      if (i == 0)
        break;
      /* Recurse on elements 1..n-1, then loop on element 0.  */
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          scm_gc_mark (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
    case scm_tc7_contin:
      if SCM_GC8MARKP (ptr)
        break;
      SCM_SETGC8MARK (ptr);
      if (SCM_VELTS (ptr))
        /* Conservatively scan the saved stack copy.  */
        scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
                            (scm_sizet)
                            (SCM_LENGTH (ptr) +
                             (sizeof (SCM_STACKITEM) + -1 +
                              sizeof (scm_contregs)) /
                             sizeof (SCM_STACKITEM)));
      break;
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      /* No SCM references inside: just mark the header cell.  */
      SCM_SETGC8MARK (ptr);
      break;

    case scm_tc7_substring:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      /* Keep the underlying string alive.  */
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      if (SCM_GC8MARKP(ptr))
        break;
      /* Chain this weak vector for the post-mark spine/sweep passes.  */
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      SCM_SETGC8MARK (ptr);
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    scm_gc_mark (SCM_CAR (kvpair));
                  if (!weak_values)
                    scm_gc_mark (SCM_GCCDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                scm_gc_mark (alist);
            }
        }
      break;

    case scm_tc7_msymbol:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
      ptr = SCM_SYMBOL_PROPS (ptr);
      goto gc_mark_loop;
    case scm_tc7_ssymbol:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      break;
    case scm_tcs_subrs:
      /* Statically allocated; nothing to mark.  */
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
        goto def;
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      if (SCM_PTAB_ENTRY(ptr))
        scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
      if (scm_ptobs[i].mark)
        {
          /* Delegate to the port type's mark function; loop on its result.  */
          ptr = (scm_ptobs[i].mark) (ptr);
          goto gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      switch (SCM_GCTYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_allocated:
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          /* These smobs contain no SCM references.  */
          break;
        default:
          i = SCM_SMOBNUM (ptr);
          if (!(i < scm_numsmob))
            goto def;
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
    def:
      scm_wta (ptr, "unknown type in ", "gc_mark");
    }
}
1354
1355
1356 /* Mark a Region Conservatively
1357 */
1358
/* Conservatively scan the N stack items starting at X: any word whose
   bit pattern could be a pointer into a valid heap segment is treated
   as a live SCM reference and marked via scm_gc_mark.  The containing
   segment is located with a binary search over the sorted
   scm_heap_table.  */
void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  register long m = n;
  register int i, j;
  register SCM_CELLPTR ptr;

  while (0 <= --m)
    if (SCM_CELLP (*(SCM **) (& x[m])))
      {
        ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & x[m]));
        i = 0;
        j = scm_n_heap_segs - 1;
        /* Quick rejection: outside the overall heap bounds.  */
        if (   SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
            && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
          {
            /* Binary search for the segment containing PTR; the
               breaks below abandon the search when PTR falls in a gap
               between segments.  */
            while (i <= j)
              {
                int seg_id;
                seg_id = -1;
                if (   (i == j)
                    || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                  seg_id = i;
                else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                  seg_id = j;
                else
                  {
                    int k;
                    k = (i + j) / 2;
                    if (k == i)
                      break;
                    if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                      {
                        j = k;
                        ++i;
                        if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                          continue;
                        else
                          break;
                      }
                    else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                      {
                        i = k;
                        --j;
                        if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                          continue;
                        else
                          break;
                      }
                  }
                /* Segment found: mark only if the segment's validity
                   predicate (if any) accepts PTR, and the pointer is
                   properly aligned for the segment's cell span.  */
                if (   !scm_heap_table[seg_id].valid
                    || scm_heap_table[seg_id].valid (ptr,
                                                     &scm_heap_table[seg_id]))
                  if (   scm_heap_table[seg_id].span == 1
                      || SCM_DOUBLE_CELLP (*(SCM **) (& x[m])))
                    scm_gc_mark (*(SCM *) & x[m]);
                break;
              }

          }
      }
}
1421
1422
1423 /* The following is a C predicate which determines if an SCM value can be
1424 regarded as a pointer to a cell on the heap. The code is duplicated
1425 from scm_mark_locations. */
1426
1427
/* Return 1 iff VALUE can be regarded as a pointer to a cell in some
   valid heap segment, else 0.  This is the same binary search as in
   scm_mark_locations (deliberately duplicated, per the comment above),
   but it only answers the question instead of marking.  */
int
scm_cellp (SCM value)
{
  register int i, j;
  register SCM_CELLPTR ptr;

  if SCM_CELLP (*(SCM **) (& value))
    {
      ptr = (SCM_CELLPTR) SCM2PTR ((*(SCM **) & value));
      i = 0;
      j = scm_n_heap_segs - 1;
      /* Quick rejection: outside the overall heap bounds.  */
      if (   SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
          && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
        {
          /* Binary search for the segment containing PTR.  */
          while (i <= j)
            {
              int seg_id;
              seg_id = -1;
              if (   (i == j)
                  || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                seg_id = i;
              else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                seg_id = j;
              else
                {
                  int k;
                  k = (i + j) / 2;
                  if (k == i)
                    break;
                  if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                    {
                      j = k;
                      ++i;
                      if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                        continue;
                      else
                        break;
                    }
                  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                    {
                      i = k;
                      --j;
                      if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                        continue;
                      else
                        break;
                    }
                }
              /* Segment found; accept if its validity predicate (if
                 any) accepts PTR.  */
              if (   !scm_heap_table[seg_id].valid
                  || scm_heap_table[seg_id].valid (ptr,
                                                   &scm_heap_table[seg_id]))
                return 1;
              break;
            }

        }
    }
  return 0;
}
1487
1488
1489 static void
1490 scm_mark_weak_vector_spines ()
1491 {
1492 SCM w;
1493
1494 for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
1495 {
1496 if (SCM_IS_WHVEC_ANY (w))
1497 {
1498 SCM *ptr;
1499 SCM obj;
1500 int j;
1501 int n;
1502
1503 obj = w;
1504 ptr = SCM_VELTS (w);
1505 n = SCM_LENGTH (w);
1506 for (j = 0; j < n; ++j)
1507 {
1508 SCM alist;
1509
1510 alist = ptr[j];
1511 while ( SCM_CONSP (alist)
1512 && !SCM_GCMARKP (alist)
1513 && SCM_CONSP (SCM_CAR (alist)))
1514 {
1515 SCM_SETGCMARK (alist);
1516 SCM_SETGCMARK (SCM_CAR (alist));
1517 alist = SCM_GCCDR (alist);
1518 }
1519 }
1520 }
1521 }
1522 }
1523
1524
1525 #ifdef GUILE_NEW_GC_SCHEME
1526 static void
1527 gc_sweep_freelist_start (scm_freelist_t *freelist)
1528 {
1529 freelist->cells = SCM_EOL;
1530 freelist->left_to_collect = freelist->cluster_size;
1531 freelist->clusters_allocated = 0;
1532 freelist->clusters = SCM_EOL;
1533 freelist->clustertail = &freelist->clusters;
1534 freelist->collected = 0;
1535 }
1536
/* Finish sweeping FREELIST: splice the trailing (possibly partial)
   cluster in freelist->cells onto the cluster chain, account for its
   cells, and decide whether the heap must grow before the next GC.  */
static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  /* Join the last cluster onto the chain.  */
  *freelist->clustertail = freelist->cells;
  if (SCM_NNULLP (freelist->cells))
    {
      /* Turn the head cell into a proper cluster spine: car points at
         the cell chain, cdr ends the cluster list.  */
      SCM c = freelist->cells;
      SCM_SETCAR (c, SCM_CDR (c));
      SCM_SETCDR (c, SCM_EOL);
      /* Count the cells gathered into this final, partial cluster.  */
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Grow the heap if this sweep recovered less than the trigger.  */
  freelist->grow_heap_p = (freelist->collected < freelist->gc_trigger);
}
1553 #endif
1554
/* Sweep the whole heap: walk every segment, free the malloced storage
   of unmarked objects, thread unmarked cells back onto the appropriate
   freelist (in clusters under GUILE_NEW_GC_SCHEME), clear the mark
   bits of survivors, then drop dead entries from weak vectors and
   update the global allocation statistics.  */
void
scm_gc_sweep ()
{
  register SCM_CELLPTR ptr;
#ifdef SCM_POINTERS_MUNGED
  register SCM scmptr;
#else
#undef scmptr
#define scmptr (SCM)ptr
#endif
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;  /* malloced bytes reclaimed during this sweep */
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

#ifdef GUILE_NEW_GC_SCHEME
  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);
#else
  /* Reset all free list pointers.  We'll reconstruct them completely
     while scanning.  */
  for (i = 0; i < scm_n_heap_segs; i++)
    scm_heap_table[i].freelist->cells = SCM_EOL;
#endif

  for (i = 0; i < scm_n_heap_segs; i++)
    {
#ifdef GUILE_NEW_GC_SCHEME
      register unsigned int left_to_collect;
#else
      register scm_sizet n = 0;
#endif
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist.  */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
#ifdef GUILE_NEW_GC_SCHEME
      left_to_collect = freelist->left_to_collect;
#endif
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
      for (j = seg_size + span; j -= span; ptr += span)
        {
#ifdef SCM_POINTERS_MUNGED
          scmptr = PTR2SCM (ptr);
#endif
          switch SCM_TYP7 (scmptr)
            {
            case scm_tcs_cons_gloc:
              if (SCM_GCMARKP (scmptr))
                {
                  /* Marked gloc/struct: reset the vcell's
                     "pending free" flag and keep the object.  */
                  if (SCM_CDR (SCM_CAR (scmptr) - 1) == (SCM)1)
                    SCM_SETCDR (SCM_CAR (scmptr) - 1, (SCM) 0);
                  goto cmrkcontinue;
                }
              {
                SCM vcell;
                vcell = SCM_CAR (scmptr) - 1L;

                if ((SCM_CDR (vcell) == 0) || (SCM_UNPACK (SCM_CDR (vcell)) == 1))
                  {
                    /* Dead struct: run its vtable's free function.  */
                    scm_struct_free_t free
                      = (scm_struct_free_t) ((SCM*) vcell)[scm_struct_i_free];
                    m += free ((SCM *) vcell, (SCM *) SCM_GCCDR (scmptr));
                  }
              }
              break;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              if (SCM_GCMARKP (scmptr))
                goto cmrkcontinue;
              break;
            case scm_tc7_wvect:
              if (SCM_GC8MARKP (scmptr))
                {
                  goto c8mrkcontinue;
                }
              else
                {
                  /* The element block is preceded by two bookkeeping
                     words; free from the true base.  */
                  m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
                  scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
                  break;
                }

            case scm_tc7_vector:
            case scm_tc7_lvector:
#ifdef CCLO
            case scm_tc7_cclo:
#endif
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;

              m += (SCM_LENGTH (scmptr) * sizeof (SCM));
            freechars:
              scm_must_free (SCM_CHARS (scmptr));
              /*	SCM_SETCHARS(scmptr, 0);*/
              break;
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
              goto freechars;
            case scm_tc7_byvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
              goto freechars;
            case scm_tc7_ivect:
            case scm_tc7_uvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
              goto freechars;
            case scm_tc7_svect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
              goto freechars;
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
              goto freechars;
#endif
            case scm_tc7_fvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
              goto freechars;
            case scm_tc7_dvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
              goto freechars;
            case scm_tc7_cvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
              goto freechars;
#endif
            case scm_tc7_substring:
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;
              /* Shares its characters with the parent string.  */
              break;
            case scm_tc7_string:
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) + 1;
              goto freechars;
            case scm_tc7_msymbol:
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;
              /* Characters and slot vector share one malloced block
                 starting at SCM_SLOTS.  */
              m += (  SCM_LENGTH (scmptr)
                    + 1
                    + sizeof (SCM) * ((SCM *)SCM_CHARS (scmptr) - SCM_SLOTS(scmptr)));
              scm_must_free ((char *)SCM_SLOTS (scmptr));
              break;
            case scm_tc7_contin:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
              if (SCM_VELTS (scmptr))
                goto freechars;
              /* fall through when there is no stack copy to free */
            case scm_tc7_ssymbol:
              if SCM_GC8MARKP(scmptr)
                goto c8mrkcontinue;
              break;
            case scm_tcs_subrs:
              /* Subrs are statically allocated; never swept.  */
              continue;
            case scm_tc7_port:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              if SCM_OPENP (scmptr)
                {
                  int k = SCM_PTOBNUM (scmptr);
                  if (!(k < scm_numptob))
                    goto sweeperr;
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free */
                  /* rather than ftobs[k].close.  .close */
                  /* is for explicit CLOSE-PORT by user */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_SETAND_CAR (scmptr, ~SCM_OPN);
                }
              break;
            case scm_tc7_smob:
              switch SCM_GCTYP16 (scmptr)
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  goto freechars;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  m += 2 * sizeof (double);
                  goto freechars;
                default:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;

                  {
                    /* Delegate to the smob type's free function.  */
                    int k;
                    k = SCM_SMOBNUM (scmptr);
                    if (!(k < scm_numsmob))
                      goto sweeperr;
                    m += (scm_smobs[k].free) ((SCM) scmptr);
                    break;
                  }
                }
              break;
            default:
            sweeperr:
              scm_wta (scmptr, "unknown type in ", "gc_sweep");
            }
#if 0
          if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
            exit (2);
#endif
#ifndef GUILE_NEW_GC_SCHEME
          n += span;
#else
          if (!--left_to_collect)
            {
              /* This cell completes a cluster: close it off and chain
                 it onto the freelist's cluster list.  */
              SCM_SETCAR (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
#endif
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object.  */
              SCM_SETCAR (scmptr, scm_tc_free_cell);
              SCM_SETCDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }

          continue;
        c8mrkcontinue:
          /* Survivor with an 8-bit mark: clear it for the next GC.  */
          SCM_CLRGC8MARK (scmptr);
          continue;
        cmrkcontinue:
          /* Survivor with a cons mark: clear it for the next GC.  */
          SCM_CLRGCMARK (scmptr);
        }
#ifdef GC_FREE_SEGMENTS
      if (n == seg_size)
        {
          /* The whole segment was free: return it to malloc and
             compact the segment table.  */
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--;  /* We need to scan the segment just moved.  */
        }
      else
#endif /* ifdef GC_FREE_SEGMENTS */
        {
          /* Update the real freelist pointer to point to the head of
             the list of free cells we've built for this segment. */
          freelist->cells = nfreelist;
#ifdef GUILE_NEW_GC_SCHEME
          freelist->left_to_collect = left_to_collect;
#endif
        }

#ifndef GUILE_NEW_GC_SCHEME
      freelist->collected += n;
#endif

#ifdef GUILE_DEBUG_FREELIST
#ifdef GUILE_NEW_GC_SCHEME
      scm_check_freelist (freelist == &scm_master_freelist
                          ? scm_freelist
                          : scm_freelist2);
#else
      scm_check_freelist (freelist);
#endif
      scm_map_free_list ();
#endif
    }

#ifdef GUILE_NEW_GC_SCHEME
  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
#endif

  /* Scan weak vectors. */
  {
    SCM *ptr, w;
    for (w = scm_weak_vectors; w != SCM_EOL; w = SCM_WVECT_GC_CHAIN (w))
      {
        if (!SCM_IS_WHVEC_ANY (w))
          {
            /* Plain weak vector: replace dead elements with #f.  */
            register long j, n;

            ptr = SCM_VELTS (w);
            n = SCM_LENGTH (w);
            for (j = 0; j < n; ++j)
              if (SCM_FREEP (ptr[j]))
                ptr[j] = SCM_BOOL_F;
          }
        else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
          {
            /* Weak hash table: unlink alist entries whose weak key or
               value has been collected.  */
            SCM obj = w;
            register long n = SCM_LENGTH (w);
            register long j;

            ptr = SCM_VELTS (w);

            for (j = 0; j < n; ++j)
              {
                SCM * fixup;
                SCM alist;
                int weak_keys;
                int weak_values;

                weak_keys = SCM_IS_WHVEC (obj) || SCM_IS_WHVEC_B (obj);
                weak_values = SCM_IS_WHVEC_V (obj) || SCM_IS_WHVEC_B (obj);

                /* FIXUP points at the cdr (or vector slot) that holds
                   the rest of the chain, so dead entries can be
                   spliced out in place.  */
                fixup = ptr + j;
                alist = *fixup;

                while (   SCM_CONSP (alist)
                       && SCM_CONSP (SCM_CAR (alist)))
                  {
                    SCM key;
                    SCM value;

                    key = SCM_CAAR (alist);
                    value = SCM_CDAR (alist);
                    if (   (weak_keys && SCM_FREEP (key))
                        || (weak_values && SCM_FREEP (value)))
                      {
                        *fixup = SCM_CDR (alist);
                      }
                    else
                      fixup = SCM_CDRLOC (alist);
                    alist = SCM_CDR (alist);
                  }
              }
          }
      }
  }
  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
#ifdef GUILE_NEW_GC_SCHEME
  scm_gc_yield -= scm_cells_allocated;
#endif
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
1948
1949
1950 \f
1951
1952 /* {Front end to malloc}
1953 *
1954 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1955 *
 * These functions provide services comparable to malloc, realloc, and
1957 * free. They are for allocating malloced parts of scheme objects.
1958 * The primary purpose of the front end is to impose calls to gc.
1959 */
1960
1961 /* scm_must_malloc
1962 * Return newly malloced storage or throw an error.
1963 *
1964 * The parameter WHAT is a string for error reporting.
1965 * If the threshold scm_mtrigger will be passed by this
1966 * allocation, or if the first call to malloc fails,
1967 * garbage collect -- on the presumption that some objects
1968 * using malloced storage may be collected.
1969 *
1970 * The limit scm_mtrigger may be raised by this allocation.
1971 */
void *
scm_must_malloc (scm_sizet size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  /* Fast path: still under the GC trigger, just allocate.  */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
          return ptr;
        }
    }

  /* Trigger would be passed, or malloc failed: collect and retry once.  */
  scm_igc (what);

  nm = scm_mallocated + size;
  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger (with hysteresis) so a legitimately grown
         heap does not provoke a GC on every allocation.  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
      return ptr;
    }

  /* Even after GC the allocation failed: signal an error.  */
  scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
  return 0; /* never reached */
}
2007
2008
2009 /* scm_must_realloc
2010 * is similar to scm_must_malloc.
2011 */
void *
scm_must_realloc (void *where,
                  scm_sizet old_size,
                  scm_sizet size,
                  const char *what)
{
  void *ptr;
  /* Account only for the net growth of the allocation.  */
  scm_sizet nm = scm_mallocated + size - old_size;

  /* Fast path: the resize keeps us under the GC trigger.  */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
          return ptr;
        }
    }

  /* Collect and retry once, as in scm_must_malloc.  */
  scm_igc (what);

  nm = scm_mallocated + size - old_size;
  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger (with hysteresis) so a legitimately grown
         heap does not provoke a GC on every allocation.  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
      return ptr;
    }

  /* Even after GC the reallocation failed: signal an error.  */
  scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
  return 0; /* never reached */
}
2050
2051 void
2052 scm_must_free (void *obj)
2053 {
2054 if (obj)
2055 free (obj);
2056 else
2057 scm_wta (SCM_INUM0, "already free", "");
2058 }
2059
2060 /* Announce that there has been some malloc done that will be freed
2061 * during gc. A typical use is for a smob that uses some malloced
2062 * memory but can not get it from scm_must_malloc (for whatever
2063 * reason). When a new object of this smob is created you call
2064 * scm_done_malloc with the size of the object. When your smob free
2065 * function is called, be sure to include this size in the return
2066 * value. */
2067
2068 void
2069 scm_done_malloc (long size)
2070 {
2071 scm_mallocated += size;
2072
2073 if (scm_mallocated > scm_mtrigger)
2074 {
2075 scm_igc ("foreign mallocs");
2076 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2077 {
2078 if (scm_mallocated > scm_mtrigger)
2079 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2080 else
2081 scm_mtrigger += scm_mtrigger / 2;
2082 }
2083 }
2084 }
2085
2086
2087 \f
2088
2089 /* {Heap Segments}
2090 *
2091 * Each heap segment is an array of objects of a particular size.
2092 * Every segment has an associated (possibly shared) freelist.
2093 * A table of segment records is kept that records the upper and
2094 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
2096 * into valid segments at reasonable offsets). */
2097
/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* Upper bound (in bytes) on the size of any single heap segment.  */
scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Table of all heap segment records (kept sorted by base address by
   init_heap_seg) and its current length.  */
scm_heap_seg_data_t * scm_heap_table = 0;
int scm_n_heap_segs = 0;
2114
2115 /* init_heap_seg
 * initializes a new heap segment and returns the number of objects it contains.
2117 *
2118 * The segment origin, segment size in bytes, and the span of objects
2119 * in cells are input parameters. The freelist is both input and output.
2120 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record.
2123 */
2124
2125
static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
#ifdef SCM_POINTERS_MUNGED
  register SCM scmptr;
#else
#undef scmptr
#define scmptr ptr
#endif
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  ptr = CELL_UP (seg_org, span);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = CELL_DN ((char *) seg_org + size, span);

  /* Find the right place and insert the segment record.
   *
   */
  for (new_seg_index = 0;
       (   (new_seg_index < scm_n_heap_segs)
        && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  /* Shift later records up one slot to keep the table sorted by base
     address (required by the binary searches in the mark phase).  */
  {
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].valid = 0;
  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = (SCM_CELLPTR)ptr;
  scm_heap_table[new_seg_index].bounds[1] = (SCM_CELLPTR)seg_end;


  /* Compute the least valid object pointer w/in this segment
   */
  ptr = CELL_UP (ptr, span);


  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

#ifdef GUILE_NEW_GC_SCHEME

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;
    int n_cluster_cells = span * freelist->cluster_size;

    while (n_new_cells > span) /* at least one spine + one freecell */
      {
        /* Determine end of cluster
         */
        if (n_new_cells >= n_cluster_cells)
          {
            seg_end = ptr + n_cluster_cells;
            n_new_cells -= n_cluster_cells;
          }
        else
          /* [cmm] looks like the segment size doesn't divide cleanly by
             cluster size.  bad cmm! */
          abort();

        /* Allocate cluster spine
         */
        *clusterp = PTR2SCM (ptr);
        SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
        clusterp = SCM_CDRLOC (*clusterp);
        ptr += span;

        /* Thread the remaining cells of the cluster into a chain of
           free cells.  */
        while (ptr < seg_end)
          {
#ifdef SCM_POINTERS_MUNGED
            scmptr = PTR2SCM (ptr);
#endif
            SCM_SETCAR (scmptr, scm_tc_free_cell);
            SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
            ptr += span;
          }

        SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
      }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#else /* GUILE_NEW_GC_SCHEME */

  /* Prepend objects in this segment to the freelist.
   */
  while (ptr < seg_end)
    {
#ifdef SCM_POINTERS_MUNGED
      scmptr = PTR2SCM (ptr);
#endif
      SCM_SETCAR (scmptr, (SCM) scm_tc_free_cell);
      SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
      ptr += span;
    }

  ptr -= span;

  /* Patch up the last freelist pointer in the segment
   * to join it to the input freelist.
   */
  SCM_SETCDR (PTR2SCM (ptr), freelist->cells);
  freelist->cells = PTR2SCM (CELL_UP (seg_org, span));

  freelist->heap_size += n_new_cells;

#endif /* GUILE_NEW_GC_SCHEME */

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
#ifdef scmptr
#undef scmptr
#endif
}
2267
2268 #ifndef GUILE_NEW_GC_SCHEME
2269 #define round_to_cluster_size(freelist, len) len
2270 #else
2271
2272 static scm_sizet
2273 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2274 {
2275 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2276
2277 return
2278 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2279 + ALIGNMENT_SLACK (freelist);
2280 }
2281
2282 #endif
2283
/* Allocate one additional heap segment to serve FREELIST, growing the
   segment table first.  Signals an error via scm_wta if the table
   cannot grow or no acceptable segment can be malloced.  */
static void
alloc_some_heap (scm_freelist_t *freelist)
{
  scm_heap_seg_data_t * tmptable;
  SCM_CELLPTR ptr;
  long len;

  /* Critical code sections (such as the garbage collector)
   * aren't supposed to add heap segments.
   */
  if (scm_gc_heap_lock)
    scm_wta (SCM_UNDEFINED, "need larger initial", "heap");

  /* Expand the heap tables to have room for the new segment.
   * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
   * only if the allocation of the segment itself succeeds.
   */
  len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);

  SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
                           realloc ((char *)scm_heap_table, len)));
  if (!tmptable)
    scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
  else
    scm_heap_table = tmptable;


  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
#ifdef GUILE_NEW_GC_SCHEME
  {
    /* Assure that the new segment is predicted to be large enough for
     * the new trigger.
     */
    int slack = freelist->gc_trigger - freelist->collected;
    int min_cells = 100 * slack / (99 - freelist->gc_trigger_fraction);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%d < %d)", len, min_cells);
#endif
    if (len < min_cells)
      len = min_cells + 1;
    len *= sizeof (scm_cell);  /* convert from cells to bytes */
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;
#else
  if (scm_expmem)
    {
      len = (scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell));
      /* Guard against overflow of the scm_sizet computation.  */
      if ((scm_sizet) SCM_EXPHEAP (freelist->heap_size * sizeof (scm_cell))
          != len)
        len = 0;
    }
  else
    len = SCM_HEAP_SEG_SIZE;
#endif /* GUILE_NEW_GC_SCHEME */

  {
    scm_sizet smallest;

#ifndef GUILE_NEW_GC_SCHEME
    smallest = (freelist->span * sizeof (scm_cell));
#else
    smallest = CLUSTER_SIZE_IN_BYTES (freelist);
#endif

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition. */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
           && (len >= smallest))
      {
        scm_sizet rounded_len = round_to_cluster_size(freelist, len);
        SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
        if (ptr)
          {
            init_heap_seg (ptr, rounded_len, freelist);
            return;
          }
        len /= 2;  /* halve the request and try again */
      }
  }

  scm_wta (SCM_UNDEFINED, "could not grow", "heap");
}
2374
2375
2376
/* Scheme procedure (unhash-name NAME): walk every cell of every heap
   segment and rewrite matching cells' car back to NAME, undoing the
   "hashed" fast-variable-lookup representation.  Passing #t for NAME
   unhashes all such cells.  Interrupts are deferred for the whole
   heap walk. */
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
	    "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  /* Iterate over all currently allocated heap segments. */
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      /* bounds[0]/bounds[1] delimit the segment's cell range. */
      p = (SCM_CELLPTR)scm_heap_table[x].bounds[0];
      pbound = (SCM_CELLPTR)scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM incar;
	  incar = p->car;
	  /* Low-three-bits tag value 1 selects the cell kind being
	     searched for -- NOTE(review): presumably the gloc tag;
	     confirm against the tc3 tag definitions in tags.h. */
	  if (1 == (7 & (int)incar))
	    {
	      /* Strip the tag bit to recover the underlying pair. */
	      --incar;
	      /* Match either everything (NAME == #t) or this specific
		 symbol; the cdr checks skip cells whose pointer part
		 is 0 or 1 -- NOTE(review): confirm what those sentinel
		 cdr values denote. */
	      if ( ((name == SCM_BOOL_T) || (SCM_CAR (incar) == name))
		   && (SCM_CDR (incar) != 0)
		   && (SCM_UNPACK (SCM_CDR (incar)) != 1))
		{
		  p->car = name;
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
2414
2415
2416 \f
2417 /* {GC Protection Helper Functions}
2418 */
2419
2420
/* Deliberately empty.  Calling this function with the address of an
   SCM variable forces that variable to exist in memory at the call
   site, keeping the object visible to the conservative GC -- the call
   itself is the protection; no body is needed. */
void
scm_remember (SCM *ptr)
{ /* intentionally empty */ }
2424
2425
2426 /*
2427 These crazy functions prevent garbage collection
2428 of arguments after the first argument by
2429 ensuring they remain live throughout the
2430 function because they are used in the last
2431 line of the code block.
2432 It'd be better to have a nice compiler hint to
2433 aid the conservative stack-scanning GC. --03/09/00 gjb */
2434 SCM
2435 scm_return_first (SCM elt, ...)
2436 {
2437 return elt;
2438 }
2439
/* Integer counterpart of scm_return_first: hand back I untouched.
   The ignored varargs exist only to keep their values live at the
   caller's final statement for the conservative GC. */
int
scm_return_first_int (int i, ...)
{
  int first = i;
  return first;
}
2445
2446
/* Permanently protect OBJ from garbage collection by consing it onto
   the GC-visible scm_permobjs list.  There is no way to undo this
   (contrast scm_protect_object/scm_unprotect_object).  Interrupts are
   re-deferred around the cons so the list update is not interleaved
   with async handlers.  Returns OBJ. */
SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2455
2456
2457 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2458 even if all other references are dropped, until someone applies
2459 scm_unprotect_object to it. This function returns OBJ.
2460
2461 Calls to scm_protect_object nest. For every object OBJ, there is a
2462 counter which scm_protect_object(OBJ) increments and
2463 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
2464 an object's counter is greater than zero, the garbage collector
2465 will not free it.
2466
2467 Of course, that's not how it's implemented. scm_protect_object and
2468 scm_unprotect_object just maintain a list of references to things.
2469 Since the GC knows about this list, all objects it mentions stay
2470 alive. scm_protect_object adds its argument to the list;
   scm_unprotect_object removes the first occurrence of its argument
   from the list. */
SCM
scm_protect_object (SCM obj)
{
  /* One cons cell is pushed per call, so protections nest: each call
     must be balanced by one scm_unprotect_object.  The GC scans
     scm_protects as a root, which is what keeps OBJ alive. */
  scm_protects = scm_cons (obj, scm_protects);

  return obj;
}
2480
2481
2482 /* Remove any protection for OBJ established by a prior call to
2483 scm_protect_object. This function returns OBJ.
2484
2485 See scm_protect_object for more information. */
2486 SCM
2487 scm_unprotect_object (SCM obj)
2488 {
2489 SCM *tail_ptr = &scm_protects;
2490
2491 while (SCM_CONSP (*tail_ptr))
2492 if (SCM_CAR (*tail_ptr) == obj)
2493 {
2494 *tail_ptr = SCM_CDR (*tail_ptr);
2495 break;
2496 }
2497 else
2498 tail_ptr = SCM_CDRLOC (*tail_ptr);
2499
2500 return obj;
2501 }
2502
/* Set to 1 once process termination has begun; presumably consulted
   elsewhere to suppress work during shutdown -- readers are not
   visible in this part of the file. */
int terminating;

/* called on process termination.  The signature depends on which exit
   hook the platform provides: atexit takes a void (void) handler,
   on_exit passes the exit status and a user argument (both ignored
   here). */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  /* Make sure buffered port output reaches its destination before
     the process disappears. */
  scm_flush_all_ports ();
}
2523
2524 \f
/* Allocate and install the first heap segment for FREELIST.  Tries a
   segment of INIT_HEAP_SIZE (rounded to a whole number of clusters)
   first, falling back to the default SCM_HEAP_SEG_SIZE.  Returns 0 on
   success, 1 if neither allocation succeeded.
   NOTE(review): malloc's result is handed to init_heap_seg unchecked;
   presumably init_heap_seg rejects a NULL base and returns 0 --
   confirm, and confirm a failed first attempt cannot leak its
   allocation. */
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      /* Requested size failed; retry with the compiled-in default.
	 scm_expmem deliberately stays 0 on this path (it is only set
	 when the requested size worked). */
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    scm_expmem = 1;

#ifdef GUILE_NEW_GC_SCHEME
  /* Derive the absolute GC trigger from the configured percentage of
     the actual heap size, then record whether the heap is still below
     that trigger (i.e. should grow before collecting). */
  if (freelist->gc_trigger_fraction)
    freelist->gc_trigger = (freelist->heap_size * freelist->gc_trigger_fraction
			    / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->gc_trigger);
#endif

  return 0;
}
2551
2552 \f
2553 #ifdef GUILE_NEW_GC_SCHEME
2554 static void
2555 init_freelist (scm_freelist_t *freelist,
2556 int span,
2557 int cluster_size,
2558 int gc_trigger)
2559 {
2560 freelist->clusters = SCM_EOL;
2561 freelist->cluster_size = cluster_size + 1;
2562 freelist->left_to_collect = 0;
2563 freelist->clusters_allocated = 0;
2564 if (gc_trigger < 0)
2565 freelist->gc_trigger_fraction = - gc_trigger;
2566 else
2567 {
2568 freelist->gc_trigger = gc_trigger;
2569 freelist->gc_trigger_fraction = 0;
2570 }
2571 freelist->span = span;
2572 freelist->collected = 0;
2573 freelist->heap_size = 0;
2574 }
2575
2576 int
2577 scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2578 scm_sizet init_heap_size_2, int gc_trigger_2,
2579 scm_sizet max_segment_size)
2580 #else
2581 int
2582 scm_init_storage (scm_sizet init_heap_size_1, scm_sizet init_heap_size_2)
2583 #endif
2584 {
2585 scm_sizet j;
2586
2587 if (!init_heap_size_1)
2588 init_heap_size_1 = SCM_INIT_HEAP_SIZE_1;
2589 if (!init_heap_size_2)
2590 init_heap_size_2 = SCM_INIT_HEAP_SIZE_2;
2591
2592 j = SCM_NUM_PROTECTS;
2593 while (j)
2594 scm_sys_protects[--j] = SCM_BOOL_F;
2595 scm_block_gc = 1;
2596
2597 #ifdef GUILE_NEW_GC_SCHEME
2598 scm_freelist = SCM_EOL;
2599 scm_freelist2 = SCM_EOL;
2600 init_freelist (&scm_master_freelist,
2601 1, SCM_CLUSTER_SIZE_1,
2602 gc_trigger_1 ? gc_trigger_1 : SCM_GC_TRIGGER_1);
2603 init_freelist (&scm_master_freelist2,
2604 2, SCM_CLUSTER_SIZE_2,
2605 gc_trigger_2 ? gc_trigger_2 : SCM_GC_TRIGGER_2);
2606 scm_max_segment_size
2607 = max_segment_size ? max_segment_size : SCM_MAX_SEGMENT_SIZE;
2608 #else
2609 scm_freelist.cells = SCM_EOL;
2610 scm_freelist.span = 1;
2611 scm_freelist.collected = 0;
2612 scm_freelist.heap_size = 0;
2613
2614 scm_freelist2.cells = SCM_EOL;
2615 scm_freelist2.span = 2;
2616 scm_freelist2.collected = 0;
2617 scm_freelist2.heap_size = 0;
2618 #endif
2619
2620 scm_expmem = 0;
2621
2622 j = SCM_HEAP_SEG_SIZE;
2623 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2624 scm_heap_table = ((scm_heap_seg_data_t *)
2625 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
2626
2627 #ifdef GUILE_NEW_GC_SCHEME
2628 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2629 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2630 return 1;
2631 #else
2632 if (make_initial_segment (init_heap_size_1, &scm_freelist) ||
2633 make_initial_segment (init_heap_size_2, &scm_freelist2))
2634 return 1;
2635 #endif
2636
2637 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2638
2639 /* scm_hplims[0] can change. do not remove scm_heap_org */
2640 scm_weak_vectors = SCM_EOL;
2641
2642 /* Initialise the list of ports. */
2643 scm_port_table = (scm_port **)
2644 malloc (sizeof (scm_port *) * scm_port_table_room);
2645 if (!scm_port_table)
2646 return 1;
2647
2648 #ifdef HAVE_ATEXIT
2649 atexit (cleanup);
2650 #else
2651 #ifdef HAVE_ON_EXIT
2652 on_exit (cleanup, 0);
2653 #endif
2654 #endif
2655
2656 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2657 SCM_SETCDR (scm_undefineds, scm_undefineds);
2658
2659 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2660 scm_nullstr = scm_makstr (0L, 0);
2661 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
2662 scm_symhash = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2663 scm_weak_symhash = scm_make_weak_key_hash_table ((SCM) SCM_MAKINUM (scm_symhash_dim));
2664 scm_symhash_vars = scm_make_vector ((SCM) SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2665 scm_stand_in_procs = SCM_EOL;
2666 scm_permobjs = SCM_EOL;
2667 scm_protects = SCM_EOL;
2668 scm_asyncs = SCM_EOL;
2669 scm_sysintern ("most-positive-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2670 scm_sysintern ("most-negative-fixnum", (SCM) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
2671 #ifdef SCM_BIGDIG
2672 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2673 #endif
2674 return 0;
2675 }
2676 \f
2677
/* Register this file's Scheme bindings.  "gc.x" is generated at build
   time (presumably by the guile-snarf tool) from the SCM_DEFINE forms
   above and expands to the corresponding scm_make_gsubr-style
   registration calls. */
void
scm_init_gc ()
{
#include "gc.x"
}
2683
2684 /*
2685 Local Variables:
2686 c-file-style: "gnu"
2687 End:
2688 */