* Removed unused identifier MIN_GC_YIELD.
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 \f
48 #include <stdio.h>
49 #include "libguile/_scm.h"
50 #include "libguile/eval.h"
51 #include "libguile/stime.h"
52 #include "libguile/stackchk.h"
53 #include "libguile/struct.h"
54 #include "libguile/smob.h"
55 #include "libguile/unif.h"
56 #include "libguile/async.h"
57 #include "libguile/ports.h"
58 #include "libguile/root.h"
59 #include "libguile/strings.h"
60 #include "libguile/vectors.h"
61 #include "libguile/weaks.h"
62 #include "libguile/hashtab.h"
63
64 #include "libguile/validate.h"
65 #include "libguile/gc.h"
66
67 #ifdef GUILE_DEBUG_MALLOC
68 #include "libguile/debug-malloc.h"
69 #endif
70
71 #ifdef HAVE_MALLOC_H
72 #include <malloc.h>
73 #endif
74
75 #ifdef HAVE_UNISTD_H
76 #include <unistd.h>
77 #endif
78
79 #ifdef __STDC__
80 #include <stdarg.h>
81 #define var_start(x, y) va_start(x, y)
82 #else
83 #include <varargs.h>
84 #define var_start(x, y) va_start(x)
85 #endif
86
87 \f
88 /* {heap tuning parameters}
89 *
90 * These are parameters for controlling memory allocation. The heap
 91  * is the area out of which cons pairs (via scm_cons) and object headers are allocated.
92 *
93 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
94 * 64 bit machine. The units of the _SIZE parameters are bytes.
95 * Cons pairs and object headers occupy one heap cell.
96 *
97 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
98 * allocated initially the heap will grow by half its current size
99 * each subsequent time more heap is needed.
100 *
101 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
102 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
103 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
104 * is in scm_init_storage() and alloc_some_heap() in sys.c
105 *
106 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
107 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
108 *
109 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
110 * is needed.
111 *
112 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
113 * trigger a GC.
114 *
115 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
116 * reclaimed by a GC triggered by must_malloc. If less than this is
117 * reclaimed, the trigger threshold is raised. [I don't know what a
118 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
119  * work around an oscillation that caused almost constant GC.]
120 */
121
122 /*
123 * Heap size 45000 and 40% min yield gives quick startup and no extra
124  * heap allocation.  Higher min yield values may lead to large
125  * heaps, especially if the code varies its maximum consumption
126  * between the different freelists.
127 */
128 int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell));
129 int scm_default_min_yield_1 = 40;
130 #define SCM_CLUSTER_SIZE_1 2000L
131
132 int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell));
133 /* The following value may seem large, but note that if we get to GC at
134 * all, this means that we have a numerically intensive application
135 */
136 int scm_default_min_yield_2 = 40;
137 #define SCM_CLUSTER_SIZE_2 1000L
138
139 int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
140
141 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
142 #ifdef _QC
143 # define SCM_HEAP_SEG_SIZE 32768L
144 #else
145 # ifdef sequent
146 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
147 # else
148 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
149 # endif
150 #endif
151 /* Make the heap grow by a factor of 1.5 */
152 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
153 #define SCM_INIT_MALLOC_LIMIT 100000
154 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
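/* Editorial worked example of the sizing rules above (illustrative
 * numbers only): on a 32-bit machine sizeof (scm_cell) is 8, so the
 * default initial 1-cell heap of 45000 cells is 360000 bytes.  Each
 * time more heap is needed, SCM_EXPHEAP returns half the current heap
 * size, so successive requests grow the heap geometrically:
 *
 *   360000 + SCM_EXPHEAP (360000) = 540000
 *   540000 + SCM_EXPHEAP (540000) = 810000      (factor 1.5 per step)
 *
 * with each single segment capped at scm_default_max_segment_size.
 * On the malloc side, collection first triggers at
 * SCM_INIT_MALLOC_LIMIT (100000) bytes, and a GC must reclaim at
 * least SCM_MTRIGGER_HYSTERESIS (10000) bytes or the trigger is
 * raised (see scm_must_malloc below).
 */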
155
156 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
157 bounds for allocated storage */
158
159 #ifdef PROT386
160 /* in 386 protected mode we must only adjust the offset */
161 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
162 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
163 #else
164 # ifdef _UNICOS
165 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
166 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
167 # else
168 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
169 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
170 # endif /* UNICOS */
171 #endif /* PROT386 */
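/* Editorial example of the generic (non-PROT386, non-_UNICOS) case,
 * assuming sizeof (scm_cell) == 8: for span 1 the mask is ~7L, so
 *
 *   CELL_UP ((void *) 0x1003, 1) == (SCM_CELLPTR) 0x1008
 *   CELL_DN ((void *) 0x1003, 1) == (SCM_CELLPTR) 0x1000
 *
 * while for span 2 (double cells) addresses are rounded to 16-byte
 * boundaries instead.
 */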
172 #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
173 #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
174 #define SCM_HEAP_SIZE \
175 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
176 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
177
178
179 \f
180 /* scm_freelists
181 */
182
183 typedef struct scm_freelist_t {
184 /* collected cells */
185 SCM cells;
186 /* number of cells left to collect before cluster is full */
187 unsigned int left_to_collect;
188 /* number of clusters which have been allocated */
189 unsigned int clusters_allocated;
190 /* a list of freelists, each of size cluster_size,
191 * except the last one which may be shorter
192 */
193 SCM clusters;
194 SCM *clustertail;
195 /* this is the number of objects in each cluster, including the spine cell */
196 int cluster_size;
197   /* indicates that we should grow the heap instead of collecting
198 */
199 int grow_heap_p;
200 /* minimum yield on this list in order not to grow the heap
201 */
202 long min_yield;
203 /* defines min_yield as percent of total heap size
204 */
205 int min_yield_fraction;
206 /* number of cells per object on this list */
207 int span;
208 /* number of collected cells during last GC */
209 long collected;
210 /* number of collected cells during penultimate GC */
211 long collected_1;
212 /* total number of cells in heap segments
213 * belonging to this list.
214 */
215 long heap_size;
216 } scm_freelist_t;
217
218 SCM scm_freelist = SCM_EOL;
219 scm_freelist_t scm_master_freelist = {
220 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
221 };
222 SCM scm_freelist2 = SCM_EOL;
223 scm_freelist_t scm_master_freelist2 = {
224 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
225 };
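/* Editorial sketch: the positional initializers above, rewritten with
 * C99 designated initializers purely to show which field is which
 * (this file predates C99, so this is illustration only; omitted
 * members, including heap_size, are zero).
 */
#if 0
scm_freelist_t scm_master_freelist = {
  .cells = SCM_EOL,
  .clusters = SCM_EOL,
  .cluster_size = SCM_CLUSTER_SIZE_1,
  .span = 1                             /* 1-cell objects */
};
scm_freelist_t scm_master_freelist2 = {
  .cells = SCM_EOL,
  .clusters = SCM_EOL,
  .cluster_size = SCM_CLUSTER_SIZE_2,
  .span = 2                             /* 2-cell (double) objects */
};
#endif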
226
227 /* scm_mtrigger
228 * is the number of bytes of must_malloc allocation needed to trigger gc.
229 */
230 unsigned long scm_mtrigger;
231
232
233 /* scm_gc_heap_lock
234 * If set, don't expand the heap. Set only during gc, during which no allocation
235 * is supposed to take place anyway.
236 */
237 int scm_gc_heap_lock = 0;
238
239 /* GC Blocking
240 * Don't pause for collection if this is set -- just
241 * expand the heap.
242 */
243 int scm_block_gc = 1;
244
245 /* During collection, this accumulates objects holding
246 * weak references.
247 */
248 SCM scm_weak_vectors;
249
250 /* GC Statistics Keeping
251 */
252 unsigned long scm_cells_allocated = 0;
253 long scm_mallocated = 0;
254 unsigned long scm_gc_cells_collected;
255 unsigned long scm_gc_yield;
256 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
257 unsigned long scm_gc_malloc_collected;
258 unsigned long scm_gc_ports_collected;
259 unsigned long scm_gc_rt;
260 unsigned long scm_gc_time_taken = 0;
261
262 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
263 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
264 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
265 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
266 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
267 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
268
269 typedef struct scm_heap_seg_data_t
270 {
271 /* lower and upper bounds of the segment */
272 SCM_CELLPTR bounds[2];
273
274 /* address of the head-of-freelist pointer for this segment's cells.
275 All segments usually point to the same one, scm_freelist. */
276 scm_freelist_t *freelist;
277
278 /* number of cells per object in this segment */
279 int span;
280 } scm_heap_seg_data_t;
281
282
283
284 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
285 static void alloc_some_heap (scm_freelist_t *);
286
287
288 \f
289 /* Debugging functions. */
290
291 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
292
293 /* Return the number of the heap segment containing CELL. */
294 static int
295 which_seg (SCM cell)
296 {
297 int i;
298
299 for (i = 0; i < scm_n_heap_segs; i++)
300 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
301 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
302 return i;
303 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
304 SCM_UNPACK (cell));
305 abort ();
306 }
307
308
309 static void
310 map_free_list (scm_freelist_t *master, SCM freelist)
311 {
312 int last_seg = -1, count = 0;
313 SCM f;
314
315 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
316 {
317 int this_seg = which_seg (f);
318
319 if (this_seg != last_seg)
320 {
321 if (last_seg != -1)
322 fprintf (stderr, " %5d %d-cells in segment %d\n",
323 count, master->span, last_seg);
324 last_seg = this_seg;
325 count = 0;
326 }
327 count++;
328 }
329 if (last_seg != -1)
330 fprintf (stderr, " %5d %d-cells in segment %d\n",
331 count, master->span, last_seg);
332 }
333
334 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
335 (),
336 "Print debugging information about the free-list.\n"
337 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
338 #define FUNC_NAME s_scm_map_free_list
339 {
340 int i;
341 fprintf (stderr, "%d segments total (%d:%d",
342 scm_n_heap_segs,
343 scm_heap_table[0].span,
344 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
345 for (i = 1; i < scm_n_heap_segs; i++)
346 fprintf (stderr, ", %d:%d",
347 scm_heap_table[i].span,
348 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
349 fprintf (stderr, ")\n");
350 map_free_list (&scm_master_freelist, scm_freelist);
351 map_free_list (&scm_master_freelist2, scm_freelist2);
352 fflush (stderr);
353
354 return SCM_UNSPECIFIED;
355 }
356 #undef FUNC_NAME
357
358 static int last_cluster;
359 static int last_size;
360
361 static int
362 free_list_length (char *title, int i, SCM freelist)
363 {
364 SCM ls;
365 int n = 0;
366 for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
367 if (SCM_CELL_TYPE (ls) == scm_tc_free_cell)
368 ++n;
369 else
370 {
371 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
372 abort ();
373 }
374 if (n != last_size)
375 {
376 if (i > 0)
377 {
378 if (last_cluster == i - 1)
379 fprintf (stderr, "\t%d\n", last_size);
380 else
381 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
382 }
383 if (i >= 0)
384 fprintf (stderr, "%s %d", title, i);
385 else
386 fprintf (stderr, "%s\t%d\n", title, n);
387 last_cluster = i;
388 last_size = n;
389 }
390 return n;
391 }
392
393 static void
394 free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
395 {
396 SCM clusters;
397 int i = 0, len, n = 0;
398 fprintf (stderr, "%s\n\n", title);
399 n += free_list_length ("free list", -1, freelist);
400 for (clusters = master->clusters;
401 SCM_NNULLP (clusters);
402 clusters = SCM_CDR (clusters))
403 {
404 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
405 n += len;
406 }
407 if (last_cluster == i - 1)
408 fprintf (stderr, "\t%d\n", last_size);
409 else
410 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
411 fprintf (stderr, "\ntotal %d objects\n\n", n);
412 }
413
414 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
415 (),
416 "Print debugging information about the free-list.\n"
417 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
418 #define FUNC_NAME s_scm_free_list_length
419 {
420 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
421 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
422 return SCM_UNSPECIFIED;
423 }
424 #undef FUNC_NAME
425
426 #endif
427
428 #ifdef GUILE_DEBUG_FREELIST
429
430 /* Number of calls to SCM_NEWCELL since startup. */
431 static unsigned long scm_newcell_count;
432 static unsigned long scm_newcell2_count;
433
434 /* Search freelist for anything that isn't marked as a free cell.
435 Abort if we find something. */
436 static void
437 scm_check_freelist (SCM freelist)
438 {
439 SCM f;
440 int i = 0;
441
442 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
443 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
444 {
445 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
446 scm_newcell_count, i);
447 fflush (stderr);
448 abort ();
449 }
450 }
451
452 static int scm_debug_check_freelist = 0;
453
454 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
455 (SCM flag),
456 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
457 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
458 "compile-time flag was selected.\n")
459 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
460 {
461 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
462 return SCM_UNSPECIFIED;
463 }
464 #undef FUNC_NAME
465
466
467 SCM
468 scm_debug_newcell (void)
469 {
470 SCM new;
471
472 scm_newcell_count++;
473 if (scm_debug_check_freelist)
474 {
475 scm_check_freelist (scm_freelist);
476 scm_gc();
477 }
478
479 /* The rest of this is supposed to be identical to the SCM_NEWCELL
480 macro. */
481 if (SCM_IMP (scm_freelist))
482 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
483 else
484 {
485 new = scm_freelist;
486 scm_freelist = SCM_CDR (scm_freelist);
487 SCM_SETCAR (new, scm_tc16_allocated);
488 }
489
490 return new;
491 }
492
493 SCM
494 scm_debug_newcell2 (void)
495 {
496 SCM new;
497
498 scm_newcell2_count++;
499 if (scm_debug_check_freelist)
500 {
501 scm_check_freelist (scm_freelist2);
502 scm_gc ();
503 }
504
505 /* The rest of this is supposed to be identical to the SCM_NEWCELL
506 macro. */
507 if (SCM_IMP (scm_freelist2))
508 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
509 else
510 {
511 new = scm_freelist2;
512 scm_freelist2 = SCM_CDR (scm_freelist2);
513 SCM_SETCAR (new, scm_tc16_allocated);
514 }
515
516 return new;
517 }
518
519 #endif /* GUILE_DEBUG_FREELIST */
520
521 \f
522
523 static unsigned long
524 master_cells_allocated (scm_freelist_t *master)
525 {
526 int objects = master->clusters_allocated * (master->cluster_size - 1);
527 if (SCM_NULLP (master->clusters))
528 objects -= master->left_to_collect;
529 return master->span * objects;
530 }
531
532 static unsigned long
533 freelist_length (SCM freelist)
534 {
535 int n;
536 for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
537 ++n;
538 return n;
539 }
540
541 static unsigned long
542 compute_cells_allocated ()
543 {
544 return (scm_cells_allocated
545 + master_cells_allocated (&scm_master_freelist)
546 + master_cells_allocated (&scm_master_freelist2)
547 - scm_master_freelist.span * freelist_length (scm_freelist)
548 - scm_master_freelist2.span * freelist_length (scm_freelist2));
549 }
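/* Editorial worked example of the accounting above (illustrative
 * numbers): with span 1 and cluster_size 2000, every allocated
 * cluster contributes cluster_size - 1 = 1999 usable cells (the spine
 * cell is excluded), so after three clusters master_cells_allocated
 * reports 5997.  If 150 of those cells still sit unconsumed on
 * scm_freelist, the freelist_length term subtracts them again,
 * leaving 5847 cells counted as actually allocated from that master.
 */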
550
551 /* {Scheme Interface to GC}
552 */
553
554 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
555 (),
556 "Returns an association list of statistics about Guile's current use of storage. ")
557 #define FUNC_NAME s_scm_gc_stats
558 {
559 int i;
560 int n;
561 SCM heap_segs;
562 long int local_scm_mtrigger;
563 long int local_scm_mallocated;
564 long int local_scm_heap_size;
565 long int local_scm_cells_allocated;
566 long int local_scm_gc_time_taken;
567 SCM answer;
568
569 SCM_DEFER_INTS;
570
571 ++scm_block_gc;
572
573 retry:
574 heap_segs = SCM_EOL;
575 n = scm_n_heap_segs;
576 for (i = scm_n_heap_segs; i--; )
577 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
578 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
579 heap_segs);
580 if (scm_n_heap_segs != n)
581 goto retry;
582
583 --scm_block_gc;
584
585 /* Below, we cons to produce the resulting list. We want a snapshot of
586 * the heap situation before consing.
587 */
588 local_scm_mtrigger = scm_mtrigger;
589 local_scm_mallocated = scm_mallocated;
590 local_scm_heap_size = SCM_HEAP_SIZE;
591 local_scm_cells_allocated = compute_cells_allocated ();
592 local_scm_gc_time_taken = scm_gc_time_taken;
593
594 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
595 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
596 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
597 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
598 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
599 scm_cons (sym_heap_segments, heap_segs),
600 SCM_UNDEFINED);
601 SCM_ALLOW_INTS;
602 return answer;
603 }
604 #undef FUNC_NAME
605
606
607 void
608 scm_gc_start (const char *what)
609 {
610 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
611 scm_gc_cells_collected = 0;
612 scm_gc_yield_1 = scm_gc_yield;
613 scm_gc_yield = (scm_cells_allocated
614 + master_cells_allocated (&scm_master_freelist)
615 + master_cells_allocated (&scm_master_freelist2));
616 scm_gc_malloc_collected = 0;
617 scm_gc_ports_collected = 0;
618 }
619
620
621 void
622 scm_gc_end ()
623 {
624 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
625 scm_gc_time_taken += scm_gc_rt;
626 }
627
628
629 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
630 (SCM obj),
631 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
632 "returned by this function for @var{obj}")
633 #define FUNC_NAME s_scm_object_address
634 {
635 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
636 }
637 #undef FUNC_NAME
638
639
640 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
641 (),
642 "Scans all of SCM objects and reclaims for further use those that are\n"
643 "no longer accessible.")
644 #define FUNC_NAME s_scm_gc
645 {
646 SCM_DEFER_INTS;
647 scm_igc ("call");
648 SCM_ALLOW_INTS;
649 return SCM_UNSPECIFIED;
650 }
651 #undef FUNC_NAME
652
653
654 \f
655 /* {C Interface For When GC is Triggered}
656 */
657
658 static void
659 adjust_min_yield (scm_freelist_t *freelist)
660 {
661 /* min yield is adjusted upwards so that next predicted total yield
662 * (allocated cells actually freed by GC) becomes
663 * `min_yield_fraction' of total heap size. Note, however, that
664 * the absolute value of min_yield will correspond to `collected'
665 * on one master (the one which currently is triggering GC).
666 *
667 * The reason why we look at total yield instead of cells collected
668 * on one list is that we want to take other freelists into account.
669 * On this freelist, we know that (local) yield = collected cells,
670 * but that's probably not the case on the other lists.
671 *
672 * (We might consider computing a better prediction, for example
673    * by computing an average over multiple GCs.)
674 */
675 if (freelist->min_yield_fraction)
676 {
677 /* Pick largest of last two yields. */
678 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
679 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
680 #ifdef DEBUGINFO
681 fprintf (stderr, " after GC = %d, delta = %d\n",
682 scm_cells_allocated,
683 delta);
684 #endif
685 if (delta > 0)
686 freelist->min_yield += delta;
687 }
688 }
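/* Editorial example of the adjustment above, with made-up numbers: if
 * SCM_HEAP_SIZE is 100000 cells and min_yield_fraction is 40, the
 * target yield is 40000 cells.  If the better of the last two total
 * yields was only 30000, delta is 10000 and min_yield rises by that
 * amount, making it more likely that the next shortfall grows the
 * heap (see gc_sweep_freelist_finish) instead of re-running GC.
 */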
689
690 /* When we get POSIX threads support, the master will be global and
691 * common while the freelist will be individual for each thread.
692 */
693
694 SCM
695 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
696 {
697 SCM cell;
698 ++scm_ints_disabled;
699 do
700 {
701 if (SCM_NULLP (master->clusters))
702 {
703 if (master->grow_heap_p || scm_block_gc)
704 {
705 master->grow_heap_p = 0;
706 alloc_some_heap (master);
707 }
708 else
709 {
710 #ifdef DEBUGINFO
711 fprintf (stderr, "allocated = %d, ",
712 scm_cells_allocated
713 + master_cells_allocated (&scm_master_freelist)
714 + master_cells_allocated (&scm_master_freelist2));
715 #endif
716 scm_igc ("cells");
717 adjust_min_yield (master);
718 }
719 }
720 cell = SCM_CAR (master->clusters);
721 master->clusters = SCM_CDR (master->clusters);
722 ++master->clusters_allocated;
723 }
724 while (SCM_NULLP (cell));
725 --scm_ints_disabled;
726 *freelist = SCM_CDR (cell);
727 SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
728 return cell;
729 }
730
731 #if 0
732 /* This is a support routine which can be used to reserve a cluster
733 * for some special use, such as debugging. It won't be useful until
734 * free cells are preserved between garbage collections.
735 */
736
737 SCM
738 scm_alloc_cluster (scm_freelist_t *master)
739 {
740 SCM freelist, cell;
741 cell = scm_gc_for_newcell (master, &freelist);
742 SCM_SETCDR (cell, freelist);
743 return cell;
744 }
745 #endif
746
747
748 scm_c_hook_t scm_before_gc_c_hook;
749 scm_c_hook_t scm_before_mark_c_hook;
750 scm_c_hook_t scm_before_sweep_c_hook;
751 scm_c_hook_t scm_after_sweep_c_hook;
752 scm_c_hook_t scm_after_gc_c_hook;
753
754 void
755 scm_igc (const char *what)
756 {
757 int j;
758
759 scm_c_hook_run (&scm_before_gc_c_hook, 0);
760 #ifdef DEBUGINFO
761 fprintf (stderr,
762 SCM_NULLP (scm_freelist)
763 ? "*"
764 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
765 #endif
766 #ifdef USE_THREADS
767 /* During the critical section, only the current thread may run. */
768 SCM_THREAD_CRITICAL_SECTION_START;
769 #endif
770
771 /* fprintf (stderr, "gc: %s\n", what); */
772
773 scm_gc_start (what);
774
775 if (!scm_stack_base || scm_block_gc)
776 {
777 scm_gc_end ();
778 return;
779 }
780
781 if (scm_mallocated < 0)
782 /* The byte count of allocated objects has underflowed. This is
783 probably because you forgot to report the sizes of objects you
784 have allocated, by calling scm_done_malloc or some such. When
785 the GC freed them, it subtracted their size from
786 scm_mallocated, which underflowed. */
787 abort ();
788
789 if (scm_gc_heap_lock)
790 /* We've invoked the collector while a GC is already in progress.
791 That should never happen. */
792 abort ();
793
794 ++scm_gc_heap_lock;
795
796 /* flush dead entries from the continuation stack */
797 {
798 int x;
799 int bound;
800 SCM * elts;
801 elts = SCM_VELTS (scm_continuation_stack);
802 bound = SCM_LENGTH (scm_continuation_stack);
803 x = SCM_INUM (scm_continuation_stack_ptr);
804 while (x < bound)
805 {
806 elts[x] = SCM_BOOL_F;
807 ++x;
808 }
809 }
810
811 scm_c_hook_run (&scm_before_mark_c_hook, 0);
812
813 #ifndef USE_THREADS
814
815 /* Protect from the C stack. This must be the first marking
816 * done because it provides information about what objects
817 * are "in-use" by the C code. "in-use" objects are those
818 * for which the values from SCM_LENGTH and SCM_CHARS must remain
819 * usable. This requirement is stricter than a liveness
820 * requirement -- in particular, it constrains the implementation
821 * of scm_vector_set_length_x.
822 */
823 SCM_FLUSH_REGISTER_WINDOWS;
824 /* This assumes that all registers are saved into the jmp_buf */
825 setjmp (scm_save_regs_gc_mark);
826 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
827 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
828 sizeof scm_save_regs_gc_mark)
829 / sizeof (SCM_STACKITEM)));
830
831 {
832 scm_sizet stack_len = scm_stack_size (scm_stack_base);
833 #ifdef SCM_STACK_GROWS_UP
834 scm_mark_locations (scm_stack_base, stack_len);
835 #else
836 scm_mark_locations (scm_stack_base - stack_len, stack_len);
837 #endif
838 }
839
840 #else /* USE_THREADS */
841
842 /* Mark every thread's stack and registers */
843 scm_threads_mark_stacks ();
844
845 #endif /* USE_THREADS */
846
847 /* FIXME: insert a phase to un-protect string-data preserved
848 * in scm_vector_set_length_x.
849 */
850
851 j = SCM_NUM_PROTECTS;
852 while (j--)
853 scm_gc_mark (scm_sys_protects[j]);
854
855 /* FIXME: we should have a means to register C functions to be run
856 * in different phases of GC
857 */
858 scm_mark_subr_table ();
859
860 #ifndef USE_THREADS
861 scm_gc_mark (scm_root->handle);
862 #endif
863
864 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
865
866 scm_gc_sweep ();
867
868 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
869
870 --scm_gc_heap_lock;
871 scm_gc_end ();
872
873 #ifdef USE_THREADS
874 SCM_THREAD_CRITICAL_SECTION_END;
875 #endif
876 scm_c_hook_run (&scm_after_gc_c_hook, 0);
877 }
878
879 \f
880
881 /* {Mark/Sweep}
882 */
883
884
885
886 /* Mark an object precisely.
887 */
888 void
889 scm_gc_mark (SCM p)
890 {
891 register long i;
892 register SCM ptr;
893
894 ptr = p;
895
896 gc_mark_loop:
897 if (SCM_IMP (ptr))
898 return;
899
900 gc_mark_nimp:
901 if (SCM_NCELLP (ptr))
902 scm_wta (ptr, "rogue pointer in heap", NULL);
903
904 switch (SCM_TYP7 (ptr))
905 {
906 case scm_tcs_cons_nimcar:
907 if (SCM_GCMARKP (ptr))
908 break;
909 SCM_SETGCMARK (ptr);
910 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
911 {
912 ptr = SCM_CAR (ptr);
913 goto gc_mark_nimp;
914 }
915 scm_gc_mark (SCM_CAR (ptr));
916 ptr = SCM_GCCDR (ptr);
917 goto gc_mark_nimp;
918 case scm_tcs_cons_imcar:
919 if (SCM_GCMARKP (ptr))
920 break;
921 SCM_SETGCMARK (ptr);
922 ptr = SCM_GCCDR (ptr);
923 goto gc_mark_loop;
924 case scm_tc7_pws:
925 if (SCM_GCMARKP (ptr))
926 break;
927 SCM_SETGCMARK (ptr);
928 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
929 ptr = SCM_GCCDR (ptr);
930 goto gc_mark_loop;
931 case scm_tcs_cons_gloc:
932 if (SCM_GCMARKP (ptr))
933 break;
934 SCM_SETGCMARK (ptr);
935 {
936 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
937 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
938 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
939 * pointer to a struct vtable data region. The fact that these are
940        * accessed in the same way restricts the possibilities to change the
941 * data layout of structs or heap cells.
942 */
943 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
944 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
945 switch (vtable_data [scm_vtable_index_vcell])
946 {
947 default:
948 {
949 /* ptr is a gloc */
950 SCM gloc_car = SCM_PACK (word0);
951 scm_gc_mark (gloc_car);
952 ptr = SCM_GCCDR (ptr);
953 goto gc_mark_loop;
954 }
955 case 1: /* ! */
956 case 0: /* ! */
957 {
958 /* ptr is a struct */
959 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
960 int len = SCM_LENGTH (layout);
961 char * fields_desc = SCM_CHARS (layout);
962 /* We're using SCM_GCCDR here like STRUCT_DATA, except
963 that it removes the mark */
964 scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));
965
966 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
967 {
968 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
969 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
970 }
971 if (len)
972 {
973 int x;
974
975 for (x = 0; x < len - 2; x += 2, ++struct_data)
976 if (fields_desc[x] == 'p')
977 scm_gc_mark (SCM_PACK (*struct_data));
978 if (fields_desc[x] == 'p')
979 {
980 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
981 for (x = *struct_data; x; --x)
982 scm_gc_mark (SCM_PACK (*++struct_data));
983 else
984 scm_gc_mark (SCM_PACK (*struct_data));
985 }
986 }
987 if (vtable_data [scm_vtable_index_vcell] == 0)
988 {
989 vtable_data [scm_vtable_index_vcell] = 1;
990 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
991 goto gc_mark_loop;
992 }
993 }
994 }
995 }
996 break;
997 case scm_tcs_closures:
998 if (SCM_GCMARKP (ptr))
999 break;
1000 SCM_SETGCMARK (ptr);
1001 if (SCM_IMP (SCM_CDR (ptr)))
1002 {
1003 ptr = SCM_CLOSCAR (ptr);
1004 goto gc_mark_nimp;
1005 }
1006 scm_gc_mark (SCM_CLOSCAR (ptr));
1007 ptr = SCM_GCCDR (ptr);
1008 goto gc_mark_nimp;
1009 case scm_tc7_vector:
1010 case scm_tc7_lvector:
1011 #ifdef CCLO
1012 case scm_tc7_cclo:
1013 #endif
1014 if (SCM_GC8MARKP (ptr))
1015 break;
1016 SCM_SETGC8MARK (ptr);
1017 i = SCM_LENGTH (ptr);
1018 if (i == 0)
1019 break;
1020 while (--i > 0)
1021 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1022 scm_gc_mark (SCM_VELTS (ptr)[i]);
1023 ptr = SCM_VELTS (ptr)[0];
1024 goto gc_mark_loop;
1025 case scm_tc7_contin:
1026       if (SCM_GC8MARKP (ptr))
1027         break;
1028 SCM_SETGC8MARK (ptr);
1029 if (SCM_VELTS (ptr))
1030 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
1031 (scm_sizet)
1032 (SCM_LENGTH (ptr) +
1033 (sizeof (SCM_STACKITEM) + -1 +
1034 sizeof (scm_contregs)) /
1035 sizeof (SCM_STACKITEM)));
1036 break;
1037 #ifdef HAVE_ARRAYS
1038 case scm_tc7_bvect:
1039 case scm_tc7_byvect:
1040 case scm_tc7_ivect:
1041 case scm_tc7_uvect:
1042 case scm_tc7_fvect:
1043 case scm_tc7_dvect:
1044 case scm_tc7_cvect:
1045 case scm_tc7_svect:
1046 #ifdef HAVE_LONG_LONGS
1047 case scm_tc7_llvect:
1048 #endif
1049 #endif
1050 case scm_tc7_string:
1051 SCM_SETGC8MARK (ptr);
1052 break;
1053
1054 case scm_tc7_substring:
1055 if (SCM_GC8MARKP(ptr))
1056 break;
1057 SCM_SETGC8MARK (ptr);
1058 ptr = SCM_CDR (ptr);
1059 goto gc_mark_loop;
1060
1061 case scm_tc7_wvect:
1062 if (SCM_GC8MARKP(ptr))
1063 break;
1064 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1065 scm_weak_vectors = ptr;
1066 SCM_SETGC8MARK (ptr);
1067 if (SCM_IS_WHVEC_ANY (ptr))
1068 {
1069 int x;
1070 int len;
1071 int weak_keys;
1072 int weak_values;
1073
1074 len = SCM_LENGTH (ptr);
1075 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1076 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1077
1078 for (x = 0; x < len; ++x)
1079 {
1080 SCM alist;
1081 alist = SCM_VELTS (ptr)[x];
1082
1083 /* mark everything on the alist except the keys or
1084 * values, according to weak_values and weak_keys. */
1085 while ( SCM_CONSP (alist)
1086 && !SCM_GCMARKP (alist)
1087 && SCM_CONSP (SCM_CAR (alist)))
1088 {
1089 SCM kvpair;
1090 SCM next_alist;
1091
1092 kvpair = SCM_CAR (alist);
1093 next_alist = SCM_CDR (alist);
1094 /*
1095 * Do not do this:
1096 * SCM_SETGCMARK (alist);
1097 * SCM_SETGCMARK (kvpair);
1098 *
1099 * It may be that either the key or value is protected by
1100 * an escaped reference to part of the spine of this alist.
1101 * If we mark the spine here, and only mark one or neither of the
1102 * key and value, they may never be properly marked.
1103 * This leads to a horrible situation in which an alist containing
1104 * freelist cells is exported.
1105 *
1106 * So only mark the spines of these arrays last of all marking.
1107 * If somebody confuses us by constructing a weak vector
1108 * with a circular alist then we are hosed, but at least we
1109 * won't prematurely drop table entries.
1110 */
1111 if (!weak_keys)
1112 scm_gc_mark (SCM_CAR (kvpair));
1113 if (!weak_values)
1114 scm_gc_mark (SCM_GCCDR (kvpair));
1115 alist = next_alist;
1116 }
1117 if (SCM_NIMP (alist))
1118 scm_gc_mark (alist);
1119 }
1120 }
1121 break;
1122
1123 case scm_tc7_msymbol:
1124 if (SCM_GC8MARKP(ptr))
1125 break;
1126 SCM_SETGC8MARK (ptr);
1127 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1128 ptr = SCM_SYMBOL_PROPS (ptr);
1129 goto gc_mark_loop;
1130 case scm_tc7_ssymbol:
1131 if (SCM_GC8MARKP(ptr))
1132 break;
1133 SCM_SETGC8MARK (ptr);
1134 break;
1135 case scm_tcs_subrs:
1136 break;
1137 case scm_tc7_port:
1138 i = SCM_PTOBNUM (ptr);
1139 if (!(i < scm_numptob))
1140 goto def;
1141 if (SCM_GC8MARKP (ptr))
1142 break;
1143 SCM_SETGC8MARK (ptr);
1144 if (SCM_PTAB_ENTRY(ptr))
1145 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
1146 if (scm_ptobs[i].mark)
1147 {
1148 ptr = (scm_ptobs[i].mark) (ptr);
1149 goto gc_mark_loop;
1150 }
1151 else
1152 return;
1153 break;
1154 case scm_tc7_smob:
1155 if (SCM_GC8MARKP (ptr))
1156 break;
1157 SCM_SETGC8MARK (ptr);
1158 switch (SCM_GCTYP16 (ptr))
1159 { /* should be faster than going through scm_smobs */
1160 case scm_tc_free_cell:
1161 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1162 case scm_tc16_allocated:
1163 case scm_tc16_big:
1164 case scm_tc16_real:
1165 case scm_tc16_complex:
1166 break;
1167 default:
1168 i = SCM_SMOBNUM (ptr);
1169 if (!(i < scm_numsmob))
1170 goto def;
1171 if (scm_smobs[i].mark)
1172 {
1173 ptr = (scm_smobs[i].mark) (ptr);
1174 goto gc_mark_loop;
1175 }
1176 else
1177 return;
1178 }
1179 break;
1180 default:
1181 def:scm_wta (ptr, "unknown type in ", "gc_mark");
1182 }
1183 }
1184
1185
1186 /* Mark a Region Conservatively
1187 */
1188
1189 void
1190 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1191 {
1192 register long m = n;
1193 register int i, j;
1194 register SCM_CELLPTR ptr;
1195
1196 while (0 <= --m)
1197 if (SCM_CELLP (* (SCM *) &x[m]))
1198 {
1199 ptr = SCM2PTR (* (SCM *) &x[m]);
1200 i = 0;
1201 j = scm_n_heap_segs - 1;
1202 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1203 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1204 {
1205 while (i <= j)
1206 {
1207 int seg_id;
1208 seg_id = -1;
1209 if ( (i == j)
1210 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1211 seg_id = i;
1212 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1213 seg_id = j;
1214 else
1215 {
1216 int k;
1217 k = (i + j) / 2;
1218 if (k == i)
1219 break;
1220 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1221 {
1222 j = k;
1223 ++i;
1224 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1225 continue;
1226 else
1227 break;
1228 }
1229 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1230 {
1231 i = k;
1232 --j;
1233 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1234 continue;
1235 else
1236 break;
1237 }
1238 }
1239 if (scm_heap_table[seg_id].span == 1
1240 || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
1241 scm_gc_mark (* (SCM *) &x[m]);
1242 break;
1243 }
1244
1245 }
1246 }
1247 }
1248
1249
1250 /* The function scm_cellp determines whether an SCM value can be regarded as a
1251 * pointer to a cell on the heap. Binary search is used in order to determine
1252 * the heap segment that contains the cell.
1253 */
1254 int
1255 scm_cellp (SCM value)
1256 {
1257 if (SCM_CELLP (value)) {
1258 scm_cell * ptr = SCM2PTR (value);
1259 unsigned int i = 0;
1260 unsigned int j = scm_n_heap_segs - 1;
1261
1262 while (i < j) {
1263 int k = (i + j) / 2;
1264 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1265 j = k;
1266 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1267 i = k + 1;
1268 }
1269 }
1270
1271 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1272 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1273 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
1274 return 1;
1275 } else {
1276 return 0;
1277 }
1278 } else {
1279 return 0;
1280 }
1281 }
1282
1283
1284 static void
1285 gc_sweep_freelist_start (scm_freelist_t *freelist)
1286 {
1287 freelist->cells = SCM_EOL;
1288 freelist->left_to_collect = freelist->cluster_size;
1289 freelist->clusters_allocated = 0;
1290 freelist->clusters = SCM_EOL;
1291 freelist->clustertail = &freelist->clusters;
1292 freelist->collected_1 = freelist->collected;
1293 freelist->collected = 0;
1294 }
1295
1296 static void
1297 gc_sweep_freelist_finish (scm_freelist_t *freelist)
1298 {
1299 int collected;
1300 *freelist->clustertail = freelist->cells;
1301 if (SCM_NNULLP (freelist->cells))
1302 {
1303 SCM c = freelist->cells;
1304 SCM_SETCAR (c, SCM_CDR (c));
1305 SCM_SETCDR (c, SCM_EOL);
1306 freelist->collected +=
1307 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1308 }
1309 scm_gc_cells_collected += freelist->collected;
1310
1311 /* Although freelist->min_yield is used to test freelist->collected
1312 * (which is the local GC yield for freelist), it is adjusted so
1313 * that *total* yield is freelist->min_yield_fraction of total heap
1314 * size. This means that a too low yield is compensated by more
1315 * heap on the list which is currently doing most work, which is
1316 * just what we want.
1317 */
1318 collected = SCM_MAX (freelist->collected_1, freelist->collected);
1319 freelist->grow_heap_p = (collected < freelist->min_yield);
1320 }
1321
1322 void
1323 scm_gc_sweep ()
1324 {
1325 register SCM_CELLPTR ptr;
1326 register SCM nfreelist;
1327 register scm_freelist_t *freelist;
1328 register long m;
1329 register int span;
1330 long i;
1331 scm_sizet seg_size;
1332
1333 m = 0;
1334
1335 gc_sweep_freelist_start (&scm_master_freelist);
1336 gc_sweep_freelist_start (&scm_master_freelist2);
1337
1338 for (i = 0; i < scm_n_heap_segs; i++)
1339 {
1340 register unsigned int left_to_collect;
1341 register scm_sizet j;
1342
1343 /* Unmarked cells go onto the front of the freelist this heap
1344 segment points to. Rather than updating the real freelist
1345 pointer as we go along, we accumulate the new head in
1346 nfreelist. Then, if it turns out that the entire segment is
1347 free, we free (i.e., malloc's free) the whole segment, and
1348 simply don't assign nfreelist back into the real freelist. */
1349 freelist = scm_heap_table[i].freelist;
1350 nfreelist = freelist->cells;
1351 left_to_collect = freelist->left_to_collect;
1352 span = scm_heap_table[i].span;
1353
1354 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1355 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1356 for (j = seg_size + span; j -= span; ptr += span)
1357 {
1358 SCM scmptr = PTR2SCM (ptr);
1359
1360 switch SCM_TYP7 (scmptr)
1361 {
1362 case scm_tcs_cons_gloc:
1363 {
1364 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1365 * struct or a gloc. See the corresponding comment in
1366 * scm_gc_mark.
1367 */
1368 scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
1369 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1370 if (SCM_GCMARKP (scmptr))
1371 {
1372 if (vtable_data [scm_vtable_index_vcell] == 1)
1373 vtable_data [scm_vtable_index_vcell] = 0;
1374 goto cmrkcontinue;
1375 }
1376 else
1377 {
1378 if (vtable_data [scm_vtable_index_vcell] == 0
1379 || vtable_data [scm_vtable_index_vcell] == 1)
1380 {
1381 scm_struct_free_t free
1382 = (scm_struct_free_t) vtable_data[scm_struct_i_free];
1383 m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
1384 }
1385 }
1386 }
1387 break;
1388 case scm_tcs_cons_imcar:
1389 case scm_tcs_cons_nimcar:
1390 case scm_tcs_closures:
1391 case scm_tc7_pws:
1392 if (SCM_GCMARKP (scmptr))
1393 goto cmrkcontinue;
1394 break;
1395 case scm_tc7_wvect:
1396 if (SCM_GC8MARKP (scmptr))
1397 {
1398 goto c8mrkcontinue;
1399 }
1400 else
1401 {
1402 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1403 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
1404 break;
1405 }
1406
1407 case scm_tc7_vector:
1408 case scm_tc7_lvector:
1409 #ifdef CCLO
1410 case scm_tc7_cclo:
1411 #endif
1412 if (SCM_GC8MARKP (scmptr))
1413 goto c8mrkcontinue;
1414
1415 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1416 freechars:
1417 scm_must_free (SCM_CHARS (scmptr));
1418 /* SCM_SETCHARS(scmptr, 0);*/
1419 break;
1420 #ifdef HAVE_ARRAYS
1421 case scm_tc7_bvect:
1422 if SCM_GC8MARKP (scmptr)
1423 goto c8mrkcontinue;
1424 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1425 goto freechars;
1426 case scm_tc7_byvect:
1427 if SCM_GC8MARKP (scmptr)
1428 goto c8mrkcontinue;
1429 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1430 goto freechars;
1431 case scm_tc7_ivect:
1432 case scm_tc7_uvect:
1433 if SCM_GC8MARKP (scmptr)
1434 goto c8mrkcontinue;
1435 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1436 goto freechars;
1437 case scm_tc7_svect:
1438 if SCM_GC8MARKP (scmptr)
1439 goto c8mrkcontinue;
1440 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1441 goto freechars;
1442 #ifdef HAVE_LONG_LONGS
1443 case scm_tc7_llvect:
1444 if SCM_GC8MARKP (scmptr)
1445 goto c8mrkcontinue;
1446 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1447 goto freechars;
1448 #endif
1449 case scm_tc7_fvect:
1450 if SCM_GC8MARKP (scmptr)
1451 goto c8mrkcontinue;
1452 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1453 goto freechars;
1454 case scm_tc7_dvect:
1455 if SCM_GC8MARKP (scmptr)
1456 goto c8mrkcontinue;
1457 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1458 goto freechars;
1459 case scm_tc7_cvect:
1460 if SCM_GC8MARKP (scmptr)
1461 goto c8mrkcontinue;
1462 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1463 goto freechars;
1464 #endif
1465 case scm_tc7_substring:
1466 if (SCM_GC8MARKP (scmptr))
1467 goto c8mrkcontinue;
1468 break;
1469 case scm_tc7_string:
1470 if (SCM_GC8MARKP (scmptr))
1471 goto c8mrkcontinue;
1472 m += SCM_HUGE_LENGTH (scmptr) + 1;
1473 goto freechars;
1474 case scm_tc7_msymbol:
1475 if (SCM_GC8MARKP (scmptr))
1476 goto c8mrkcontinue;
1477 m += (SCM_LENGTH (scmptr) + 1
1478 + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
1479 scm_must_free ((char *)SCM_SLOTS (scmptr));
1480 break;
1481 case scm_tc7_contin:
1482 if SCM_GC8MARKP (scmptr)
1483 goto c8mrkcontinue;
1484 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
1485 if (SCM_VELTS (scmptr))
1486 goto freechars;
1487 case scm_tc7_ssymbol:
1488 if SCM_GC8MARKP(scmptr)
1489 goto c8mrkcontinue;
1490 break;
1491 case scm_tcs_subrs:
1492 continue;
1493 case scm_tc7_port:
1494 if SCM_GC8MARKP (scmptr)
1495 goto c8mrkcontinue;
1496 if SCM_OPENP (scmptr)
1497 {
1498 int k = SCM_PTOBNUM (scmptr);
1499 if (!(k < scm_numptob))
1500 goto sweeperr;
1501 /* Keep "revealed" ports alive. */
1502 if (scm_revealed_count (scmptr) > 0)
1503 continue;
1504 /* Yes, I really do mean scm_ptobs[k].free */
1505               /* rather than scm_ptobs[k].close.  .close        */
1506 /* is for explicit CLOSE-PORT by user */
1507 m += (scm_ptobs[k].free) (scmptr);
1508 SCM_SETSTREAM (scmptr, 0);
1509 scm_remove_from_port_table (scmptr);
1510 scm_gc_ports_collected++;
1511 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1512 }
1513 break;
1514 case scm_tc7_smob:
1515 switch SCM_GCTYP16 (scmptr)
1516 {
1517 case scm_tc_free_cell:
1518 case scm_tc16_real:
1519 if SCM_GC8MARKP (scmptr)
1520 goto c8mrkcontinue;
1521 break;
1522 #ifdef SCM_BIGDIG
1523 case scm_tc16_big:
1524 if SCM_GC8MARKP (scmptr)
1525 goto c8mrkcontinue;
1526 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1527 goto freechars;
1528 #endif /* def SCM_BIGDIG */
1529 case scm_tc16_complex:
1530 if SCM_GC8MARKP (scmptr)
1531 goto c8mrkcontinue;
1532 m += 2 * sizeof (double);
1533 goto freechars;
1534 default:
1535 if SCM_GC8MARKP (scmptr)
1536 goto c8mrkcontinue;
1537
1538 {
1539 int k;
1540 k = SCM_SMOBNUM (scmptr);
1541 if (!(k < scm_numsmob))
1542 goto sweeperr;
1543 m += (scm_smobs[k].free) (scmptr);
1544 break;
1545 }
1546 }
1547 break;
1548 default:
1549 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1550 }
1551 #if 0
1552 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1553 exit (2);
1554 #endif
1555 if (!--left_to_collect)
1556 {
1557 SCM_SETCAR (scmptr, nfreelist);
1558 *freelist->clustertail = scmptr;
1559 freelist->clustertail = SCM_CDRLOC (scmptr);
1560
1561 nfreelist = SCM_EOL;
1562 freelist->collected += span * freelist->cluster_size;
1563 left_to_collect = freelist->cluster_size;
1564 }
1565 else
1566 {
1567 /* Stick the new cell on the front of nfreelist. It's
1568 critical that we mark this cell as freed; otherwise, the
1569 conservative collector might trace it as some other type
1570 of object. */
1571 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1572 SCM_SETCDR (scmptr, nfreelist);
1573 nfreelist = scmptr;
1574 }
1575
1576 continue;
1577 c8mrkcontinue:
1578 SCM_CLRGC8MARK (scmptr);
1579 continue;
1580 cmrkcontinue:
1581 SCM_CLRGCMARK (scmptr);
1582 }
1583 #ifdef GC_FREE_SEGMENTS
1584 if (n == seg_size)
1585 {
1586 register long j;
1587
1588 freelist->heap_size -= seg_size;
1589 free ((char *) scm_heap_table[i].bounds[0]);
1590 scm_heap_table[i].bounds[0] = 0;
1591 for (j = i + 1; j < scm_n_heap_segs; j++)
1592 scm_heap_table[j - 1] = scm_heap_table[j];
1593 scm_n_heap_segs -= 1;
1594 i--; /* We need to scan the segment just moved. */
1595 }
1596 else
1597 #endif /* ifdef GC_FREE_SEGMENTS */
1598 {
1599 /* Update the real freelist pointer to point to the head of
1600 the list of free cells we've built for this segment. */
1601 freelist->cells = nfreelist;
1602 freelist->left_to_collect = left_to_collect;
1603 }
1604
1605 #ifdef GUILE_DEBUG_FREELIST
1606 scm_check_freelist (freelist == &scm_master_freelist
1607 ? scm_freelist
1608 : scm_freelist2);
1609 scm_map_free_list ();
1610 #endif
1611 }
1612
1613 gc_sweep_freelist_finish (&scm_master_freelist);
1614 gc_sweep_freelist_finish (&scm_master_freelist2);
1615
1616 /* When we move to POSIX threads private freelists should probably
1617 be GC-protected instead. */
1618 scm_freelist = SCM_EOL;
1619 scm_freelist2 = SCM_EOL;
1620
1621 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1622 scm_gc_yield -= scm_cells_allocated;
1623 scm_mallocated -= m;
1624 scm_gc_malloc_collected = m;
1625 }
1626
1627
1628 \f
1629
1630 /* {Front end to malloc}
1631 *
1632 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1633 *
1634  * These functions provide services comparable to malloc, realloc, and
1635  * free.  They are for allocating malloced parts of scheme objects.
1636  * The primary purpose of the front end is to interpose calls to gc.
1637 */
1638
1639
1640 /* scm_must_malloc
1641 * Return newly malloced storage or throw an error.
1642 *
1643 * The parameter WHAT is a string for error reporting.
1644 * If the threshold scm_mtrigger will be passed by this
1645 * allocation, or if the first call to malloc fails,
1646 * garbage collect -- on the presumption that some objects
1647 * using malloced storage may be collected.
1648 *
1649 * The limit scm_mtrigger may be raised by this allocation.
1650 */
1651 void *
1652 scm_must_malloc (scm_sizet size, const char *what)
1653 {
1654 void *ptr;
1655 unsigned long nm = scm_mallocated + size;
1656
1657 if (nm <= scm_mtrigger)
1658 {
1659 SCM_SYSCALL (ptr = malloc (size));
1660 if (NULL != ptr)
1661 {
1662 scm_mallocated = nm;
1663 #ifdef GUILE_DEBUG_MALLOC
1664 scm_malloc_register (ptr, what);
1665 #endif
1666 return ptr;
1667 }
1668 }
1669
1670 scm_igc (what);
1671
1672 nm = scm_mallocated + size;
1673 SCM_SYSCALL (ptr = malloc (size));
1674 if (NULL != ptr)
1675 {
1676 scm_mallocated = nm;
1677 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1678 if (nm > scm_mtrigger)
1679 scm_mtrigger = nm + nm / 2;
1680 else
1681 scm_mtrigger += scm_mtrigger / 2;
1682 }
1683 #ifdef GUILE_DEBUG_MALLOC
1684 scm_malloc_register (ptr, what);
1685 #endif
1686
1687 return ptr;
1688 }
1689
1690 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1691 return 0; /* never reached */
1692 }
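/* Editorial worked example of the trigger adjustment above: suppose
 * scm_mtrigger is 100000 (SCM_INIT_MALLOC_LIMIT), so the hysteresis
 * floor is 90000.  If, after GC, nm is 95000, the else branch raises
 * scm_mtrigger to 150000; if nm is 105000 (past the trigger itself),
 * scm_mtrigger becomes nm + nm / 2 = 157500.  Either way the trigger
 * keeps a margin above actual malloc usage, so a burst of allocation
 * does not cause back-to-back collections.
 */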
1693
1694
1695 /* scm_must_realloc
1696 * is similar to scm_must_malloc.
1697 */
1698 void *
1699 scm_must_realloc (void *where,
1700 scm_sizet old_size,
1701 scm_sizet size,
1702 const char *what)
1703 {
1704 void *ptr;
1705 scm_sizet nm = scm_mallocated + size - old_size;
1706
1707 if (nm <= scm_mtrigger)
1708 {
1709 SCM_SYSCALL (ptr = realloc (where, size));
1710 if (NULL != ptr)
1711 {
1712 scm_mallocated = nm;
1713 #ifdef GUILE_DEBUG_MALLOC
1714 scm_malloc_reregister (where, ptr, what);
1715 #endif
1716 return ptr;
1717 }
1718 }
1719
1720 scm_igc (what);
1721
1722 nm = scm_mallocated + size - old_size;
1723 SCM_SYSCALL (ptr = realloc (where, size));
1724 if (NULL != ptr)
1725 {
1726 scm_mallocated = nm;
1727 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1728 if (nm > scm_mtrigger)
1729 scm_mtrigger = nm + nm / 2;
1730 else
1731 scm_mtrigger += scm_mtrigger / 2;
1732 }
1733 #ifdef GUILE_DEBUG_MALLOC
1734 scm_malloc_reregister (where, ptr, what);
1735 #endif
1736 return ptr;
1737 }
1738
1739 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1740 return 0; /* never reached */
1741 }
1742
1743 void
1744 scm_must_free (void *obj)
1745 {
1746 #ifdef GUILE_DEBUG_MALLOC
1747 scm_malloc_unregister (obj);
1748 #endif
1749 if (obj)
1750 free (obj);
1751 else
1752 scm_wta (SCM_INUM0, "already free", "");
1753 }
1754
1755 /* Announce that there has been some malloc done that will be freed
1756 * during gc. A typical use is for a smob that uses some malloced
1757 * memory but can not get it from scm_must_malloc (for whatever
1758 * reason). When a new object of this smob is created you call
1759 * scm_done_malloc with the size of the object. When your smob free
1760 * function is called, be sure to include this size in the return
1761 * value. */
1762
1763 void
1764 scm_done_malloc (long size)
1765 {
1766 scm_mallocated += size;
1767
1768 if (scm_mallocated > scm_mtrigger)
1769 {
1770 scm_igc ("foreign mallocs");
1771 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1772 {
1773 if (scm_mallocated > scm_mtrigger)
1774 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1775 else
1776 scm_mtrigger += scm_mtrigger / 2;
1777 }
1778 }
1779 }
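/* Editorial sketch of the usage pattern described above.  All names
 * here (foreign_alloc, foreign_free, buffer_len, tc16_buffer) are
 * hypothetical, and the SCM_NEWSMOB macro from smob.h is assumed;
 * only scm_done_malloc and the "report the size from the smob free
 * function" convention come from this file.
 */
#if 0
static long tc16_buffer;

static SCM
make_buffer (scm_sizet len)
{
  char *mem = foreign_alloc (len);   /* not via scm_must_malloc */
  SCM z;
  SCM_NEWSMOB (z, tc16_buffer, mem);
  scm_done_malloc ((long) len);      /* tell the GC about the bytes */
  return z;
}

static scm_sizet
buffer_free (SCM obj)
{
  scm_sizet len = buffer_len (obj);  /* hypothetical accessor */
  foreign_free ((char *) SCM_CDR (obj));
  return len;                        /* credited to `m' in scm_gc_sweep */
}
#endif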
1780
1781
1782 \f
1783
1784 /* {Heap Segments}
1785 *
1786 * Each heap segment is an array of objects of a particular size.
1787 * Every segment has an associated (possibly shared) freelist.
1788 * A table of segment records is kept that records the upper and
1789 * lower extents of the segment; this is used during the conservative
1790  * phase of gc to identify probable gc roots (because they point
1791 * into valid segments at reasonable offsets). */
1792
1793 /* scm_expmem
1794  * is true if the first segment was smaller than SCM_INIT_HEAP_SIZE.
1795 * If scm_expmem is set to one, subsequent segment allocations will
1796 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1797 */
1798 int scm_expmem = 0;
1799
1800 scm_sizet scm_max_segment_size;
1801
1802 /* scm_heap_org
1803 * is the lowest base address of any heap segment.
1804 */
1805 SCM_CELLPTR scm_heap_org;
1806
1807 scm_heap_seg_data_t * scm_heap_table = 0;
1808 int scm_n_heap_segs = 0;
1809
1810 /* init_heap_seg
1811  * initializes a new heap segment and returns the size of the segment in bytes.
1812 *
1813 * The segment origin, segment size in bytes, and the span of objects
1814 * in cells are input parameters. The freelist is both input and output.
1815 *
1816  * This function presumes that the scm_heap_table has already been expanded
1817  * to accommodate a new segment record.
1818 */
1819
1820
1821 static scm_sizet
1822 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
1823 {
1824 register SCM_CELLPTR ptr;
1825 SCM_CELLPTR seg_end;
1826 int new_seg_index;
1827 int n_new_cells;
1828 int span = freelist->span;
1829
1830 if (seg_org == NULL)
1831 return 0;
1832
1833 ptr = CELL_UP (seg_org, span);
1834
1835 /* Compute the ceiling on valid object pointers w/in this segment.
1836 */
1837 seg_end = CELL_DN ((char *) seg_org + size, span);
1838
1839 /* Find the right place and insert the segment record.
1840 *
1841 */
1842 for (new_seg_index = 0;
1843 ( (new_seg_index < scm_n_heap_segs)
1844 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1845 new_seg_index++)
1846 ;
1847
1848 {
1849 int i;
1850 for (i = scm_n_heap_segs; i > new_seg_index; --i)
1851 scm_heap_table[i] = scm_heap_table[i - 1];
1852 }
1853
1854 ++scm_n_heap_segs;
1855
1856 scm_heap_table[new_seg_index].span = span;
1857 scm_heap_table[new_seg_index].freelist = freelist;
1858 scm_heap_table[new_seg_index].bounds[0] = ptr;
1859 scm_heap_table[new_seg_index].bounds[1] = seg_end;
1860
1861
1862 /* Compute the least valid object pointer w/in this segment
1863 */
1864 ptr = CELL_UP (ptr, span);
1865
1866
1867 /*n_new_cells*/
1868 n_new_cells = seg_end - ptr;
1869
1870 freelist->heap_size += n_new_cells;
1871
1872 /* Partition objects in this segment into clusters */
1873 {
1874 SCM clusters;
1875 SCM *clusterp = &clusters;
1876 int n_cluster_cells = span * freelist->cluster_size;
1877
1878 while (n_new_cells > span) /* at least one spine + one freecell */
1879 {
1880 /* Determine end of cluster
1881 */
1882 if (n_new_cells >= n_cluster_cells)
1883 {
1884 seg_end = ptr + n_cluster_cells;
1885 n_new_cells -= n_cluster_cells;
1886 }
1887 else
1888 /* [cmm] looks like the segment size doesn't divide cleanly by
1889 cluster size. bad cmm! */
1890 abort();
1891
1892 /* Allocate cluster spine
1893 */
1894 *clusterp = PTR2SCM (ptr);
1895 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
1896 clusterp = SCM_CDRLOC (*clusterp);
1897 ptr += span;
1898
1899 while (ptr < seg_end)
1900 {
1901 SCM scmptr = PTR2SCM (ptr);
1902
1903 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1904 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
1905 ptr += span;
1906 }
1907
1908 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
1909 }
1910
1911 /* Patch up the last cluster pointer in the segment
1912 * to join it to the input freelist.
1913 */
1914 *clusterp = freelist->clusters;
1915 freelist->clusters = clusters;
1916 }
1917
1918 #ifdef DEBUGINFO
1919 fprintf (stderr, "H");
1920 #endif
1921 return size;
1922 }
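/* Editorial picture of the cluster layout built above: each cluster
 * starts with one spine cell whose CAR points at the first free cell
 * and whose CDR links to the next cluster (the loop leaves clusterp
 * pointing at the spine's CDR slot); the free cells themselves form a
 * CDR-linked chain of scm_tc_free_cell cells ending in SCM_EOL:
 *
 *   spine:      [ car -> first free cell | cdr -> next cluster ]
 *   free cells: [ tc_free_cell | next ] -> ... -> [ tc_free_cell | SCM_EOL ]
 */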
1923
1924 static scm_sizet
1925 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
1926 {
1927 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
1928
1929 return
1930 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
1931 + ALIGNMENT_SLACK (freelist);
1932 }
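/* Editorial worked example: for the 1-cell master freelist,
 * CLUSTER_SIZE_IN_BYTES is 2000 * 1 * 8 = 16000 bytes (assuming
 * 8-byte cells) and ALIGNMENT_SLACK is 7, so
 * round_to_cluster_size (freelist, 20000)
 *   = ((20000 + 15999) / 16000) * 16000 + 7 = 32007,
 * i.e. room for two whole clusters plus the bytes init_heap_seg may
 * lose to CELL_UP alignment.
 */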
1933
1934 static void
1935 alloc_some_heap (scm_freelist_t *freelist)
1936 {
1937 scm_heap_seg_data_t * tmptable;
1938 SCM_CELLPTR ptr;
1939 long len;
1940
1941 /* Critical code sections (such as the garbage collector)
1942 * aren't supposed to add heap segments.
1943 */
1944 if (scm_gc_heap_lock)
1945 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
1946
1947 /* Expand the heap tables to have room for the new segment.
1948 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1949 * only if the allocation of the segment itself succeeds.
1950 */
1951 len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
1952
1953 SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
1954 realloc ((char *)scm_heap_table, len)));
1955 if (!tmptable)
1956 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
1957 else
1958 scm_heap_table = tmptable;
1959
1960
1961 /* Pick a size for the new heap segment.
1962 * The rule for picking the size of a segment is explained in
1963 * gc.h
1964 */
1965 {
1966 /* Assure that the new segment is predicted to be large enough.
1967 *
1968 * New yield should at least equal GC fraction of new heap size, i.e.
1969 *
1970 * y + dh > f * (h + dh)
1971 *
1972 * y : yield
1973 * f : min yield fraction
1974 * h : heap size
1975 * dh : size of new heap segment
1976 *
1977 * This gives dh > (f * h - y) / (1 - f)
1978 */
    int f = freelist->min_yield_fraction;
    long h = SCM_HEAP_SIZE;
    long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%ld < %ld)", len, min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    scm_sizet smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition:  if malloc cannot satisfy the
     * request, retry with successively halved sizes until either a
     * segment is obtained or the request falls below the smallest
     * useful size. */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
           && (len >= smallest))
      {
        scm_sizet rounded_len = round_to_cluster_size (freelist, len);
        SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
        if (ptr)
          {
            init_heap_seg (ptr, rounded_len, freelist);
            return;
          }
        len /= 2;
      }
  }

  scm_wta (SCM_UNDEFINED, "could not grow", "heap");
}


/* Walk the whole heap and, in every gloc cell whose variable is named
   NAME (or in every gloc, if NAME is #t), reset word 0 to the plain
   symbol -- apparently so that the memoized global reference is undone
   and the variable is looked up afresh.  */
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
            "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1, name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
        {
          SCM cell = PTR2SCM (p);
          if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
            {
              /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
               * struct cell.  See the corresponding comment in scm_gc_mark.
               */
              scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
              SCM gloc_car = SCM_PACK (word0); /* access as gloc */
              SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
              if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
                  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
                {
                  SCM_SET_CELL_OBJECT_0 (cell, name);
                }
            }
          ++p;
        }
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME


\f
/* {GC Protection Helper Functions}
 */


/* This (intentionally empty) function is called to keep the object in
   *PTR alive:  taking a variable's address and passing it across a real
   function call keeps the variable from being optimised away before
   this point, so the conservative GC can still find the object.  */
void
scm_remember (SCM *ptr)
{ /* empty */ }


/*
  These functions prevent the garbage collector from reclaiming the
  arguments after the first one:  the variadic arguments stay live on
  the stack for the duration of the call, so the conservative
  stack-scanning GC keeps them marked.  A compiler hint for keeping
  values alive would be a cleaner solution.  --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}

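/* Hypothetical usage sketch:  return a freshly computed value while
   making sure OBJ itself survives until the call has finished:

     return scm_return_first (SCM_CAR (obj), obj);

   OBJ stays live for the duration of the call because it is an
   argument on the (conservatively scanned) stack.  */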

SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}


/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_unprotect_object (OBJ).  Calls to scm_protect_object and
   scm_unprotect_object nest:  an object that has been protected several
   times must be unprotected the same number of times before it actually
   becomes collectable again.  It is an error to unprotect an object more
   often than it has been protected.  The function scm_protect_object
   returns OBJ.
*/

/* Implementation note:  For every object X, there is a counter which
   scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
*/

SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}
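
/* Hypothetical usage sketch of the nesting behaviour:

     scm_protect_object (obj);      counter for OBJ becomes 1
     scm_protect_object (obj);      counter becomes 2
     scm_unprotect_object (obj);    counter back to 1, OBJ still protected
     scm_unprotect_object (obj);    counter reaches 0, entry is removed

   Only after the final call may OBJ be reclaimed (once no other
   references remain).  */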


/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}

int terminating;

/* Called on process termination.  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Do not know how to set up a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}

\f
/* Allocate an initial heap segment of INIT_HEAP_SIZE bytes for FREELIST,
   falling back to the default SCM_HEAP_SEG_SIZE if that fails, and set
   the freelist's min_yield from its min_yield_fraction.  Returns 0 on
   success, 1 if neither allocation succeeded.  */
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                      rounded_size,
                      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                          rounded_size,
                          freelist))
        return 1;
    }
  else
    scm_expmem = 1;

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
                           / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}

\f
static void
init_freelist (scm_freelist_t *freelist,
               int span,
               int cluster_size,
               int min_yield)
{
  freelist->clusters = SCM_EOL;
  /* The + 1 makes room for the cluster spine cell, so that each cluster
     still yields CLUSTER_SIZE usable free cells.  */
  freelist->cluster_size = cluster_size + 1;
  freelist->left_to_collect = 0;
  freelist->clusters_allocated = 0;
  freelist->min_yield = 0;
  freelist->min_yield_fraction = min_yield;
  freelist->span = span;
  freelist->collected = 0;
  freelist->collected_1 = 0;
  freelist->heap_size = 0;
}

/* Initialise the storage subsystem:  protection roots, the two master
   freelists, the heap table with its initial segments, the GC hooks and
   the port table.  Returns 0 on success, 1 on failure.  */
int
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
                  scm_sizet init_heap_size_2, int gc_trigger_2,
                  scm_sizet max_segment_size)
{
  scm_sizet j;

  if (!init_heap_size_1)
    init_heap_size_1 = scm_default_init_heap_size_1;
  if (!init_heap_size_2)
    init_heap_size_2 = scm_default_init_heap_size_2;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  init_freelist (&scm_master_freelist,
                 1, SCM_CLUSTER_SIZE_1,
                 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
  init_freelist (&scm_master_freelist2,
                 2, SCM_CLUSTER_SIZE_2,
                 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
  scm_max_segment_size
    = max_segment_size ? max_segment_size : scm_default_max_segment_size;

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  scm_heap_table = ((scm_heap_seg_data_t *)
                    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));

  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change.  do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  /* Tie the knot, so that cdring down scm_undefineds never falls off
     the end.  */
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  /* scm_protects maps each protected object to its protection count;
     see scm_protect_object above.  */
  scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
  scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif
  return 0;
}

\f

SCM scm_after_gc_hook;

#if (SCM_DEBUG_DEPRECATED == 0)
static SCM scm_gc_vcell;  /* the vcell for gc-thunk.  */
#endif /* SCM_DEBUG_DEPRECATED == 0 */
static SCM gc_async;


/* The function gc_async_thunk causes the execution of the after-gc-hook.
 * It is run after the gc, as soon as pending asynchronous events are
 * handled by the evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);

#if (SCM_DEBUG_DEPRECATED == 0)

  /* The following code will be removed in Guile 1.5.  */
  if (SCM_NFALSEP (scm_gc_vcell))
    {
      SCM proc = SCM_CDR (scm_gc_vcell);

      if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
        scm_apply (proc, SCM_EOL, SCM_EOL);
    }

#endif /* SCM_DEBUG_DEPRECATED == 0 */

  return SCM_UNSPECIFIED;
}
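
/* From Scheme, the hook set up above can be used like this
   (illustrative):

     (add-hook! after-gc-hook (lambda () (display "gc!\n")))

   Because the thunk is dispatched through the async machinery, it runs
   at a safe point after collection, not from inside the collector.  */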


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the
 * end of the garbage collection.  Its only purpose is to mark the
 * gc_async, which eventually leads to the execution of gc_async_thunk.
 */
static void *
mark_gc_async (void * hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}


void
scm_init_gc ()
{
  SCM after_gc_thunk;

  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

#if (SCM_DEBUG_DEPRECATED == 0)
  scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
#endif /* SCM_DEBUG_DEPRECATED == 0 */
  /* Dirk:FIXME:: We don't really want a binding here.  */
  after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/