libguile/gc.c
1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 \f
48 #include <stdio.h>
49 #include "libguile/_scm.h"
50 #include "libguile/stime.h"
51 #include "libguile/stackchk.h"
52 #include "libguile/struct.h"
53 #include "libguile/smob.h"
54 #include "libguile/unif.h"
55 #include "libguile/async.h"
56 #include "libguile/ports.h"
57 #include "libguile/root.h"
58 #include "libguile/strings.h"
59 #include "libguile/vectors.h"
60 #include "libguile/weaks.h"
61 #include "libguile/hashtab.h"
62
63 #include "libguile/validate.h"
64 #include "libguile/gc.h"
65
66 #ifdef GUILE_DEBUG_MALLOC
67 #include "libguile/debug-malloc.h"
68 #endif
69
70 #ifdef HAVE_MALLOC_H
71 #include <malloc.h>
72 #endif
73
74 #ifdef HAVE_UNISTD_H
75 #include <unistd.h>
76 #endif
77
78 #ifdef __STDC__
79 #include <stdarg.h>
80 #define var_start(x, y) va_start(x, y)
81 #else
82 #include <varargs.h>
83 #define var_start(x, y) va_start(x)
84 #endif
85
86 \f
87 /* {heap tuning parameters}
88 *
89 * These are parameters for controlling memory allocation. The heap
 90  * is the area out of which cons pairs (scm_cons) and object headers are allocated.
91 *
92 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
93 * 64 bit machine. The units of the _SIZE parameters are bytes.
94 * Cons pairs and object headers occupy one heap cell.
95 *
96 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
97 * allocated initially the heap will grow by half its current size
98 * each subsequent time more heap is needed.
99 *
100 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
101 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
102 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
103 * is in scm_init_storage() and alloc_some_heap() in sys.c
104 *
105 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
106 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
107 *
108 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
109 * is needed.
110 *
111 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
112 * trigger a GC.
113 *
114 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
115 * reclaimed by a GC triggered by must_malloc. If less than this is
116 * reclaimed, the trigger threshold is raised. [I don't know what a
117 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 118  * work around an oscillation that caused almost constant GC.]
119 */
120
121 /*
122 * Heap size 45000 and 40% min yield gives quick startup and no extra
123 * heap allocation. Having higher values on min yield may lead to
 124  * large heaps, especially if the code varies its maximum
 125  * consumption between different freelists.
126 */
127 int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell));
128 int scm_default_min_yield_1 = 40;
129 #define SCM_CLUSTER_SIZE_1 2000L
130
131 int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell));
132 /* The following value may seem large, but note that if we get to GC at
133 * all, this means that we have a numerically intensive application
134 */
135 int scm_default_min_yield_2 = 40;
136 #define SCM_CLUSTER_SIZE_2 1000L
137
138 int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
139
140 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
141 #ifdef _QC
142 # define SCM_HEAP_SEG_SIZE 32768L
143 #else
144 # ifdef sequent
145 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
146 # else
147 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
148 # endif
149 #endif
150 /* Make heap grow with factor 1.5 */
151 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
152 #define SCM_INIT_MALLOC_LIMIT 100000
153 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
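
/* Worked example (assuming 8-byte cells, i.e. a 32 bit machine):
 * scm_default_init_heap_size_1 above corresponds to 45000 cells, or
 * 360000 bytes.  Once that initial heap is exhausted, SCM_EXPHEAP adds
 * half of the current heap size, so the heap grows roughly
 * 360000 -> 540000 -> 810000 bytes and so on, each new segment being
 * clamped to scm_max_segment_size (by default a little under 2 Mb).
 */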
154
155 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
156 bounds for allocated storage */
157
158 #ifdef PROT386
159 /*in 386 protected mode we must only adjust the offset */
160 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
161 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
162 #else
163 # ifdef _UNICOS
164 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
165 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
166 # else
167 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
168 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
169 # endif /* UNICOS */
170 #endif /* PROT386 */
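
/* Illustration of the generic (non-PROT386, non-UNICOS) case with
 * 8-byte cells: for span == 1 the mask is ~7L, so CELL_UP (0x1003, 1)
 * rounds up to 0x1008 and CELL_DN (0x1003, 1) rounds down to 0x1000;
 * for span == 2 (double cells) the rounding is to 16-byte boundaries.
 */
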
171 #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
172 #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
173 #define SCM_HEAP_SIZE \
174 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
175 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
176
177
178 \f
179 /* scm_freelists
180 */
181
182 typedef struct scm_freelist_t {
183 /* collected cells */
184 SCM cells;
185 /* number of cells left to collect before cluster is full */
186 unsigned int left_to_collect;
187 /* number of clusters which have been allocated */
188 unsigned int clusters_allocated;
189 /* a list of freelists, each of size cluster_size,
190 * except the last one which may be shorter
191 */
192 SCM clusters;
193 SCM *clustertail;
194 /* this is the number of objects in each cluster, including the spine cell */
195 int cluster_size;
 196   /* indicates that we should grow the heap instead of GC'ing
197 */
198 int grow_heap_p;
199 /* minimum yield on this list in order not to grow the heap
200 */
201 long min_yield;
202 /* defines min_yield as percent of total heap size
203 */
204 int min_yield_fraction;
205 /* number of cells per object on this list */
206 int span;
207 /* number of collected cells during last GC */
208 long collected;
209 /* number of collected cells during penultimate GC */
210 long collected_1;
211 /* total number of cells in heap segments
212 * belonging to this list.
213 */
214 long heap_size;
215 } scm_freelist_t;
216
217 SCM scm_freelist = SCM_EOL;
218 scm_freelist_t scm_master_freelist = {
219 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
220 };
221 SCM scm_freelist2 = SCM_EOL;
222 scm_freelist_t scm_master_freelist2 = {
223 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
224 };
225
226 /* scm_mtrigger
227 * is the number of bytes of must_malloc allocation needed to trigger gc.
228 */
229 unsigned long scm_mtrigger;
230
231
232 /* scm_gc_heap_lock
233 * If set, don't expand the heap. Set only during gc, during which no allocation
234 * is supposed to take place anyway.
235 */
236 int scm_gc_heap_lock = 0;
237
238 /* GC Blocking
239 * Don't pause for collection if this is set -- just
240 * expand the heap.
241 */
242
243 int scm_block_gc = 1;
244
245 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
246 * collection (GC) more space is allocated for the heap.
247 */
248 #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
249
250 /* During collection, this accumulates objects holding
251 * weak references.
252 */
253 SCM scm_weak_vectors;
254
255 /* GC Statistics Keeping
256 */
257 unsigned long scm_cells_allocated = 0;
258 long scm_mallocated = 0;
259 unsigned long scm_gc_cells_collected;
260 unsigned long scm_gc_yield;
261 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
262 unsigned long scm_gc_malloc_collected;
263 unsigned long scm_gc_ports_collected;
264 unsigned long scm_gc_rt;
265 unsigned long scm_gc_time_taken = 0;
266
267 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
268 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
269 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
270 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
271 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
272 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
273
274 typedef struct scm_heap_seg_data_t
275 {
276 /* lower and upper bounds of the segment */
277 SCM_CELLPTR bounds[2];
278
279 /* address of the head-of-freelist pointer for this segment's cells.
280 All segments usually point to the same one, scm_freelist. */
281 scm_freelist_t *freelist;
282
283 /* number of cells per object in this segment */
284 int span;
285 } scm_heap_seg_data_t;
286
287
288
289 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
290 static void alloc_some_heap (scm_freelist_t *);
291
292
293 \f
294 /* Debugging functions. */
295
296 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
297
298 /* Return the number of the heap segment containing CELL. */
299 static int
300 which_seg (SCM cell)
301 {
302 int i;
303
304 for (i = 0; i < scm_n_heap_segs; i++)
305 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
306 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
307 return i;
308 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
309 SCM_UNPACK (cell));
310 abort ();
311 }
312
313
314 static void
315 map_free_list (scm_freelist_t *master, SCM freelist)
316 {
317 int last_seg = -1, count = 0;
318 SCM f;
319
320 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f))
321 {
322 int this_seg = which_seg (f);
323
324 if (this_seg != last_seg)
325 {
326 if (last_seg != -1)
327 fprintf (stderr, " %5d %d-cells in segment %d\n",
328 count, master->span, last_seg);
329 last_seg = this_seg;
330 count = 0;
331 }
332 count++;
333 }
334 if (last_seg != -1)
335 fprintf (stderr, " %5d %d-cells in segment %d\n",
336 count, master->span, last_seg);
337 }
338
339 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
340 (),
341 "Print debugging information about the free-list.\n"
342 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
343 #define FUNC_NAME s_scm_map_free_list
344 {
345 int i;
346 fprintf (stderr, "%d segments total (%d:%d",
347 scm_n_heap_segs,
348 scm_heap_table[0].span,
349 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
350 for (i = 1; i < scm_n_heap_segs; i++)
351 fprintf (stderr, ", %d:%d",
352 scm_heap_table[i].span,
353 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
354 fprintf (stderr, ")\n");
355 map_free_list (&scm_master_freelist, scm_freelist);
356 map_free_list (&scm_master_freelist2, scm_freelist2);
357 fflush (stderr);
358
359 return SCM_UNSPECIFIED;
360 }
361 #undef FUNC_NAME
362
363 static int last_cluster;
364 static int last_size;
365
366 static int
367 free_list_length (char *title, int i, SCM freelist)
368 {
369 SCM ls;
370 int n = 0;
371 for (ls = freelist; SCM_NNULLP (ls); ls = SCM_CDR (ls))
372 if (SCM_CELL_TYPE (ls) == scm_tc_free_cell)
373 ++n;
374 else
375 {
376 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
377 abort ();
378 }
379 if (n != last_size)
380 {
381 if (i > 0)
382 {
383 if (last_cluster == i - 1)
384 fprintf (stderr, "\t%d\n", last_size);
385 else
386 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
387 }
388 if (i >= 0)
389 fprintf (stderr, "%s %d", title, i);
390 else
391 fprintf (stderr, "%s\t%d\n", title, n);
392 last_cluster = i;
393 last_size = n;
394 }
395 return n;
396 }
397
398 static void
399 free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
400 {
401 SCM clusters;
402 int i = 0, len, n = 0;
403 fprintf (stderr, "%s\n\n", title);
404 n += free_list_length ("free list", -1, freelist);
405 for (clusters = master->clusters;
406 SCM_NNULLP (clusters);
407 clusters = SCM_CDR (clusters))
408 {
409 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
410 n += len;
411 }
412 if (last_cluster == i - 1)
413 fprintf (stderr, "\t%d\n", last_size);
414 else
415 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
416 fprintf (stderr, "\ntotal %d objects\n\n", n);
417 }
418
419 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
420 (),
421 "Print debugging information about the free-list.\n"
422 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
423 #define FUNC_NAME s_scm_free_list_length
424 {
425 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
426 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
427 return SCM_UNSPECIFIED;
428 }
429 #undef FUNC_NAME
430
431 #endif
432
433 #ifdef GUILE_DEBUG_FREELIST
434
435 /* Number of calls to SCM_NEWCELL since startup. */
436 static unsigned long scm_newcell_count;
437 static unsigned long scm_newcell2_count;
438
439 /* Search freelist for anything that isn't marked as a free cell.
440 Abort if we find something. */
441 static void
442 scm_check_freelist (SCM freelist)
443 {
444 SCM f;
445 int i = 0;
446
447 for (f = freelist; SCM_NIMP (f); f = SCM_CDR (f), i++)
448 if (SCM_CAR (f) != (SCM) scm_tc_free_cell)
449 {
450 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
451 scm_newcell_count, i);
452 fflush (stderr);
453 abort ();
454 }
455 }
456
457 static int scm_debug_check_freelist = 0;
458
459 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
460 (SCM flag),
461 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
462 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
463 "compile-time flag was selected.\n")
464 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
465 {
466 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
467 return SCM_UNSPECIFIED;
468 }
469 #undef FUNC_NAME
470
471
472 SCM
473 scm_debug_newcell (void)
474 {
475 SCM new;
476
477 scm_newcell_count++;
478 if (scm_debug_check_freelist)
479 {
480 scm_check_freelist (scm_freelist);
481 scm_gc();
482 }
483
484 /* The rest of this is supposed to be identical to the SCM_NEWCELL
485 macro. */
486 if (SCM_IMP (scm_freelist))
487 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
488 else
489 {
490 new = scm_freelist;
491 scm_freelist = SCM_CDR (scm_freelist);
492 SCM_SETCAR (new, scm_tc16_allocated);
493 }
494
495 return new;
496 }
497
498 SCM
499 scm_debug_newcell2 (void)
500 {
501 SCM new;
502
503 scm_newcell2_count++;
504 if (scm_debug_check_freelist)
505 {
506 scm_check_freelist (scm_freelist2);
507 scm_gc ();
508 }
509
510 /* The rest of this is supposed to be identical to the SCM_NEWCELL
511 macro. */
512 if (SCM_IMP (scm_freelist2))
513 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
514 else
515 {
516 new = scm_freelist2;
517 scm_freelist2 = SCM_CDR (scm_freelist2);
518 SCM_SETCAR (new, scm_tc16_allocated);
519 }
520
521 return new;
522 }
523
524 #endif /* GUILE_DEBUG_FREELIST */
525
526 \f
527
528 static unsigned long
529 master_cells_allocated (scm_freelist_t *master)
530 {
531 int objects = master->clusters_allocated * (master->cluster_size - 1);
532 if (SCM_NULLP (master->clusters))
533 objects -= master->left_to_collect;
534 return master->span * objects;
535 }
536
537 static unsigned long
538 freelist_length (SCM freelist)
539 {
540 int n;
541 for (n = 0; SCM_NNULLP (freelist); freelist = SCM_CDR (freelist))
542 ++n;
543 return n;
544 }
545
546 static unsigned long
547 compute_cells_allocated ()
548 {
549 return (scm_cells_allocated
550 + master_cells_allocated (&scm_master_freelist)
551 + master_cells_allocated (&scm_master_freelist2)
552 - scm_master_freelist.span * freelist_length (scm_freelist)
553 - scm_master_freelist2.span * freelist_length (scm_freelist2));
554 }
555
556 /* {Scheme Interface to GC}
557 */
558
559 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
560 (),
561 "Returns an association list of statistics about Guile's current use of storage. ")
562 #define FUNC_NAME s_scm_gc_stats
563 {
564 int i;
565 int n;
566 SCM heap_segs;
567 long int local_scm_mtrigger;
568 long int local_scm_mallocated;
569 long int local_scm_heap_size;
570 long int local_scm_cells_allocated;
571 long int local_scm_gc_time_taken;
572 SCM answer;
573
574 SCM_DEFER_INTS;
575
576 ++scm_block_gc;
577
578 retry:
579 heap_segs = SCM_EOL;
580 n = scm_n_heap_segs;
581 for (i = scm_n_heap_segs; i--; )
582 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
583 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
584 heap_segs);
585 if (scm_n_heap_segs != n)
586 goto retry;
587
588 --scm_block_gc;
589
590 /* Below, we cons to produce the resulting list. We want a snapshot of
591 * the heap situation before consing.
592 */
593 local_scm_mtrigger = scm_mtrigger;
594 local_scm_mallocated = scm_mallocated;
595 local_scm_heap_size = SCM_HEAP_SIZE;
596 local_scm_cells_allocated = compute_cells_allocated ();
597 local_scm_gc_time_taken = scm_gc_time_taken;
598
599 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
600 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
601 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
602 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
603 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
604 scm_cons (sym_heap_segments, heap_segs),
605 SCM_UNDEFINED);
606 SCM_ALLOW_INTS;
607 return answer;
608 }
609 #undef FUNC_NAME
610
611
612 void
613 scm_gc_start (const char *what)
614 {
615 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
616 scm_gc_cells_collected = 0;
617 scm_gc_yield_1 = scm_gc_yield;
618 scm_gc_yield = (scm_cells_allocated
619 + master_cells_allocated (&scm_master_freelist)
620 + master_cells_allocated (&scm_master_freelist2));
621 scm_gc_malloc_collected = 0;
622 scm_gc_ports_collected = 0;
623 }
624
625
626 void
627 scm_gc_end ()
628 {
629 scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
630 scm_gc_time_taken += scm_gc_rt;
631 }
632
633
634 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
635 (SCM obj),
636 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
637 "returned by this function for @var{obj}")
638 #define FUNC_NAME s_scm_object_address
639 {
640 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
641 }
642 #undef FUNC_NAME
643
644
645 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
646 (),
647 "Scans all of SCM objects and reclaims for further use those that are\n"
648 "no longer accessible.")
649 #define FUNC_NAME s_scm_gc
650 {
651 SCM_DEFER_INTS;
652 scm_igc ("call");
653 SCM_ALLOW_INTS;
654 return SCM_UNSPECIFIED;
655 }
656 #undef FUNC_NAME
657
658
659 \f
660 /* {C Interface For When GC is Triggered}
661 */
662
663 static void
664 adjust_min_yield (scm_freelist_t *freelist)
665 {
666 /* min yield is adjusted upwards so that next predicted total yield
667 * (allocated cells actually freed by GC) becomes
668 * `min_yield_fraction' of total heap size. Note, however, that
669 * the absolute value of min_yield will correspond to `collected'
670 * on one master (the one which currently is triggering GC).
671 *
672 * The reason why we look at total yield instead of cells collected
673 * on one list is that we want to take other freelists into account.
674 * On this freelist, we know that (local) yield = collected cells,
675 * but that's probably not the case on the other lists.
676 *
677 * (We might consider computing a better prediction, for example
 678    * by computing an average over multiple GCs.)
679 */
680 if (freelist->min_yield_fraction)
681 {
682 /* Pick largest of last two yields. */
683 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
684 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
685 #ifdef DEBUGINFO
686 fprintf (stderr, " after GC = %d, delta = %d\n",
687 scm_cells_allocated,
688 delta);
689 #endif
690 if (delta > 0)
691 freelist->min_yield += delta;
692 }
693 }
694
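/* Example of the adjustment above (illustrative numbers): with a
 * total heap of 1000000 cells and min_yield_fraction 40, the target
 * yield is 400000 cells.  If the larger of the last two total yields
 * was only 250000 cells, delta is 150000 and min_yield rises by that
 * amount, which in turn makes gc_sweep_freelist_finish more likely to
 * set grow_heap_p on the next collection.
 */
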
695 /* When we get POSIX threads support, the master will be global and
696 * common while the freelist will be individual for each thread.
697 */
698
699 SCM
700 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
701 {
702 SCM cell;
703 ++scm_ints_disabled;
704 do
705 {
706 if (SCM_NULLP (master->clusters))
707 {
708 if (master->grow_heap_p)
709 {
710 master->grow_heap_p = 0;
711 alloc_some_heap (master);
712 }
713 else
714 {
715 #ifdef DEBUGINFO
716 fprintf (stderr, "allocated = %d, ",
717 scm_cells_allocated
718 + master_cells_allocated (&scm_master_freelist)
719 + master_cells_allocated (&scm_master_freelist2));
720 #endif
721 scm_igc ("cells");
722 adjust_min_yield (master);
723 }
724 }
725 cell = SCM_CAR (master->clusters);
726 master->clusters = SCM_CDR (master->clusters);
727 ++master->clusters_allocated;
728 }
729 while (SCM_NULLP (cell));
730 --scm_ints_disabled;
731 *freelist = SCM_CDR (cell);
732 SCM_SET_CELL_TYPE (cell, scm_tc16_allocated);
733 return cell;
734 }
735
736 #if 0
737 /* This is a support routine which can be used to reserve a cluster
738 * for some special use, such as debugging. It won't be useful until
739 * free cells are preserved between garbage collections.
740 */
741
742 void
743 scm_alloc_cluster (scm_freelist_t *master)
744 {
745 SCM freelist, cell;
746 cell = scm_gc_for_newcell (master, &freelist);
747 SCM_SETCDR (cell, freelist);
748 return cell;
749 }
750 #endif
751
752
753 scm_c_hook_t scm_before_gc_c_hook;
754 scm_c_hook_t scm_before_mark_c_hook;
755 scm_c_hook_t scm_before_sweep_c_hook;
756 scm_c_hook_t scm_after_sweep_c_hook;
757 scm_c_hook_t scm_after_gc_c_hook;
758
759 void
760 scm_igc (const char *what)
761 {
762 int j;
763
764 scm_c_hook_run (&scm_before_gc_c_hook, 0);
765 #ifdef DEBUGINFO
766 fprintf (stderr,
767 SCM_NULLP (scm_freelist)
768 ? "*"
769 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
770 #endif
771 #ifdef USE_THREADS
772 /* During the critical section, only the current thread may run. */
773 SCM_THREAD_CRITICAL_SECTION_START;
774 #endif
775
776 /* fprintf (stderr, "gc: %s\n", what); */
777
778 scm_gc_start (what);
779
780 if (!scm_stack_base || scm_block_gc)
781 {
782 scm_gc_end ();
783 return;
784 }
785
786 if (scm_mallocated < 0)
787 /* The byte count of allocated objects has underflowed. This is
788 probably because you forgot to report the sizes of objects you
789 have allocated, by calling scm_done_malloc or some such. When
790 the GC freed them, it subtracted their size from
791 scm_mallocated, which underflowed. */
792 abort ();
793
794 if (scm_gc_heap_lock)
795 /* We've invoked the collector while a GC is already in progress.
796 That should never happen. */
797 abort ();
798
799 ++scm_gc_heap_lock;
800
801 /* flush dead entries from the continuation stack */
802 {
803 int x;
804 int bound;
805 SCM * elts;
806 elts = SCM_VELTS (scm_continuation_stack);
807 bound = SCM_LENGTH (scm_continuation_stack);
808 x = SCM_INUM (scm_continuation_stack_ptr);
809 while (x < bound)
810 {
811 elts[x] = SCM_BOOL_F;
812 ++x;
813 }
814 }
815
816 scm_c_hook_run (&scm_before_mark_c_hook, 0);
817
818 #ifndef USE_THREADS
819
820 /* Protect from the C stack. This must be the first marking
821 * done because it provides information about what objects
822 * are "in-use" by the C code. "in-use" objects are those
823 * for which the values from SCM_LENGTH and SCM_CHARS must remain
824 * usable. This requirement is stricter than a liveness
825 * requirement -- in particular, it constrains the implementation
826 * of scm_vector_set_length_x.
827 */
828 SCM_FLUSH_REGISTER_WINDOWS;
829 /* This assumes that all registers are saved into the jmp_buf */
830 setjmp (scm_save_regs_gc_mark);
831 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
832 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
833 sizeof scm_save_regs_gc_mark)
834 / sizeof (SCM_STACKITEM)));
835
836 {
837 scm_sizet stack_len = scm_stack_size (scm_stack_base);
838 #ifdef SCM_STACK_GROWS_UP
839 scm_mark_locations (scm_stack_base, stack_len);
840 #else
841 scm_mark_locations (scm_stack_base - stack_len, stack_len);
842 #endif
843 }
844
845 #else /* USE_THREADS */
846
847 /* Mark every thread's stack and registers */
848 scm_threads_mark_stacks ();
849
850 #endif /* USE_THREADS */
851
852 /* FIXME: insert a phase to un-protect string-data preserved
853 * in scm_vector_set_length_x.
854 */
855
856 j = SCM_NUM_PROTECTS;
857 while (j--)
858 scm_gc_mark (scm_sys_protects[j]);
859
860 /* FIXME: we should have a means to register C functions to be run
861 * in different phases of GC
862 */
863 scm_mark_subr_table ();
864
865 #ifndef USE_THREADS
866 scm_gc_mark (scm_root->handle);
867 #endif
868
869 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
870
871 scm_gc_sweep ();
872
873 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
874
875 --scm_gc_heap_lock;
876 scm_gc_end ();
877
878 #ifdef USE_THREADS
879 SCM_THREAD_CRITICAL_SECTION_END;
880 #endif
881 scm_c_hook_run (&scm_after_gc_c_hook, 0);
882 }
883
884 \f
885
886 /* {Mark/Sweep}
887 */
888
889
890
891 /* Mark an object precisely.
892 */
893 void
894 scm_gc_mark (SCM p)
895 {
896 register long i;
897 register SCM ptr;
898
899 ptr = p;
900
901 gc_mark_loop:
902 if (SCM_IMP (ptr))
903 return;
904
905 gc_mark_nimp:
906 if (SCM_NCELLP (ptr))
907 scm_wta (ptr, "rogue pointer in heap", NULL);
908
909 switch (SCM_TYP7 (ptr))
910 {
911 case scm_tcs_cons_nimcar:
912 if (SCM_GCMARKP (ptr))
913 break;
914 SCM_SETGCMARK (ptr);
915 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
916 {
917 ptr = SCM_CAR (ptr);
918 goto gc_mark_nimp;
919 }
920 scm_gc_mark (SCM_CAR (ptr));
921 ptr = SCM_GCCDR (ptr);
922 goto gc_mark_nimp;
923 case scm_tcs_cons_imcar:
924 if (SCM_GCMARKP (ptr))
925 break;
926 SCM_SETGCMARK (ptr);
927 ptr = SCM_GCCDR (ptr);
928 goto gc_mark_loop;
929 case scm_tc7_pws:
930 if (SCM_GCMARKP (ptr))
931 break;
932 SCM_SETGCMARK (ptr);
933 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
934 ptr = SCM_GCCDR (ptr);
935 goto gc_mark_loop;
936 case scm_tcs_cons_gloc:
937 if (SCM_GCMARKP (ptr))
938 break;
939 SCM_SETGCMARK (ptr);
940 {
941 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
942 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
943 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
944 * pointer to a struct vtable data region. The fact that these are
 945              * accessed in the same way restricts the possibilities to change the
946 * data layout of structs or heap cells.
947 */
948 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
949 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
950 switch (vtable_data [scm_vtable_index_vcell])
951 {
952 default:
953 {
954 /* ptr is a gloc */
955 SCM gloc_car = SCM_PACK (word0);
956 scm_gc_mark (gloc_car);
957 ptr = SCM_GCCDR (ptr);
958 goto gc_mark_loop;
959 }
960 case 1: /* ! */
961 case 0: /* ! */
962 {
963 /* ptr is a struct */
964 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
965 int len = SCM_LENGTH (layout);
966 char * fields_desc = SCM_CHARS (layout);
967 /* We're using SCM_GCCDR here like STRUCT_DATA, except
968 that it removes the mark */
969 scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));
970
971 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
972 {
973 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
974 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
975 }
976 if (len)
977 {
978 int x;
979
980 for (x = 0; x < len - 2; x += 2, ++struct_data)
981 if (fields_desc[x] == 'p')
982 scm_gc_mark (SCM_PACK (*struct_data));
983 if (fields_desc[x] == 'p')
984 {
985 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
986 for (x = *struct_data; x; --x)
987 scm_gc_mark (SCM_PACK (*++struct_data));
988 else
989 scm_gc_mark (SCM_PACK (*struct_data));
990 }
991 }
992 if (vtable_data [scm_vtable_index_vcell] == 0)
993 {
994 vtable_data [scm_vtable_index_vcell] = 1;
995 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
996 goto gc_mark_loop;
997 }
998 }
999 }
1000 }
1001 break;
1002 case scm_tcs_closures:
1003 if (SCM_GCMARKP (ptr))
1004 break;
1005 SCM_SETGCMARK (ptr);
1006 if (SCM_IMP (SCM_CDR (ptr)))
1007 {
1008 ptr = SCM_CLOSCAR (ptr);
1009 goto gc_mark_nimp;
1010 }
1011 scm_gc_mark (SCM_CLOSCAR (ptr));
1012 ptr = SCM_GCCDR (ptr);
1013 goto gc_mark_nimp;
1014 case scm_tc7_vector:
1015 case scm_tc7_lvector:
1016 #ifdef CCLO
1017 case scm_tc7_cclo:
1018 #endif
1019 if (SCM_GC8MARKP (ptr))
1020 break;
1021 SCM_SETGC8MARK (ptr);
1022 i = SCM_LENGTH (ptr);
1023 if (i == 0)
1024 break;
1025 while (--i > 0)
1026 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1027 scm_gc_mark (SCM_VELTS (ptr)[i]);
1028 ptr = SCM_VELTS (ptr)[0];
1029 goto gc_mark_loop;
1030 case scm_tc7_contin:
1031       if (SCM_GC8MARKP (ptr))
1032         break;
1033 SCM_SETGC8MARK (ptr);
1034 if (SCM_VELTS (ptr))
1035 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
1036 (scm_sizet)
1037 (SCM_LENGTH (ptr) +
1038 (sizeof (SCM_STACKITEM) + -1 +
1039 sizeof (scm_contregs)) /
1040 sizeof (SCM_STACKITEM)));
1041 break;
1042 #ifdef HAVE_ARRAYS
1043 case scm_tc7_bvect:
1044 case scm_tc7_byvect:
1045 case scm_tc7_ivect:
1046 case scm_tc7_uvect:
1047 case scm_tc7_fvect:
1048 case scm_tc7_dvect:
1049 case scm_tc7_cvect:
1050 case scm_tc7_svect:
1051 #ifdef HAVE_LONG_LONGS
1052 case scm_tc7_llvect:
1053 #endif
1054 #endif
1055 case scm_tc7_string:
1056 SCM_SETGC8MARK (ptr);
1057 break;
1058
1059 case scm_tc7_substring:
1060 if (SCM_GC8MARKP(ptr))
1061 break;
1062 SCM_SETGC8MARK (ptr);
1063 ptr = SCM_CDR (ptr);
1064 goto gc_mark_loop;
1065
1066 case scm_tc7_wvect:
1067 if (SCM_GC8MARKP(ptr))
1068 break;
1069 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1070 scm_weak_vectors = ptr;
1071 SCM_SETGC8MARK (ptr);
1072 if (SCM_IS_WHVEC_ANY (ptr))
1073 {
1074 int x;
1075 int len;
1076 int weak_keys;
1077 int weak_values;
1078
1079 len = SCM_LENGTH (ptr);
1080 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1081 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1082
1083 for (x = 0; x < len; ++x)
1084 {
1085 SCM alist;
1086 alist = SCM_VELTS (ptr)[x];
1087
1088 /* mark everything on the alist except the keys or
1089 * values, according to weak_values and weak_keys. */
1090 while ( SCM_CONSP (alist)
1091 && !SCM_GCMARKP (alist)
1092 && SCM_CONSP (SCM_CAR (alist)))
1093 {
1094 SCM kvpair;
1095 SCM next_alist;
1096
1097 kvpair = SCM_CAR (alist);
1098 next_alist = SCM_CDR (alist);
1099 /*
1100 * Do not do this:
1101 * SCM_SETGCMARK (alist);
1102 * SCM_SETGCMARK (kvpair);
1103 *
1104 * It may be that either the key or value is protected by
1105 * an escaped reference to part of the spine of this alist.
1106 * If we mark the spine here, and only mark one or neither of the
1107 * key and value, they may never be properly marked.
1108 * This leads to a horrible situation in which an alist containing
1109 * freelist cells is exported.
1110 *
1111 * So only mark the spines of these arrays last of all marking.
1112 * If somebody confuses us by constructing a weak vector
1113 * with a circular alist then we are hosed, but at least we
1114 * won't prematurely drop table entries.
1115 */
1116 if (!weak_keys)
1117 scm_gc_mark (SCM_CAR (kvpair));
1118 if (!weak_values)
1119 scm_gc_mark (SCM_GCCDR (kvpair));
1120 alist = next_alist;
1121 }
1122 if (SCM_NIMP (alist))
1123 scm_gc_mark (alist);
1124 }
1125 }
1126 break;
1127
1128 case scm_tc7_msymbol:
1129 if (SCM_GC8MARKP(ptr))
1130 break;
1131 SCM_SETGC8MARK (ptr);
1132 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1133 ptr = SCM_SYMBOL_PROPS (ptr);
1134 goto gc_mark_loop;
1135 case scm_tc7_ssymbol:
1136 if (SCM_GC8MARKP(ptr))
1137 break;
1138 SCM_SETGC8MARK (ptr);
1139 break;
1140 case scm_tcs_subrs:
1141 break;
1142 case scm_tc7_port:
1143 i = SCM_PTOBNUM (ptr);
1144 if (!(i < scm_numptob))
1145 goto def;
1146 if (SCM_GC8MARKP (ptr))
1147 break;
1148 SCM_SETGC8MARK (ptr);
1149 if (SCM_PTAB_ENTRY(ptr))
1150 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
1151 if (scm_ptobs[i].mark)
1152 {
1153 ptr = (scm_ptobs[i].mark) (ptr);
1154 goto gc_mark_loop;
1155 }
1156 else
1157 return;
1158 break;
1159 case scm_tc7_smob:
1160 if (SCM_GC8MARKP (ptr))
1161 break;
1162 SCM_SETGC8MARK (ptr);
1163 switch (SCM_GCTYP16 (ptr))
1164 { /* should be faster than going through scm_smobs */
1165 case scm_tc_free_cell:
1166 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1167 case scm_tc16_allocated:
1168 case scm_tc16_big:
1169 case scm_tc16_real:
1170 case scm_tc16_complex:
1171 break;
1172 default:
1173 i = SCM_SMOBNUM (ptr);
1174 if (!(i < scm_numsmob))
1175 goto def;
1176 if (scm_smobs[i].mark)
1177 {
1178 ptr = (scm_smobs[i].mark) (ptr);
1179 goto gc_mark_loop;
1180 }
1181 else
1182 return;
1183 }
1184 break;
1185 default:
1186 def:scm_wta (ptr, "unknown type in ", "gc_mark");
1187 }
1188 }
1189
1190
1191 /* Mark a Region Conservatively
1192 */
1193
1194 void
1195 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1196 {
1197 register long m = n;
1198 register int i, j;
1199 register SCM_CELLPTR ptr;
1200
1201 while (0 <= --m)
1202 if (SCM_CELLP (* (SCM *) &x[m]))
1203 {
1204 ptr = SCM2PTR (* (SCM *) &x[m]);
1205 i = 0;
1206 j = scm_n_heap_segs - 1;
1207 if ( SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1208 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1209 {
1210 while (i <= j)
1211 {
1212 int seg_id;
1213 seg_id = -1;
1214 if ( (i == j)
1215 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1216 seg_id = i;
1217 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1218 seg_id = j;
1219 else
1220 {
1221 int k;
1222 k = (i + j) / 2;
1223 if (k == i)
1224 break;
1225 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1226 {
1227 j = k;
1228 ++i;
1229 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1230 continue;
1231 else
1232 break;
1233 }
1234 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1235 {
1236 i = k;
1237 --j;
1238 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1239 continue;
1240 else
1241 break;
1242 }
1243 }
1244 if (scm_heap_table[seg_id].span == 1
1245 || SCM_DOUBLE_CELLP (* (SCM *) &x[m]))
1246 scm_gc_mark (* (SCM *) &x[m]);
1247 break;
1248 }
1249
1250 }
1251 }
1252 }
1253
1254
1255 /* The function scm_cellp determines whether an SCM value can be regarded as a
1256 * pointer to a cell on the heap. Binary search is used in order to determine
1257 * the heap segment that contains the cell.
1258 */
1259 int
1260 scm_cellp (SCM value)
1261 {
1262 if (SCM_CELLP (value)) {
1263 scm_cell * ptr = SCM2PTR (value);
1264 unsigned int i = 0;
1265 unsigned int j = scm_n_heap_segs - 1;
1266
1267 while (i < j) {
1268 int k = (i + j) / 2;
1269 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1270 j = k;
1271 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1272 i = k + 1;
1273 }
1274 }
1275
1276 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1277 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1278 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
1279 return 1;
1280 } else {
1281 return 0;
1282 }
1283 } else {
1284 return 0;
1285 }
1286 }
1287
1288
1289 static void
1290 gc_sweep_freelist_start (scm_freelist_t *freelist)
1291 {
1292 freelist->cells = SCM_EOL;
1293 freelist->left_to_collect = freelist->cluster_size;
1294 freelist->clusters_allocated = 0;
1295 freelist->clusters = SCM_EOL;
1296 freelist->clustertail = &freelist->clusters;
1297 freelist->collected_1 = freelist->collected;
1298 freelist->collected = 0;
1299 }
1300
1301 static void
1302 gc_sweep_freelist_finish (scm_freelist_t *freelist)
1303 {
1304 int collected;
1305 *freelist->clustertail = freelist->cells;
1306 if (SCM_NNULLP (freelist->cells))
1307 {
1308 SCM c = freelist->cells;
1309 SCM_SETCAR (c, SCM_CDR (c));
1310 SCM_SETCDR (c, SCM_EOL);
1311 freelist->collected +=
1312 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1313 }
1314 scm_gc_cells_collected += freelist->collected;
1315
1316 /* Although freelist->min_yield is used to test freelist->collected
1317 * (which is the local GC yield for freelist), it is adjusted so
1318 * that *total* yield is freelist->min_yield_fraction of total heap
1319 * size. This means that a too low yield is compensated by more
1320 * heap on the list which is currently doing most work, which is
1321 * just what we want.
1322 */
1323 collected = SCM_MAX (freelist->collected_1, freelist->collected);
1324 freelist->grow_heap_p = (collected < freelist->min_yield);
1325 }
1326
1327 void
1328 scm_gc_sweep ()
1329 {
1330 register SCM_CELLPTR ptr;
1331 register SCM nfreelist;
1332 register scm_freelist_t *freelist;
1333 register long m;
1334 register int span;
1335 long i;
1336 scm_sizet seg_size;
1337
1338 m = 0;
1339
1340 gc_sweep_freelist_start (&scm_master_freelist);
1341 gc_sweep_freelist_start (&scm_master_freelist2);
1342
1343 for (i = 0; i < scm_n_heap_segs; i++)
1344 {
1345 register unsigned int left_to_collect;
1346 register scm_sizet j;
1347
1348 /* Unmarked cells go onto the front of the freelist this heap
1349 segment points to. Rather than updating the real freelist
1350 pointer as we go along, we accumulate the new head in
1351 nfreelist. Then, if it turns out that the entire segment is
1352 free, we free (i.e., malloc's free) the whole segment, and
1353 simply don't assign nfreelist back into the real freelist. */
1354 freelist = scm_heap_table[i].freelist;
1355 nfreelist = freelist->cells;
1356 left_to_collect = freelist->left_to_collect;
1357 span = scm_heap_table[i].span;
1358
1359 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1360 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1361 for (j = seg_size + span; j -= span; ptr += span)
1362 {
1363 SCM scmptr = PTR2SCM (ptr);
1364
1365 switch SCM_TYP7 (scmptr)
1366 {
1367 case scm_tcs_cons_gloc:
1368 {
1369 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1370 * struct or a gloc. See the corresponding comment in
1371 * scm_gc_mark.
1372 */
1373 scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
1374 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1375 if (SCM_GCMARKP (scmptr))
1376 {
1377 if (vtable_data [scm_vtable_index_vcell] == 1)
1378 vtable_data [scm_vtable_index_vcell] = 0;
1379 goto cmrkcontinue;
1380 }
1381 else
1382 {
1383 if (vtable_data [scm_vtable_index_vcell] == 0
1384 || vtable_data [scm_vtable_index_vcell] == 1)
1385 {
1386 scm_struct_free_t free
1387 = (scm_struct_free_t) vtable_data[scm_struct_i_free];
1388 m += free (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
1389 }
1390 }
1391 }
1392 break;
1393 case scm_tcs_cons_imcar:
1394 case scm_tcs_cons_nimcar:
1395 case scm_tcs_closures:
1396 case scm_tc7_pws:
1397 if (SCM_GCMARKP (scmptr))
1398 goto cmrkcontinue;
1399 break;
1400 case scm_tc7_wvect:
1401 if (SCM_GC8MARKP (scmptr))
1402 {
1403 goto c8mrkcontinue;
1404 }
1405 else
1406 {
1407 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1408 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
1409 break;
1410 }
1411
1412 case scm_tc7_vector:
1413 case scm_tc7_lvector:
1414 #ifdef CCLO
1415 case scm_tc7_cclo:
1416 #endif
1417 if (SCM_GC8MARKP (scmptr))
1418 goto c8mrkcontinue;
1419
1420 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1421 freechars:
1422 scm_must_free (SCM_CHARS (scmptr));
1423 /* SCM_SETCHARS(scmptr, 0);*/
1424 break;
1425 #ifdef HAVE_ARRAYS
1426 case scm_tc7_bvect:
1427 if SCM_GC8MARKP (scmptr)
1428 goto c8mrkcontinue;
1429 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1430 goto freechars;
1431 case scm_tc7_byvect:
1432 if SCM_GC8MARKP (scmptr)
1433 goto c8mrkcontinue;
1434 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1435 goto freechars;
1436 case scm_tc7_ivect:
1437 case scm_tc7_uvect:
1438 if SCM_GC8MARKP (scmptr)
1439 goto c8mrkcontinue;
1440 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1441 goto freechars;
1442 case scm_tc7_svect:
1443 if SCM_GC8MARKP (scmptr)
1444 goto c8mrkcontinue;
1445 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1446 goto freechars;
1447 #ifdef HAVE_LONG_LONGS
1448 case scm_tc7_llvect:
1449 if SCM_GC8MARKP (scmptr)
1450 goto c8mrkcontinue;
1451 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1452 goto freechars;
1453 #endif
1454 case scm_tc7_fvect:
1455 if SCM_GC8MARKP (scmptr)
1456 goto c8mrkcontinue;
1457 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1458 goto freechars;
1459 case scm_tc7_dvect:
1460 if SCM_GC8MARKP (scmptr)
1461 goto c8mrkcontinue;
1462 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1463 goto freechars;
1464 case scm_tc7_cvect:
1465 if SCM_GC8MARKP (scmptr)
1466 goto c8mrkcontinue;
1467 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1468 goto freechars;
1469 #endif
1470 case scm_tc7_substring:
1471 if (SCM_GC8MARKP (scmptr))
1472 goto c8mrkcontinue;
1473 break;
1474 case scm_tc7_string:
1475 if (SCM_GC8MARKP (scmptr))
1476 goto c8mrkcontinue;
1477 m += SCM_HUGE_LENGTH (scmptr) + 1;
1478 goto freechars;
1479 case scm_tc7_msymbol:
1480 if (SCM_GC8MARKP (scmptr))
1481 goto c8mrkcontinue;
1482 m += (SCM_LENGTH (scmptr) + 1
1483 + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
1484 scm_must_free ((char *)SCM_SLOTS (scmptr));
1485 break;
1486 case scm_tc7_contin:
1487 if SCM_GC8MARKP (scmptr)
1488 goto c8mrkcontinue;
1489 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
1490 if (SCM_VELTS (scmptr))
1491 goto freechars;
1492 case scm_tc7_ssymbol:
1493 if SCM_GC8MARKP(scmptr)
1494 goto c8mrkcontinue;
1495 break;
1496 case scm_tcs_subrs:
1497 continue;
1498 case scm_tc7_port:
1499 if SCM_GC8MARKP (scmptr)
1500 goto c8mrkcontinue;
1501 if SCM_OPENP (scmptr)
1502 {
1503 int k = SCM_PTOBNUM (scmptr);
1504 if (!(k < scm_numptob))
1505 goto sweeperr;
1506 /* Keep "revealed" ports alive. */
1507 if (scm_revealed_count (scmptr) > 0)
1508 continue;
1509 /* Yes, I really do mean scm_ptobs[k].free */
1510 /* rather than ftobs[k].close. .close */
1511 /* is for explicit CLOSE-PORT by user */
1512 m += (scm_ptobs[k].free) (scmptr);
1513 SCM_SETSTREAM (scmptr, 0);
1514 scm_remove_from_port_table (scmptr);
1515 scm_gc_ports_collected++;
1516 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1517 }
1518 break;
1519 case scm_tc7_smob:
1520 switch SCM_GCTYP16 (scmptr)
1521 {
1522 case scm_tc_free_cell:
1523 case scm_tc16_real:
1524 if SCM_GC8MARKP (scmptr)
1525 goto c8mrkcontinue;
1526 break;
1527 #ifdef SCM_BIGDIG
1528 case scm_tc16_big:
1529 if SCM_GC8MARKP (scmptr)
1530 goto c8mrkcontinue;
1531 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1532 goto freechars;
1533 #endif /* def SCM_BIGDIG */
1534 case scm_tc16_complex:
1535 if SCM_GC8MARKP (scmptr)
1536 goto c8mrkcontinue;
1537 m += 2 * sizeof (double);
1538 goto freechars;
1539 default:
1540 if SCM_GC8MARKP (scmptr)
1541 goto c8mrkcontinue;
1542
1543 {
1544 int k;
1545 k = SCM_SMOBNUM (scmptr);
1546 if (!(k < scm_numsmob))
1547 goto sweeperr;
1548 m += (scm_smobs[k].free) (scmptr);
1549 break;
1550 }
1551 }
1552 break;
1553 default:
1554 sweeperr:scm_wta (scmptr, "unknown type in ", "gc_sweep");
1555 }
1556 #if 0
1557 if (SCM_CAR (scmptr) == (SCM) scm_tc_free_cell)
1558 exit (2);
1559 #endif
1560 if (!--left_to_collect)
1561 {
1562 SCM_SETCAR (scmptr, nfreelist);
1563 *freelist->clustertail = scmptr;
1564 freelist->clustertail = SCM_CDRLOC (scmptr);
1565
1566 nfreelist = SCM_EOL;
1567 freelist->collected += span * freelist->cluster_size;
1568 left_to_collect = freelist->cluster_size;
1569 }
1570 else
1571 {
1572 /* Stick the new cell on the front of nfreelist. It's
1573 critical that we mark this cell as freed; otherwise, the
1574 conservative collector might trace it as some other type
1575 of object. */
1576 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1577 SCM_SETCDR (scmptr, nfreelist);
1578 nfreelist = scmptr;
1579 }
1580
1581 continue;
1582 c8mrkcontinue:
1583 SCM_CLRGC8MARK (scmptr);
1584 continue;
1585 cmrkcontinue:
1586 SCM_CLRGCMARK (scmptr);
1587 }
1588 #ifdef GC_FREE_SEGMENTS
1589 if (n == seg_size)
1590 {
1591 register long j;
1592
1593 freelist->heap_size -= seg_size;
1594 free ((char *) scm_heap_table[i].bounds[0]);
1595 scm_heap_table[i].bounds[0] = 0;
1596 for (j = i + 1; j < scm_n_heap_segs; j++)
1597 scm_heap_table[j - 1] = scm_heap_table[j];
1598 scm_n_heap_segs -= 1;
1599 i--; /* We need to scan the segment just moved. */
1600 }
1601 else
1602 #endif /* ifdef GC_FREE_SEGMENTS */
1603 {
1604 /* Update the real freelist pointer to point to the head of
1605 the list of free cells we've built for this segment. */
1606 freelist->cells = nfreelist;
1607 freelist->left_to_collect = left_to_collect;
1608 }
1609
1610 #ifdef GUILE_DEBUG_FREELIST
1611 scm_check_freelist (freelist == &scm_master_freelist
1612 ? scm_freelist
1613 : scm_freelist2);
1614 scm_map_free_list ();
1615 #endif
1616 }
1617
1618 gc_sweep_freelist_finish (&scm_master_freelist);
1619 gc_sweep_freelist_finish (&scm_master_freelist2);
1620
1621 /* When we move to POSIX threads private freelists should probably
1622 be GC-protected instead. */
1623 scm_freelist = SCM_EOL;
1624 scm_freelist2 = SCM_EOL;
1625
1626 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1627 scm_gc_yield -= scm_cells_allocated;
1628 scm_mallocated -= m;
1629 scm_gc_malloc_collected = m;
1630 }
1631
1632
1633 \f
1634
1635 /* {Front end to malloc}
1636 *
1637 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1638 *
1639  * These functions provide services comparable to malloc, realloc, and
1640 * free. They are for allocating malloced parts of scheme objects.
1641 * The primary purpose of the front end is to impose calls to gc.
1642 */
1643
1644
1645 /* scm_must_malloc
1646 * Return newly malloced storage or throw an error.
1647 *
1648 * The parameter WHAT is a string for error reporting.
1649 * If the threshold scm_mtrigger will be passed by this
1650 * allocation, or if the first call to malloc fails,
1651 * garbage collect -- on the presumption that some objects
1652 * using malloced storage may be collected.
1653 *
1654 * The limit scm_mtrigger may be raised by this allocation.
1655 */
1656 void *
1657 scm_must_malloc (scm_sizet size, const char *what)
1658 {
1659 void *ptr;
1660 unsigned long nm = scm_mallocated + size;
1661
1662 if (nm <= scm_mtrigger)
1663 {
1664 SCM_SYSCALL (ptr = malloc (size));
1665 if (NULL != ptr)
1666 {
1667 scm_mallocated = nm;
1668 #ifdef GUILE_DEBUG_MALLOC
1669 scm_malloc_register (ptr, what);
1670 #endif
1671 return ptr;
1672 }
1673 }
1674
1675 scm_igc (what);
1676
1677 nm = scm_mallocated + size;
1678 SCM_SYSCALL (ptr = malloc (size));
1679 if (NULL != ptr)
1680 {
1681 scm_mallocated = nm;
1682 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1683 if (nm > scm_mtrigger)
1684 scm_mtrigger = nm + nm / 2;
1685 else
1686 scm_mtrigger += scm_mtrigger / 2;
1687 }
1688 #ifdef GUILE_DEBUG_MALLOC
1689 scm_malloc_register (ptr, what);
1690 #endif
1691
1692 return ptr;
1693 }
1694
1695 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1696 return 0; /* never reached */
1697 }
1698
1699
1700 /* scm_must_realloc
1701 * is similar to scm_must_malloc.
1702 */
1703 void *
1704 scm_must_realloc (void *where,
1705 scm_sizet old_size,
1706 scm_sizet size,
1707 const char *what)
1708 {
1709 void *ptr;
1710 scm_sizet nm = scm_mallocated + size - old_size;
1711
1712 if (nm <= scm_mtrigger)
1713 {
1714 SCM_SYSCALL (ptr = realloc (where, size));
1715 if (NULL != ptr)
1716 {
1717 scm_mallocated = nm;
1718 #ifdef GUILE_DEBUG_MALLOC
1719 scm_malloc_reregister (where, ptr, what);
1720 #endif
1721 return ptr;
1722 }
1723 }
1724
1725 scm_igc (what);
1726
1727 nm = scm_mallocated + size - old_size;
1728 SCM_SYSCALL (ptr = realloc (where, size));
1729 if (NULL != ptr)
1730 {
1731 scm_mallocated = nm;
1732 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1733 if (nm > scm_mtrigger)
1734 scm_mtrigger = nm + nm / 2;
1735 else
1736 scm_mtrigger += scm_mtrigger / 2;
1737 }
1738 #ifdef GUILE_DEBUG_MALLOC
1739 scm_malloc_reregister (where, ptr, what);
1740 #endif
1741 return ptr;
1742 }
1743
1744 scm_wta (SCM_MAKINUM (size), (char *) SCM_NALLOC, what);
1745 return 0; /* never reached */
1746 }
1747
1748 void
1749 scm_must_free (void *obj)
1750 {
1751 #ifdef GUILE_DEBUG_MALLOC
1752 scm_malloc_unregister (obj);
1753 #endif
1754 if (obj)
1755 free (obj);
1756 else
1757 scm_wta (SCM_INUM0, "already free", "");
1758 }
1759
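
#if 0
/* Illustrative sketch only -- "buffer" and the helper names are
 * hypothetical.  Typical callers pair scm_must_malloc with
 * scm_must_free and let this front end account for the storage via
 * scm_mallocated and scm_mtrigger.
 */
static char *
make_buffer (scm_sizet len)
{
  /* May trigger a GC if the allocation would pass scm_mtrigger. */
  return (char *) scm_must_malloc (len, "buffer");
}

static void
drop_buffer (char *buf)
{
  /* Signals an error via scm_wta if buf is NULL. */
  scm_must_free (buf);
}
#endif
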
1760 /* Announce that there has been some malloc done that will be freed
1761 * during gc. A typical use is for a smob that uses some malloced
1762  * memory but cannot get it from scm_must_malloc (for whatever
1763 * reason). When a new object of this smob is created you call
1764 * scm_done_malloc with the size of the object. When your smob free
1765 * function is called, be sure to include this size in the return
1766 * value. */
1767
1768 void
1769 scm_done_malloc (long size)
1770 {
1771 scm_mallocated += size;
1772
1773 if (scm_mallocated > scm_mtrigger)
1774 {
1775 scm_igc ("foreign mallocs");
1776 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1777 {
1778 if (scm_mallocated > scm_mtrigger)
1779 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1780 else
1781 scm_mtrigger += scm_mtrigger / 2;
1782 }
1783 }
1784 }
1785
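
#if 0
/* Illustrative sketch of the pattern described above; the "image"
 * type and its smob plumbing are hypothetical.  The constructor
 * allocates with plain malloc and reports the size via
 * scm_done_malloc; the corresponding smob free function releases the
 * memory and returns the same size, which the sweep phase credits
 * back against scm_mallocated.
 */
struct image { scm_sizet size; char *pixels; };

static struct image *
make_image_data (scm_sizet size)
{
  struct image *im = (struct image *) malloc (sizeof (struct image));
  im->size = size;
  im->pixels = (char *) malloc (size);
  scm_done_malloc ((long) (sizeof (struct image) + size));
  return im;
}

/* Called from the (hypothetical) smob's free function. */
static scm_sizet
free_image_data (struct image *im)
{
  scm_sizet size = sizeof (struct image) + im->size;
  free (im->pixels);
  free (im);
  return size;
}
#endif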
1786
1787 \f
1788
1789 /* {Heap Segments}
1790 *
1791 * Each heap segment is an array of objects of a particular size.
1792 * Every segment has an associated (possibly shared) freelist.
1793 * A table of segment records is kept that records the upper and
1794 * lower extents of the segment; this is used during the conservative
1795  * phase of gc to identify probable gc roots (because they point
1796 * into valid segments at reasonable offsets). */
1797
1798 /* scm_expmem
1799 * is true if the first segment was smaller than INIT_HEAP_SEG.
1800 * If scm_expmem is set to one, subsequent segment allocations will
1801 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
1802 */
1803 int scm_expmem = 0;
1804
1805 scm_sizet scm_max_segment_size;
1806
1807 /* scm_heap_org
1808 * is the lowest base address of any heap segment.
1809 */
1810 SCM_CELLPTR scm_heap_org;
1811
1812 scm_heap_seg_data_t * scm_heap_table = 0;
1813 int scm_n_heap_segs = 0;
1814
1815 /* init_heap_seg
1816  * initializes a new heap segment and returns the number of objects it contains.
1817 *
1818 * The segment origin, segment size in bytes, and the span of objects
1819 * in cells are input parameters. The freelist is both input and output.
1820 *
1821  * This function presumes that the scm_heap_table has already been expanded
1822  * to accommodate a new segment record.
1823 */
1824
1825
1826 static scm_sizet
1827 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
1828 {
1829 register SCM_CELLPTR ptr;
1830 SCM_CELLPTR seg_end;
1831 int new_seg_index;
1832 int n_new_cells;
1833 int span = freelist->span;
1834
1835 if (seg_org == NULL)
1836 return 0;
1837
1838 ptr = CELL_UP (seg_org, span);
1839
1840 /* Compute the ceiling on valid object pointers w/in this segment.
1841 */
1842 seg_end = CELL_DN ((char *) seg_org + size, span);
1843
1844 /* Find the right place and insert the segment record.
1845 *
1846 */
1847 for (new_seg_index = 0;
1848 ( (new_seg_index < scm_n_heap_segs)
1849 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1850 new_seg_index++)
1851 ;
1852
1853 {
1854 int i;
1855 for (i = scm_n_heap_segs; i > new_seg_index; --i)
1856 scm_heap_table[i] = scm_heap_table[i - 1];
1857 }
1858
1859 ++scm_n_heap_segs;
1860
1861 scm_heap_table[new_seg_index].span = span;
1862 scm_heap_table[new_seg_index].freelist = freelist;
1863 scm_heap_table[new_seg_index].bounds[0] = ptr;
1864 scm_heap_table[new_seg_index].bounds[1] = seg_end;
1865
1866
1867 /* Compute the least valid object pointer w/in this segment
1868 */
1869 ptr = CELL_UP (ptr, span);
1870
1871
1872 /*n_new_cells*/
1873 n_new_cells = seg_end - ptr;
1874
1875 freelist->heap_size += n_new_cells;
1876
1877 /* Partition objects in this segment into clusters */
1878 {
1879 SCM clusters;
1880 SCM *clusterp = &clusters;
1881 int n_cluster_cells = span * freelist->cluster_size;
1882
1883 while (n_new_cells > span) /* at least one spine + one freecell */
1884 {
1885 /* Determine end of cluster
1886 */
1887 if (n_new_cells >= n_cluster_cells)
1888 {
1889 seg_end = ptr + n_cluster_cells;
1890 n_new_cells -= n_cluster_cells;
1891 }
1892 else
1893 /* [cmm] looks like the segment size doesn't divide cleanly by
1894 cluster size. bad cmm! */
1895 abort();
1896
1897 /* Allocate cluster spine
1898 */
1899 *clusterp = PTR2SCM (ptr);
1900 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
1901 clusterp = SCM_CDRLOC (*clusterp);
1902 ptr += span;
1903
1904 while (ptr < seg_end)
1905 {
1906 SCM scmptr = PTR2SCM (ptr);
1907
1908 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1909 SCM_SETCDR (scmptr, PTR2SCM (ptr + span));
1910 ptr += span;
1911 }
1912
1913 SCM_SETCDR (PTR2SCM (ptr - span), SCM_EOL);
1914 }
1915
1916 /* Patch up the last cluster pointer in the segment
1917 * to join it to the input freelist.
1918 */
1919 *clusterp = freelist->clusters;
1920 freelist->clusters = clusters;
1921 }
1922
1923 #ifdef DEBUGINFO
1924 fprintf (stderr, "H");
1925 #endif
1926 return size;
1927 }
1928
1929 static scm_sizet
1930 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
1931 {
1932 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
1933
1934 return
1935 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
1936 + ALIGNMENT_SLACK (freelist);
1937 }
1938
1939 static void
1940 alloc_some_heap (scm_freelist_t *freelist)
1941 {
1942 scm_heap_seg_data_t * tmptable;
1943 SCM_CELLPTR ptr;
1944 long len;
1945
1946 /* Critical code sections (such as the garbage collector)
1947 * aren't supposed to add heap segments.
1948 */
1949 if (scm_gc_heap_lock)
1950 scm_wta (SCM_UNDEFINED, "need larger initial", "heap");
1951
1952 /* Expand the heap tables to have room for the new segment.
1953 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
1954 * only if the allocation of the segment itself succeeds.
1955 */
1956 len = (1 + scm_n_heap_segs) * sizeof (scm_heap_seg_data_t);
1957
1958 SCM_SYSCALL (tmptable = ((scm_heap_seg_data_t *)
1959 realloc ((char *)scm_heap_table, len)));
1960 if (!tmptable)
1961 scm_wta (SCM_UNDEFINED, "could not grow", "hplims");
1962 else
1963 scm_heap_table = tmptable;
1964
1965
1966 /* Pick a size for the new heap segment.
1967 * The rule for picking the size of a segment is explained in
1968 * gc.h
1969 */
1970 {
1971     /* Ensure that the new segment is predicted to be large enough.
1972 *
1973 * New yield should at least equal GC fraction of new heap size, i.e.
1974 *
1975 * y + dh > f * (h + dh)
1976 *
1977 * y : yield
1978 * f : min yield fraction
1979 * h : heap size
1980 * dh : size of new heap segment
1981 *
1982 * This gives dh > (f * h - y) / (1 - f)
1983 */
1984 int f = freelist->min_yield_fraction;
1985 long h = SCM_HEAP_SIZE;
1986 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
1987 len = SCM_EXPHEAP (freelist->heap_size);
1988 #ifdef DEBUGINFO
1989     fprintf (stderr, "(%ld < %ld)", len, min_cells);
1990 #endif
1991 if (len < min_cells)
1992 len = min_cells + freelist->cluster_size;
1993 len *= sizeof (scm_cell);
1994 /* force new sampling */
1995 freelist->collected = LONG_MAX;
1996 }
1997
1998 if (len > scm_max_segment_size)
1999 len = scm_max_segment_size;
2000
2001 {
2002 scm_sizet smallest;
2003
2004 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2005
2006 if (len < smallest)
2007 len = smallest;
2008
2009 /* Allocate with decaying ambition. */
2010 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2011 && (len >= smallest))
2012 {
2013 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
2014 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2015 if (ptr)
2016 {
2017 init_heap_seg (ptr, rounded_len, freelist);
2018 return;
2019 }
2020 len /= 2;
2021 }
2022 }
2023
2024 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
2025 }
2026
2027
2028 SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
2029 (SCM name),
2030 "")
2031 #define FUNC_NAME s_scm_unhash_name
2032 {
2033 int x;
2034 int bound;
2035 SCM_VALIDATE_SYMBOL (1,name);
2036 SCM_DEFER_INTS;
2037 bound = scm_n_heap_segs;
2038 for (x = 0; x < bound; ++x)
2039 {
2040 SCM_CELLPTR p;
2041 SCM_CELLPTR pbound;
2042 p = scm_heap_table[x].bounds[0];
2043 pbound = scm_heap_table[x].bounds[1];
2044 while (p < pbound)
2045 {
2046 SCM cell = PTR2SCM (p);
2047 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
2048 {
2049 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2050 * struct cell. See the corresponding comment in scm_gc_mark.
2051 */
2052 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2053 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2054 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
2055 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
2056 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
2057 {
2058 SCM_SET_CELL_OBJECT_0 (cell, name);
2059 }
2060 }
2061 ++p;
2062 }
2063 }
2064 SCM_ALLOW_INTS;
2065 return name;
2066 }
2067 #undef FUNC_NAME
2068
2069
2070 \f
2071 /* {GC Protection Helper Functions}
2072 */
2073
2074
2075 void
2076 scm_remember (SCM *ptr)
2077 { /* empty */ }
2078
2079
2080 /*
2081   These functions keep the arguments after the first one from being
2082   garbage collected.  A caller that routes its return value through
2083   scm_return_first as the last expression of its body keeps those
2084   arguments live, and therefore visible to the conservative
2085   stack-scanning GC, until the caller itself returns.
2086   It'd be better to have a nice compiler hint for
2087   this. --03/09/00 gjb */
2088 SCM
2089 scm_return_first (SCM elt, ...)
2090 {
2091 return elt;
2092 }
2093
2094 int
2095 scm_return_first_int (int i, ...)
2096 {
2097 return i;
2098 }
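/* Usage sketch (the names in it are hypothetical): a caller that extracts a
 * raw pointer from STR and must keep STR visible to the conservative GC
 * until it returns can write, as its final expression,
 *
 *   return scm_return_first (result, str);
 *
 * so that STR stays referenced in the caller's frame right up to the point
 * where the caller itself returns.
 */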
2099
2100
2101 SCM
2102 scm_permanent_object (SCM obj)
2103 {
2104 SCM_REDEFER_INTS;
2105 scm_permobjs = scm_cons (obj, scm_permobjs);
2106 SCM_REALLOW_INTS;
2107 return obj;
2108 }
2109
2110
2111 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2112 other references are dropped, until the object is unprotected by calling
2113    scm_unprotect_object (OBJ).  Calls to scm_protect_object and
2114    scm_unprotect_object nest: an object may be protected several times,
2115    and must be unprotected the same number of times before it actually
2116    becomes unprotected again.  It is an error to unprotect an object more often
2117 than it has been protected before. The function scm_protect_object returns
2118 OBJ.
2119 */
2120
2121 /* Implementation note: For every object X, there is a counter which
2122 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2123 */
2124
2125 SCM
2126 scm_protect_object (SCM obj)
2127 {
2128 SCM handle;
2129
2130 /* This critical section barrier will be replaced by a mutex. */
2131 SCM_REDEFER_INTS;
2132
2133 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2134 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
2135
2136 SCM_REALLOW_INTS;
2137
2138 return obj;
2139 }
2140
2141
2142 /* Remove any protection for OBJ established by a prior call to
2143 scm_protect_object. This function returns OBJ.
2144
2145 See scm_protect_object for more information. */
2146 SCM
2147 scm_unprotect_object (SCM obj)
2148 {
2149 SCM handle;
2150
2151 /* This critical section barrier will be replaced by a mutex. */
2152 SCM_REDEFER_INTS;
2153
2154 handle = scm_hashq_get_handle (scm_protects, obj);
2155
2156 if (SCM_IMP (handle))
2157 {
2158 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2159 abort ();
2160 }
2161 else
2162 {
2163 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2164 if (count == 0)
2165 scm_hashq_remove_x (scm_protects, obj);
2166 else
2167 SCM_SETCDR (handle, SCM_MAKINUM (count));
2168 }
2169
2170 SCM_REALLOW_INTS;
2171
2172 return obj;
2173 }
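/* Usage sketch: protection counts nest, so each call to scm_protect_object
 * must eventually be matched by exactly one call to scm_unprotect_object:
 *
 *   scm_protect_object (obj);      (count becomes 1)
 *   scm_protect_object (obj);      (count becomes 2)
 *   ...
 *   scm_unprotect_object (obj);    (count drops to 1; still protected)
 *   scm_unprotect_object (obj);    (count reaches 0; entry removed, obj may
 *                                   be collected once otherwise unreferenced)
 */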
2174
2175 int terminating;
2176
2177 /* called on process termination. */
2178 #ifdef HAVE_ATEXIT
2179 static void
2180 cleanup (void)
2181 #else
2182 #ifdef HAVE_ON_EXIT
2183 extern int on_exit (void (*procp) (), int arg);
2184
2185 static void
2186 cleanup (int status, void *arg)
2187 #else
2188 #error Do not know how to set up a cleanup handler on your system.
2189 #endif
2190 #endif
2191 {
2192 terminating = 1;
2193 scm_flush_all_ports ();
2194 }
2195
2196 \f
2197 static int
2198 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
2199 {
2200 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2201 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2202 rounded_size,
2203 freelist))
2204 {
2205 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2206 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2207 rounded_size,
2208 freelist))
2209 return 1;
2210 }
2211 else
2212 scm_expmem = 1;
2213
2214 if (freelist->min_yield_fraction)
2215 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2216 / 100);
2217 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2218
2219 return 0;
2220 }
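/* Worked example (illustrative figures only): with min_yield_fraction = 40
 * and an initial segment of 32768 cells, the code above sets
 * min_yield = 32768 * 40 / 100 = 13107 cells; grow_heap_p then starts out
 * false, since the heap is already larger than the yield it is expected to
 * deliver.
 */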
2221
2222 \f
2223 static void
2224 init_freelist (scm_freelist_t *freelist,
2225 int span,
2226 int cluster_size,
2227 int min_yield)
2228 {
2229 freelist->clusters = SCM_EOL;
2230 freelist->cluster_size = cluster_size + 1;
2231 freelist->left_to_collect = 0;
2232 freelist->clusters_allocated = 0;
2233 freelist->min_yield = 0;
2234 freelist->min_yield_fraction = min_yield;
2235 freelist->span = span;
2236 freelist->collected = 0;
2237 freelist->collected_1 = 0;
2238 freelist->heap_size = 0;
2239 }
2240
2241 int
2242 scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2243 scm_sizet init_heap_size_2, int gc_trigger_2,
2244 scm_sizet max_segment_size)
2245 {
2246 scm_sizet j;
2247
2248 if (!init_heap_size_1)
2249 init_heap_size_1 = scm_default_init_heap_size_1;
2250 if (!init_heap_size_2)
2251 init_heap_size_2 = scm_default_init_heap_size_2;
2252
2253 j = SCM_NUM_PROTECTS;
2254 while (j)
2255 scm_sys_protects[--j] = SCM_BOOL_F;
2256 scm_block_gc = 1;
2257
2258 scm_freelist = SCM_EOL;
2259 scm_freelist2 = SCM_EOL;
2260 init_freelist (&scm_master_freelist,
2261 1, SCM_CLUSTER_SIZE_1,
2262 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
2263 init_freelist (&scm_master_freelist2,
2264 2, SCM_CLUSTER_SIZE_2,
2265 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
2266 scm_max_segment_size
2267 = max_segment_size ? max_segment_size : scm_default_max_segment_size;
2268
2269 scm_expmem = 0;
2270
2271 j = SCM_HEAP_SEG_SIZE;
2272 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2273 scm_heap_table = ((scm_heap_seg_data_t *)
2274 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
2275
2276 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2277 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2278 return 1;
2279
2280 /* scm_hplims[0] can change. do not remove scm_heap_org */
2281 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2282
2283 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2284 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2285 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2286 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2287 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2288
2289   /* Initialise the port table.  */
2290 scm_port_table = (scm_port **)
2291 malloc (sizeof (scm_port *) * scm_port_table_room);
2292 if (!scm_port_table)
2293 return 1;
2294
2295 #ifdef HAVE_ATEXIT
2296 atexit (cleanup);
2297 #else
2298 #ifdef HAVE_ON_EXIT
2299 on_exit (cleanup, 0);
2300 #endif
2301 #endif
2302
2303 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2304 SCM_SETCDR (scm_undefineds, scm_undefineds);
2305
2306 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2307 scm_nullstr = scm_makstr (0L, 0);
2308 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
2309 scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2310 scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
2311 scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2312 scm_stand_in_procs = SCM_EOL;
2313 scm_permobjs = SCM_EOL;
2314 scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
2315 scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2316 scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
2317 #ifdef SCM_BIGDIG
2318 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2319 #endif
2320 return 0;
2321 }
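/* Usage sketch: this is normally invoked once during Guile's own startup,
 * but the parameter conventions can be read off the code above; passing
 * zeros selects the compiled-in defaults, and a non-zero return means the
 * initial heap (or the port table) could not be allocated:
 *
 *   if (scm_init_storage (0, 0, 0, 0, 0))
 *     report_out_of_memory ();       (hypothetical error handler)
 */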
2322
2323 \f
2324
2325 SCM scm_after_gc_hook;
2326
2327 #if (SCM_DEBUG_DEPRECATED == 0)
2328 static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2329 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2330 static SCM gc_async;
2331
2332
2333 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2334  * is run after a garbage collection, as soon as pending asynchronous events
2335  * are handled by the evaluator.
2336 */
2337 static SCM
2338 gc_async_thunk (void)
2339 {
2340 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2341
2342 #if (SCM_DEBUG_DEPRECATED == 0)
2343
2344 /* The following code will be removed in Guile 1.5. */
2345 if (SCM_NFALSEP (scm_gc_vcell))
2346 {
2347 SCM proc = SCM_CDR (scm_gc_vcell);
2348
2349 if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
2350 scm_apply (proc, SCM_EOL, SCM_EOL);
2351 }
2352
2353 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2354
2355 return SCM_UNSPECIFIED;
2356 }
2357
2358
2359 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2360 * the garbage collection. The only purpose of this function is to mark the
2361 * gc_async (which will eventually lead to the execution of the
2362 * gc_async_thunk).
2363 */
2364 static void *
2365 mark_gc_async (void * hook_data, void *func_data, void *data)
2366 {
2367 scm_system_async_mark (gc_async);
2368 return NULL;
2369 }
2370
2371
2372 void
2373 scm_init_gc ()
2374 {
2375 SCM after_gc_thunk;
2376
2377 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
2378
2379 #if (SCM_DEBUG_DEPRECATED == 0)
2380 scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
2381 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2382 /* Dirk:FIXME:: We don't really want a binding here. */
2383 after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
2384 gc_async = scm_system_async (after_gc_thunk);
2385
2386 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2387
2388 #include "libguile/gc.x"
2389 }
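/* Sketch (hypothetical client code): C code can attach its own callback to
 * the same hook that mark_gc_async uses, with the same signature.  As with
 * mark_gc_async above, the callback runs at the end of the collection
 * itself, so any substantial work should be deferred the way gc_async_thunk
 * defers the after-gc-hook:
 *
 *   static void *
 *   my_after_gc (void *hook_data, void *func_data, void *data)
 *   {
 *     ++my_gc_counter;               (hypothetical statistic)
 *     return NULL;
 *   }
 *
 *   scm_c_hook_add (&scm_after_gc_c_hook, my_after_gc, NULL, 0);
 */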
2390
2391 /*
2392 Local Variables:
2393 c-file-style: "gnu"
2394 End:
2395 */