/* Copyright (C) 1995, 96, 97, 98, 99, 2000, 2001 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */

/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
   gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */

/* #define DEBUGINFO */

/* SECTION: This code is compiled once.
 */

#ifndef MARK_DEPENDENCIES

\f
#include <stdio.h>
#include <errno.h>
#include <string.h>

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/validate.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f

unsigned int scm_gc_running_p = 0;

\f

#if (SCM_DEBUG_CELL_ACCESSES == 1)

unsigned int scm_debug_cell_accesses_p = 0;


/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves determining whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment and whether the cell
 * pointed to is not a free cell.
 */
void
scm_assert_cell_valid (SCM cell)
{
  if (scm_debug_cell_accesses_p)
    {
      scm_debug_cell_accesses_p = 0;  /* disable to avoid recursion */

      if (!scm_cellp (cell))
        {
          fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
          abort ();
        }
      else if (!scm_gc_running_p)
        {
          /* Dirk::FIXME:: During garbage collection there occur references to
             free cells.  This is all right during conservative marking, but
             should not happen otherwise (I think).  The case of free cells
             accessed during conservative marking is handled in function
             scm_mark_locations.  However, accesses to free cells still occur
             during gc.  I don't understand why this happens.  If it is
             a bug and gets fixed, the following test should also work while
             gc is running.
           */
          if (SCM_FREE_CELL_P (cell))
            {
              fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
              abort ();
            }
        }
      scm_debug_cell_accesses_p = 1;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #f, cell access checking is disabled.\n"
            "If FLAG is #t, cell access checking is enabled.\n"
            "This procedure only exists because the compile-time flag\n"
            "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (SCM_FALSEP (flag)) {
    scm_debug_cell_accesses_p = 0;
  } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
    scm_debug_cell_accesses_p = 1;
  } else {
    SCM_WRONG_TYPE_ARG (1, flag);
  }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f

/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which scm_cons and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of the heap.  If this much heap
 * is allocated initially the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type scm_sizet.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more heap
 * is needed.
 *
 * SCM_INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of SCM_INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * Heap size 45000 and 40% min yield gives quick startup and no extra
 * heap allocation.  Higher values on min yield may lead to large
 * heaps, especially if a program varies its maximum consumption
 * between different freelists.
 */

#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
                                     / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
int scm_default_min_yield_1 = 40;

#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
                                     / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application.
 */
int scm_default_min_yield_2 = 40;

int scm_default_max_segment_size = 2097000L;  /* a little less than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
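
/* Illustrative arithmetic (added commentary, not part of the original
 * source): SCM_EXPHEAP (size) is size / 2, so each expansion requests
 * half of the current heap, which is the growth factor 1.5 mentioned
 * above:
 *
 *   scm_sizet heap = 1000000;
 *   heap += SCM_EXPHEAP (heap);    heap is now 1500000
 *   heap += SCM_EXPHEAP (heap);    heap is now 2250000
 *
 * SCM_MTRIGGER_HYSTERESIS works out to 100000 / 10 = 10000 bytes; a GC
 * that reclaims less malloc storage than this causes scm_mtrigger to
 * be raised (see scm_must_malloc below).
 */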

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
   aligned inner bounds for allocated storage */

#ifdef PROT386
/* in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif  /* UNICOS */
#endif  /* PROT386 */
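
/* Example (added commentary): on a 32 bit machine, where
 * sizeof (scm_cell) == 8, the default CELL_UP/CELL_DN definitions round
 * a pointer to the nearest span-aligned cell boundary:
 *
 *   CELL_UP ((void *) 0x1003, 1)   =>   (SCM_CELLPTR) 0x1008
 *   CELL_DN ((void *) 0x1003, 1)   =>   (SCM_CELLPTR) 0x1000
 *
 * With span == 2, the same macros align to 16-byte double-cell
 * boundaries instead.
 */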

#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)

#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
    (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)

\f
/* scm_freelists
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow the heap instead of collecting
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;

SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
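
/* Note (added commentary): the positional initializers above follow the
 * declaration order of scm_freelist_t, so SCM_CLUSTER_SIZE_1 resp.
 * SCM_CLUSTER_SIZE_2 lands in cluster_size and the 1 resp. 2 in span;
 * the final field, heap_size, is left implicit and therefore starts
 * at 0.
 */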

/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
static unsigned long t_before_sweep;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_sweep_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");

typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;



static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);

typedef enum { return_on_error, abort_on_error } policy_on_error;
static void alloc_some_heap (scm_freelist_t *, policy_on_error);


#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
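
/* (Added commentary.)  Mark bits live outside the cells themselves, in
 * per-card bit vectors ("bvecs").  They are carved out of chunks big
 * enough for BVEC_GROW_SIZE (256) cards, i.e.
 * 256 * SCM_GC_CARD_BVEC_SIZE_IN_LIMBS limbs per chunk, which get_bvec
 * below hands out one card's worth at a time.
 */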

/* mark space allocation */

typedef struct scm_mark_space_t
{
  scm_c_bvec_limb_t *bvec_space;
  struct scm_mark_space_t *next;
} scm_mark_space_t;

static scm_mark_space_t *current_mark_space;
static scm_mark_space_t **mark_space_ptr;
static int current_mark_space_offset;
static scm_mark_space_t *mark_space_head;

static scm_c_bvec_limb_t *
get_bvec ()
#define FUNC_NAME "get_bvec"
{
  scm_c_bvec_limb_t *res;

  if (!current_mark_space)
    {
      SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
      if (!current_mark_space)
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space->bvec_space = NULL;
      current_mark_space->next = NULL;

      *mark_space_ptr = current_mark_space;
      mark_space_ptr = &(current_mark_space->next);

      return get_bvec ();
    }

  if (!(current_mark_space->bvec_space))
    {
      SCM_SYSCALL (current_mark_space->bvec_space =
                   (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
      if (!(current_mark_space->bvec_space))
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space_offset = 0;

      return get_bvec ();
    }

  if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
    {
      current_mark_space = NULL;

      return get_bvec ();
    }

  res = current_mark_space->bvec_space + current_mark_space_offset;
  current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;

  return res;
}
#undef FUNC_NAME


static void
clear_mark_space ()
{
  scm_mark_space_t *ms;

  for (ms = mark_space_head; ms; ms = ms->next)
    memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
}


\f
/* Debugging functions.  */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

/* Return the number of the heap segment containing CELL.  */
static int
which_seg (SCM cell)
{
  int i;

  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
      return i;
  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
           SCM_UNPACK (cell));
  abort ();
}


static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, " %5d %d-cells in segment %d\n",
                     count, master->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, " %5d %d-cells in segment %d\n",
             count, master->span, last_seg);
}

SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{map-free-list} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;
  fprintf (stderr, "%d segments total (%d:%d",
           scm_n_heap_segs,
           scm_heap_table[0].span,
           scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
             scm_heap_table[i].span,
             scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

static int last_cluster;
static int last_size;

static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %d\n", title, n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%d\n", last_size);
          else
            fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %d", title, i);
      else
        fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}

static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}

SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{free-list-length} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif

#ifdef GUILE_DEBUG_FREELIST

/* Non-zero if freelist debugging is in effect.  Set this via
   `gc-set-debug-check-freelist!'.  */
static int scm_debug_check_freelist = 0;

/* Number of calls to SCM_NEWCELL since startup.  */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  */
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
    if (!SCM_FREE_CELL_P (f))
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        abort ();
      }
}

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
            "This procedure only exists because the GUILE_DEBUG_FREELIST\n"
            "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* [cmm] I did a double-take when I read this code the first time.
     well, FWIW. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
    }

  return new;
}

#endif /* GUILE_DEBUG_FREELIST */

\f

static unsigned long
master_cells_allocated (scm_freelist_t *master)
{
  /* the '- 1' below is to ignore the cluster spine cells. */
  int objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}

static unsigned long
freelist_length (SCM freelist)
{
  int n;
  for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
    ++n;
  return n;
}

static unsigned long
compute_cells_allocated ()
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}
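
/* (Added commentary.)  In other words, the running total
 * scm_cells_allocated is corrected by adding the cells already handed
 * out from clusters and subtracting the cells still sitting unused on
 * the two local freelists (scm_freelist and scm_freelist2), each term
 * weighted by the span of its master list.
 */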

/* {Scheme Interface to GC}
 */

SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Returns an association list of statistics about Guile's current use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  long int local_scm_gc_times;
  long int local_scm_gc_mark_time_taken;
  long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                        scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                        scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                        scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                        scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                        scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                        scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                        scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                        scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
                        scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
                        scm_cons (sym_heap_segments, heap_segs),
                        SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
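
/* Example use from the REPL (added commentary; the numbers shown are
 * made up and will vary):
 *
 *   guile> (assq 'cells-allocated (gc-stats))
 *   (cells-allocated . 97440)
 *
 * Each entry of the returned alist pairs one of the symbols defined
 * above with a snapshot of the corresponding counter.
 */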


static void
gc_start_stats (const char *what)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}


static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that is uniquely associated with @var{obj}\n"
            "for the lifetime of @var{obj}.")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f
/* {C Interface For When GC is Triggered}
 */

static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GCs.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
               scm_cells_allocated,
               delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}
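
/* Worked example (added commentary): with a total heap of 100000 cells
 * and min_yield_fraction == 40, the target yield is 40000 cells.  If
 * the better of the last two yields was only 30000 cells, delta is
 * 10000 and min_yield grows by that amount, which in turn makes
 * gc_sweep_freelist_finish schedule heap growth sooner.
 */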


/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              /* In order to reduce gc frequency, try to allocate a new heap
               * segment first, even if gc might find some free cells.  If we
               * can't obtain a new heap segment, we will try gc later.
               */
              master->grow_heap_p = 0;
              alloc_some_heap (master, return_on_error);
            }
          if (SCM_NULLP (master->clusters))
            {
              /* The heap was not grown, either because it wasn't scheduled to
               * grow, or because there was not enough memory available.  In
               * both cases we have to try gc to get some free cells.
               */
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %d, ",
                       scm_cells_allocated
                       + master_cells_allocated (&scm_master_freelist)
                       + master_cells_allocated (&scm_master_freelist2));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
              if (SCM_NULLP (master->clusters))
                {
                  /* gc could not free any cells.  Now, we _must_ allocate a
                   * new heap segment, because there is no other possibility
                   * to provide a new cell for the caller.
                   */
                  alloc_some_heap (master, abort_on_error);
                }
            }
        }
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}


#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif


scm_c_hook_t scm_before_gc_c_hook;
scm_c_hook_t scm_before_mark_c_hook;
scm_c_hook_t scm_before_sweep_c_hook;
scm_c_hook_t scm_after_sweep_c_hook;
scm_c_hook_t scm_after_gc_c_hook;


void
scm_igc (const char *what)
{
  int j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
#ifdef USE_THREADS
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  if (!scm_stack_base || scm_block_gc)
    {
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Mark objects on the C stack. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      ((scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
                                    sizeof scm_save_regs_gc_mark)
                       / sizeof (SCM_STACKITEM)));

  {
    scm_sizet stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}

\f

/* {Mark/Sweep}
 */

#define MARK scm_gc_mark
#define FNAME "scm_gc_mark"

#endif /*!MARK_DEPENDENCIES*/

/* Mark an object precisely.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

  if (!SCM_CELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);

#if (defined (GUILE_DEBUG_FREELIST))

  if (SCM_GC_IN_CARD_HEADERP (SCM2PTR (ptr)))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);

#endif

#ifndef MARK_DEPENDENCIES

  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto gc_mark_nimp;
        }
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      RECURSE (SCM_CELL_OBJECT_2 (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      {
        /* Dirk:FIXME:: The following code is super ugly:  ptr may be a struct
         * or a gloc.  If it is a gloc, the cell word #0 of ptr is a pointer
         * to a heap cell.  If it is a struct, the cell word #0 of ptr is a
         * pointer to a struct vtable data region.  The fact that these are
         * accessed in the same way restricts the possibilities to change the
         * data layout of structs or heap cells.
         */
        scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
        if (vtable_data [scm_vtable_index_vcell] != 0)
          {
            /* ptr is a gloc */
            SCM gloc_car = SCM_PACK (word0);
            RECURSE (gloc_car);
            ptr = SCM_CDR (ptr);
            goto gc_mark_loop;
          }
        else
          {
            /* ptr is a struct */
            SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
            int len = SCM_SYMBOL_LENGTH (layout);
            char * fields_desc = SCM_SYMBOL_CHARS (layout);
            scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);

            if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
              {
                RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
                RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
              }
            if (len)
              {
                int x;

                for (x = 0; x < len - 2; x += 2, ++struct_data)
                  if (fields_desc[x] == 'p')
                    RECURSE (SCM_PACK (*struct_data));
                if (fields_desc[x] == 'p')
                  {
                    if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                      for (x = *struct_data++; x; --x, ++struct_data)
                        RECURSE (SCM_PACK (*struct_data));
                    else
                      RECURSE (SCM_PACK (*struct_data));
                  }
              }
            /* mark vtable */
            ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
            goto gc_mark_loop;
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto gc_mark_nimp;
        }
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
        unsigned long int i = SCM_CCLO_LENGTH (ptr);
        unsigned long int j;
        for (j = 1; j != i; ++j)
          {
            SCM obj = SCM_CCLO_REF (ptr, j);
            if (!SCM_IMP (obj))
              RECURSE (obj);
          }
        ptr = SCM_CCLO_REF (ptr, 0);
        goto gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_substring:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_VECTOR_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    RECURSE (SCM_CAR (kvpair));
                  if (!weak_values)
                    RECURSE (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                RECURSE (alist);
            }
        }
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
        goto def;
      if (SCM_PTAB_ENTRY (ptr))
        RECURSE (SCM_FILENAME (ptr));
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
          if (!(i < scm_numsmob))
            goto def;
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
    def:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME

#ifndef MARK_DEPENDENCIES

#undef MARK
#undef FNAME

/* And here we define `scm_gc_mark_dependencies', by including this
 * same file in itself.
 */
#define MARK scm_gc_mark_dependencies
#define FNAME "scm_gc_mark_dependencies"
#define MARK_DEPENDENCIES
#include "gc.c"
#undef MARK_DEPENDENCIES
#undef MARK
#undef FNAME


/* Mark a Region Conservatively
 */

void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = * (SCM *) &x[m];
      if (SCM_CELLP (obj))
        {
          SCM_CELLPTR ptr = SCM2PTR (obj);
          int i = 0;
          int j = scm_n_heap_segs - 1;
          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
              && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
            {
              while (i <= j)
                {
                  int seg_id;
                  seg_id = -1;
                  if ((i == j)
                      || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                    seg_id = i;
                  else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                    seg_id = j;
                  else
                    {
                      int k;
                      k = (i + j) / 2;
                      if (k == i)
                        break;
                      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                        {
                          j = k;
                          ++i;
                          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                            continue;
                          else
                            break;
                        }
                      else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                        {
                          i = k;
                          --j;
                          if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                            continue;
                          else
                            break;
                        }
                    }

                  if (SCM_GC_IN_CARD_HEADERP (ptr))
                    break;

                  if (scm_heap_table[seg_id].span == 1
                      || DOUBLECELL_ALIGNED_P (obj))
                    scm_gc_mark (obj);

                  break;
                }
            }
        }
    }
}


/* The function scm_cellp determines whether an SCM value can be regarded as a
 * pointer to a cell on the heap.  Binary search is used in order to determine
 * the heap segment that contains the cell.
 */
int
scm_cellp (SCM value)
{
  if (SCM_CELLP (value)) {
    scm_cell * ptr = SCM2PTR (value);
    unsigned int i = 0;
    unsigned int j = scm_n_heap_segs - 1;

    while (i < j) {
      int k = (i + j) / 2;
      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
        j = k;
      } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
        i = k + 1;
      }
    }

    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
        && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
        && !SCM_GC_IN_CARD_HEADERP (ptr)
        )
      return 1;
    else
      return 0;
  } else
    return 0;
}


static void
gc_sweep_freelist_start (scm_freelist_t *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}

static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  int collected;
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      SCM c = freelist->cells;
      SCM_SETCAR (c, SCM_CDR (c));
      SCM_SETCDR (c, SCM_EOL);
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}

#define NEXT_DATA_CELL(ptr, span) \
    do { \
      scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
      (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
               CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
               : nxt__); \
    } while (0)
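
/* (Added commentary.)  NEXT_DATA_CELL advances PTR to the next data
 * cell: it first rounds (ptr + 1 byte) up to the next span-aligned
 * cell with CELL_UP and, if that lands inside a card header, skips
 * past the SCM_GC_CARD_N_HEADER_CELLS header cells to the first data
 * cell of that card.
 */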

void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist.  */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr;

          if (SCM_GC_IN_CARD_HEADERP (ptr))
            {
              SCM_CELLPTR nxt;

              /* cheat here */
              nxt = ptr;
              NEXT_DATA_CELL (nxt, span);
              j += span;

              ptr = nxt - span;
              continue;
            }

          scmptr = PTR2SCM (ptr);

          if (SCM_GCMARKP (scmptr))
            continue;

          switch SCM_TYP7 (scmptr)
            {
            case scm_tcs_cons_gloc:
              {
                /* Dirk:FIXME:: Again, super ugly code:  scmptr may be a
                 * struct or a gloc.  See the corresponding comment in
                 * scm_gc_mark.
                 */
                scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
                                    - scm_tc3_cons_gloc);
                /* access as struct */
                scm_bits_t * vtable_data = (scm_bits_t *) word0;
                if (vtable_data[scm_vtable_index_vcell] == 0)
                  {
                    /* Structs need to be freed in a special order.
                     * This is handled by GC C hooks in struct.c.
                     */
                    SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
                    scm_structs_to_free = scmptr;
                    continue;
                  }
                /* fall through so that scmptr gets collected */
              }
              break;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              break;
            case scm_tc7_wvect:
              m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
              scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
              break;
            case scm_tc7_vector:
              {
                unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += length * sizeof (scm_bits_t);
                    scm_must_free (SCM_VECTOR_BASE (scmptr));
                  }
                break;
              }
#ifdef CCLO
            case scm_tc7_cclo:
              m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
              scm_must_free (SCM_CCLO_BASE (scmptr));
              break;
#endif
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              {
                unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
                    scm_must_free (SCM_BITVECTOR_BASE (scmptr));
                  }
              }
              break;
            case scm_tc7_byvect:
            case scm_tc7_ivect:
            case scm_tc7_uvect:
            case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
#endif
            case scm_tc7_fvect:
            case scm_tc7_dvect:
            case scm_tc7_cvect:
              m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
              scm_must_free (SCM_UVECTOR_BASE (scmptr));
              break;
#endif
            case scm_tc7_substring:
              break;
            case scm_tc7_string:
              m += SCM_STRING_LENGTH (scmptr) + 1;
              scm_must_free (SCM_STRING_CHARS (scmptr));
              break;
            case scm_tc7_symbol:
              m += SCM_SYMBOL_LENGTH (scmptr) + 1;
              scm_must_free (SCM_SYMBOL_CHARS (scmptr));
              break;
            case scm_tcs_subrs:
              /* the various "subrs" (primitives) are never freed */
              continue;
            case scm_tc7_port:
              if SCM_OPENP (scmptr)
                {
                  int k = SCM_PTOBNUM (scmptr);
                  if (!(k < scm_numptob))
                    goto sweeperr;
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free */
                  /* rather than ftobs[k].close.  .close */
                  /* is for explicit CLOSE-PORT by user */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_SETAND_CAR (scmptr, ~SCM_OPN);
                }
              break;
            case scm_tc7_smob:
              switch SCM_TYP16 (scmptr)
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  scm_must_free (SCM_BDIGITS (scmptr));
                  break;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  m += sizeof (scm_complex_t);
                  scm_must_free (SCM_COMPLEX_MEM (scmptr));
                  break;
                default:
                  {
                    int k;
                    k = SCM_SMOBNUM (scmptr);
                    if (!(k < scm_numsmob))
                      goto sweeperr;
                    m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
            default:
            sweeperr:
              SCM_MISC_ERROR ("unknown type", SCM_EOL);
            }

          if (!--left_to_collect)
            {
              SCM_SETCAR (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object.  */
              SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
              SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }
        }

#ifdef GC_FREE_SEGMENTS
      if (n == seg_size)
        {
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--; /* We need to scan the segment just moved.  */
        }
      else
#endif /* ifdef GC_FREE_SEGMENTS */
        {
          /* Update the real freelist pointer to point to the head of
             the list of free cells we've built for this segment.  */
          freelist->cells = nfreelist;
          freelist->left_to_collect = left_to_collect;
        }

#ifdef GUILE_DEBUG_FREELIST
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME


\f
/* {Front end to malloc}
 *
 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
 * scm_done_free
 *
 * These functions provide services comparable to malloc, realloc, and
 * free.  They should be used when allocating memory that will be under
 * control of the garbage collector, i.e., if the memory may be freed
 * during garbage collection.
 */

/* scm_must_malloc
 * Return newly malloced storage or throw an error.
 *
 * The parameter WHAT is a string for error reporting.
 * If the threshold scm_mtrigger would be passed by this
 * allocation, or if the first call to malloc fails,
 * garbage collect -- on the presumption that some objects
 * using malloced storage may be collected.
 *
 * The limit scm_mtrigger may be raised by this allocation.
 */
void *
scm_must_malloc (scm_sizet size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_register (ptr, what);
#endif
          return ptr;
        }
    }

  scm_igc (what);

  nm = scm_mallocated + size;
  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  scm_memory_error (what);
}
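
/* Worked example (added commentary): suppose scm_mtrigger is 100000
 * bytes and the GC leaves scm_mallocated at 95000.  An 8000 byte
 * request then makes nm == 103000 > scm_mtrigger, so the trigger is
 * raised to nm + nm / 2 == 154500.  Had nm merely entered the 10000
 * byte hysteresis band (90000 < nm <= 100000), scm_mtrigger would
 * instead have grown by half of itself, to 150000.
 */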


/* scm_must_realloc
 * is similar to scm_must_malloc.
 */
void *
scm_must_realloc (void *where,
                  scm_sizet old_size,
                  scm_sizet size,
                  const char *what)
{
  void *ptr;
  scm_sizet nm = scm_mallocated + size - old_size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_reregister (where, ptr, what);
#endif
          return ptr;
        }
    }

  scm_igc (what);

  nm = scm_mallocated + size - old_size;
  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  scm_memory_error (what);
}


void
scm_must_free (void *obj)
#define FUNC_NAME "scm_must_free"
{
#ifdef GUILE_DEBUG_MALLOC
  scm_malloc_unregister (obj);
#endif
  if (obj)
    free (obj);
  else
    SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
}
#undef FUNC_NAME
1963
1964
1965 /* Announce that there has been some malloc done that will be freed
1966 * during gc. A typical use is for a smob that uses some malloced
1967 * memory but can not get it from scm_must_malloc (for whatever
1968 * reason). When a new object of this smob is created you call
1969 * scm_done_malloc with the size of the object. When your smob free
1970 * function is called, be sure to include this size in the return
1971 * value.
1972 *
1973 * If you can't actually free the memory in the smob free function,
1974 * for whatever reason (like reference counting), you still can (and
1975 * should) report the amount of memory freed when you actually free it.
1976 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1977 * eh? Or even better, call scm_done_free. */
1978
1979 void
1980 scm_done_malloc (long size)
1981 {
1982 scm_mallocated += size;
1983
1984 if (scm_mallocated > scm_mtrigger)
1985 {
1986 scm_igc ("foreign mallocs");
1987 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1988 {
1989 if (scm_mallocated > scm_mtrigger)
1990 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1991 else
1992 scm_mtrigger += scm_mtrigger / 2;
1993 }
1994 }
1995 }
1996
1997 void
1998 scm_done_free (long size)
1999 {
2000 scm_mallocated -= size;
2001 }
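
/* A hypothetical sketch of the accounting described above (the function
   names are invented, and the block is kept under #if 0): memory that is
   malloced directly, e.g. by a smob, is reported to the GC with
   scm_done_malloc and scm_done_free. */
#if 0
static void *
example_alloc_pixels (scm_sizet len)
{
  void *p = malloc (len);
  if (p)
    scm_done_malloc ((long) len);   /* tell the GC this memory exists */
  return p;
}

static void
example_free_pixels (void *p, scm_sizet len)
{
  free (p);
  scm_done_free ((long) len);       /* report the memory as freed */
}
#endif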
2002
2003
2004 \f
/* {Heap Segments}
 *
 * Each heap segment is an array of objects of a particular size.
 * Every segment has an associated (possibly shared) freelist.
 * A table of segment records is kept that records the upper and
 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
 * into valid segments at reasonable offsets). */
2013
/* scm_expmem
 * is set to one if the initial heap segment was allocated at its full
 * requested size (see make_initial_segment below).  If scm_expmem is
 * set to one, subsequent segment allocations will allocate segments
 * of size SCM_EXPHEAP(scm_heap_size).
 */
2019 int scm_expmem = 0;
2020
2021 scm_sizet scm_max_segment_size;
2022
2023 /* scm_heap_org
2024 * is the lowest base address of any heap segment.
2025 */
2026 SCM_CELLPTR scm_heap_org;
2027
2028 scm_heap_seg_data_t * scm_heap_table = 0;
2029 static unsigned int heap_segment_table_size = 0;
2030 int scm_n_heap_segs = 0;
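
/* A simplified sketch (not the actual marking code; the function name is
   invented and the block is kept under #if 0) of how the table above
   supports conservative root identification: an address is a candidate
   root only if it falls within some segment's bounds.  Card alignment
   and span checks are omitted here. */
#if 0
static int
example_in_heap_p (SCM_CELLPTR addr)
{
  int i;
  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], addr)
        && !SCM_PTR_LE (scm_heap_table[i].bounds[1], addr))
      return 1;                 /* addr lies inside segment i */
  return 0;
}
#endif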
2031
/* init_heap_seg
 * initializes a new heap segment and returns its size in bytes
 * (or 0 if seg_org is NULL).
 *
 * The segment origin and segment size in bytes are input parameters.
 * The freelist is both input and output.
 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record and that the markbit space was
 * reserved for all the cards in this segment.
 */
2042
2043 #define INIT_CARD(card, span) \
2044 do { \
2045 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2046 if ((span) == 2) \
2047 SCM_GC_SET_CARD_DOUBLECELL (card); \
2048 } while (0)
2049
2050 static scm_sizet
2051 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
2052 {
2053 register SCM_CELLPTR ptr;
2054 SCM_CELLPTR seg_end;
2055 int new_seg_index;
2056 int n_new_cells;
2057 int span = freelist->span;
2058
2059 if (seg_org == NULL)
2060 return 0;
2061
  /* Align the begin ptr up to a card boundary.
   */
2064 ptr = SCM_GC_CARD_UP (seg_org);
2065
  /* Compute the ceiling on valid object pointers within this segment.
   */
2068 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
2069
  /* Find the right place and insert the segment record.
   */
2073 for (new_seg_index = 0;
2074 ( (new_seg_index < scm_n_heap_segs)
2075 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2076 new_seg_index++)
2077 ;
2078
2079 {
2080 int i;
2081 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2082 scm_heap_table[i] = scm_heap_table[i - 1];
2083 }
2084
2085 ++scm_n_heap_segs;
2086
2087 scm_heap_table[new_seg_index].span = span;
2088 scm_heap_table[new_seg_index].freelist = freelist;
2089 scm_heap_table[new_seg_index].bounds[0] = ptr;
2090 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2091
  /* Count the cells in the new segment. */
2093 n_new_cells = seg_end - ptr;
2094
2095 freelist->heap_size += n_new_cells;
2096
2097 /* Partition objects in this segment into clusters */
2098 {
2099 SCM clusters;
2100 SCM *clusterp = &clusters;
2101
2102 NEXT_DATA_CELL (ptr, span);
2103 while (ptr < seg_end)
2104 {
2105 scm_cell *nxt = ptr;
2106 scm_cell *prv = NULL;
2107 scm_cell *last_card = NULL;
2108 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2109 NEXT_DATA_CELL(nxt, span);
2110
2111 /* Allocate cluster spine
2112 */
2113 *clusterp = PTR2SCM (ptr);
2114 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
2115 clusterp = SCM_CDRLOC (*clusterp);
2116 ptr = nxt;
2117
2118 while (n_data_cells--)
2119 {
2120 scm_cell *card = SCM_GC_CELL_CARD (ptr);
2121 SCM scmptr = PTR2SCM (ptr);
2122 nxt = ptr;
2123 NEXT_DATA_CELL (nxt, span);
2124 prv = ptr;
2125
2126 if (card != last_card)
2127 {
2128 INIT_CARD (card, span);
2129 last_card = card;
2130 }
2131
2132 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2133 SCM_SETCDR (scmptr, PTR2SCM (nxt));
2134
2135 ptr = nxt;
2136 }
2137
2138 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
2139 }
2140
2141 /* sanity check */
2142 {
2143 scm_cell *ref = seg_end;
2144 NEXT_DATA_CELL (ref, span);
2145 if (ref != ptr)
2146 /* [cmm] looks like the segment size doesn't divide cleanly by
2147 cluster size. bad cmm! */
2148 abort();
2149 }
2150
2151 /* Patch up the last cluster pointer in the segment
2152 * to join it to the input freelist.
2153 */
2154 *clusterp = freelist->clusters;
2155 freelist->clusters = clusters;
2156 }
2157
2158 #ifdef DEBUGINFO
2159 fprintf (stderr, "H");
2160 #endif
2161 return size;
2162 }
2163
2164 static scm_sizet
2165 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2166 {
2167 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2168
2169 return
2170 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2171 + ALIGNMENT_SLACK (freelist);
2172 }
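
/* Worked example (numbers invented): if CLUSTER_SIZE_IN_BYTES is 4096
   and len is 10000, the quotient rounds up to 3 clusters, giving
   12288 bytes plus ALIGNMENT_SLACK for card alignment. */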
2173
2174 static void
2175 alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
2176 #define FUNC_NAME "alloc_some_heap"
2177 {
2178 SCM_CELLPTR ptr;
2179 long len;
2180
2181 if (scm_gc_heap_lock)
2182 {
2183 /* Critical code sections (such as the garbage collector) aren't
2184 * supposed to add heap segments.
2185 */
      fprintf (stderr, "alloc_some_heap: Cannot extend locked heap.\n");
2187 abort ();
2188 }
2189
2190 if (scm_n_heap_segs == heap_segment_table_size)
2191 {
2192 /* We have to expand the heap segment table to have room for the new
2193 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2194 * init_heap_seg only if the allocation of the segment itself succeeds.
2195 */
2196 unsigned int new_table_size = scm_n_heap_segs + 1;
2197 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2198 scm_heap_seg_data_t * new_heap_table;
2199
2200 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2201 realloc ((char *)scm_heap_table, size)));
2202 if (!new_heap_table)
2203 {
2204 if (error_policy == abort_on_error)
2205 {
2206 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2207 abort ();
2208 }
2209 else
2210 {
2211 return;
2212 }
2213 }
2214 else
2215 {
2216 scm_heap_table = new_heap_table;
2217 heap_segment_table_size = new_table_size;
2218 }
2219 }
2220
2221 /* Pick a size for the new heap segment.
2222 * The rule for picking the size of a segment is explained in
2223 * gc.h
2224 */
2225 {
    /* Ensure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *    y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *    dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f).
     *
     * Since f is expressed as a percentage, the computation below is
     * dh > (f * h - 100 * y) / (100 - f); dividing by 99 - f instead
     * of 100 - f biases dh slightly upward.
     */
2239 int f = freelist->min_yield_fraction;
2240 long h = SCM_HEAP_SIZE;
2241 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2242 len = SCM_EXPHEAP (freelist->heap_size);
2243 #ifdef DEBUGINFO
    fprintf (stderr, "(%ld < %ld)", len, min_cells);
2245 #endif
2246 if (len < min_cells)
2247 len = min_cells + freelist->cluster_size;
2248 len *= sizeof (scm_cell);
2249 /* force new sampling */
2250 freelist->collected = LONG_MAX;
2251 }
2252
2253 if (len > scm_max_segment_size)
2254 len = scm_max_segment_size;
2255
2256 {
2257 scm_sizet smallest;
2258
2259 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2260
2261 if (len < smallest)
2262 len = smallest;
2263
2264 /* Allocate with decaying ambition. */
2265 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2266 && (len >= smallest))
2267 {
2268 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
2269 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2270 if (ptr)
2271 {
2272 init_heap_seg (ptr, rounded_len, freelist);
2273 return;
2274 }
2275 len /= 2;
2276 }
2277 }
2278
2279 if (error_policy == abort_on_error)
2280 {
2281 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2282 abort ();
2283 }
2284 }
2285 #undef FUNC_NAME
2286
2287
2288 SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
2289 (SCM name),
2290 "Flushes the glocs for @var{name}, or all glocs if @var{name}\n"
2291 "is @code{#t}.")
2292 #define FUNC_NAME s_scm_unhash_name
2293 {
2294 int x;
2295 int bound;
  if (!SCM_EQ_P (name, SCM_BOOL_T))
    SCM_VALIDATE_SYMBOL (1, name);
2297 SCM_DEFER_INTS;
2298 bound = scm_n_heap_segs;
2299 for (x = 0; x < bound; ++x)
2300 {
2301 SCM_CELLPTR p;
2302 SCM_CELLPTR pbound;
2303 p = scm_heap_table[x].bounds[0];
2304 pbound = scm_heap_table[x].bounds[1];
2305 while (p < pbound)
2306 {
2307 SCM cell = PTR2SCM (p);
2308 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
2309 {
2310 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2311 * struct cell. See the corresponding comment in scm_gc_mark.
2312 */
2313 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2314 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2315 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
2316 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
2317 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
2318 {
2319 SCM_SET_CELL_OBJECT_0 (cell, name);
2320 }
2321 }
2322 ++p;
2323 }
2324 }
2325 SCM_ALLOW_INTS;
2326 return name;
2327 }
2328 #undef FUNC_NAME
2329
2330
2331 \f
2332 /* {GC Protection Helper Functions}
2333 */
2334
2335
/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below. These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters. Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called. In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the scheme object's existence.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function', because
 * otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory. To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc, we add the call to scm_remember_upto_here_1 _after_
 * the call to 'some_function'. Note that this would not be necessary if str
 * were used anyway after the call to 'some_function'.
 *   char *chars = SCM_STRING_CHARS (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str); // str will be alive up to this point.
 */
2360
2361 void
2362 scm_remember_upto_here_1 (SCM obj)
2363 {
2364 /* Empty. Protects a single object from garbage collection. */
2365 }
2366
2367 void
2368 scm_remember_upto_here_2 (SCM obj1, SCM obj2)
2369 {
2370 /* Empty. Protects two objects from garbage collection. */
2371 }
2372
2373 void
2374 scm_remember_upto_here (SCM obj, ...)
2375 {
2376 /* Empty. Protects any number of objects from garbage collection. */
2377 }
2378
2379
2380 #if (SCM_DEBUG_DEPRECATED == 0)
2381
2382 void
2383 scm_remember (SCM *ptr)
2384 {
2385 /* empty */
2386 }
2387
2388 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2389
/*
  These functions prevent garbage collection of arguments after the
  first argument by ensuring that they remain live throughout the
  calling function, since they appear in its last expression.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */
2398 SCM
2399 scm_return_first (SCM elt, ...)
2400 {
2401 return elt;
2402 }
2403
2404 int
2405 scm_return_first_int (int i, ...)
2406 {
2407 return i;
2408 }
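
/* Usage sketch (result and str are hypothetical variables): return
   RESULT while keeping STR alive until the call completes, e.g.

     return scm_return_first (result, str);
*/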
2409
2410
2411 SCM
2412 scm_permanent_object (SCM obj)
2413 {
2414 SCM_REDEFER_INTS;
2415 scm_permobjs = scm_cons (obj, scm_permobjs);
2416 SCM_REALLOW_INTS;
2417 return obj;
2418 }
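
/* Usage sketch (my_table is a hypothetical global SCM): protect an
   object for the lifetime of the process, e.g.

     my_table = scm_permanent_object (scm_c_make_hash_table (31));
*/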
2419
2420
/* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
   i.e., it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected. It is an error to unprotect an object more often
   than it has been protected before. The function scm_protect_object returns
   OBJ.
*/
2430
2431 /* Implementation note: For every object X, there is a counter which
2432 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2433 */
2434
2435 SCM
2436 scm_protect_object (SCM obj)
2437 {
2438 SCM handle;
2439
2440 /* This critical section barrier will be replaced by a mutex. */
2441 SCM_REDEFER_INTS;
2442
2443 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2444 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
2445
2446 SCM_REALLOW_INTS;
2447
2448 return obj;
2449 }
2450
2451
2452 /* Remove any protection for OBJ established by a prior call to
2453 scm_protect_object. This function returns OBJ.
2454
2455 See scm_protect_object for more information. */
2456 SCM
2457 scm_unprotect_object (SCM obj)
2458 {
2459 SCM handle;
2460
2461 /* This critical section barrier will be replaced by a mutex. */
2462 SCM_REDEFER_INTS;
2463
2464 handle = scm_hashq_get_handle (scm_protects, obj);
2465
2466 if (SCM_IMP (handle))
2467 {
2468 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2469 abort ();
2470 }
2471 else
2472 {
2473 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2474 if (count == 0)
2475 scm_hashq_remove_x (scm_protects, obj);
2476 else
2477 SCM_SETCDR (handle, SCM_MAKINUM (count));
2478 }
2479
2480 SCM_REALLOW_INTS;
2481
2482 return obj;
2483 }
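
/* A usage sketch (the function is invented, and the block is kept under
   #if 0): protect OBJ across code that may allocate, and hence run the
   GC, while OBJ is not otherwise reachable from a marked location. */
#if 0
static void
example_with_protection (SCM obj)
{
  scm_protect_object (obj);
  /* ... code that may trigger garbage collection ... */
  scm_unprotect_object (obj);
}
#endif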
2484
2485 int terminating;
2486
2487 /* called on process termination. */
2488 #ifdef HAVE_ATEXIT
2489 static void
2490 cleanup (void)
2491 #else
2492 #ifdef HAVE_ON_EXIT
2493 extern int on_exit (void (*procp) (), int arg);
2494
2495 static void
2496 cleanup (int status, void *arg)
2497 #else
#error Do not know how to set up a cleanup handler on your system.
2499 #endif
2500 #endif
2501 {
2502 terminating = 1;
2503 scm_flush_all_ports ();
2504 }
2505
2506 \f
2507 static int
2508 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
2509 {
2510 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2511
2512 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2513 rounded_size,
2514 freelist))
2515 {
2516 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2517 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2518 rounded_size,
2519 freelist))
2520 return 1;
2521 }
2522 else
2523 scm_expmem = 1;
2524
2525 if (freelist->min_yield_fraction)
2526 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2527 / 100);
2528 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2529
2530 return 0;
2531 }
2532
2533 \f
2534 static void
2535 init_freelist (scm_freelist_t *freelist,
2536 int span,
2537 int cluster_size,
2538 int min_yield)
2539 {
2540 freelist->clusters = SCM_EOL;
2541 freelist->cluster_size = cluster_size + 1;
2542 freelist->left_to_collect = 0;
2543 freelist->clusters_allocated = 0;
2544 freelist->min_yield = 0;
2545 freelist->min_yield_fraction = min_yield;
2546 freelist->span = span;
2547 freelist->collected = 0;
2548 freelist->collected_1 = 0;
2549 freelist->heap_size = 0;
2550 }
2551
2552
/* Get an integer from an environment variable, falling back to DEF if
   VAR is unset or does not parse as a number. */
2554 static int
2555 scm_i_getenv_int (const char *var, int def)
2556 {
2557 char *end, *val = getenv (var);
2558 long res;
2559 if (!val)
2560 return def;
2561 res = strtol (val, &end, 10);
2562 if (end == val)
2563 return def;
2564 return res;
2565 }
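
/* Worked example (values invented): with GUILE_MAX_SEGMENT_SIZE unset
   or set to "abc", scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", 1048576)
   returns 1048576; with it set to "2097152", it returns 2097152. */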
2566
2567
2568 int
2569 scm_init_storage ()
2570 {
2571 scm_sizet gc_trigger_1;
2572 scm_sizet gc_trigger_2;
2573 scm_sizet init_heap_size_1;
2574 scm_sizet init_heap_size_2;
2575 scm_sizet j;
2576
2577 j = SCM_NUM_PROTECTS;
2578 while (j)
2579 scm_sys_protects[--j] = SCM_BOOL_F;
2580 scm_block_gc = 1;
2581
2582 scm_freelist = SCM_EOL;
2583 scm_freelist2 = SCM_EOL;
2584 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2585 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2586 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2587 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2588 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
2589
2590 scm_expmem = 0;
2591
2592 j = SCM_HEAP_SEG_SIZE;
2593 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2594 scm_heap_table = ((scm_heap_seg_data_t *)
2595 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
2596 heap_segment_table_size = 2;
2597
2598 mark_space_ptr = &mark_space_head;
2599
2600 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2601 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
2602 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2603 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2604 return 1;
2605
  /* scm_heap_table[0].bounds[0] can change, so do not remove scm_heap_org. */
2607 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2608
2609 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2610 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2611 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2612 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2613 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2614
  /* Initialise the table of ports. */
2616 scm_port_table = (scm_port **)
2617 malloc (sizeof (scm_port *) * scm_port_table_room);
2618 if (!scm_port_table)
2619 return 1;
2620
2621 #ifdef HAVE_ATEXIT
2622 atexit (cleanup);
2623 #else
2624 #ifdef HAVE_ON_EXIT
2625 on_exit (cleanup, 0);
2626 #endif
2627 #endif
2628
2629 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2630 SCM_SETCDR (scm_undefineds, scm_undefineds);
2631
2632 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2633 scm_nullstr = scm_makstr (0L, 0);
2634 scm_nullvect = scm_c_make_vector (0, SCM_UNDEFINED);
2635
2636 #define DEFAULT_SYMHASH_SIZE 277
2637 scm_symhash = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
2638 scm_symhash_vars = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
2639
2640 scm_stand_in_procs = SCM_EOL;
2641 scm_permobjs = SCM_EOL;
2642 scm_protects = scm_c_make_hash_table (31);
2643
2644 return 0;
2645 }
2646
2647 \f
2648
2649 SCM scm_after_gc_hook;
2650
2651 #if (SCM_DEBUG_DEPRECATED == 0)
2652 static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2653 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2654 static SCM gc_async;
2655
2656
/* The function gc_async_thunk causes the execution of the after-gc-hook. It
 * is run after the gc, as soon as asynchronous events are handled by the
 * evaluator.
 */
2661 static SCM
2662 gc_async_thunk (void)
2663 {
2664 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2665
2666 #if (SCM_DEBUG_DEPRECATED == 0)
2667
2668 /* The following code will be removed in Guile 1.5. */
2669 if (SCM_NFALSEP (scm_gc_vcell))
2670 {
2671 SCM proc = SCM_CDR (scm_gc_vcell);
2672
2673 if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
2674 scm_apply (proc, SCM_EOL, SCM_EOL);
2675 }
2676
2677 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2678
2679 return SCM_UNSPECIFIED;
2680 }
2681
2682
2683 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2684 * the garbage collection. The only purpose of this function is to mark the
2685 * gc_async (which will eventually lead to the execution of the
2686 * gc_async_thunk).
2687 */
2688 static void *
2689 mark_gc_async (void * hook_data, void *func_data, void *data)
2690 {
2691 scm_system_async_mark (gc_async);
2692 return NULL;
2693 }
2694
2695
2696 void
2697 scm_init_gc ()
2698 {
2699 SCM after_gc_thunk;
2700
2701 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
2702
2703 #if (SCM_DEBUG_DEPRECATED == 0)
2704 scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
2705 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2706 after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
2707 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
2708
2709 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2710
2711 #ifndef SCM_MAGIC_SNARFER
2712 #include "libguile/gc.x"
2713 #endif
2714 }
2715
2716 #endif /*MARK_DEPENDENCIES*/
2717
2718 /*
2719 Local Variables:
2720 c-file-style: "gnu"
2721 End:
2722 */