1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 /* SECTION: This code is compiled once.
48 */
49
50 #ifndef MARK_DEPENDENCIES
51
52 \f
53 #include <stdio.h>
54 #include "libguile/_scm.h"
55 #include "libguile/eval.h"
56 #include "libguile/stime.h"
57 #include "libguile/stackchk.h"
58 #include "libguile/struct.h"
59 #include "libguile/smob.h"
60 #include "libguile/unif.h"
61 #include "libguile/async.h"
62 #include "libguile/ports.h"
63 #include "libguile/root.h"
64 #include "libguile/strings.h"
65 #include "libguile/vectors.h"
66 #include "libguile/weaks.h"
67 #include "libguile/hashtab.h"
68 #include "libguile/tags.h"
69
70 #include "libguile/validate.h"
71 #include "libguile/gc.h"
72
73 #ifdef GUILE_DEBUG_MALLOC
74 #include "libguile/debug-malloc.h"
75 #endif
76
77 #ifdef HAVE_MALLOC_H
78 #include <malloc.h>
79 #endif
80
81 #ifdef HAVE_UNISTD_H
82 #include <unistd.h>
83 #endif
84
85 #ifdef __STDC__
86 #include <stdarg.h>
87 #define var_start(x, y) va_start(x, y)
88 #else
89 #include <varargs.h>
90 #define var_start(x, y) va_start(x)
91 #endif
92
93 \f
94
95 unsigned int scm_gc_running_p = 0;
96
97 \f
98
99 #if (SCM_DEBUG_CELL_ACCESSES == 1)
100
101 unsigned int scm_debug_cell_accesses_p = 0;
102
103
104 /* Assert that the given object is a valid reference to a valid cell. This
105 * test involves determining whether the object is a cell pointer, whether
106 * this pointer actually points into a heap segment, and whether the cell
107 * pointed to is not a free cell.
108 */
109 void
110 scm_assert_cell_valid (SCM cell)
111 {
112 if (scm_debug_cell_accesses_p)
113 {
114 scm_debug_cell_accesses_p = 0; /* disable to avoid recursion */
115
116 if (!scm_cellp (cell))
117 {
118 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
119 abort ();
120 }
121 else if (!scm_gc_running_p)
122 {
123 /* Dirk::FIXME:: During garbage collection, references to free cells
124 can occur. This is all right during conservative marking, but
125 should not happen otherwise (I think). The case of free cells
126 accessed during conservative marking is handled in function
127 scm_mark_locations. However, accesses to free cells still occur
128 during gc. I don't understand why this happens. If it is
129 a bug and gets fixed, the following test should also work while
130 gc is running.
131 */
132 if (SCM_FREE_CELL_P (cell))
133 {
134 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
135 abort ();
136 }
137 }
138 scm_debug_cell_accesses_p = 1; /* re-enable */
139 }
140 }
141
142
143 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
144 (SCM flag),
145 "If FLAG is #f, cell access checking is disabled.\n"
146 "If FLAG is #t, cell access checking is enabled.\n"
147 "This procedure only exists because the compile-time flag\n"
148 "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
149 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
150 {
151 if (SCM_FALSEP (flag)) {
152 scm_debug_cell_accesses_p = 0;
153 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
154 scm_debug_cell_accesses_p = 1;
155 } else {
156 SCM_WRONG_TYPE_ARG (1, flag);
157 }
158 return SCM_UNSPECIFIED;
159 }
160 #undef FUNC_NAME
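
/* Example usage from Scheme, in a build compiled with
 * SCM_DEBUG_CELL_ACCESSES == 1 (illustrative session):
 *
 *   (set-debug-cell-accesses! #t)   ; enable checking
 *   ...                             ; exercise the suspect code
 *   (set-debug-cell-accesses! #f)   ; disable checking again
 */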
161
162 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
163
164 \f
165
166 /* {heap tuning parameters}
167 *
168 * These are parameters for controlling memory allocation. The heap
169 * is the area out of which cons pairs and object headers are allocated.
170 *
171 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
172 * 64 bit machine. The units of the _SIZE parameters are bytes.
173 * Cons pairs and object headers occupy one heap cell.
174 *
175 * SCM_INIT_HEAP_SIZE is the initial size of the heap. If this much heap is
176 * allocated initially, the heap will grow by half its current size
177 * each subsequent time more heap is needed.
178 *
179 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
180 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
181 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
182 * is in scm_init_storage() and alloc_some_heap() in sys.c.
183 *
184 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
185 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
186 *
187 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more heap
188 * is needed.
189 *
190 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
191 * trigger a GC.
192 *
193 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
194 * reclaimed by a GC triggered by must_malloc. If less than this is
195 * reclaimed, the trigger threshold is raised. [I don't know what a
196 * good value is. I arbitrarily chose 1/10 of INIT_MALLOC_LIMIT to
197 * work around an oscillation that caused almost constant GC.]
198 */
199
200 /*
201 * A heap size of 45000 cells and a 40% min yield give quick startup and
202 * no extra heap allocation. Higher min yield values may lead to
203 * large heaps, especially if a program varies its
204 * maximum consumption between different freelists.
205 */
206
207 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
208 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
209 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
210 int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
211 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
212 int scm_default_min_yield_1 = 40;
213
214 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
215 int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
216 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
217 /* The following value may seem large, but note that if we get to GC at
218 * all, it means that we have a numerically intensive application.
219 */
220 int scm_default_min_yield_2 = 40;
221
222 int scm_default_max_segment_size = 2097000L; /* a little less than 2 MB */
223
224 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
225 #ifdef _QC
226 # define SCM_HEAP_SEG_SIZE 32768L
227 #else
228 # ifdef sequent
229 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
230 # else
231 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
232 # endif
233 #endif
234 /* Make the heap grow by a factor of 1.5 (the increment is half the current size). */
235 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
236 #define SCM_INIT_MALLOC_LIMIT 100000
237 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
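
/* A worked example of the tuning macros above (illustrative numbers):
 * with a current heap of 1000000 bytes, SCM_EXPHEAP yields an increment
 * of 1000000 / 2 = 500000 bytes, i.e. growth by a factor of 1.5. The
 * initial malloc trigger is SCM_INIT_MALLOC_LIMIT = 100000 bytes, and a
 * GC that reclaims less than SCM_MTRIGGER_HYSTERESIS = 10000 bytes of
 * malloced storage causes the trigger to be raised (see scm_must_malloc
 * below).
 */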
238
239 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
240 aligned inner bounds for allocated storage */
241
242 #ifdef PROT386
243 /*in 386 protected mode we must only adjust the offset */
244 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
245 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
246 #else
247 # ifdef _UNICOS
248 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
249 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
250 # else
251 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
252 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
253 # endif /* UNICOS */
254 #endif /* PROT386 */
255
256 #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
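
/* A worked example of the alignment macros above, assuming a 32 bit
 * machine where sizeof (scm_cell) == 8:
 *
 *   CELL_UP (0x1003, 1) == 0x1008   -- round up to the next cell boundary
 *   CELL_DN (0x100f, 1) == 0x1008   -- round down to a cell boundary
 *   CELL_UP (0x1003, 2) == 0x1010   -- double cells align to 16 bytes
 *
 * DOUBLECELL_ALIGNED_P masks against 2 * sizeof (scm_cell) - 1 == 15,
 * so 0x1010 is double-cell aligned while 0x1008 is not.
 */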
257
258 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
259 #define CLUSTER_SIZE_IN_BYTES(freelist) \
260 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
261
262 \f
263 /* scm_freelists
264 */
265
266 typedef struct scm_freelist_t {
267 /* collected cells */
268 SCM cells;
269 /* number of cells left to collect before cluster is full */
270 unsigned int left_to_collect;
271 /* number of clusters which have been allocated */
272 unsigned int clusters_allocated;
273 /* a list of freelists, each of size cluster_size,
274 * except the last one which may be shorter
275 */
276 SCM clusters;
277 SCM *clustertail;
278 /* this is the number of objects in each cluster, including the spine cell */
279 int cluster_size;
280 /* indicates that we should grow the heap instead of collecting
281 */
282 int grow_heap_p;
283 /* minimum yield on this list in order not to grow the heap
284 */
285 long min_yield;
286 /* defines min_yield as percent of total heap size
287 */
288 int min_yield_fraction;
289 /* number of cells per object on this list */
290 int span;
291 /* number of collected cells during last GC */
292 long collected;
293 /* number of collected cells during penultimate GC */
294 long collected_1;
295 /* total number of cells in heap segments
296 * belonging to this list.
297 */
298 long heap_size;
299 } scm_freelist_t;
300
301 SCM scm_freelist = SCM_EOL;
302 scm_freelist_t scm_master_freelist = {
303 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
304 };
305 SCM scm_freelist2 = SCM_EOL;
306 scm_freelist_t scm_master_freelist2 = {
307 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
308 };
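
#if 0
/* An illustrative sketch (not part of Guile): walk a master freelist and
 * count the free cells in each cluster, to show how the `clusters' chain
 * of scm_freelist_t is laid out. A full cluster should show
 * cluster_size - 1 cells, since the spine cell is not handed out (cf.
 * master_cells_allocated below). Compare free_list_lengths, which does
 * this for real in --enable-guile-debug builds. */
static void
show_clusters (scm_freelist_t *master)
{
  SCM clusters;
  int i = 0;
  for (clusters = master->clusters;
       !SCM_NULLP (clusters);
       clusters = SCM_CDR (clusters), ++i)
    {
      int n = 0;
      SCM ls;
      for (ls = SCM_CAR (clusters); !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
        ++n;
      fprintf (stderr, "cluster %d: %d cells\n", i, n);
    }
}
#endif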
309
310 /* scm_mtrigger
311 * is the number of bytes of must_malloc allocation needed to trigger gc.
312 */
313 unsigned long scm_mtrigger;
314
315 /* scm_gc_heap_lock
316 * If set, don't expand the heap. Set only during gc, during which no allocation
317 * is supposed to take place anyway.
318 */
319 int scm_gc_heap_lock = 0;
320
321 /* GC Blocking
322 * Don't pause for collection if this is set -- just
323 * expand the heap.
324 */
325 int scm_block_gc = 1;
326
327 /* During collection, this accumulates objects holding
328 * weak references.
329 */
330 SCM scm_weak_vectors;
331
332 /* During collection, this accumulates structures which are to be freed.
333 */
334 SCM scm_structs_to_free;
335
336 /* GC Statistics Keeping
337 */
338 unsigned long scm_cells_allocated = 0;
339 long scm_mallocated = 0;
340 unsigned long scm_gc_cells_collected;
341 unsigned long scm_gc_yield;
342 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
343 unsigned long scm_gc_malloc_collected;
344 unsigned long scm_gc_ports_collected;
345 unsigned long scm_gc_time_taken = 0;
346 static unsigned long t_before_gc;
347 static unsigned long t_before_sweep;
348 unsigned long scm_gc_mark_time_taken = 0;
349 unsigned long scm_gc_sweep_time_taken = 0;
350 unsigned long scm_gc_times = 0;
351 unsigned long scm_gc_cells_swept = 0;
352 double scm_gc_cells_marked_acc = 0.;
353 double scm_gc_cells_swept_acc = 0.;
354
355 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
356 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
357 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
358 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
359 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
360 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
361 SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
362 SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
363 SCM_SYMBOL (sym_times, "gc-times");
364 SCM_SYMBOL (sym_cells_marked, "cells-marked");
365 SCM_SYMBOL (sym_cells_swept, "cells-swept");
366
367 typedef struct scm_heap_seg_data_t
368 {
369 /* lower and upper bounds of the segment */
370 SCM_CELLPTR bounds[2];
371
372 /* address of the head-of-freelist pointer for this segment's cells.
373 All segments usually point to the same one, scm_freelist. */
374 scm_freelist_t *freelist;
375
376 /* number of cells per object in this segment */
377 int span;
378 } scm_heap_seg_data_t;
379
380
381
382 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
383
384 typedef enum { return_on_error, abort_on_error } policy_on_error;
385 static void alloc_some_heap (scm_freelist_t *, policy_on_error);
386
387
388 #define SCM_HEAP_SIZE \
389 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
390 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
391
392 #define BVEC_GROW_SIZE 256
393 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
394 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
395
396 /* mark space allocation */
397
398 typedef struct scm_mark_space_t
399 {
400 scm_c_bvec_limb_t *bvec_space;
401 struct scm_mark_space_t *next;
402 } scm_mark_space_t;
403
404 static scm_mark_space_t *current_mark_space;
405 static scm_mark_space_t **mark_space_ptr;
406 static int current_mark_space_offset;
407 static scm_mark_space_t *mark_space_head;
408
409 static scm_c_bvec_limb_t *
410 get_bvec ()
411 #define FUNC_NAME "get_bvec"
412 {
413 scm_c_bvec_limb_t *res;
414
415 if (!current_mark_space)
416 {
417 SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
418 if (!current_mark_space)
419 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
420
421 current_mark_space->bvec_space = NULL;
422 current_mark_space->next = NULL;
423
424 *mark_space_ptr = current_mark_space;
425 mark_space_ptr = &(current_mark_space->next);
426
427 return get_bvec ();
428 }
429
430 if (!(current_mark_space->bvec_space))
431 {
432 SCM_SYSCALL (current_mark_space->bvec_space =
433 (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
434 if (!(current_mark_space->bvec_space))
435 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
436
437 current_mark_space_offset = 0;
438
439 return get_bvec ();
440 }
441
442 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
443 {
444 current_mark_space = NULL;
445
446 return get_bvec ();
447 }
448
449 res = current_mark_space->bvec_space + current_mark_space_offset;
450 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
451
452 return res;
453 }
454 #undef FUNC_NAME
455
456
457 static void
458 clear_mark_space ()
459 {
460 scm_mark_space_t *ms;
461
462 for (ms = mark_space_head; ms; ms = ms->next)
463 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
464 }
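
#if 0
/* An illustrative sketch (not part of Guile): successive calls to
 * get_bvec () bump-allocate from the same BVEC_GROW_SIZE_IN_BYTES chunk,
 * SCM_GC_CARD_BVEC_SIZE_IN_LIMBS limbs at a time, until the chunk is
 * exhausted and a fresh scm_mark_space_t is chained onto the list that
 * clear_mark_space later wipes. */
static void
bvec_demo ()
{
  scm_c_bvec_limb_t *a = get_bvec ();
  scm_c_bvec_limb_t *b = get_bvec ();
  /* within one chunk, consecutive bvecs are adjacent: */
  fprintf (stderr, "adjacent: %d\n",
           b == a + SCM_GC_CARD_BVEC_SIZE_IN_LIMBS);
}
#endif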
465
466
467 \f
468 /* Debugging functions. */
469
470 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
471
472 /* Return the number of the heap segment containing CELL. */
473 static int
474 which_seg (SCM cell)
475 {
476 int i;
477
478 for (i = 0; i < scm_n_heap_segs; i++)
479 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
480 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
481 return i;
482 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
483 SCM_UNPACK (cell));
484 abort ();
485 }
486
487
488 static void
489 map_free_list (scm_freelist_t *master, SCM freelist)
490 {
491 int last_seg = -1, count = 0;
492 SCM f;
493
494 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
495 {
496 int this_seg = which_seg (f);
497
498 if (this_seg != last_seg)
499 {
500 if (last_seg != -1)
501 fprintf (stderr, " %5d %d-cells in segment %d\n",
502 count, master->span, last_seg);
503 last_seg = this_seg;
504 count = 0;
505 }
506 count++;
507 }
508 if (last_seg != -1)
509 fprintf (stderr, " %5d %d-cells in segment %d\n",
510 count, master->span, last_seg);
511 }
512
513 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
514 (),
515 "Print debugging information about the free-list.\n"
516 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
517 #define FUNC_NAME s_scm_map_free_list
518 {
519 int i;
520 fprintf (stderr, "%d segments total (%d:%d",
521 scm_n_heap_segs,
522 scm_heap_table[0].span,
523 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
524 for (i = 1; i < scm_n_heap_segs; i++)
525 fprintf (stderr, ", %d:%d",
526 scm_heap_table[i].span,
527 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
528 fprintf (stderr, ")\n");
529 map_free_list (&scm_master_freelist, scm_freelist);
530 map_free_list (&scm_master_freelist2, scm_freelist2);
531 fflush (stderr);
532
533 return SCM_UNSPECIFIED;
534 }
535 #undef FUNC_NAME
536
537 static int last_cluster;
538 static int last_size;
539
540 static int
541 free_list_length (char *title, int i, SCM freelist)
542 {
543 SCM ls;
544 int n = 0;
545 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
546 if (SCM_FREE_CELL_P (ls))
547 ++n;
548 else
549 {
550 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
551 abort ();
552 }
553 if (n != last_size)
554 {
555 if (i > 0)
556 {
557 if (last_cluster == i - 1)
558 fprintf (stderr, "\t%d\n", last_size);
559 else
560 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
561 }
562 if (i >= 0)
563 fprintf (stderr, "%s %d", title, i);
564 else
565 fprintf (stderr, "%s\t%d\n", title, n);
566 last_cluster = i;
567 last_size = n;
568 }
569 return n;
570 }
571
572 static void
573 free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
574 {
575 SCM clusters;
576 int i = 0, len, n = 0;
577 fprintf (stderr, "%s\n\n", title);
578 n += free_list_length ("free list", -1, freelist);
579 for (clusters = master->clusters;
580 SCM_NNULLP (clusters);
581 clusters = SCM_CDR (clusters))
582 {
583 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
584 n += len;
585 }
586 if (last_cluster == i - 1)
587 fprintf (stderr, "\t%d\n", last_size);
588 else
589 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
590 fprintf (stderr, "\ntotal %d objects\n\n", n);
591 }
592
593 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
594 (),
595 "Print debugging information about the free-list.\n"
596 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
597 #define FUNC_NAME s_scm_free_list_length
598 {
599 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
600 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
601 return SCM_UNSPECIFIED;
602 }
603 #undef FUNC_NAME
604
605 #endif
606
607 #ifdef GUILE_DEBUG_FREELIST
608
609 /* Number of calls to SCM_NEWCELL since startup. */
610 static unsigned long scm_newcell_count;
611 static unsigned long scm_newcell2_count;
612
613 /* Search freelist for anything that isn't marked as a free cell.
614 Abort if we find something. */
615 static void
616 scm_check_freelist (SCM freelist)
617 {
618 SCM f;
619 int i = 0;
620
621 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
622 if (!SCM_FREE_CELL_P (f))
623 {
624 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
625 scm_newcell_count, i);
626 abort ();
627 }
628 }
629
630 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
631 (SCM flag),
632 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
633 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
634 "compile-time flag was selected.\n")
635 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
636 {
637 /* [cmm] I did a double-take when I read this code the first time.
638 well, FWIW. */
639 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
640 return SCM_UNSPECIFIED;
641 }
642 #undef FUNC_NAME
643
644
645 SCM
646 scm_debug_newcell (void)
647 {
648 SCM new;
649
650 scm_newcell_count++;
651 if (scm_debug_check_freelist)
652 {
653 scm_check_freelist (scm_freelist);
654 scm_gc();
655 }
656
657 /* The rest of this is supposed to be identical to the SCM_NEWCELL
658 macro. */
659 if (SCM_NULLP (scm_freelist))
660 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
661 else
662 {
663 new = scm_freelist;
664 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
665 }
666
667 return new;
668 }
669
670 SCM
671 scm_debug_newcell2 (void)
672 {
673 SCM new;
674
675 scm_newcell2_count++;
676 if (scm_debug_check_freelist)
677 {
678 scm_check_freelist (scm_freelist2);
679 scm_gc ();
680 }
681
682 /* The rest of this is supposed to be identical to the SCM_NEWCELL
683 macro. */
684 if (SCM_NULLP (scm_freelist2))
685 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
686 else
687 {
688 new = scm_freelist2;
689 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
690 }
691
692 return new;
693 }
694
695 #endif /* GUILE_DEBUG_FREELIST */
696
697 \f
698
699 static unsigned long
700 master_cells_allocated (scm_freelist_t *master)
701 {
702 /* the '- 1' below is to ignore the cluster spine cells. */
703 int objects = master->clusters_allocated * (master->cluster_size - 1);
704 if (SCM_NULLP (master->clusters))
705 objects -= master->left_to_collect;
706 return master->span * objects;
707 }
708
709 static unsigned long
710 freelist_length (SCM freelist)
711 {
712 int n;
713 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
714 ++n;
715 return n;
716 }
717
718 static unsigned long
719 compute_cells_allocated ()
720 {
721 return (scm_cells_allocated
722 + master_cells_allocated (&scm_master_freelist)
723 + master_cells_allocated (&scm_master_freelist2)
724 - scm_master_freelist.span * freelist_length (scm_freelist)
725 - scm_master_freelist2.span * freelist_length (scm_freelist2));
726 }
727
728 /* {Scheme Interface to GC}
729 */
730
731 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
732 (),
733 "Returns an association list of statistics about Guile's current use of storage.")
734 #define FUNC_NAME s_scm_gc_stats
735 {
736 int i;
737 int n;
738 SCM heap_segs;
739 long int local_scm_mtrigger;
740 long int local_scm_mallocated;
741 long int local_scm_heap_size;
742 long int local_scm_cells_allocated;
743 long int local_scm_gc_time_taken;
744 long int local_scm_gc_times;
745 long int local_scm_gc_mark_time_taken;
746 long int local_scm_gc_sweep_time_taken;
747 double local_scm_gc_cells_swept;
748 double local_scm_gc_cells_marked;
749 SCM answer;
750
751 SCM_DEFER_INTS;
752
753 ++scm_block_gc;
754
755 retry:
756 heap_segs = SCM_EOL;
757 n = scm_n_heap_segs;
758 for (i = scm_n_heap_segs; i--; )
759 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
760 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
761 heap_segs);
762 if (scm_n_heap_segs != n)
763 goto retry;
764
765 --scm_block_gc;
766
767 /* Below, we cons to produce the resulting list. We want a snapshot of
768 * the heap situation before consing.
769 */
770 local_scm_mtrigger = scm_mtrigger;
771 local_scm_mallocated = scm_mallocated;
772 local_scm_heap_size = SCM_HEAP_SIZE;
773 local_scm_cells_allocated = compute_cells_allocated ();
774 local_scm_gc_time_taken = scm_gc_time_taken;
775 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
776 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
777 local_scm_gc_times = scm_gc_times;
778 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
779 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
780
781 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
782 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
783 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
784 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
785 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
786 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
787 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
788 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
789 scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
790 scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
791 scm_cons (sym_heap_segments, heap_segs),
792 SCM_UNDEFINED);
793 SCM_ALLOW_INTS;
794 return answer;
795 }
796 #undef FUNC_NAME
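
/* Illustrative use from Scheme (all numbers below are made up):
 *
 *   guile> (gc-stats)
 *   ((gc-time-taken . 23) (cells-allocated . 120000)
 *    (cell-heap-size . 180000) (bytes-malloced . 190000)
 *    (gc-malloc-threshold . 200000) (gc-times . 4)
 *    (gc-mark-time-taken . 16) (gc-sweep-time-taken . 7)
 *    (cells-marked . 86000) (cells-swept . 340000)
 *    (cell-heap-segments (1073774592 . 1073741824)))
 */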
797
798
799 static void
800 gc_start_stats (const char *what)
801 {
802 t_before_gc = scm_c_get_internal_run_time ();
803 scm_gc_cells_swept = 0;
804 scm_gc_cells_collected = 0;
805 scm_gc_yield_1 = scm_gc_yield;
806 scm_gc_yield = (scm_cells_allocated
807 + master_cells_allocated (&scm_master_freelist)
808 + master_cells_allocated (&scm_master_freelist2));
809 scm_gc_malloc_collected = 0;
810 scm_gc_ports_collected = 0;
811 }
812
813
814 static void
815 gc_end_stats ()
816 {
817 unsigned long t = scm_c_get_internal_run_time ();
818 scm_gc_time_taken += (t - t_before_gc);
819 scm_gc_sweep_time_taken += (t - t_before_sweep);
820 ++scm_gc_times;
821
822 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
823 scm_gc_cells_swept_acc += scm_gc_cells_swept;
824 }
825
826
827 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
828 (SCM obj),
829 "Return an integer that, for the lifetime of @var{obj}, is uniquely\n"
830 "associated with @var{obj} by this function.")
831 #define FUNC_NAME s_scm_object_address
832 {
833 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
834 }
835 #undef FUNC_NAME
836
837
838 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
839 (),
840 "Scans all SCM objects and reclaims for further use those that are\n"
841 "no longer accessible.")
842 #define FUNC_NAME s_scm_gc
843 {
844 SCM_DEFER_INTS;
845 scm_igc ("call");
846 SCM_ALLOW_INTS;
847 return SCM_UNSPECIFIED;
848 }
849 #undef FUNC_NAME
850
851
852 \f
853 /* {C Interface For When GC is Triggered}
854 */
855
856 static void
857 adjust_min_yield (scm_freelist_t *freelist)
858 {
859 /* min yield is adjusted upwards so that the next predicted total yield
860 * (allocated cells actually freed by GC) becomes
861 * `min_yield_fraction' of total heap size. Note, however, that
862 * the absolute value of min_yield will correspond to `collected'
863 * on one master (the one which is currently triggering GC).
864 *
865 * The reason why we look at total yield instead of cells collected
866 * on one list is that we want to take other freelists into account.
867 * On this freelist, we know that (local) yield = collected cells,
868 * but that's probably not the case on the other lists.
869 *
870 * (We might consider computing a better prediction, for example
871 * by computing an average over multiple GCs.)
872 */
873 if (freelist->min_yield_fraction)
874 {
875 /* Pick largest of last two yields. */
876 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
877 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
878 #ifdef DEBUGINFO
879 fprintf (stderr, " after GC = %d, delta = %d\n",
880 scm_cells_allocated,
881 delta);
882 #endif
883 if (delta > 0)
884 freelist->min_yield += delta;
885 }
886 }
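
/* A worked example (illustrative numbers): with a total heap of
 * 100000 cells and min_yield_fraction = 40, the target yield is
 * 100000 * 40 / 100 = 40000 cells. If the better of the last two
 * yields was only 30000 cells, delta = 40000 - 30000 = 10000 and
 * min_yield is raised by 10000; a yield at or above the target
 * leaves min_yield unchanged, since delta <= 0.
 */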
887
888
889 /* When we get POSIX threads support, the master will be global and
890 * common while the freelist will be individual for each thread.
891 */
892
893 SCM
894 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
895 {
896 SCM cell;
897 ++scm_ints_disabled;
898 do
899 {
900 if (SCM_NULLP (master->clusters))
901 {
902 if (master->grow_heap_p || scm_block_gc)
903 {
904 /* In order to reduce gc frequency, try to allocate a new heap
905 * segment first, even if gc might find some free cells. If we
906 * can't obtain a new heap segment, we will try gc later.
907 */
908 master->grow_heap_p = 0;
909 alloc_some_heap (master, return_on_error);
910 }
911 if (SCM_NULLP (master->clusters))
912 {
913 /* The heap was not grown, either because it wasn't scheduled to
914 * grow, or because there was not enough memory available. In
915 * both cases we have to try gc to get some free cells.
916 */
917 #ifdef DEBUGINFO
918 fprintf (stderr, "allocated = %d, ",
919 scm_cells_allocated
920 + master_cells_allocated (&scm_master_freelist)
921 + master_cells_allocated (&scm_master_freelist2));
922 #endif
923 scm_igc ("cells");
924 adjust_min_yield (master);
925 if (SCM_NULLP (master->clusters))
926 {
927 /* gc could not free any cells. Now, we _must_ allocate a
928 * new heap segment, because there is no other possibility
929 * to provide a new cell for the caller.
930 */
931 alloc_some_heap (master, abort_on_error);
932 }
933 }
934 }
935 cell = SCM_CAR (master->clusters);
936 master->clusters = SCM_CDR (master->clusters);
937 ++master->clusters_allocated;
938 }
939 while (SCM_NULLP (cell));
940
941 #ifdef GUILE_DEBUG_FREELIST
942 scm_check_freelist (cell);
943 #endif
944
945 --scm_ints_disabled;
946 *freelist = SCM_FREE_CELL_CDR (cell);
947 return cell;
948 }
949
950
951 #if 0
952 /* This is a support routine which can be used to reserve a cluster
953 * for some special use, such as debugging. It won't be useful until
954 * free cells are preserved between garbage collections.
955 */
956
957 SCM
958 scm_alloc_cluster (scm_freelist_t *master)
959 {
960 SCM freelist, cell;
961 cell = scm_gc_for_newcell (master, &freelist);
962 SCM_SETCDR (cell, freelist);
963 return cell;
964 }
965 #endif
966
967
968 scm_c_hook_t scm_before_gc_c_hook;
969 scm_c_hook_t scm_before_mark_c_hook;
970 scm_c_hook_t scm_before_sweep_c_hook;
971 scm_c_hook_t scm_after_sweep_c_hook;
972 scm_c_hook_t scm_after_gc_c_hook;
973
974
975 void
976 scm_igc (const char *what)
977 {
978 int j;
979
980 ++scm_gc_running_p;
981 scm_c_hook_run (&scm_before_gc_c_hook, 0);
982 #ifdef DEBUGINFO
983 fprintf (stderr,
984 SCM_NULLP (scm_freelist)
985 ? "*"
986 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
987 #endif
988 #ifdef USE_THREADS
989 /* During the critical section, only the current thread may run. */
990 SCM_THREAD_CRITICAL_SECTION_START;
991 #endif
992
993 /* fprintf (stderr, "gc: %s\n", what); */
994
995 if (!scm_stack_base || scm_block_gc)
996 {
997 --scm_gc_running_p;
998 return;
999 }
1000
1001 gc_start_stats (what);
1002
1003 if (scm_mallocated < 0)
1004 /* The byte count of allocated objects has underflowed. This is
1005 probably because you forgot to report the sizes of objects you
1006 have allocated, by calling scm_done_malloc or some such. When
1007 the GC freed them, it subtracted their size from
1008 scm_mallocated, which underflowed. */
1009 abort ();
1010
1011 if (scm_gc_heap_lock)
1012 /* We've invoked the collector while a GC is already in progress.
1013 That should never happen. */
1014 abort ();
1015
1016 ++scm_gc_heap_lock;
1017
1018 /* flush dead entries from the continuation stack */
1019 {
1020 int x;
1021 int bound;
1022 SCM * elts;
1023 elts = SCM_VELTS (scm_continuation_stack);
1024 bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
1025 x = SCM_INUM (scm_continuation_stack_ptr);
1026 while (x < bound)
1027 {
1028 elts[x] = SCM_BOOL_F;
1029 ++x;
1030 }
1031 }
1032
1033 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1034
1035 clear_mark_space ();
1036
1037 #ifndef USE_THREADS
1038
1039 /* Mark objects on the C stack. */
1040 SCM_FLUSH_REGISTER_WINDOWS;
1041 /* This assumes that all registers are saved into the jmp_buf */
1042 setjmp (scm_save_regs_gc_mark);
1043 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
1044 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
1045 sizeof scm_save_regs_gc_mark)
1046 / sizeof (SCM_STACKITEM)));
1047
1048 {
1049 scm_sizet stack_len = scm_stack_size (scm_stack_base);
1050 #ifdef SCM_STACK_GROWS_UP
1051 scm_mark_locations (scm_stack_base, stack_len);
1052 #else
1053 scm_mark_locations (scm_stack_base - stack_len, stack_len);
1054 #endif
1055 }
1056
1057 #else /* USE_THREADS */
1058
1059 /* Mark every thread's stack and registers */
1060 scm_threads_mark_stacks ();
1061
1062 #endif /* USE_THREADS */
1063
1064 j = SCM_NUM_PROTECTS;
1065 while (j--)
1066 scm_gc_mark (scm_sys_protects[j]);
1067
1068 /* FIXME: we should have a means to register C functions to be run
1069 * in different phases of GC
1070 */
1071 scm_mark_subr_table ();
1072
1073 #ifndef USE_THREADS
1074 scm_gc_mark (scm_root->handle);
1075 #endif
1076
1077 t_before_sweep = scm_c_get_internal_run_time ();
1078 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1079
1080 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
1081
1082 scm_gc_sweep ();
1083
1084 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1085
1086 --scm_gc_heap_lock;
1087 gc_end_stats ();
1088
1089 #ifdef USE_THREADS
1090 SCM_THREAD_CRITICAL_SECTION_END;
1091 #endif
1092 scm_c_hook_run (&scm_after_gc_c_hook, 0);
1093 --scm_gc_running_p;
1094 }
1095
1096 \f
1097
1098 /* {Mark/Sweep}
1099 */
1100
1101 #define MARK scm_gc_mark
1102 #define FNAME "scm_gc_mark"
1103
1104 #endif /*!MARK_DEPENDENCIES*/
1105
1106 /* Mark an object precisely.
1107 */
1108 void
1109 MARK (SCM p)
1110 #define FUNC_NAME FNAME
1111 {
1112 register long i;
1113 register SCM ptr;
1114
1115 #ifndef MARK_DEPENDENCIES
1116 # define RECURSE scm_gc_mark
1117 #else
1118 /* go through the usual marking, but not for self-cycles. */
1119 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1120 #endif
1121 ptr = p;
1122
1123 #ifdef MARK_DEPENDENCIES
1124 goto gc_mark_loop_first_time;
1125 #endif
1126
1127 gc_mark_loop:
1128 if (SCM_IMP (ptr))
1129 return;
1130
1131 gc_mark_nimp:
1132
1133 #ifdef MARK_DEPENDENCIES
1134 if (SCM_EQ_P (ptr, p))
1135 return;
1136
1137 scm_gc_mark (ptr);
1138 return;
1139
1140 gc_mark_loop_first_time:
1141 #endif
1142
1143 if (!SCM_CELLP (ptr))
1144 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1145
1146 #if (defined (GUILE_DEBUG_FREELIST))
1147
1148 if (SCM_GC_IN_CARD_HEADERP (SCM2PTR (ptr)))
1149 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1150
1151 #endif
1152
1153 #ifndef MARK_DEPENDENCIES
1154
1155 if (SCM_GCMARKP (ptr))
1156 return;
1157
1158 SCM_SETGCMARK (ptr);
1159
1160 #endif
1161
1162 switch (SCM_TYP7 (ptr))
1163 {
1164 case scm_tcs_cons_nimcar:
1165 if (SCM_IMP (SCM_CDR (ptr)))
1166 {
1167 ptr = SCM_CAR (ptr);
1168 goto gc_mark_nimp;
1169 }
1170 RECURSE (SCM_CAR (ptr));
1171 ptr = SCM_CDR (ptr);
1172 goto gc_mark_nimp;
1173 case scm_tcs_cons_imcar:
1174 ptr = SCM_CDR (ptr);
1175 goto gc_mark_loop;
1176 case scm_tc7_pws:
1177 RECURSE (SCM_CELL_OBJECT_2 (ptr));
1178 ptr = SCM_CDR (ptr);
1179 goto gc_mark_loop;
1180 case scm_tcs_cons_gloc:
1181 {
1182 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1183 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1184 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1185 * pointer to a struct vtable data region. The fact that these are
1186 * accessed in the same way restricts the possibilities for changing the
1187 * data layout of structs or heap cells.
1188 */
1189 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1190 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1191 if (vtable_data [scm_vtable_index_vcell] != 0)
1192 {
1193 /* ptr is a gloc */
1194 SCM gloc_car = SCM_PACK (word0);
1195 RECURSE (gloc_car);
1196 ptr = SCM_CDR (ptr);
1197 goto gc_mark_loop;
1198 }
1199 else
1200 {
1201 /* ptr is a struct */
1202 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
1203 int len = SCM_SYMBOL_LENGTH (layout);
1204 char * fields_desc = SCM_SYMBOL_CHARS (layout);
1205 scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);
1206
1207 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1208 {
1209 RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
1210 RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
1211 }
1212 if (len)
1213 {
1214 int x;
1215
1216 for (x = 0; x < len - 2; x += 2, ++struct_data)
1217 if (fields_desc[x] == 'p')
1218 RECURSE (SCM_PACK (*struct_data));
1219 if (fields_desc[x] == 'p')
1220 {
1221 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
1222 for (x = *struct_data++; x; --x, ++struct_data)
1223 RECURSE (SCM_PACK (*struct_data));
1224 else
1225 RECURSE (SCM_PACK (*struct_data));
1226 }
1227 }
1228 /* mark vtable */
1229 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1230 goto gc_mark_loop;
1231 }
1232 }
1233 break;
1234 case scm_tcs_closures:
1235 if (SCM_IMP (SCM_CDR (ptr)))
1236 {
1237 ptr = SCM_CLOSCAR (ptr);
1238 goto gc_mark_nimp;
1239 }
1240 RECURSE (SCM_CLOSCAR (ptr));
1241 ptr = SCM_CDR (ptr);
1242 goto gc_mark_nimp;
1243 case scm_tc7_vector:
1244 i = SCM_VECTOR_LENGTH (ptr);
1245 if (i == 0)
1246 break;
1247 while (--i > 0)
1248 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1249 RECURSE (SCM_VELTS (ptr)[i]);
1250 ptr = SCM_VELTS (ptr)[0];
1251 goto gc_mark_loop;
1252 #ifdef CCLO
1253 case scm_tc7_cclo:
1254 {
1255 unsigned long int i = SCM_CCLO_LENGTH (ptr);
1256 unsigned long int j;
1257 for (j = 1; j != i; ++j)
1258 {
1259 SCM obj = SCM_CCLO_REF (ptr, j);
1260 if (!SCM_IMP (obj))
1261 RECURSE (obj);
1262 }
1263 ptr = SCM_CCLO_REF (ptr, 0);
1264 goto gc_mark_loop;
1265 }
1266 #endif
1267 #ifdef HAVE_ARRAYS
1268 case scm_tc7_bvect:
1269 case scm_tc7_byvect:
1270 case scm_tc7_ivect:
1271 case scm_tc7_uvect:
1272 case scm_tc7_fvect:
1273 case scm_tc7_dvect:
1274 case scm_tc7_cvect:
1275 case scm_tc7_svect:
1276 #ifdef HAVE_LONG_LONGS
1277 case scm_tc7_llvect:
1278 #endif
1279 #endif
1280 case scm_tc7_string:
1281 break;
1282
1283 case scm_tc7_substring:
1284 ptr = SCM_CDR (ptr);
1285 goto gc_mark_loop;
1286
1287 case scm_tc7_wvect:
1288 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1289 scm_weak_vectors = ptr;
1290 if (SCM_IS_WHVEC_ANY (ptr))
1291 {
1292 int x;
1293 int len;
1294 int weak_keys;
1295 int weak_values;
1296
1297 len = SCM_VECTOR_LENGTH (ptr);
1298 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1299 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1300
1301 for (x = 0; x < len; ++x)
1302 {
1303 SCM alist;
1304 alist = SCM_VELTS (ptr)[x];
1305
1306 /* mark everything on the alist except the keys or
1307 * values, according to weak_values and weak_keys. */
1308 while ( SCM_CONSP (alist)
1309 && !SCM_GCMARKP (alist)
1310 && SCM_CONSP (SCM_CAR (alist)))
1311 {
1312 SCM kvpair;
1313 SCM next_alist;
1314
1315 kvpair = SCM_CAR (alist);
1316 next_alist = SCM_CDR (alist);
1317 /*
1318 * Do not do this:
1319 * SCM_SETGCMARK (alist);
1320 * SCM_SETGCMARK (kvpair);
1321 *
1322 * It may be that either the key or value is protected by
1323 * an escaped reference to part of the spine of this alist.
1324 * If we mark the spine here, and only mark one or neither of the
1325 * key and value, they may never be properly marked.
1326 * This leads to a horrible situation in which an alist containing
1327 * freelist cells is exported.
1328 *
1329 * So the spines of these alists are only marked at the very end.
1330 * If somebody confuses us by constructing a weak vector
1331 * with a circular alist then we are hosed, but at least we
1332 * won't prematurely drop table entries.
1333 */
1334 if (!weak_keys)
1335 RECURSE (SCM_CAR (kvpair));
1336 if (!weak_values)
1337 RECURSE (SCM_CDR (kvpair));
1338 alist = next_alist;
1339 }
1340 if (SCM_NIMP (alist))
1341 RECURSE (alist);
1342 }
1343 }
1344 break;
1345
1346 case scm_tc7_symbol:
1347 ptr = SCM_PROP_SLOTS (ptr);
1348 goto gc_mark_loop;
1349 case scm_tcs_subrs:
1350 break;
1351 case scm_tc7_port:
1352 i = SCM_PTOBNUM (ptr);
1353 if (!(i < scm_numptob))
1354 goto def;
1355 if (SCM_PTAB_ENTRY(ptr))
1356 RECURSE (SCM_FILENAME (ptr));
1357 if (scm_ptobs[i].mark)
1358 {
1359 ptr = (scm_ptobs[i].mark) (ptr);
1360 goto gc_mark_loop;
1361 }
1362 else
1363 return;
1364 break;
1365 case scm_tc7_smob:
1366 switch (SCM_TYP16 (ptr))
1367 { /* should be faster than going through scm_smobs */
1368 case scm_tc_free_cell:
1369 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1370 case scm_tc16_big:
1371 case scm_tc16_real:
1372 case scm_tc16_complex:
1373 break;
1374 default:
1375 i = SCM_SMOBNUM (ptr);
1376 if (!(i < scm_numsmob))
1377 goto def;
1378 if (scm_smobs[i].mark)
1379 {
1380 ptr = (scm_smobs[i].mark) (ptr);
1381 goto gc_mark_loop;
1382 }
1383 else
1384 return;
1385 }
1386 break;
1387 default:
1388 def:
1389 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1390 }
1391 #undef RECURSE
1392 }
1393 #undef FUNC_NAME
1394
1395 #ifndef MARK_DEPENDENCIES
1396
1397 #undef MARK
1398 #undef FNAME
1399
1400 /* And here we define `scm_gc_mark_dependencies', by including this
1401 * same file in itself.
1402 */
1403 #define MARK scm_gc_mark_dependencies
1404 #define FNAME "scm_gc_mark_dependencies"
1405 #define MARK_DEPENDENCIES
1406 #include "gc.c"
1407 #undef MARK_DEPENDENCIES
1408 #undef MARK
1409 #undef FNAME
1410
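
#if 0
/* An illustrative sketch of the self-inclusion trick, reduced to a
 * standalone example (hypothetical file "twice.c", not part of Guile):
 * compiling this one file defines both add_one() and add_two(), just as
 * gc.c defines both scm_gc_mark and scm_gc_mark_dependencies. */

#ifndef SECOND_PASS
# define FN add_one
# define DELTA 1
#else
# define FN add_two
# define DELTA 2
#endif

static int
FN (int x)
{
  return x + DELTA;
}

#ifndef SECOND_PASS
# undef FN
# undef DELTA
# define SECOND_PASS
# include "twice.c"
#endif
#endif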
1411
1412 /* Mark a Region Conservatively
1413 */
1414
1415 void
1416 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1417 {
1418 unsigned long m;
1419
1420 for (m = 0; m < n; ++m)
1421 {
1422 SCM obj = * (SCM *) &x[m];
1423 if (SCM_CELLP (obj))
1424 {
1425 SCM_CELLPTR ptr = SCM2PTR (obj);
1426 int i = 0;
1427 int j = scm_n_heap_segs - 1;
1428 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1429 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1430 {
1431 while (i <= j)
1432 {
1433 int seg_id;
1434 seg_id = -1;
1435 if ((i == j)
1436 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1437 seg_id = i;
1438 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1439 seg_id = j;
1440 else
1441 {
1442 int k;
1443 k = (i + j) / 2;
1444 if (k == i)
1445 break;
1446 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1447 {
1448 j = k;
1449 ++i;
1450 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1451 continue;
1452 else
1453 break;
1454 }
1455 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1456 {
1457 i = k;
1458 --j;
1459 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1460 continue;
1461 else
1462 break;
1463 }
1464 }
1465
1466 if (SCM_GC_IN_CARD_HEADERP (ptr))
1467 break;
1468
1469 if (scm_heap_table[seg_id].span == 1
1470 || DOUBLECELL_ALIGNED_P (obj))
1471 scm_gc_mark (obj);
1472
1473 break;
1474 }
1475 }
1476 }
1477 }
1478 }
1479
1480
1481 /* The function scm_cellp determines whether an SCM value can be regarded as a
1482 * pointer to a cell on the heap. Binary search is used in order to determine
1483 * the heap segment that contains the cell.
1484 */
1485 int
1486 scm_cellp (SCM value)
1487 {
1488 if (SCM_CELLP (value)) {
1489 scm_cell * ptr = SCM2PTR (value);
1490 unsigned int i = 0;
1491 unsigned int j = scm_n_heap_segs - 1;
1492
1493 while (i < j) {
1494 int k = (i + j) / 2;
1495 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1496 j = k;
1497 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1498 i = k + 1;
1499 }
1500 }
1501
1502 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1503 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1504 && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
1505 && !SCM_GC_IN_CARD_HEADERP (ptr)
1506 )
1507 return 1;
1508 else
1509 return 0;
1510 } else
1511 return 0;
1512 }
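
/* A worked example of the binary search above (illustrative bounds):
 * with four segments covering [0x1000,0x2000), [0x3000,0x4000),
 * [0x5000,0x6000) and [0x7000,0x8000), a pointer at 0x5800 starts with
 * i = 0, j = 3. First k = 1: bounds[1] = 0x4000 is not > ptr, but
 * bounds[0] = 0x3000 <= ptr, so i = 2. Then k = 2: bounds[1] = 0x6000
 * > ptr, so j = 2 and the loop ends with i == j == 2, after which the
 * final range, span and card-header checks decide the answer.
 */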
1513
1514
1515 static void
1516 gc_sweep_freelist_start (scm_freelist_t *freelist)
1517 {
1518 freelist->cells = SCM_EOL;
1519 freelist->left_to_collect = freelist->cluster_size;
1520 freelist->clusters_allocated = 0;
1521 freelist->clusters = SCM_EOL;
1522 freelist->clustertail = &freelist->clusters;
1523 freelist->collected_1 = freelist->collected;
1524 freelist->collected = 0;
1525 }
1526
1527 static void
1528 gc_sweep_freelist_finish (scm_freelist_t *freelist)
1529 {
1530 int collected;
1531 *freelist->clustertail = freelist->cells;
1532 if (!SCM_NULLP (freelist->cells))
1533 {
1534 SCM c = freelist->cells;
1535 SCM_SETCAR (c, SCM_CDR (c));
1536 SCM_SETCDR (c, SCM_EOL);
1537 freelist->collected +=
1538 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1539 }
1540 scm_gc_cells_collected += freelist->collected;
1541
1542 /* Although freelist->min_yield is used to test freelist->collected
1543 * (which is the local GC yield for freelist), it is adjusted so
1544 * that *total* yield is freelist->min_yield_fraction of total heap
1545 * size. This means that a too low yield is compensated by more
1546 * heap on the list which is currently doing most work, which is
1547 * just what we want.
1548 */
1549 collected = SCM_MAX (freelist->collected_1, freelist->collected);
1550 freelist->grow_heap_p = (collected < freelist->min_yield);
1551 }
1552
1553 #define NEXT_DATA_CELL(ptr, span) \
1554 do { \
1555 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1556 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1557 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1558 : nxt__); \
1559 } while (0)
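
/* A worked example of NEXT_DATA_CELL (for illustration, suppose a card
 * holds 256 single cells of which the first few are header cells).
 * Within a card, the macro simply rounds ptr + 1 up to the next
 * span-aligned cell. When that lands inside the next card's header,
 * SCM_GC_IN_CARD_HEADERP detects it and ptr is advanced past the
 * SCM_GC_CARD_N_HEADER_CELLS header cells to that card's first data
 * cell instead.
 */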
1560
1561 void
1562 scm_gc_sweep ()
1563 #define FUNC_NAME "scm_gc_sweep"
1564 {
1565 register SCM_CELLPTR ptr;
1566 register SCM nfreelist;
1567 register scm_freelist_t *freelist;
1568 register long m;
1569 register int span;
1570 long i;
1571 scm_sizet seg_size;
1572
1573 m = 0;
1574
1575 gc_sweep_freelist_start (&scm_master_freelist);
1576 gc_sweep_freelist_start (&scm_master_freelist2);
1577
1578 for (i = 0; i < scm_n_heap_segs; i++)
1579 {
1580 register unsigned int left_to_collect;
1581 register scm_sizet j;
1582
1583 /* Unmarked cells go onto the front of the freelist this heap
1584 segment points to. Rather than updating the real freelist
1585 pointer as we go along, we accumulate the new head in
1586 nfreelist. Then, if it turns out that the entire segment is
1587 free, we free (i.e., malloc's free) the whole segment, and
1588 simply don't assign nfreelist back into the real freelist. */
1589 freelist = scm_heap_table[i].freelist;
1590 nfreelist = freelist->cells;
1591 left_to_collect = freelist->left_to_collect;
1592 span = scm_heap_table[i].span;
1593
1594 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1595 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1596
1597 /* use only data cells in seg_size */
1598 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1599
1600 scm_gc_cells_swept += seg_size;
1601
1602 for (j = seg_size + span; j -= span; ptr += span)
1603 {
1604 SCM scmptr;
1605
1606 if (SCM_GC_IN_CARD_HEADERP (ptr))
1607 {
1608 SCM_CELLPTR nxt;
1609
1610 /* cheat here */
1611 nxt = ptr;
1612 NEXT_DATA_CELL (nxt, span);
1613 j += span;
1614
1615 ptr = nxt - span;
1616 continue;
1617 }
1618
1619 scmptr = PTR2SCM (ptr);
1620
1621 if (SCM_GCMARKP (scmptr))
1622 continue;
1623
1624 switch SCM_TYP7 (scmptr)
1625 {
1626 case scm_tcs_cons_gloc:
1627 {
1628 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1629 * struct or a gloc. See the corresponding comment in
1630 * scm_gc_mark.
1631 */
1632 scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
1633 - scm_tc3_cons_gloc);
1634 /* access as struct */
1635 scm_bits_t * vtable_data = (scm_bits_t *) word0;
1636 if (vtable_data[scm_vtable_index_vcell] == 0)
1637 {
1638 /* Structs need to be freed in a special order.
1639 * This is handled by GC C hooks in struct.c.
1640 */
1641 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1642 scm_structs_to_free = scmptr;
1643 continue;
1644 }
1645 /* fall through so that scmptr gets collected */
1646 }
1647 break;
1648 case scm_tcs_cons_imcar:
1649 case scm_tcs_cons_nimcar:
1650 case scm_tcs_closures:
1651 case scm_tc7_pws:
1652 break;
1653 case scm_tc7_wvect:
1654 m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
1655 scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
1656 break;
1657 case scm_tc7_vector:
1658 {
1659 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1660 if (length > 0)
1661 {
1662 m += length * sizeof (scm_bits_t);
1663 scm_must_free (SCM_VECTOR_BASE (scmptr));
1664 }
1665 break;
1666 }
1667 #ifdef CCLO
1668 case scm_tc7_cclo:
1669 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
1670 scm_must_free (SCM_CCLO_BASE (scmptr));
1671 break;
1672 #endif
1673 #ifdef HAVE_ARRAYS
1674 case scm_tc7_bvect:
1675 {
1676 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
1677 if (length > 0)
1678 {
1679 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1680 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1681 }
1682 }
1683 break;
1684 case scm_tc7_byvect:
1685 case scm_tc7_ivect:
1686 case scm_tc7_uvect:
1687 case scm_tc7_svect:
1688 #ifdef HAVE_LONG_LONGS
1689 case scm_tc7_llvect:
1690 #endif
1691 case scm_tc7_fvect:
1692 case scm_tc7_dvect:
1693 case scm_tc7_cvect:
1694 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
1695 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1696 break;
1697 #endif
1698 case scm_tc7_substring:
1699 break;
1700 case scm_tc7_string:
1701 m += SCM_STRING_LENGTH (scmptr) + 1;
1702 scm_must_free (SCM_STRING_CHARS (scmptr));
1703 break;
1704 case scm_tc7_symbol:
1705 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
1706 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
1707 break;
1708 case scm_tcs_subrs:
1709 /* the various "subrs" (primitives) are never freed */
1710 continue;
1711 case scm_tc7_port:
1712 if SCM_OPENP (scmptr)
1713 {
1714 int k = SCM_PTOBNUM (scmptr);
1715 if (!(k < scm_numptob))
1716 goto sweeperr;
1717 /* Keep "revealed" ports alive. */
1718 if (scm_revealed_count (scmptr) > 0)
1719 continue;
1720 /* Yes, I really do mean scm_ptobs[k].free */
1721 /* rather than scm_ptobs[k].close. .close */
1722 /* is for explicit CLOSE-PORT by the user. */
1723 m += (scm_ptobs[k].free) (scmptr);
1724 SCM_SETSTREAM (scmptr, 0);
1725 scm_remove_from_port_table (scmptr);
1726 scm_gc_ports_collected++;
1727 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1728 }
1729 break;
1730 case scm_tc7_smob:
1731 switch SCM_TYP16 (scmptr)
1732 {
1733 case scm_tc_free_cell:
1734 case scm_tc16_real:
1735 break;
1736 #ifdef SCM_BIGDIG
1737 case scm_tc16_big:
1738 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1739 scm_must_free (SCM_BDIGITS (scmptr));
1740 break;
1741 #endif /* def SCM_BIGDIG */
1742 case scm_tc16_complex:
1743 m += sizeof (scm_complex_t);
1744 scm_must_free (SCM_COMPLEX_MEM (scmptr));
1745 break;
1746 default:
1747 {
1748 int k;
1749 k = SCM_SMOBNUM (scmptr);
1750 if (!(k < scm_numsmob))
1751 goto sweeperr;
1752 m += (scm_smobs[k].free) (scmptr);
1753 break;
1754 }
1755 }
1756 break;
1757 default:
1758 sweeperr:
1759 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1760 }
1761
1762 if (!--left_to_collect)
1763 {
1764 SCM_SETCAR (scmptr, nfreelist);
1765 *freelist->clustertail = scmptr;
1766 freelist->clustertail = SCM_CDRLOC (scmptr);
1767
1768 nfreelist = SCM_EOL;
1769 freelist->collected += span * freelist->cluster_size;
1770 left_to_collect = freelist->cluster_size;
1771 }
1772 else
1773 {
1774 /* Stick the new cell on the front of nfreelist. It's
1775 critical that we mark this cell as freed; otherwise, the
1776 conservative collector might trace it as some other type
1777 of object. */
1778 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1779 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
1780 nfreelist = scmptr;
1781 }
1782 }
1783
1784 #ifdef GC_FREE_SEGMENTS /* Note: this block has bit-rotted -- `n' is not defined in this scope, so it no longer compiles if GC_FREE_SEGMENTS is defined. */
1785 if (n == seg_size)
1786 {
1787 register long j;
1788
1789 freelist->heap_size -= seg_size;
1790 free ((char *) scm_heap_table[i].bounds[0]);
1791 scm_heap_table[i].bounds[0] = 0;
1792 for (j = i + 1; j < scm_n_heap_segs; j++)
1793 scm_heap_table[j - 1] = scm_heap_table[j];
1794 scm_n_heap_segs -= 1;
1795 i--; /* We need to scan the segment just moved. */
1796 }
1797 else
1798 #endif /* ifdef GC_FREE_SEGMENTS */
1799 {
1800 /* Update the real freelist pointer to point to the head of
1801 the list of free cells we've built for this segment. */
1802 freelist->cells = nfreelist;
1803 freelist->left_to_collect = left_to_collect;
1804 }
1805
1806 #ifdef GUILE_DEBUG_FREELIST
1807 scm_map_free_list ();
1808 #endif
1809 }
1810
1811 gc_sweep_freelist_finish (&scm_master_freelist);
1812 gc_sweep_freelist_finish (&scm_master_freelist2);
1813
1814 /* When we move to POSIX threads private freelists should probably
1815 be GC-protected instead. */
1816 scm_freelist = SCM_EOL;
1817 scm_freelist2 = SCM_EOL;
1818
1819 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1820 scm_gc_yield -= scm_cells_allocated;
1821 scm_mallocated -= m;
1822 scm_gc_malloc_collected = m;
1823 }
1824 #undef FUNC_NAME
1825
1826
1827 \f
1828 /* {Front end to malloc}
1829 *
1830 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1831 * scm_done_free
1832 *
1833 * These functions provide services comparable to malloc, realloc, and
1834 * free. They are for allocating malloced parts of scheme objects.
1835 * The primary purpose of the front end is to impose calls to gc. */
1836
1837
1838 /* scm_must_malloc
1839 * Return newly malloced storage or throw an error.
1840 *
1841 * The parameter WHAT is a string for error reporting.
1842 * If the threshold scm_mtrigger would be exceeded by this
1843 * allocation, or if the first call to malloc fails,
1844 * garbage collect -- on the presumption that some objects
1845 * using malloced storage may be collected.
1846 *
1847 * The limit scm_mtrigger may be raised by this allocation.
1848 */
1849 void *
1850 scm_must_malloc (scm_sizet size, const char *what)
1851 {
1852 void *ptr;
1853 unsigned long nm = scm_mallocated + size;
1854
1855 if (nm <= scm_mtrigger)
1856 {
1857 SCM_SYSCALL (ptr = malloc (size));
1858 if (NULL != ptr)
1859 {
1860 scm_mallocated = nm;
1861 #ifdef GUILE_DEBUG_MALLOC
1862 scm_malloc_register (ptr, what);
1863 #endif
1864 return ptr;
1865 }
1866 }
1867
1868 scm_igc (what);
1869
1870 nm = scm_mallocated + size;
1871 SCM_SYSCALL (ptr = malloc (size));
1872 if (NULL != ptr)
1873 {
1874 scm_mallocated = nm;
1875 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1876 if (nm > scm_mtrigger)
1877 scm_mtrigger = nm + nm / 2;
1878 else
1879 scm_mtrigger += scm_mtrigger / 2;
1880 }
1881 #ifdef GUILE_DEBUG_MALLOC
1882 scm_malloc_register (ptr, what);
1883 #endif
1884
1885 return ptr;
1886 }
1887
1888 scm_memory_error (what);
1889 }
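
/* A worked example of the trigger logic above (illustrative numbers):
 * suppose scm_mtrigger = 100000 and scm_mallocated = 95000, and 10000
 * bytes are requested. nm = 105000 > scm_mtrigger, so we collect
 * first. After the collection nm is recomputed from the (possibly
 * smaller) scm_mallocated; if the retried malloc succeeds and nm is
 * still above scm_mtrigger - SCM_MTRIGGER_HYSTERESIS = 90000, the
 * trigger is raised: e.g. nm = 105000 > scm_mtrigger gives
 * scm_mtrigger = 105000 + 105000 / 2 = 157500.
 */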
1890
1891
1892 /* scm_must_realloc
1893 * is similar to scm_must_malloc.
1894 */
1895 void *
1896 scm_must_realloc (void *where,
1897 scm_sizet old_size,
1898 scm_sizet size,
1899 const char *what)
1900 {
1901 void *ptr;
1902 scm_sizet nm = scm_mallocated + size - old_size;
1903
1904 if (nm <= scm_mtrigger)
1905 {
1906 SCM_SYSCALL (ptr = realloc (where, size));
1907 if (NULL != ptr)
1908 {
1909 scm_mallocated = nm;
1910 #ifdef GUILE_DEBUG_MALLOC
1911 scm_malloc_reregister (where, ptr, what);
1912 #endif
1913 return ptr;
1914 }
1915 }
1916
1917 scm_igc (what);
1918
1919 nm = scm_mallocated + size - old_size;
1920 SCM_SYSCALL (ptr = realloc (where, size));
1921 if (NULL != ptr)
1922 {
1923 scm_mallocated = nm;
1924 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1925 if (nm > scm_mtrigger)
1926 scm_mtrigger = nm + nm / 2;
1927 else
1928 scm_mtrigger += scm_mtrigger / 2;
1929 }
1930 #ifdef GUILE_DEBUG_MALLOC
1931 scm_malloc_reregister (where, ptr, what);
1932 #endif
1933 return ptr;
1934 }
1935
1936 scm_memory_error (what);
1937 }
1938
1939
1940 void
1941 scm_must_free (void *obj)
1942 #define FUNC_NAME "scm_must_free"
1943 {
1944 #ifdef GUILE_DEBUG_MALLOC
1945 scm_malloc_unregister (obj);
1946 #endif
1947 if (obj)
1948 free (obj);
1949 else
1950 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
1951 }
1952 #undef FUNC_NAME
1953
1954
1955 /* Announce that there has been some malloc done that will be freed
1956 * during gc. A typical use is for a smob that uses some malloced
1957 * memory but cannot get it from scm_must_malloc (for whatever
1958 * reason). When a new object of this smob is created you call
1959 * scm_done_malloc with the size of the object. When your smob free
1960 * function is called, be sure to include this size in the return
1961 * value.
1962 *
1963 * If you can't actually free the memory in the smob free function,
1964 * for whatever reason (like reference counting), you still can (and
1965 * should) report the amount of memory freed when you actually free it.
1966 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1967 * eh? Or even better, call scm_done_free. */
1968
void
scm_done_malloc (long size)
{
  scm_mallocated += size;

  if (scm_mallocated > scm_mtrigger)
    {
      scm_igc ("foreign mallocs");
      if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
        {
          if (scm_mallocated > scm_mtrigger)
            scm_mtrigger = scm_mallocated + scm_mallocated / 2;
          else
            scm_mtrigger += scm_mtrigger / 2;
        }
    }
}

void
scm_done_free (long size)
{
  scm_mallocated -= size;
}
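
/* A minimal sketch of the pattern described above.  The smob type
 * "image" and its fields are hypothetical, for illustration only:
 *
 *   image *im = (image *) scm_must_malloc (sizeof (image), "image");
 *   im->pixels = malloc (w * h);   // plain malloc, not scm_must_malloc
 *   scm_done_malloc (w * h);       // tell the GC about the w*h bytes
 *
 * and in the corresponding smob free function:
 *
 *   scm_sizet bytes = sizeof (image) + w * h;
 *   free (im->pixels);
 *   scm_must_free (im);
 *   return bytes;                  // includes the buffer, as required
 */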


\f
/* {Heap Segments}
 *
 * Each heap segment is an array of objects of a particular size.
 * Every segment has an associated (possibly shared) freelist.
 * A table of segment records is kept that records the upper and
 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
 * into valid segments at reasonable offsets).  */
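
/* As used below, a segment record carries at least the following
 * fields (a sketch only -- see the actual definition of
 * scm_heap_seg_data_t for the authoritative layout):
 *
 *   SCM_CELLPTR bounds[2];      lower and upper extent of the segment
 *   scm_freelist_t *freelist;   the freelist this segment feeds
 *   int span;                   cells per object (1 or 2)
 */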

/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

scm_heap_seg_data_t *scm_heap_table = 0;
static unsigned int heap_segment_table_size = 0;
int scm_n_heap_segs = 0;

/* init_heap_seg
 * initializes a new heap segment and returns the segment's size in
 * bytes, or 0 if the segment pointer is NULL.
 *
 * The segment origin and segment size in bytes are input parameters.
 * The freelist is both input and output.
 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record and that the markbit space was reserved
 * for all the cards in this segment.
 */

#define INIT_CARD(card, span) \
  do {                        \
    SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
    if ((span) == 2)          \
      SCM_GC_SET_CARD_DOUBLECELL (card);      \
  } while (0)

static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Align the begin ptr up.
   */
  ptr = SCM_GC_CARD_UP (seg_org);

  /* Compute the ceiling on valid object pointers within this segment.
   */
  seg_end = SCM_GC_CARD_DOWN ((char *) seg_org + size);

  /* Find the right place and insert the segment record. */
  for (new_seg_index = 0;
       ((new_seg_index < scm_n_heap_segs)
        && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /* Number of data cells the card-aligned segment can hold.  */
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;

    NEXT_DATA_CELL (ptr, span);
    while (ptr < seg_end)
      {
        scm_cell *nxt = ptr;
        scm_cell *prv = NULL;
        scm_cell *last_card = NULL;
        int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
        NEXT_DATA_CELL (nxt, span);

        /* Allocate cluster spine
         */
        *clusterp = PTR2SCM (ptr);
        SCM_SETCAR (*clusterp, PTR2SCM (nxt));
        clusterp = SCM_CDRLOC (*clusterp);
        ptr = nxt;

        while (n_data_cells--)
          {
            scm_cell *card = SCM_GC_CELL_CARD (ptr);
            SCM scmptr = PTR2SCM (ptr);
            nxt = ptr;
            NEXT_DATA_CELL (nxt, span);
            prv = ptr;

            if (card != last_card)
              {
                INIT_CARD (card, span);
                last_card = card;
              }

            SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
            SCM_SETCDR (scmptr, PTR2SCM (nxt));

            ptr = nxt;
          }

        SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
      }

    /* sanity check */
    {
      scm_cell *ref = seg_end;
      NEXT_DATA_CELL (ref, span);
      if (ref != ptr)
        /* [cmm] looks like the segment size doesn't divide cleanly by
           cluster size.  bad cmm! */
        abort ();
    }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}

/* Round LEN up to a whole number of clusters on FREELIST, plus the
   slack needed for card alignment.  */
static scm_sizet
round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
{
  scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);

  return
    (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
    + ALIGNMENT_SLACK (freelist);
}
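
/* For instance, if CLUSTER_SIZE_IN_BYTES (freelist) came to 4096 (an
 * illustrative figure, not the actual value), a request of 10000 bytes
 * would round up to 3 * 4096 = 12288 bytes, plus the alignment
 * slack.  */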

static void
alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
#define FUNC_NAME "alloc_some_heap"
{
  SCM_CELLPTR ptr;
  long len;

  if (scm_gc_heap_lock)
    {
      /* Critical code sections (such as the garbage collector) aren't
       * supposed to add heap segments.
       */
      fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
      abort ();
    }

  if (scm_n_heap_segs == heap_segment_table_size)
    {
      /* We have to expand the heap segment table to have room for the new
       * segment.  Do not yet increment scm_n_heap_segs -- that is done by
       * init_heap_seg only if the allocation of the segment itself succeeds.
       */
      unsigned int new_table_size = scm_n_heap_segs + 1;
      size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
      scm_heap_seg_data_t *new_heap_table;

      SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
                                     realloc ((char *) scm_heap_table, size)));
      if (!new_heap_table)
        {
          if (error_policy == abort_on_error)
            {
              fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
              abort ();
            }
          else
            {
              return;
            }
        }
      else
        {
          scm_heap_table = new_heap_table;
          heap_segment_table_size = new_table_size;
        }
    }

  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  {
    /* Assure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f).  Since f is stored as a
     * percentage, the code below computes the equivalent
     * (f * h - 100 * y) / (100 - f), with 99 in the denominator to
     * bias the result slightly upwards.
     */
    int f = freelist->min_yield_fraction;
    long h = SCM_HEAP_SIZE;
    long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%ld < %ld)", len, min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    scm_sizet smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition:  halve the request on each
     * failure, until malloc succeeds or the request falls below the
     * minimum segment size.  */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
           && (len >= smallest))
      {
        scm_sizet rounded_len = round_to_cluster_size (freelist, len);
        SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
        if (ptr)
          {
            init_heap_seg (ptr, rounded_len, freelist);
            return;
          }
        len /= 2;
      }
  }

  if (error_policy == abort_on_error)
    {
      fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
      abort ();
    }
}
#undef FUNC_NAME


SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
            "Flushes the glocs for @var{name}, or all glocs if @var{name}\n"
            "is @code{#t}.")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1, name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
        {
          SCM cell = PTR2SCM (p);
          if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
            {
              /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
               * struct cell.  See the corresponding comment in scm_gc_mark.
               */
              scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
              SCM gloc_car = SCM_PACK (word0); /* access as gloc */
              SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
              if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
                  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
                {
                  SCM_SET_CELL_OBJECT_0 (cell, name);
                }
            }
          ++p;
        }
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME


\f
/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the scheme object's existence.
 *
 * Example:  We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function', because
 * otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory.  To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
 * call to 'some_function'.  Note that this would not be necessary if str was
 * used anyway after the call to 'some_function'.
 *
 *   char *chars = SCM_STRING_CHARS (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

void
scm_remember_upto_here_1 (SCM obj)
{
  /* Empty.  Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1, SCM obj2)
{
  /* Empty.  Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj, ...)
{
  /* Empty.  Protects any number of objects from garbage collection. */
}


#if (SCM_DEBUG_DEPRECATED == 0)

void
scm_remember (SCM *ptr)
{
  /* empty */
}

#endif  /* SCM_DEBUG_DEPRECATED == 0 */

/* These functions return their first argument unchanged; passing
   further objects as additional arguments keeps those objects live
   (visible to the conservative stack-scanning GC) until the call
   returns, because they are used in the last line of the code block.
   It'd be better to have a nice compiler hint to aid the conservative
   stack-scanning GC.  --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}


/* Protect OBJ for the lifetime of the process, by consing it onto the
   scm_permobjs list, which the garbage collector treats as a root.  */
SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}


/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_unprotect_object (OBJ).  Calls to scm_protect/unprotect_object nest,
   i.e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more often
   than it has been protected before.  The function scm_protect_object returns
   OBJ.
*/

/* Implementation note:  For every object X, there is a counter which
   scm_protect_object (X) increments and scm_unprotect_object (X) decrements.
*/
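
/* A short sketch of the nesting behaviour (illustrative only):
 *
 *   scm_protect_object (obj);     // count 1:  protected
 *   scm_protect_object (obj);     // count 2:  still protected
 *   scm_unprotect_object (obj);   // count 1:  still protected
 *   scm_unprotect_object (obj);   // count 0:  eligible for collection
 */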

SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}


/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}

int terminating;

/* called on process termination.  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}

\f
/* Allocate an initial heap segment of INIT_HEAP_SIZE bytes for
   FREELIST, falling back to the default SCM_HEAP_SEG_SIZE if that
   allocation fails.  Returns 0 on success, 1 on failure.  */
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);

  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                      rounded_size,
                      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                          rounded_size,
                          freelist))
        return 1;
    }
  else
    scm_expmem = 1;

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
                           / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}

\f
static void
init_freelist (scm_freelist_t *freelist,
               int span,
               int cluster_size,
               int min_yield)
{
  freelist->clusters = SCM_EOL;
  freelist->cluster_size = cluster_size + 1;
  freelist->left_to_collect = 0;
  freelist->clusters_allocated = 0;
  freelist->min_yield = 0;
  freelist->min_yield_fraction = min_yield;
  freelist->span = span;
  freelist->collected = 0;
  freelist->collected_1 = 0;
  freelist->heap_size = 0;
}


/* Get an integer from the environment variable VAR, or return DEF if
   VAR is unset or does not begin with a number.  */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *end, *val = getenv (var);
  long res;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}
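
/* The GC parameters read below can thus be tuned from the shell before
 * starting Guile.  The values in this example are illustrative, not
 * recommendations:
 *
 *   GUILE_INIT_SEGMENT_SIZE_1=262144 GUILE_MIN_YIELD_1=30 guile
 */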


int
scm_init_storage ()
{
  scm_sizet gc_trigger_1;
  scm_sizet gc_trigger_2;
  scm_sizet init_heap_size_1;
  scm_sizet init_heap_size_2;
  scm_sizet j;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
  init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
  gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
  init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
  scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  scm_heap_table = ((scm_heap_seg_data_t *)
                    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
  init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change.  do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the port table.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_c_make_vector (0, SCM_UNDEFINED);

#define DEFAULT_SYMHASH_SIZE 277
  scm_symhash = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
  scm_symhash_vars = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);

  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);

  return 0;
}

\f

SCM scm_after_gc_hook;

#if (SCM_DEBUG_DEPRECATED == 0)
static SCM scm_gc_vcell;  /* the vcell for gc-thunk. */
#endif  /* SCM_DEBUG_DEPRECATED == 0 */
static SCM gc_async;


/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);

#if (SCM_DEBUG_DEPRECATED == 0)

  /* The following code will be removed in Guile 1.5.  */
  if (SCM_NFALSEP (scm_gc_vcell))
    {
      SCM proc = SCM_CDR (scm_gc_vcell);

      if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
        scm_apply (proc, SCM_EOL, SCM_EOL);
    }

#endif  /* SCM_DEBUG_DEPRECATED == 0 */

  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void *hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}


void
scm_init_gc ()
{
  SCM after_gc_thunk;

  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

#if (SCM_DEBUG_DEPRECATED == 0)
  scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
#endif  /* SCM_DEBUG_DEPRECATED == 0 */
  after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
  gc_async = scm_system_async (after_gc_thunk);  /* protected via scm_asyncs */

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#ifndef SCM_MAGIC_SNARFER
#include "libguile/gc.x"
#endif
}

#endif  /* MARK_DEPENDENCIES */

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/