/* Copyright (C) 1995, 96, 97, 98, 99, 2000, 2001 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */

/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
   gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */

/* #define DEBUGINFO */

/* SECTION: This code is compiled once.
 */

#ifndef MARK_DEPENDENCIES

\f
#include <stdio.h>
#include <errno.h>
#include <string.h>

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/validate.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f

unsigned int scm_gc_running_p = 0;

\f
#if (SCM_DEBUG_CELL_ACCESSES == 1)

unsigned int scm_debug_cell_accesses_p = 0;


/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves determining whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment, and whether the cell
 * pointed to is not a free cell.
 */
void
scm_assert_cell_valid (SCM cell)
{
  if (scm_debug_cell_accesses_p)
    {
      scm_debug_cell_accesses_p = 0;  /* disable to avoid recursion */

      if (!scm_cellp (cell))
        {
          fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
          abort ();
        }
      else if (!scm_gc_running_p)
        {
          /* Dirk::FIXME:: During garbage collection there occur references to
             free cells.  This is all right during conservative marking, but
             should not happen otherwise (I think).  The case of free cells
             accessed during conservative marking is handled in function
             scm_mark_locations.  However, accesses to free cells still occur
             during gc.  I don't understand why this happens.  If it is
             a bug and gets fixed, the following test should also work while
             gc is running.
           */
          if (SCM_FREE_CELL_P (cell))
            {
              fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
              abort ();
            }
        }
      scm_debug_cell_accesses_p = 1;  /* re-enable */
    }
}

SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #f, cell access checking is disabled.\n"
            "If FLAG is #t, cell access checking is enabled.\n"
            "This procedure only exists because the compile-time flag\n"
            "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (SCM_FALSEP (flag)) {
    scm_debug_cell_accesses_p = 0;
  } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
    scm_debug_cell_accesses_p = 1;
  } else {
    SCM_WRONG_TYPE_ARG (1, flag);
  }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */
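
/* A minimal usage sketch (illustrative only, not part of the build, and only
 * meaningful when SCM_DEBUG_CELL_ACCESSES is 1): the check can be toggled
 * from C through the primitive's C binding defined above; the Scheme-level
 * equivalent is (set-debug-cell-accesses! #t).
 */
#if 0
static void
enable_cell_checking_example (void)
{
  scm_set_debug_cell_accesses_x (SCM_BOOL_T);   /* enable the check */
}
#endif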

\f

/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which scm_cons and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of the heap.  If this much heap
 * is allocated initially, the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type scm_sizet.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more
 * heap is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * Heap size 45000 and 40% min yield gives quick startup and no extra
 * heap allocation.  Higher min-yield values may lead to large heaps,
 * especially if a program's maximum memory consumption varies between
 * the different freelists.
 */

#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
                                     / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
int scm_default_min_yield_1 = 40;

#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
                                     / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application.
 */
int scm_default_min_yield_2 = 40;

int scm_default_max_segment_size = 2097000L; /* a little less than 2 MB */

#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make the heap grow by a factor of 1.5: the increment is half the current size. */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT / 10)
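
/* Worked example (illustrative only, assuming the definitions above): for a
 * current heap of 100000 cells, SCM_EXPHEAP yields 50000, so the grown heap
 * holds 150000 cells -- growth by a factor of 1.5.
 */
#if 0
static scm_sizet
grown_heap_size_example (scm_sizet current_size)
{
  /* new size = old size + old size / 2 */
  return current_size + SCM_EXPHEAP (current_size);
}
#endif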

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
   aligned inner bounds for allocated storage.  */

#ifdef PROT386
/* in 386 protected mode we need only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif  /* UNICOS */
#endif  /* PROT386 */

#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
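
/* Alignment example (illustrative only): with span == 1 and 8-byte cells,
 * CELL_UP rounds an address up to the next cell boundary and CELL_DN rounds
 * it down, so for any p, CELL_DN (p, 1) <= p <= CELL_UP (p, 1).
 */
#if 0
static void
cell_alignment_example (char *p)
{
  SCM_CELLPTR lo = CELL_DN (p, 1);  /* round down to a cell boundary */
  SCM_CELLPTR hi = CELL_UP (p, 1);  /* round up to a cell boundary */
  /* Both bounds are multiples of sizeof (scm_cell).  */
}
#endif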

#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)

\f
/* scm_freelists
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow the heap instead of running GC
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;

SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
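
/* Shape sketch (illustrative comment): `clusters' is a list whose spine
 * cells are ordinary heap cells; each spine car heads a chain of
 * cluster_size - 1 free cells, linked through their cdrs:
 *
 *   clusters --> [.|.] --> [.|.] --> ... --> ()
 *                 |          |
 *                 v          v
 *             free cells  free cells
 */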

/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0;  /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
static unsigned long t_before_sweep;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_sweep_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");

typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;



static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);

typedef enum { return_on_error, abort_on_error } policy_on_error;
static void alloc_some_heap (scm_freelist_t *, policy_on_error);


#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))

/* mark space allocation */

typedef struct scm_mark_space_t
{
  scm_c_bvec_limb_t *bvec_space;
  struct scm_mark_space_t *next;
} scm_mark_space_t;

static scm_mark_space_t *current_mark_space;
static scm_mark_space_t **mark_space_ptr;
static int current_mark_space_offset;
static scm_mark_space_t *mark_space_head;

static scm_c_bvec_limb_t *
get_bvec ()
#define FUNC_NAME "get_bvec"
{
  scm_c_bvec_limb_t *res;

  if (!current_mark_space)
    {
      SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
      if (!current_mark_space)
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space->bvec_space = NULL;
      current_mark_space->next = NULL;

      *mark_space_ptr = current_mark_space;
      mark_space_ptr = &(current_mark_space->next);

      return get_bvec ();
    }

  if (!(current_mark_space->bvec_space))
    {
      SCM_SYSCALL (current_mark_space->bvec_space =
                   (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
      if (!(current_mark_space->bvec_space))
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space_offset = 0;

      return get_bvec ();
    }

  if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
    {
      current_mark_space = NULL;

      return get_bvec ();
    }

  res = current_mark_space->bvec_space + current_mark_space_offset;
  current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;

  return res;
}
#undef FUNC_NAME


static void
clear_mark_space ()
{
  scm_mark_space_t *ms;

  for (ms = mark_space_head; ms; ms = ms->next)
    memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
}


\f
/* Debugging functions.  */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

/* Return the number of the heap segment containing CELL.  */
static int
which_seg (SCM cell)
{
  int i;

  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
      return i;
  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
           SCM_UNPACK (cell));
  abort ();
}


static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, " %5d %d-cells in segment %d\n",
                     count, master->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, " %5d %d-cells in segment %d\n",
             count, master->span, last_seg);
}

SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;
  fprintf (stderr, "%d segments total (%d:%d",
           scm_n_heap_segs,
           scm_heap_table[0].span,
           scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
             scm_heap_table[i].span,
             scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

static int last_cluster;
static int last_size;

static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %d\n", title, n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%d\n", last_size);
          else
            fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %d", title, i);
      else
        fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}

static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}

SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif

#ifdef GUILE_DEBUG_FREELIST

/* Number of calls to SCM_NEWCELL since startup.  */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  */
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
    if (!SCM_FREE_CELL_P (f))
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        abort ();
      }
}

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
            "This procedure only exists because the GUILE_DEBUG_FREELIST\n"
            "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* [cmm] I did a double-take when I read this code the first time.
     well, FWIW. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
    }

  return new;
}

#endif /* GUILE_DEBUG_FREELIST */

\f

static unsigned long
master_cells_allocated (scm_freelist_t *master)
{
  /* the '- 1' below is to ignore the cluster spine cells. */
  int objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}

static unsigned long
freelist_length (SCM freelist)
{
  int n;
  for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
    ++n;
  return n;
}

static unsigned long
compute_cells_allocated ()
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}

/* {Scheme Interface to GC}
 */

SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Returns an association list of statistics about Guile's current use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  long int local_scm_gc_times;
  long int local_scm_gc_mark_time_taken;
  long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                        scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                        scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                        scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                        scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                        scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                        scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                        scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                        scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
                        scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
                        scm_cons (sym_heap_segments, heap_segs),
                        SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
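
/* Usage sketch (illustrative only): reading one entry of the alist from C;
 * scm_assq is assumed to be the usual assq primitive from the list code, and
 * the sketch assumes the key is present.  At the Scheme level the equivalent
 * is (cdr (assq 'cells-allocated (gc-stats))).
 */
#if 0
static SCM
cells_allocated_example (void)
{
  SCM stats = scm_gc_stats ();
  return SCM_CDR (scm_assq (sym_cells_allocated, stats));
}
#endif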


static void
gc_start_stats (const char *what)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}


static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that, for the lifetime of @var{obj}, is\n"
            "uniquely associated with @var{obj} by this function.")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f
/* {C Interface For When GC is Triggered}
 */

static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that the next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GCs.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick the larger of the last two yields. */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
               scm_cells_allocated,
               delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}
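
/* Worked example (illustrative numbers): with a 100000-cell heap and a 40%
 * min-yield fraction, if the better of the last two yields was 30000 cells,
 * then delta = 100000 * 40 / 100 - 30000 = 10000, so min_yield is raised
 * by 10000.
 */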


/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              /* In order to reduce gc frequency, try to allocate a new heap
               * segment first, even if gc might find some free cells.  If we
               * can't obtain a new heap segment, we will try gc later.
               */
              master->grow_heap_p = 0;
              alloc_some_heap (master, return_on_error);
            }
          if (SCM_NULLP (master->clusters))
            {
              /* The heap was not grown, either because it wasn't scheduled to
               * grow, or because there was not enough memory available.  In
               * both cases we have to try gc to get some free cells.
               */
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %d, ",
                       scm_cells_allocated
                       + master_cells_allocated (&scm_master_freelist)
                       + master_cells_allocated (&scm_master_freelist2));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
              if (SCM_NULLP (master->clusters))
                {
                  /* gc could not free any cells.  Now, we _must_ allocate a
                   * new heap segment, because there is no other possibility
                   * to provide a new cell for the caller.
                   */
                  alloc_some_heap (master, abort_on_error);
                }
            }
        }
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}


#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif


scm_c_hook_t scm_before_gc_c_hook;
scm_c_hook_t scm_before_mark_c_hook;
scm_c_hook_t scm_before_sweep_c_hook;
scm_c_hook_t scm_after_sweep_c_hook;
scm_c_hook_t scm_after_gc_c_hook;


void
scm_igc (const char *what)
{
  int j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
#ifdef USE_THREADS
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  if (!scm_stack_base || scm_block_gc)
    {
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM *elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Mark objects on the C stack. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      ((scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
                                    sizeof scm_save_regs_gc_mark)
                       / sizeof (SCM_STACKITEM)));

  {
    scm_sizet stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}

\f

/* {Mark/Sweep}
 */

#define MARK scm_gc_mark
#define FNAME "scm_gc_mark"

#endif /*!MARK_DEPENDENCIES*/

/* Mark an object precisely.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

  if (!SCM_CELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);

#if (defined (GUILE_DEBUG_FREELIST))

  if (SCM_GC_IN_CARD_HEADERP (SCM2PTR (ptr)))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);

#endif

#ifndef MARK_DEPENDENCIES

  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto gc_mark_nimp;
        }
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      RECURSE (SCM_CELL_OBJECT_2 (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      {
        /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
         * or a gloc.  If it is a gloc, the cell word #0 of ptr is a pointer
         * to a heap cell.  If it is a struct, the cell word #0 of ptr is a
         * pointer to a struct vtable data region.  The fact that these are
         * accessed in the same way restricts the possibilities to change the
         * data layout of structs or heap cells.
         */
        scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_bits_t *vtable_data = (scm_bits_t *) word0;  /* access as struct */
        if (vtable_data[scm_vtable_index_vcell] != 0)
          {
            /* ptr is a gloc */
            SCM gloc_car = SCM_PACK (word0);
            RECURSE (gloc_car);
            ptr = SCM_CDR (ptr);
            goto gc_mark_loop;
          }
        else
          {
            /* ptr is a struct */
            SCM layout = SCM_PACK (vtable_data[scm_vtable_index_layout]);
            int len = SCM_SYMBOL_LENGTH (layout);
            char *fields_desc = SCM_SYMBOL_CHARS (layout);
            scm_bits_t *struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);

            if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
              {
                RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
                RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
              }
            if (len)
              {
                int x;

                for (x = 0; x < len - 2; x += 2, ++struct_data)
                  if (fields_desc[x] == 'p')
                    RECURSE (SCM_PACK (*struct_data));
                if (fields_desc[x] == 'p')
                  {
                    if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                      for (x = *struct_data++; x; --x, ++struct_data)
                        RECURSE (SCM_PACK (*struct_data));
                    else
                      RECURSE (SCM_PACK (*struct_data));
                  }
              }
            /* mark vtable */
            ptr = SCM_PACK (vtable_data[scm_vtable_index_vtable]);
            goto gc_mark_loop;
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto gc_mark_nimp;
        }
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
        unsigned long int i = SCM_CCLO_LENGTH (ptr);
        unsigned long int j;
        for (j = 1; j != i; ++j)
          {
            SCM obj = SCM_CCLO_REF (ptr, j);
            if (!SCM_IMP (obj))
              RECURSE (obj);
          }
        ptr = SCM_CCLO_REF (ptr, 0);
        goto gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_substring:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_VECTOR_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    RECURSE (SCM_CAR (kvpair));
                  if (!weak_values)
                    RECURSE (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                RECURSE (alist);
            }
        }
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
        goto def;
      if (SCM_PTAB_ENTRY (ptr))
        RECURSE (SCM_FILENAME (ptr));
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
          if (!(i < scm_numsmob))
            goto def;
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
    def:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME

#ifndef MARK_DEPENDENCIES

#undef MARK
#undef FNAME

/* And here we define `scm_gc_mark_dependencies', by including this
 * same file in itself.
 */
#define MARK scm_gc_mark_dependencies
#define FNAME "scm_gc_mark_dependencies"
#define MARK_DEPENDENCIES
#include "gc.c"
#undef MARK_DEPENDENCIES
#undef MARK
#undef FNAME


/* Mark a Region Conservatively
 */

void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = *(SCM *) &x[m];
      if (SCM_CELLP (obj))
        {
          SCM_CELLPTR ptr = SCM2PTR (obj);
          int i = 0;
          int j = scm_n_heap_segs - 1;
          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
              && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
            {
              while (i <= j)
                {
                  int seg_id;
                  seg_id = -1;
                  if ((i == j)
                      || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                    seg_id = i;
                  else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                    seg_id = j;
                  else
                    {
                      int k;
                      k = (i + j) / 2;
                      if (k == i)
                        break;
                      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                        {
                          j = k;
                          ++i;
                          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                            continue;
                          else
                            break;
                        }
                      else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                        {
                          i = k;
                          --j;
                          if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                            continue;
                          else
                            break;
                        }
                    }

                  if (SCM_GC_IN_CARD_HEADERP (ptr))
                    break;

                  if (scm_heap_table[seg_id].span == 1
                      || DOUBLECELL_ALIGNED_P (obj))
                    scm_gc_mark (obj);

                  break;
                }
            }
        }
    }
}


/* The function scm_cellp determines whether an SCM value can be regarded as a
 * pointer to a cell on the heap.  Binary search is used in order to determine
 * the heap segment that contains the cell.
 */
int
scm_cellp (SCM value)
{
  if (SCM_CELLP (value)) {
    scm_cell *ptr = SCM2PTR (value);
    unsigned int i = 0;
    unsigned int j = scm_n_heap_segs - 1;

    while (i < j) {
      int k = (i + j) / 2;
      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
        j = k;
      } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
        i = k + 1;
      }
    }

    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
        && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
        && !SCM_GC_IN_CARD_HEADERP (ptr)
        )
      return 1;
    else
      return 0;
  } else
    return 0;
}


static void
gc_sweep_freelist_start (scm_freelist_t *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}

static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  int collected;
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      SCM c = freelist->cells;
      SCM_SETCAR (c, SCM_CDR (c));
      SCM_SETCDR (c, SCM_EOL);
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}

#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
             CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
             : nxt__); \
  } while (0)
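
/* Stepping sketch (illustrative only): starting from a data cell, repeated
 * use of NEXT_DATA_CELL visits each span-aligned data cell in turn and hops
 * over the header cells at the start of the following card.
 */
#if 0
static void
walk_card_example (scm_cell *ptr, int span)
{
  scm_cell *next = ptr;
  NEXT_DATA_CELL (next, span);  /* never yields a card header cell */
}
#endif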

void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist.  */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr;

          if (SCM_GC_IN_CARD_HEADERP (ptr))
            {
              SCM_CELLPTR nxt;

              /* cheat here */
              nxt = ptr;
              NEXT_DATA_CELL (nxt, span);
              j += span;

              ptr = nxt - span;
              continue;
            }

          scmptr = PTR2SCM (ptr);

          if (SCM_GCMARKP (scmptr))
            continue;

          switch SCM_TYP7 (scmptr)
            {
            case scm_tcs_cons_gloc:
              {
                /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
                 * struct or a gloc.  See the corresponding comment in
                 * scm_gc_mark.
                 */
                scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
                                    - scm_tc3_cons_gloc);
                /* access as struct */
                scm_bits_t *vtable_data = (scm_bits_t *) word0;
                if (vtable_data[scm_vtable_index_vcell] == 0)
                  {
                    /* Structs need to be freed in a special order.
                     * This is handled by GC C hooks in struct.c.
                     */
                    SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
                    scm_structs_to_free = scmptr;
                    continue;
                  }
                /* fall through so that scmptr gets collected */
              }
              break;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              break;
            case scm_tc7_wvect:
              m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
              scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
              break;
            case scm_tc7_vector:
              {
                unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += length * sizeof (scm_bits_t);
                    scm_must_free (SCM_VECTOR_BASE (scmptr));
                  }
                break;
              }
#ifdef CCLO
            case scm_tc7_cclo:
              m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
              scm_must_free (SCM_CCLO_BASE (scmptr));
              break;
#endif
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              {
                unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
                    scm_must_free (SCM_BITVECTOR_BASE (scmptr));
                  }
              }
              break;
            case scm_tc7_byvect:
            case scm_tc7_ivect:
            case scm_tc7_uvect:
            case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
#endif
            case scm_tc7_fvect:
            case scm_tc7_dvect:
            case scm_tc7_cvect:
              m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
              scm_must_free (SCM_UVECTOR_BASE (scmptr));
              break;
#endif
            case scm_tc7_substring:
              break;
            case scm_tc7_string:
              m += SCM_STRING_LENGTH (scmptr) + 1;
              scm_must_free (SCM_STRING_CHARS (scmptr));
              break;
            case scm_tc7_symbol:
              m += SCM_SYMBOL_LENGTH (scmptr) + 1;
              scm_must_free (SCM_SYMBOL_CHARS (scmptr));
              break;
            case scm_tcs_subrs:
              /* the various "subrs" (primitives) are never freed */
              continue;
            case scm_tc7_port:
              if SCM_OPENP (scmptr)
                {
                  int k = SCM_PTOBNUM (scmptr);
                  if (!(k < scm_numptob))
                    goto sweeperr;
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free rather than
                     scm_ptobs[k].close: .close is for an explicit
                     close-port by the user.  */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_SETAND_CAR (scmptr, ~SCM_OPN);
                }
              break;
            case scm_tc7_smob:
              switch SCM_TYP16 (scmptr)
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  scm_must_free (SCM_BDIGITS (scmptr));
                  break;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  m += sizeof (scm_complex_t);
                  scm_must_free (SCM_COMPLEX_MEM (scmptr));
                  break;
                default:
                  {
                    int k;
                    k = SCM_SMOBNUM (scmptr);
                    if (!(k < scm_numsmob))
                      goto sweeperr;
                    m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
            default:
            sweeperr:
              SCM_MISC_ERROR ("unknown type", SCM_EOL);
            }

          if (!--left_to_collect)
            {
              SCM_SETCAR (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object.  */
              SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
              SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }
        }

#ifdef GC_FREE_SEGMENTS
      if (n == seg_size)
        {
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--; /* We need to scan the segment just moved.  */
        }
      else
#endif /* ifdef GC_FREE_SEGMENTS */
        {
          /* Update the real freelist pointer to point to the head of
             the list of free cells we've built for this segment.  */
          freelist->cells = nfreelist;
          freelist->left_to_collect = left_to_collect;
        }

#ifdef GUILE_DEBUG_FREELIST
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead.  */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME


\f
/* {Front end to malloc}
 *
 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
 * scm_done_free
 *
 * These functions provide services comparable to malloc, realloc, and
 * free.  They are for allocating malloced parts of scheme objects.
 * The primary purpose of the front end is to impose calls to gc.  */


/* scm_must_malloc
 * Return newly malloced storage or throw an error.
 *
 * The parameter WHAT is a string for error reporting.
 * If the threshold scm_mtrigger will be passed by this
 * allocation, or if the first call to malloc fails,
 * garbage collect -- on the presumption that some objects
 * using malloced storage may be collected.
 *
 * The limit scm_mtrigger may be raised by this allocation.
 */
void *
scm_must_malloc (scm_sizet size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_register (ptr, what);
#endif
          return ptr;
        }
    }

  scm_igc (what);

  nm = scm_mallocated + size;
  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  scm_memory_error (what);
}


/* scm_must_realloc
 * is similar to scm_must_malloc.
 */
void *
scm_must_realloc (void *where,
                  scm_sizet old_size,
                  scm_sizet size,
                  const char *what)
{
  void *ptr;
  scm_sizet nm = scm_mallocated + size - old_size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_reregister (where, ptr, what);
#endif
          return ptr;
        }
    }

  scm_igc (what);

  nm = scm_mallocated + size - old_size;
  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  scm_memory_error (what);
}


void
scm_must_free (void *obj)
#define FUNC_NAME "scm_must_free"
{
#ifdef GUILE_DEBUG_MALLOC
  scm_malloc_unregister (obj);
#endif
  if (obj)
    free (obj);
  else
    SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
}
#undef FUNC_NAME
1956
1957
/* Announce that there has been some malloc done that will be freed
 * during gc. A typical use is for a smob that uses some malloced
 * memory but cannot get it from scm_must_malloc (for whatever
 * reason). When a new object of this smob is created you call
 * scm_done_malloc with the size of the object. When your smob free
 * function is called, be sure to include this size in the return
 * value.
 *
 * If you can't actually free the memory in the smob free function,
 * for whatever reason (like reference counting), you still can (and
 * should) report the amount of memory freed when you actually free it.
 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
 * eh? Or even better, call scm_done_free. */
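/* A sketch of the protocol for a hypothetical smob (make_blob,
 * free_blob, BLOB_MEM and BLOB_LEN are illustrative names, not part
 * of this file):
 *
 *   in the constructor:
 *     mem = malloc (len);        obtained outside scm_must_malloc
 *     scm_done_malloc (len);     but accounted for anyway
 *
 *   static scm_sizet
 *   free_blob (SCM blob)
 *   {
 *     free (BLOB_MEM (blob));
 *     return BLOB_LEN (blob);    report the bytes released
 *   }
 */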
1971
1972 void
1973 scm_done_malloc (long size)
1974 {
1975 scm_mallocated += size;
1976
1977 if (scm_mallocated > scm_mtrigger)
1978 {
1979 scm_igc ("foreign mallocs");
1980 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1981 {
1982 if (scm_mallocated > scm_mtrigger)
1983 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1984 else
1985 scm_mtrigger += scm_mtrigger / 2;
1986 }
1987 }
1988 }
1989
1990 void
1991 scm_done_free (long size)
1992 {
1993 scm_mallocated -= size;
1994 }
1995
1996
1997 \f
/* {Heap Segments}
 *
 * Each heap segment is an array of objects of a particular size.
 * Every segment has an associated (possibly shared) freelist.
 * A table of segment records is kept that records the upper and
 * lower extents of each segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
 * into valid segments at reasonable offsets). */
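/* As a sketch (not the actual marking code; SCM_PTR_GT is assumed
 * here to be the counterpart of SCM_PTR_LE), testing whether a word
 * W seen on the C stack might point into the heap amounts to a scan
 * of this table:
 *
 *   for (i = 0; i < scm_n_heap_segs; i++)
 *     if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) W)
 *         && SCM_PTR_GT (scm_heap_table[i].bounds[1], (SCM_CELLPTR) W))
 *       W may be a root; the cell it points at is marked.
 */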
2006
2007 /* scm_expmem
2008 * is true if the first segment was smaller than INIT_HEAP_SEG.
2009 * If scm_expmem is set to one, subsequent segment allocations will
2010 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2011 */
2012 int scm_expmem = 0;
2013
2014 scm_sizet scm_max_segment_size;
2015
2016 /* scm_heap_org
2017 * is the lowest base address of any heap segment.
2018 */
2019 SCM_CELLPTR scm_heap_org;
2020
2021 scm_heap_seg_data_t * scm_heap_table = 0;
2022 static unsigned int heap_segment_table_size = 0;
2023 int scm_n_heap_segs = 0;
2024
2025 /* init_heap_seg
 * initializes a new heap segment and returns its size in bytes, or 0
 * if seg_org was NULL.
2027 *
2028 * The segment origin and segment size in bytes are input parameters.
2029 * The freelist is both input and output.
2030 *
2031 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record and that the markbit space was reserved
2033 * for all the cards in this segment.
2034 */
2035
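/* INIT_CARD attaches a fresh mark bitvector to CARD (evidently what
 * get_bvec supplies) and, for a double-cell freelist, flags the card
 * so the marker knows its cells span two single-cell widths. */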
2036 #define INIT_CARD(card, span) \
2037 do { \
2038 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2039 if ((span) == 2) \
2040 SCM_GC_SET_CARD_DOUBLECELL (card); \
2041 } while (0)
2042
2043 static scm_sizet
2044 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
2045 {
2046 register SCM_CELLPTR ptr;
2047 SCM_CELLPTR seg_end;
2048 int new_seg_index;
2049 int n_new_cells;
2050 int span = freelist->span;
2051
2052 if (seg_org == NULL)
2053 return 0;
2054
/* Round the begin pointer up to the next card boundary.
*/
2057 ptr = SCM_GC_CARD_UP (seg_org);
2058
/* Compute the ceiling on valid object pointers within this segment.
*/
2061 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
2062
/* Find the right place and insert the segment record.
*/
2066 for (new_seg_index = 0;
2067 ( (new_seg_index < scm_n_heap_segs)
2068 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2069 new_seg_index++)
2070 ;
2071
2072 {
2073 int i;
2074 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2075 scm_heap_table[i] = scm_heap_table[i - 1];
2076 }
2077
2078 ++scm_n_heap_segs;
2079
2080 scm_heap_table[new_seg_index].span = span;
2081 scm_heap_table[new_seg_index].freelist = freelist;
2082 scm_heap_table[new_seg_index].bounds[0] = ptr;
2083 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2084
/* Count the data cells in this segment. */
2086 n_new_cells = seg_end - ptr;
2087
2088 freelist->heap_size += n_new_cells;
2089
2090 /* Partition objects in this segment into clusters */
2091 {
2092 SCM clusters;
2093 SCM *clusterp = &clusters;
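/* Each cluster begins with a spine cell: its car points at the first
 * data cell and its cdr at the next cluster (patched below to join
 * the freelist). The data cells are chained through their cdrs, the
 * last one terminated with SCM_EOL. */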
2094
2095 NEXT_DATA_CELL (ptr, span);
2096 while (ptr < seg_end)
2097 {
2098 scm_cell *nxt = ptr;
2099 scm_cell *prv = NULL;
2100 scm_cell *last_card = NULL;
2101 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2102 NEXT_DATA_CELL(nxt, span);
2103
2104 /* Allocate cluster spine
2105 */
2106 *clusterp = PTR2SCM (ptr);
2107 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
2108 clusterp = SCM_CDRLOC (*clusterp);
2109 ptr = nxt;
2110
2111 while (n_data_cells--)
2112 {
2113 scm_cell *card = SCM_GC_CELL_CARD (ptr);
2114 SCM scmptr = PTR2SCM (ptr);
2115 nxt = ptr;
2116 NEXT_DATA_CELL (nxt, span);
2117 prv = ptr;
2118
2119 if (card != last_card)
2120 {
2121 INIT_CARD (card, span);
2122 last_card = card;
2123 }
2124
2125 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2126 SCM_SETCDR (scmptr, PTR2SCM (nxt));
2127
2128 ptr = nxt;
2129 }
2130
2131 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
2132 }
2133
2134 /* sanity check */
2135 {
2136 scm_cell *ref = seg_end;
2137 NEXT_DATA_CELL (ref, span);
2138 if (ref != ptr)
2139 /* [cmm] looks like the segment size doesn't divide cleanly by
2140 cluster size. bad cmm! */
2141 abort();
2142 }
2143
2144 /* Patch up the last cluster pointer in the segment
2145 * to join it to the input freelist.
2146 */
2147 *clusterp = freelist->clusters;
2148 freelist->clusters = clusters;
2149 }
2150
2151 #ifdef DEBUGINFO
2152 fprintf (stderr, "H");
2153 #endif
2154 return size;
2155 }
2156
2157 static scm_sizet
2158 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2159 {
2160 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
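/* (len + c - 1) / c * c is the usual integer idiom for rounding len
 * up to the next multiple of c; ALIGNMENT_SLACK presumably adds the
 * bytes needed so the segment origin can be card-aligned. */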
2161
2162 return
2163 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2164 + ALIGNMENT_SLACK (freelist);
2165 }
2166
2167 static void
2168 alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
2169 #define FUNC_NAME "alloc_some_heap"
2170 {
2171 SCM_CELLPTR ptr;
2172 long len;
2173
2174 if (scm_gc_heap_lock)
2175 {
2176 /* Critical code sections (such as the garbage collector) aren't
2177 * supposed to add heap segments.
2178 */
fprintf (stderr, "alloc_some_heap: Cannot extend locked heap.\n");
2180 abort ();
2181 }
2182
2183 if (scm_n_heap_segs == heap_segment_table_size)
2184 {
2185 /* We have to expand the heap segment table to have room for the new
2186 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2187 * init_heap_seg only if the allocation of the segment itself succeeds.
2188 */
2189 unsigned int new_table_size = scm_n_heap_segs + 1;
2190 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2191 scm_heap_seg_data_t * new_heap_table;
2192
2193 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2194 realloc ((char *)scm_heap_table, size)));
2195 if (!new_heap_table)
2196 {
2197 if (error_policy == abort_on_error)
2198 {
2199 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2200 abort ();
2201 }
2202 else
2203 {
2204 return;
2205 }
2206 }
2207 else
2208 {
2209 scm_heap_table = new_heap_table;
2210 heap_segment_table_size = new_table_size;
2211 }
2212 }
2213
2214 /* Pick a size for the new heap segment.
2215 * The rule for picking the size of a segment is explained in
2216 * gc.h
2217 */
2218 {
/* Ensure that the new segment is predicted to be large enough.
2220 *
2221 * New yield should at least equal GC fraction of new heap size, i.e.
2222 *
2223 * y + dh > f * (h + dh)
2224 *
2225 * y : yield
2226 * f : min yield fraction
2227 * h : heap size
2228 * dh : size of new heap segment
2229 *
2230 * This gives dh > (f * h - y) / (1 - f)
2231 */
2232 int f = freelist->min_yield_fraction;
2233 long h = SCM_HEAP_SIZE;
2234 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
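/* f above is a percentage and scm_gc_yield counts cells, hence the
 * factor of 100; dividing by (99 - f) rather than (100 - f) seems
 * intended to bias the estimate toward a slightly larger segment. */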
2235 len = SCM_EXPHEAP (freelist->heap_size);
2236 #ifdef DEBUGINFO
fprintf (stderr, "(%ld < %ld)", len, min_cells);
2238 #endif
2239 if (len < min_cells)
2240 len = min_cells + freelist->cluster_size;
2241 len *= sizeof (scm_cell);
2242 /* force new sampling */
2243 freelist->collected = LONG_MAX;
2244 }
2245
2246 if (len > scm_max_segment_size)
2247 len = scm_max_segment_size;
2248
2249 {
2250 scm_sizet smallest;
2251
2252 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2253
2254 if (len < smallest)
2255 len = smallest;
2256
2257 /* Allocate with decaying ambition. */
2258 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2259 && (len >= smallest))
2260 {
2261 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
2262 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2263 if (ptr)
2264 {
2265 init_heap_seg (ptr, rounded_len, freelist);
2266 return;
2267 }
2268 len /= 2;
2269 }
2270 }
2271
2272 if (error_policy == abort_on_error)
2273 {
2274 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2275 abort ();
2276 }
2277 }
2278 #undef FUNC_NAME
2279
2280
2281 SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
2282 (SCM name),
2283 "Flushes the glocs for @var{name}, or all glocs if @var{name}\n"
2284 "is @code{#t}.")
2285 #define FUNC_NAME s_scm_unhash_name
2286 {
2287 int x;
2288 int bound;
2289 SCM_VALIDATE_SYMBOL (1,name);
2290 SCM_DEFER_INTS;
2291 bound = scm_n_heap_segs;
2292 for (x = 0; x < bound; ++x)
2293 {
2294 SCM_CELLPTR p;
2295 SCM_CELLPTR pbound;
2296 p = scm_heap_table[x].bounds[0];
2297 pbound = scm_heap_table[x].bounds[1];
2298 while (p < pbound)
2299 {
2300 SCM cell = PTR2SCM (p);
2301 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
2302 {
2303 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2304 * struct cell. See the corresponding comment in scm_gc_mark.
2305 */
2306 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2307 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2308 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
2309 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
2310 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
2311 {
2312 SCM_SET_CELL_OBJECT_0 (cell, name);
2313 }
2314 }
2315 ++p;
2316 }
2317 }
2318 SCM_ALLOW_INTS;
2319 return name;
2320 }
2321 #undef FUNC_NAME
2322
2323
2324 \f
2325 /* {GC Protection Helper Functions}
2326 */
2327
2328
/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below. These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters. Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called. In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the existence of the scheme object.
 *
 * Example: We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function', because
 * otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory. To make sure that the
 * compiler keeps str alive on the stack or in a register such that it is
 * visible to the conservative gc, we add the call to
 * scm_remember_upto_here_1 _after_ the call to 'some_function'. Note that
 * this would not be necessary if str were used anyway after the call to
 * 'some_function'.
 *
 *   char *chars = SCM_STRING_CHARS (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */
2353
2354 void
2355 scm_remember_upto_here_1 (SCM obj)
2356 {
2357 /* Empty. Protects a single object from garbage collection. */
2358 }
2359
2360 void
2361 scm_remember_upto_here_2 (SCM obj1, SCM obj2)
2362 {
2363 /* Empty. Protects two objects from garbage collection. */
2364 }
2365
2366 void
2367 scm_remember_upto_here (SCM obj, ...)
2368 {
2369 /* Empty. Protects any number of objects from garbage collection. */
2370 }
2371
2372
2373 #if (SCM_DEBUG_DEPRECATED == 0)
2374
2375 void
2376 scm_remember (SCM *ptr)
2377 {
2378 /* empty */
2379 }
2380
2381 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2382
/*
These functions keep their arguments after the first one from being
garbage collected: because those arguments appear at the call, the
compiler must keep them live up to that point, typically the last
line of the calling function.
It'd be better to have a nice compiler hint to
aid the conservative stack-scanning GC. --03/09/00 gjb */
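/* E.g., a caller can write
 *
 *   return scm_return_first (result, str);
 *
 * to keep str visible to the conservative gc until result has been
 * computed and returned. */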
2391 SCM
2392 scm_return_first (SCM elt, ...)
2393 {
2394 return elt;
2395 }
2396
2397 int
2398 scm_return_first_int (int i, ...)
2399 {
2400 return i;
2401 }
2402
2403
2404 SCM
2405 scm_permanent_object (SCM obj)
2406 {
2407 SCM_REDEFER_INTS;
2408 scm_permobjs = scm_cons (obj, scm_permobjs);
2409 SCM_REALLOW_INTS;
2410 return obj;
2411 }
2412
2413
2414 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2415 other references are dropped, until the object is unprotected by calling
2416 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
i.e., it is possible to protect the same object several times, but it is
2418 necessary to unprotect the object the same number of times to actually get
2419 the object unprotected. It is an error to unprotect an object more often
2420 than it has been protected before. The function scm_protect_object returns
2421 OBJ.
2422 */
2423
2424 /* Implementation note: For every object X, there is a counter which
2425 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2426 */
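/* For example, for a callback object held only from C (hypothetical
 * names):
 *
 *   scm_protect_object (callback);      when stashing it in a C global
 *   ...
 *   scm_unprotect_object (callback);    when the C code drops it
 */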
2427
2428 SCM
2429 scm_protect_object (SCM obj)
2430 {
2431 SCM handle;
2432
2433 /* This critical section barrier will be replaced by a mutex. */
2434 SCM_REDEFER_INTS;
2435
2436 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2437 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
2438
2439 SCM_REALLOW_INTS;
2440
2441 return obj;
2442 }
2443
2444
2445 /* Remove any protection for OBJ established by a prior call to
2446 scm_protect_object. This function returns OBJ.
2447
2448 See scm_protect_object for more information. */
2449 SCM
2450 scm_unprotect_object (SCM obj)
2451 {
2452 SCM handle;
2453
2454 /* This critical section barrier will be replaced by a mutex. */
2455 SCM_REDEFER_INTS;
2456
2457 handle = scm_hashq_get_handle (scm_protects, obj);
2458
2459 if (SCM_IMP (handle))
2460 {
2461 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2462 abort ();
2463 }
2464 else
2465 {
2466 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2467 if (count == 0)
2468 scm_hashq_remove_x (scm_protects, obj);
2469 else
2470 SCM_SETCDR (handle, SCM_MAKINUM (count));
2471 }
2472
2473 SCM_REALLOW_INTS;
2474
2475 return obj;
2476 }
2477
2478 int terminating;
2479
2480 /* called on process termination. */
2481 #ifdef HAVE_ATEXIT
2482 static void
2483 cleanup (void)
2484 #else
2485 #ifdef HAVE_ON_EXIT
2486 extern int on_exit (void (*procp) (), int arg);
2487
2488 static void
2489 cleanup (int status, void *arg)
2490 #else
#error No known way to set up a cleanup handler on your system.
2492 #endif
2493 #endif
2494 {
2495 terminating = 1;
2496 scm_flush_all_ports ();
2497 }
2498
2499 \f
2500 static int
2501 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
2502 {
2503 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2504
2505 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2506 rounded_size,
2507 freelist))
2508 {
2509 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2510 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2511 rounded_size,
2512 freelist))
2513 return 1;
2514 }
2515 else
2516 scm_expmem = 1;
2517
2518 if (freelist->min_yield_fraction)
2519 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2520 / 100);
2521 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2522
2523 return 0;
2524 }
2525
2526 \f
2527 static void
2528 init_freelist (scm_freelist_t *freelist,
2529 int span,
2530 int cluster_size,
2531 int min_yield)
2532 {
2533 freelist->clusters = SCM_EOL;
2534 freelist->cluster_size = cluster_size + 1;
2535 freelist->left_to_collect = 0;
2536 freelist->clusters_allocated = 0;
2537 freelist->min_yield = 0;
2538 freelist->min_yield_fraction = min_yield;
2539 freelist->span = span;
2540 freelist->collected = 0;
2541 freelist->collected_1 = 0;
2542 freelist->heap_size = 0;
2543 }
2544
2545
/* Get an integer from an environment variable, returning DEF if VAR
is unset or does not start with a parsable integer. */
2547 static int
2548 scm_i_getenv_int (const char *var, int def)
2549 {
2550 char *end, *val = getenv (var);
2551 long res;
2552 if (!val)
2553 return def;
2554 res = strtol (val, &end, 10);
2555 if (end == val)
2556 return def;
2557 return res;
2558 }
2559
2560
2561 int
2562 scm_init_storage ()
2563 {
2564 scm_sizet gc_trigger_1;
2565 scm_sizet gc_trigger_2;
2566 scm_sizet init_heap_size_1;
2567 scm_sizet init_heap_size_2;
2568 scm_sizet j;
2569
2570 j = SCM_NUM_PROTECTS;
2571 while (j)
2572 scm_sys_protects[--j] = SCM_BOOL_F;
2573 scm_block_gc = 1;
2574
2575 scm_freelist = SCM_EOL;
2576 scm_freelist2 = SCM_EOL;
2577 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2578 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2579 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2580 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2581 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
2582
2583 scm_expmem = 0;
2584
2585 j = SCM_HEAP_SEG_SIZE;
2586 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2587 scm_heap_table = ((scm_heap_seg_data_t *)
2588 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
2589 heap_segment_table_size = 2;
2590
2591 mark_space_ptr = &mark_space_head;
2592
2593 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2594 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
2595 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2596 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2597 return 1;
2598
/* scm_hplims[0] can change. Do not remove scm_heap_org. */
2600 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2601
2602 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2603 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2604 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2605 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2606 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2607
2608 /* Initialise the list of ports. */
2609 scm_port_table = (scm_port **)
2610 malloc (sizeof (scm_port *) * scm_port_table_room);
2611 if (!scm_port_table)
2612 return 1;
2613
2614 #ifdef HAVE_ATEXIT
2615 atexit (cleanup);
2616 #else
2617 #ifdef HAVE_ON_EXIT
2618 on_exit (cleanup, 0);
2619 #endif
2620 #endif
2621
2622 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2623 SCM_SETCDR (scm_undefineds, scm_undefineds);
2624
2625 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2626 scm_nullstr = scm_makstr (0L, 0);
2627 scm_nullvect = scm_c_make_vector (0, SCM_UNDEFINED);
2628
2629 #define DEFAULT_SYMHASH_SIZE 277
2630 scm_symhash = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
2631 scm_symhash_vars = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
2632
2633 scm_stand_in_procs = SCM_EOL;
2634 scm_permobjs = SCM_EOL;
2635 scm_protects = scm_c_make_hash_table (31);
2636
2637 return 0;
2638 }
2639
2640 \f
2641
2642 SCM scm_after_gc_hook;
2643
2644 #if (SCM_DEBUG_DEPRECATED == 0)
2645 static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2646 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2647 static SCM gc_async;
2648
2649
2650 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2651 * is run after the gc, as soon as the asynchronous events are handled by the
2652 * evaluator.
2653 */
2654 static SCM
2655 gc_async_thunk (void)
2656 {
2657 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2658
2659 #if (SCM_DEBUG_DEPRECATED == 0)
2660
2661 /* The following code will be removed in Guile 1.5. */
2662 if (SCM_NFALSEP (scm_gc_vcell))
2663 {
2664 SCM proc = SCM_CDR (scm_gc_vcell);
2665
2666 if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
2667 scm_apply (proc, SCM_EOL, SCM_EOL);
2668 }
2669
2670 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2671
2672 return SCM_UNSPECIFIED;
2673 }
2674
2675
2676 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2677 * the garbage collection. The only purpose of this function is to mark the
2678 * gc_async (which will eventually lead to the execution of the
2679 * gc_async_thunk).
2680 */
2681 static void *
2682 mark_gc_async (void * hook_data, void *func_data, void *data)
2683 {
2684 scm_system_async_mark (gc_async);
2685 return NULL;
2686 }
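/* The intended chain of events is thus: the collector runs
 * scm_after_gc_c_hook, which calls mark_gc_async; the evaluator then
 * executes the marked gc_async as soon as it is safe, and
 * gc_async_thunk finally runs the user-visible after-gc-hook. */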
2687
2688
2689 void
2690 scm_init_gc ()
2691 {
2692 SCM after_gc_thunk;
2693
2694 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
2695
2696 #if (SCM_DEBUG_DEPRECATED == 0)
2697 scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
2698 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2699 after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
2700 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
2701
2702 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2703
2704 #ifndef SCM_MAGIC_SNARFER
2705 #include "libguile/gc.x"
2706 #endif
2707 }
2708
2709 #endif /*MARK_DEPENDENCIES*/
2710
2711 /*
2712 Local Variables:
2713 c-file-style: "gnu"
2714 End:
2715 */