Remove #include <stdio.h>. Add #include <string.h>.
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 /* SECTION: This code is compiled once.
48 */
49
50 #ifndef MARK_DEPENDENCIES
51
52 \f
53 #include <stdio.h>
54 #include <string.h>
55 #include "libguile/_scm.h"
56 #include "libguile/eval.h"
57 #include "libguile/stime.h"
58 #include "libguile/stackchk.h"
59 #include "libguile/struct.h"
60 #include "libguile/smob.h"
61 #include "libguile/unif.h"
62 #include "libguile/async.h"
63 #include "libguile/ports.h"
64 #include "libguile/root.h"
65 #include "libguile/strings.h"
66 #include "libguile/vectors.h"
67 #include "libguile/weaks.h"
68 #include "libguile/hashtab.h"
69 #include "libguile/tags.h"
70
71 #include "libguile/validate.h"
72 #include "libguile/gc.h"
73
74 #ifdef GUILE_DEBUG_MALLOC
75 #include "libguile/debug-malloc.h"
76 #endif
77
78 #ifdef HAVE_MALLOC_H
79 #include <malloc.h>
80 #endif
81
82 #ifdef HAVE_UNISTD_H
83 #include <unistd.h>
84 #endif
85
86 #ifdef __STDC__
87 #include <stdarg.h>
88 #define var_start(x, y) va_start(x, y)
89 #else
90 #include <varargs.h>
91 #define var_start(x, y) va_start(x)
92 #endif
93
94 \f
95
/* Nonzero while a garbage collection is in progress (see scm_igc). */
unsigned int scm_gc_running_p = 0;
97
98 \f
99
100 #if (SCM_DEBUG_CELL_ACCESSES == 1)
101
/* Nonzero when cell-access checking is enabled; toggled by
   set-debug-cell-accesses! and consulted by scm_assert_cell_valid. */
unsigned int scm_debug_cell_accesses_p = 0;
103
104
105 /* Assert that the given object is a valid reference to a valid cell. This
106 * test involves to determine whether the object is a cell pointer, whether
107 * this pointer actually points into a heap segment and whether the cell
108 * pointed to is not a free cell.
109 */
110 void
111 scm_assert_cell_valid (SCM cell)
112 {
113 if (scm_debug_cell_accesses_p)
114 {
115 scm_debug_cell_accesses_p = 0; /* disable to avoid recursion */
116
117 if (!scm_cellp (cell))
118 {
119 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
120 abort ();
121 }
122 else if (!scm_gc_running_p)
123 {
124 /* Dirk::FIXME:: During garbage collection there occur references to
125 free cells. This is allright during conservative marking, but
126 should not happen otherwise (I think). The case of free cells
127 accessed during conservative marking is handled in function
128 scm_mark_locations. However, there still occur accesses to free
129 cells during gc. I don't understand why this happens. If it is
130 a bug and gets fixed, the following test should also work while
131 gc is running.
132 */
133 if (SCM_FREE_CELL_P (cell))
134 {
135 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
136 abort ();
137 }
138 }
139 scm_debug_cell_accesses_p = 1; /* re-enable */
140 }
141 }
142
143
144 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
145 (SCM flag),
146 "If FLAG is #f, cell access checking is disabled.\n"
147 "If FLAG is #t, cell access checking is enabled.\n"
148 "This procedure only exists because the compile-time flag\n"
149 "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
150 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
151 {
152 if (SCM_FALSEP (flag)) {
153 scm_debug_cell_accesses_p = 0;
154 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
155 scm_debug_cell_accesses_p = 1;
156 } else {
157 SCM_WRONG_TYPE_ARG (1, flag);
158 }
159 return SCM_UNSPECIFIED;
160 }
161 #undef FUNC_NAME
162
163 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
164
165 \f
166
167 /* {heap tuning parameters}
168 *
169 * These are parameters for controlling memory allocation. The heap
170 * is the area out of which scm_cons, and object headers are allocated.
171 *
172 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
173 * 64 bit machine. The units of the _SIZE parameters are bytes.
174 * Cons pairs and object headers occupy one heap cell.
175 *
176 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
177 * allocated initially the heap will grow by half its current size
178 * each subsequent time more heap is needed.
179 *
180 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
181 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
182 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
183 * is in scm_init_storage() and alloc_some_heap() in sys.c
184 *
185 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
186 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
187 *
188 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
189 * is needed.
190 *
191 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
192 * trigger a GC.
193 *
194 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
195 * reclaimed by a GC triggered by must_malloc. If less than this is
196 * reclaimed, the trigger threshold is raised. [I don't know what a
197 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
198 * work around a oscillation that caused almost constant GC.]
199 */
200
201 /*
202 * Heap size 45000 and 40% min yield gives quick startup and no extra
203 * heap allocation. Having higher values on min yield may lead to
204 * large heaps, especially if code behaviour is varying its
205 * maximum consumption between different freelists.
206 */
207
208 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
209 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
210 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
211 int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
212 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
213 int scm_default_min_yield_1 = 40;
214
215 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
216 int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
217 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
218 /* The following value may seem large, but note that if we get to GC at
219 * all, this means that we have a numerically intensive application
220 */
221 int scm_default_min_yield_2 = 40;
222
223 int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
224
225 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
226 #ifdef _QC
227 # define SCM_HEAP_SEG_SIZE 32768L
228 #else
229 # ifdef sequent
230 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
231 # else
232 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
233 # endif
234 #endif
235 /* Make heap grow with factor 1.5 */
236 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
237 #define SCM_INIT_MALLOC_LIMIT 100000
238 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
239
240 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
241 aligned inner bounds for allocated storage */
242
243 #ifdef PROT386
244 /*in 386 protected mode we must only adjust the offset */
245 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
246 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
247 #else
248 # ifdef _UNICOS
249 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
250 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
251 # else
252 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
253 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
254 # endif /* UNICOS */
255 #endif /* PROT386 */
256
257 #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
258
259 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
260 #define CLUSTER_SIZE_IN_BYTES(freelist) \
261 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
262
263 \f
/* scm_freelists
 *
 * A "master" freelist does the cluster-level bookkeeping for one object
 * span (1-cell or 2-cell objects); the cells actually handed to the
 * allocation macros travel on plain SCM lists (scm_freelist /
 * scm_freelist2 below).
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;
301
/* Freelist of single cells, and its backing master.  Initializer order
   matches the scm_freelist_t member order; span is 1. */
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
/* Freelist of double cells, and its backing master; span is 2. */
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
310
/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;    /* net cells handed out */
long scm_mallocated = 0;                  /* signed so that scm_igc can detect
                                             underflow from unreported frees */
unsigned long scm_gc_cells_collected;     /* cells reclaimed by current/last GC */
unsigned long scm_gc_yield;               /* cells allocated when current GC started
                                             (set in gc_start_stats) */
static unsigned long scm_gc_yield_1 = 0;  /* previous GC yield */
unsigned long scm_gc_malloc_collected;    /* reset at GC start */
unsigned long scm_gc_ports_collected;     /* reset at GC start */
unsigned long scm_gc_time_taken = 0;      /* total, in internal run-time units */
static unsigned long t_before_gc;         /* timestamp at GC start */
static unsigned long t_before_sweep;      /* timestamp when sweeping started */
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_sweep_time_taken = 0;
unsigned long scm_gc_times = 0;           /* number of collections run */
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;      /* lifetime accumulated totals */
double scm_gc_cells_swept_acc = 0.;
355
/* Keys of the association list returned by gc-stats (see scm_gc_stats). */
SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");
367
/* Bookkeeping record for one contiguous heap segment. */
typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;
380
381
382
static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);

/* Whether alloc_some_heap may fail softly or must abort on failure. */
typedef enum { return_on_error, abort_on_error } policy_on_error;
static void alloc_some_heap (scm_freelist_t *, policy_on_error);


/* Total cell count over both masters' heap segments. */
#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

/* Mark-bit storage is grown BVEC_GROW_SIZE cards' worth at a time
   (see get_bvec below). */
#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
396
/* mark space allocation */

/* Mark bits for the heap cards live outside the heap, in chunks
   ("mark spaces") allocated on demand by get_bvec and cleared wholesale
   by clear_mark_space. */
typedef struct scm_mark_space_t
{
  scm_c_bvec_limb_t *bvec_space;
  struct scm_mark_space_t *next;
} scm_mark_space_t;

static scm_mark_space_t *current_mark_space;  /* chunk currently being carved up */
static scm_mark_space_t **mark_space_ptr;     /* tail pointer of the chunk list */
static int current_mark_space_offset;         /* next free limb in current chunk */
static scm_mark_space_t *mark_space_head;     /* head of the list of all chunks */
409
/* Return a fresh bit vector (one card's worth of mark bits) from the
 * current mark space, growing the mark-space list as needed.  After each
 * growth step the function recurses until the fast path at the bottom
 * can be taken.  Signals an error if memory cannot be obtained. */
static scm_c_bvec_limb_t *
get_bvec ()
#define FUNC_NAME "get_bvec"
{
  scm_c_bvec_limb_t *res;

  /* No current chunk: allocate a chunk header, link it onto the global
     list, and retry. */
  if (!current_mark_space)
    {
      SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
      if (!current_mark_space)
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space->bvec_space = NULL;
      current_mark_space->next = NULL;

      *mark_space_ptr = current_mark_space;
      mark_space_ptr = &(current_mark_space->next);

      return get_bvec ();
    }

  /* Chunk header exists but its bit-vector storage does not: allocate
     zeroed storage, reset the carve offset, and retry.  NOTE: if this
     calloc fails, the header stays on the list with a NULL bvec_space. */
  if (!(current_mark_space->bvec_space))
    {
      SCM_SYSCALL (current_mark_space->bvec_space =
                   (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
      if (!(current_mark_space->bvec_space))
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space_offset = 0;

      return get_bvec ();
    }

  /* Current chunk exhausted: drop it (it stays on the list for
     clear_mark_space) and force allocation of a new one. */
  if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
    {
      current_mark_space = NULL;

      return get_bvec ();
    }

  /* Fast path: carve the next card-sized slice out of the chunk. */
  res = current_mark_space->bvec_space + current_mark_space_offset;
  current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;

  return res;
}
#undef FUNC_NAME
456
457
458 static void
459 clear_mark_space ()
460 {
461 scm_mark_space_t *ms;
462
463 for (ms = mark_space_head; ms; ms = ms->next)
464 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
465 }
466
467
468 \f
469 /* Debugging functions. */
470
471 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
472
473 /* Return the number of the heap segment containing CELL. */
474 static int
475 which_seg (SCM cell)
476 {
477 int i;
478
479 for (i = 0; i < scm_n_heap_segs; i++)
480 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
481 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
482 return i;
483 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
484 SCM_UNPACK (cell));
485 abort ();
486 }
487
488
489 static void
490 map_free_list (scm_freelist_t *master, SCM freelist)
491 {
492 int last_seg = -1, count = 0;
493 SCM f;
494
495 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
496 {
497 int this_seg = which_seg (f);
498
499 if (this_seg != last_seg)
500 {
501 if (last_seg != -1)
502 fprintf (stderr, " %5d %d-cells in segment %d\n",
503 count, master->span, last_seg);
504 last_seg = this_seg;
505 count = 0;
506 }
507 count++;
508 }
509 if (last_seg != -1)
510 fprintf (stderr, " %5d %d-cells in segment %d\n",
511 count, master->span, last_seg);
512 }
513
514 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
515 (),
516 "Print debugging information about the free-list.\n"
517 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
518 #define FUNC_NAME s_scm_map_free_list
519 {
520 int i;
521 fprintf (stderr, "%d segments total (%d:%d",
522 scm_n_heap_segs,
523 scm_heap_table[0].span,
524 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
525 for (i = 1; i < scm_n_heap_segs; i++)
526 fprintf (stderr, ", %d:%d",
527 scm_heap_table[i].span,
528 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
529 fprintf (stderr, ")\n");
530 map_free_list (&scm_master_freelist, scm_freelist);
531 map_free_list (&scm_master_freelist2, scm_freelist2);
532 fflush (stderr);
533
534 return SCM_UNSPECIFIED;
535 }
536 #undef FUNC_NAME
537
/* State shared between successive free_list_length calls, so that runs
   of clusters with equal length can be printed compactly as a range. */
static int last_cluster;
static int last_size;

/* Count the cells on FREELIST, aborting if a non-free cell is found.
 * TITLE labels the output; I is the cluster index, or negative for the
 * plain freelist.  Output is run-compressed: a new line is only started
 * when the length differs from the previous cluster's, and the pending
 * run is closed either here or by the caller.  Returns the cell count. */
static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %d\n", title, n);
        abort ();
      }
  /* Only emit output when this length breaks the current run. */
  if (n != last_size)
    {
      if (i > 0)
        {
          /* Close the previous run: a single cluster prints just its
             size, a longer run prints "-<last index>\t<size>". */
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%d\n", last_size);
          else
            fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %d", title, i);
      else
        fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}
572
/* Print the length of FREELIST and of every cluster on MASTER, followed
 * by the grand total of free cells.  TITLE heads the report.  Relies on
 * free_list_length's run-compression via the last_cluster/last_size
 * statics, and flushes the final pending run itself. */
static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  /* Close the run that free_list_length left open. */
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}
593
SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  /* Report both the single-cell and the double-cell freelists. */
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
605
606 #endif
607
608 #ifdef GUILE_DEBUG_FREELIST
609
/* Number of calls to SCM_NEWCELL since startup. */
static unsigned long scm_newcell_count;
/* Number of calls to SCM_NEWCELL2 since startup. */
static unsigned long scm_newcell2_count;
613
614 /* Search freelist for anything that isn't marked as a free cell.
615 Abort if we find something. */
616 static void
617 scm_check_freelist (SCM freelist)
618 {
619 SCM f;
620 int i = 0;
621
622 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
623 if (!SCM_FREE_CELL_P (f))
624 {
625 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
626 scm_newcell_count, i);
627 abort ();
628 }
629 }
630
SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
            "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
            "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* [cmm] I did a double-take when I read this code the first time.
     well, FWIW. */
  /* SCM_VALIDATE_BOOL_COPY both type-checks FLAG and stores its C value
     into the scm_debug_check_freelist toggle. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
644
645
/* Out-of-line replacement for the SCM_NEWCELL macro, used when
 * GUILE_DEBUG_FREELIST is enabled: optionally verifies the freelist
 * (and forces a collection) before handing out a single cell.  The
 * allocation part below must stay textually in step with SCM_NEWCELL. */
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
    }

  return new;
}
670
/* Double-cell analogue of scm_debug_newcell: SCM_NEWCELL2 replacement
 * used when GUILE_DEBUG_FREELIST is enabled.  The allocation part below
 * must stay textually in step with the SCM_NEWCELL2 macro. */
SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
    }

  return new;
}
695
696 #endif /* GUILE_DEBUG_FREELIST */
697
698 \f
699
700 static unsigned long
701 master_cells_allocated (scm_freelist_t *master)
702 {
703 /* the '- 1' below is to ignore the cluster spine cells. */
704 int objects = master->clusters_allocated * (master->cluster_size - 1);
705 if (SCM_NULLP (master->clusters))
706 objects -= master->left_to_collect;
707 return master->span * objects;
708 }
709
710 static unsigned long
711 freelist_length (SCM freelist)
712 {
713 int n;
714 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
715 ++n;
716 return n;
717 }
718
719 static unsigned long
720 compute_cells_allocated ()
721 {
722 return (scm_cells_allocated
723 + master_cells_allocated (&scm_master_freelist)
724 + master_cells_allocated (&scm_master_freelist2)
725 - scm_master_freelist.span * freelist_length (scm_freelist)
726 - scm_master_freelist2.span * freelist_length (scm_freelist2));
727 }
728
729 /* {Scheme Interface to GC}
730 */
731
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Returns an association list of statistics about Guile's current use of storage. ")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  long int local_scm_gc_times;
  long int local_scm_gc_mark_time_taken;
  long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  /* Consing the segment list below can itself trigger a collection that
     grows the heap table, so block gc and retry if the table changed
     under us. */
  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  /* Assemble the alist keyed by the sym_* symbols defined above. */
  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                        scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                        scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                        scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                        scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                        scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                        scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                        scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                        scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
                        scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
                        scm_cons (sym_heap_segments, heap_segs),
                        SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
798
799
/* Reset the per-collection statistics at the start of a GC.  WHAT names
 * the trigger ("call", "cells", ...); it is not used in this body. */
static void
gc_start_stats (const char *what)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  /* Remember the previous yield so adjust_min_yield can take the larger
     of the last two. */
  scm_gc_yield_1 = scm_gc_yield;
  /* Yield baseline: total cells handed out at the moment GC starts. */
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
813
814
/* Fold the just-finished collection into the accumulated statistics. */
static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  /* marked = swept - collected: every swept cell that was not collected
     must have been marked live. */
  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}
826
827
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  /* The raw bits of the SCM value serve as the unique address. */
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME
837
838
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all of SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  /* Interrupts are deferred around the collection proper. */
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
851
852
853 \f
854 /* {C Interface For When GC is Triggered}
855 */
856
857 static void
858 adjust_min_yield (scm_freelist_t *freelist)
859 {
860 /* min yield is adjusted upwards so that next predicted total yield
861 * (allocated cells actually freed by GC) becomes
862 * `min_yield_fraction' of total heap size. Note, however, that
863 * the absolute value of min_yield will correspond to `collected'
864 * on one master (the one which currently is triggering GC).
865 *
866 * The reason why we look at total yield instead of cells collected
867 * on one list is that we want to take other freelists into account.
868 * On this freelist, we know that (local) yield = collected cells,
869 * but that's probably not the case on the other lists.
870 *
871 * (We might consider computing a better prediction, for example
872 * by computing an average over multiple GC:s.)
873 */
874 if (freelist->min_yield_fraction)
875 {
876 /* Pick largest of last two yields. */
877 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
878 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
879 #ifdef DEBUGINFO
880 fprintf (stderr, " after GC = %d, delta = %d\n",
881 scm_cells_allocated,
882 delta);
883 #endif
884 if (delta > 0)
885 freelist->min_yield += delta;
886 }
887 }
888
889
890 /* When we get POSIX threads support, the master will be global and
891 * common while the freelist will be individual for each thread.
892 */
893
894 SCM
895 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
896 {
897 SCM cell;
898 ++scm_ints_disabled;
899 do
900 {
901 if (SCM_NULLP (master->clusters))
902 {
903 if (master->grow_heap_p || scm_block_gc)
904 {
905 /* In order to reduce gc frequency, try to allocate a new heap
906 * segment first, even if gc might find some free cells. If we
907 * can't obtain a new heap segment, we will try gc later.
908 */
909 master->grow_heap_p = 0;
910 alloc_some_heap (master, return_on_error);
911 }
912 if (SCM_NULLP (master->clusters))
913 {
914 /* The heap was not grown, either because it wasn't scheduled to
915 * grow, or because there was not enough memory available. In
916 * both cases we have to try gc to get some free cells.
917 */
918 #ifdef DEBUGINFO
919 fprintf (stderr, "allocated = %d, ",
920 scm_cells_allocated
921 + master_cells_allocated (&scm_master_freelist)
922 + master_cells_allocated (&scm_master_freelist2));
923 #endif
924 scm_igc ("cells");
925 adjust_min_yield (master);
926 if (SCM_NULLP (master->clusters))
927 {
928 /* gc could not free any cells. Now, we _must_ allocate a
929 * new heap segment, because there is no other possibility
930 * to provide a new cell for the caller.
931 */
932 alloc_some_heap (master, abort_on_error);
933 }
934 }
935 }
936 cell = SCM_CAR (master->clusters);
937 master->clusters = SCM_CDR (master->clusters);
938 ++master->clusters_allocated;
939 }
940 while (SCM_NULLP (cell));
941
942 #ifdef GUILE_DEBUG_FREELIST
943 scm_check_freelist (cell);
944 #endif
945
946 --scm_ints_disabled;
947 *freelist = SCM_FREE_CELL_CDR (cell);
948 return cell;
949 }
950
951
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

/* NOTE(review): this disabled code is declared void yet executes
   `return cell;' -- if it is ever re-enabled the return type
   presumably needs to become SCM.  Confirm before enabling. */
void
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
967
968
/* C-level hooks run at fixed points of the collection cycle
   (scm_igc runs the before-gc/mark/sweep and after-sweep hooks). */
scm_c_hook_t scm_before_gc_c_hook;
scm_c_hook_t scm_before_mark_c_hook;
scm_c_hook_t scm_before_sweep_c_hook;
scm_c_hook_t scm_after_sweep_c_hook;
scm_c_hook_t scm_after_gc_c_hook;
974
975
/* Perform a complete garbage collection: run the before-GC hooks,
 * enter the (single-threaded) critical section, flush dead
 * continuation-stack entries, mark all roots (C stacks and registers,
 * scm_sys_protects, the subr table), sweep, and run the after-GC
 * hooks.  WHAT is a label used only for statistics/diagnostics.
 *
 * scm_gc_running_p is incremented for the duration of the collection;
 * scm_gc_heap_lock guards against reentrant collection.
 */
void
scm_igc (const char *what)
{
  int j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
#ifdef USE_THREADS
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  /* Bail out if the interpreter is not fully booted or GC is blocked.
   * NOTE(review): under USE_THREADS this early return leaves the
   * critical section started above without a matching
   * SCM_THREAD_CRITICAL_SECTION_END — verify whether that macro pair
   * must balance on this path.  */
  if (!scm_stack_base || scm_block_gc)
    {
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    /* Clear the unused tail so stale continuations can be collected. */
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Mark objects on the C stack. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      (   (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
                                       sizeof scm_save_regs_gc_mark)
                          / sizeof (SCM_STACKITEM)));

  {
    scm_sizet stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* Mark the explicitly protected roots. */
  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}
1096
1097 \f
1098
1099 /* {Mark/Sweep}
1100 */
1101
1102 #define MARK scm_gc_mark
1103 #define FNAME "scm_gc_mark"
1104
1105 #endif /*!MARK_DEPENDENCIES*/
1106
/* Mark an object precisely.
 *
 * This body is compiled twice via self-#include of gc.c: once as
 * scm_gc_mark (MARK_DEPENDENCIES undefined — full recursive marking),
 * and once as scm_gc_mark_dependencies (MARK_DEPENDENCIES defined —
 * marks what P references without marking P's own self-cycles).
 * RECURSE abstracts over the two behaviors; the gotos implement
 * tail-call-style iteration so deep lists don't recurse on the CDR.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

/* The loop head: immediates carry no heap references. */
gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  if (SCM_EQ_P (ptr, p))
    return;

  /* In the dependencies variant, anything other than P itself is
   * handed to the full marker.  */
  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

  if (!SCM_CELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);

#if (defined (GUILE_DEBUG_FREELIST))

  /* A cell pointer landing in a card header is corrupt. */
  if (SCM_GC_IN_CARD_HEADERP (SCM2PTR (ptr)))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);

#endif

#ifndef MARK_DEPENDENCIES

  /* Already marked: the whole reachable subgraph was handled. */
  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto gc_mark_nimp;
        }
      /* Recurse on the CAR, iterate on the CDR (the common long axis
       * of lists) to bound C-stack depth.  */
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      /* procedure-with-setter: mark the setter, iterate on the rest. */
      RECURSE (SCM_CELL_OBJECT_2 (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      {
        /* Dirk:FIXME:: The following code is super ugly:  ptr may be a struct
         * or a gloc.  If it is a gloc, the cell word #0 of ptr is a pointer
         * to a heap cell.  If it is a struct, the cell word #0 of ptr is a
         * pointer to a struct vtable data region. The fact that these are
         * accessed in the same way restricts the possibilites to change the
         * data layout of structs or heap cells.
         */
        scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
        if (vtable_data [scm_vtable_index_vcell] != 0)
          {
            /* ptr is a gloc */
            SCM gloc_car = SCM_PACK (word0);
            RECURSE (gloc_car);
            ptr = SCM_CDR (ptr);
            goto gc_mark_loop;
          }
        else
          {
            /* ptr is a struct */
            SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
            int len = SCM_SYMBOL_LENGTH (layout);
            char * fields_desc = SCM_SYMBOL_CHARS (layout);
            scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);

            if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
              {
                RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
                RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
              }
            if (len)
              {
                int x;

                /* Walk the layout descriptor two chars per field;
                 * 'p' slots hold protected (boxed) SCM values.  */
                for (x = 0; x < len - 2; x += 2, ++struct_data)
                  if (fields_desc[x] == 'p')
                    RECURSE (SCM_PACK (*struct_data));
                if (fields_desc[x] == 'p')
                  {
                    if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                      /* tail array: first word is the element count. */
                      for (x = *struct_data++; x; --x, ++struct_data)
                        RECURSE (SCM_PACK (*struct_data));
                    else
                      RECURSE (SCM_PACK (*struct_data));
                  }
              }
            /* mark vtable */
            ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
            goto gc_mark_loop;
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto gc_mark_nimp;
        }
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
        break;
      /* Recurse on elements 1..n-1, iterate on element 0. */
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
        unsigned long int i = SCM_CCLO_LENGTH (ptr);
        unsigned long int j;
        for (j = 1; j != i; ++j)
          {
            SCM obj = SCM_CCLO_REF (ptr, j);
            if (!SCM_IMP (obj))
              RECURSE (obj);
          }
        ptr = SCM_CCLO_REF (ptr, 0);
        goto gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    /* Uniform vectors hold unboxed data: nothing to trace. */
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_substring:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      /* Chain weak vectors onto scm_weak_vectors; their entries are
       * scanned after the main mark phase.  */
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_VECTOR_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    RECURSE (SCM_CAR (kvpair));
                  if (!weak_values)
                    RECURSE (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                RECURSE (alist);
            }
        }
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
        goto def;
      if (SCM_PTAB_ENTRY(ptr))
        RECURSE (SCM_FILENAME (ptr));
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
          if (!(i < scm_numsmob))
            goto def;
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
    def:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME
1395
1396 #ifndef MARK_DEPENDENCIES
1397
1398 #undef MARK
1399 #undef FNAME
1400
1401 /* And here we define `scm_gc_mark_dependencies', by including this
1402 * same file in itself.
1403 */
1404 #define MARK scm_gc_mark_dependencies
1405 #define FNAME "scm_gc_mark_dependencies"
1406 #define MARK_DEPENDENCIES
1407 #include "gc.c"
1408 #undef MARK_DEPENDENCIES
1409 #undef MARK
1410 #undef FNAME
1411
1412
/* Mark a Region Conservatively
 *
 * Scan the N stack items at X, treating every word that could be a
 * valid cell pointer as a root.  A bounded binary search over the
 * (address-sorted) heap segment table finds the candidate segment;
 * the candidate is validated for span alignment and card position
 * before scm_gc_mark is called.  Non-pointers that happen to look
 * like cell addresses are harmlessly over-marked — that is the price
 * of conservative scanning.
 */

void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = * (SCM *) &x[m];
      if (SCM_CELLP (obj))
        {
          SCM_CELLPTR ptr = SCM2PTR (obj);
          int i = 0;
          int j = scm_n_heap_segs - 1;
          /* Quick reject: outside the overall heap address range. */
          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
              && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
            {
              /* Binary search; the loop body either identifies seg_id
               * and breaks, or narrows [i, j] and continues.  */
              while (i <= j)
                {
                  int seg_id;
                  seg_id = -1;
                  if ((i == j)
                      || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                    seg_id = i;
                  else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                    seg_id = j;
                  else
                    {
                      int k;
                      k = (i + j) / 2;
                      if (k == i)
                        break;
                      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                        {
                          j = k;
                          ++i;
                          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                            continue;
                          else
                            break;
                        }
                      else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                        {
                          i = k;
                          --j;
                          if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                            continue;
                          else
                            break;
                        }
                    }

                  /* Addresses inside a card header are never objects. */
                  if (SCM_GC_IN_CARD_HEADERP (ptr))
                    break;

                  /* Double-cell segments require double-cell alignment. */
                  if (scm_heap_table[seg_id].span == 1
                      || DOUBLECELL_ALIGNED_P (obj))
                    scm_gc_mark (obj);

                  break;
                }
            }
        }
    }
}
1480
1481
1482 /* The function scm_cellp determines whether an SCM value can be regarded as a
1483 * pointer to a cell on the heap. Binary search is used in order to determine
1484 * the heap segment that contains the cell.
1485 */
1486 int
1487 scm_cellp (SCM value)
1488 {
1489 if (SCM_CELLP (value)) {
1490 scm_cell * ptr = SCM2PTR (value);
1491 unsigned int i = 0;
1492 unsigned int j = scm_n_heap_segs - 1;
1493
1494 while (i < j) {
1495 int k = (i + j) / 2;
1496 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1497 j = k;
1498 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1499 i = k + 1;
1500 }
1501 }
1502
1503 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1504 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1505 && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
1506 && !SCM_GC_IN_CARD_HEADERP (ptr)
1507 )
1508 return 1;
1509 else
1510 return 0;
1511 } else
1512 return 0;
1513 }
1514
1515
1516 static void
1517 gc_sweep_freelist_start (scm_freelist_t *freelist)
1518 {
1519 freelist->cells = SCM_EOL;
1520 freelist->left_to_collect = freelist->cluster_size;
1521 freelist->clusters_allocated = 0;
1522 freelist->clusters = SCM_EOL;
1523 freelist->clustertail = &freelist->clusters;
1524 freelist->collected_1 = freelist->collected;
1525 freelist->collected = 0;
1526 }
1527
/* Finish a sweep for FREELIST: splice the partially-filled final
 * cluster (if any) onto the cluster chain, fold its cells into the
 * collection statistics, and decide whether the heap backing this
 * freelist should grow before the next cycle.
 */
static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  int collected;
  /* Attach the leftover (incomplete) cluster to the chain tail. */
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      /* Convert the head cell into a spine link: its CAR becomes the
       * cluster body, its CDR terminates the chain.  */
      SCM c = freelist->cells;
      SCM_SETCAR (c, SCM_CDR (c));
      SCM_SETCDR (c, SCM_EOL);
      /* Count the cells of the partial cluster into this cycle's yield. */
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}
1553
/* Advance PTR to the next properly aligned data cell of the given
 * SPAN, stepping over card header cells: if the naive next cell lands
 * in a card header, jump past the header to the card's first data
 * cell instead.  */
#define NEXT_DATA_CELL(ptr, span) \
    do { \
      scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
      (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
               CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
               : nxt__); \
    } while (0)
1561
/* Sweep phase: walk every heap segment, free the malloc'ed parts of
 * each unmarked object (counting the bytes reclaimed in M), and link
 * the unmarked cells into fresh clusters on the segment's freelist.
 * Finally updates the global allocation/yield statistics and subtracts
 * the reclaimed malloc bytes from scm_mallocated.
 */
void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;               /* malloc'ed bytes reclaimed this sweep */
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist. */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr;

          if (SCM_GC_IN_CARD_HEADERP (ptr))
            {
              SCM_CELLPTR nxt;

              /* cheat here */
              nxt = ptr;
              NEXT_DATA_CELL (nxt, span);
              j += span;

              /* Compensate for the loop's own ptr += span. */
              ptr = nxt - span;
              continue;
            }

          scmptr = PTR2SCM (ptr);

          /* Marked objects survive; leave them untouched. */
          if (SCM_GCMARKP (scmptr))
            continue;

          /* Type-specific release of any malloc'ed storage hanging
           * off the dead cell.  */
          switch SCM_TYP7 (scmptr)
            {
            case scm_tcs_cons_gloc:
              {
                /* Dirk:FIXME:: Again, super ugly code:  scmptr may be a
                 * struct or a gloc.  See the corresponding comment in
                 * scm_gc_mark.
                 */
                scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
                                    - scm_tc3_cons_gloc);
                /* access as struct */
                scm_bits_t * vtable_data = (scm_bits_t *) word0;
                if (vtable_data[scm_vtable_index_vcell] == 0)
                  {
                    /* Structs need to be freed in a special order.
                     * This is handled by GC C hooks in struct.c.
                     */
                    SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
                    scm_structs_to_free = scmptr;
                    continue;
                  }
                /* fall through so that scmptr gets collected */
              }
              break;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              break;
            case scm_tc7_wvect:
              m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
              /* base points 2 words past the allocation start. */
              scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
              break;
            case scm_tc7_vector:
              {
                unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += length * sizeof (scm_bits_t);
                    scm_must_free (SCM_VECTOR_BASE (scmptr));
                  }
                break;
              }
#ifdef CCLO
            case scm_tc7_cclo:
              m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
              scm_must_free (SCM_CCLO_BASE (scmptr));
              break;
#endif
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              {
                unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
                    scm_must_free (SCM_BITVECTOR_BASE (scmptr));
                  }
              }
              break;
            case scm_tc7_byvect:
            case scm_tc7_ivect:
            case scm_tc7_uvect:
            case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
#endif
            case scm_tc7_fvect:
            case scm_tc7_dvect:
            case scm_tc7_cvect:
              m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
              scm_must_free (SCM_UVECTOR_BASE (scmptr));
              break;
#endif
            case scm_tc7_substring:
              break;
            case scm_tc7_string:
              m += SCM_STRING_LENGTH (scmptr) + 1;
              scm_must_free (SCM_STRING_CHARS (scmptr));
              break;
            case scm_tc7_symbol:
              m += SCM_SYMBOL_LENGTH (scmptr) + 1;
              scm_must_free (SCM_SYMBOL_CHARS (scmptr));
              break;
            case scm_tcs_subrs:
              /* the various "subrs" (primitives) are never freed */
              continue;
            case scm_tc7_port:
              if SCM_OPENP (scmptr)
                {
                  int k = SCM_PTOBNUM (scmptr);
                  if (!(k < scm_numptob))
                    goto sweeperr;
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free */
                  /* rather than ftobs[k].close.  .close */
                  /* is for explicit CLOSE-PORT by user */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_SETAND_CAR (scmptr, ~SCM_OPN);
                }
              break;
            case scm_tc7_smob:
              switch SCM_TYP16 (scmptr)
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  scm_must_free (SCM_BDIGITS (scmptr));
                  break;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  m += sizeof (scm_complex_t);
                  scm_must_free (SCM_COMPLEX_MEM (scmptr));
                  break;
                default:
                  {
                    int k;
                    k = SCM_SMOBNUM (scmptr);
                    if (!(k < scm_numsmob))
                      goto sweeperr;
                    m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
            default:
            sweeperr:
              SCM_MISC_ERROR ("unknown type", SCM_EOL);
            }

          /* Thread the dead cell into the current cluster; when a
           * cluster fills up, close it and start a fresh one.  */
          if (!--left_to_collect)
            {
              SCM_SETCAR (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object. */
              SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
              SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }
        }

#ifdef GC_FREE_SEGMENTS
      /* NOTE(review): `n' is not declared anywhere in this function,
       * so this (normally compiled-out) branch cannot build as
       * written — presumably it should test a count of free cells in
       * the segment.  Confirm before ever enabling GC_FREE_SEGMENTS.  */
      if (n == seg_size)
        {
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--;                    /* We need to scan the segment just moved.  */
        }
      else
#endif /* ifdef GC_FREE_SEGMENTS */
        {
          /* Update the real freelist pointer to point to the head of
             the list of free cells we've built for this segment. */
          freelist->cells = nfreelist;
          freelist->left_to_collect = left_to_collect;
        }

#ifdef GUILE_DEBUG_FREELIST
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME
1826
1827
1828 \f
1829 /* {Front end to malloc}
1830 *
1831 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1832 * scm_done_free
1833 *
1834 * These functions provide services comperable to malloc, realloc, and
1835 * free. They are for allocating malloced parts of scheme objects.
1836 * The primary purpose of the front end is to impose calls to gc. */
1837
1838
1839 /* scm_must_malloc
1840 * Return newly malloced storage or throw an error.
1841 *
1842 * The parameter WHAT is a string for error reporting.
1843 * If the threshold scm_mtrigger will be passed by this
1844 * allocation, or if the first call to malloc fails,
1845 * garbage collect -- on the presumption that some objects
1846 * using malloced storage may be collected.
1847 *
1848 * The limit scm_mtrigger may be raised by this allocation.
1849 */
1850 void *
1851 scm_must_malloc (scm_sizet size, const char *what)
1852 {
1853 void *ptr;
1854 unsigned long nm = scm_mallocated + size;
1855
1856 if (nm <= scm_mtrigger)
1857 {
1858 SCM_SYSCALL (ptr = malloc (size));
1859 if (NULL != ptr)
1860 {
1861 scm_mallocated = nm;
1862 #ifdef GUILE_DEBUG_MALLOC
1863 scm_malloc_register (ptr, what);
1864 #endif
1865 return ptr;
1866 }
1867 }
1868
1869 scm_igc (what);
1870
1871 nm = scm_mallocated + size;
1872 SCM_SYSCALL (ptr = malloc (size));
1873 if (NULL != ptr)
1874 {
1875 scm_mallocated = nm;
1876 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1877 if (nm > scm_mtrigger)
1878 scm_mtrigger = nm + nm / 2;
1879 else
1880 scm_mtrigger += scm_mtrigger / 2;
1881 }
1882 #ifdef GUILE_DEBUG_MALLOC
1883 scm_malloc_register (ptr, what);
1884 #endif
1885
1886 return ptr;
1887 }
1888
1889 scm_memory_error (what);
1890 }
1891
1892
1893 /* scm_must_realloc
1894 * is similar to scm_must_malloc.
1895 */
1896 void *
1897 scm_must_realloc (void *where,
1898 scm_sizet old_size,
1899 scm_sizet size,
1900 const char *what)
1901 {
1902 void *ptr;
1903 scm_sizet nm = scm_mallocated + size - old_size;
1904
1905 if (nm <= scm_mtrigger)
1906 {
1907 SCM_SYSCALL (ptr = realloc (where, size));
1908 if (NULL != ptr)
1909 {
1910 scm_mallocated = nm;
1911 #ifdef GUILE_DEBUG_MALLOC
1912 scm_malloc_reregister (where, ptr, what);
1913 #endif
1914 return ptr;
1915 }
1916 }
1917
1918 scm_igc (what);
1919
1920 nm = scm_mallocated + size - old_size;
1921 SCM_SYSCALL (ptr = realloc (where, size));
1922 if (NULL != ptr)
1923 {
1924 scm_mallocated = nm;
1925 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1926 if (nm > scm_mtrigger)
1927 scm_mtrigger = nm + nm / 2;
1928 else
1929 scm_mtrigger += scm_mtrigger / 2;
1930 }
1931 #ifdef GUILE_DEBUG_MALLOC
1932 scm_malloc_reregister (where, ptr, what);
1933 #endif
1934 return ptr;
1935 }
1936
1937 scm_memory_error (what);
1938 }
1939
1940
1941 void
1942 scm_must_free (void *obj)
1943 #define FUNC_NAME "scm_must_free"
1944 {
1945 #ifdef GUILE_DEBUG_MALLOC
1946 scm_malloc_unregister (obj);
1947 #endif
1948 if (obj)
1949 free (obj);
1950 else
1951 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
1952 }
1953 #undef FUNC_NAME
1954
1955
1956 /* Announce that there has been some malloc done that will be freed
1957 * during gc. A typical use is for a smob that uses some malloced
1958 * memory but can not get it from scm_must_malloc (for whatever
1959 * reason). When a new object of this smob is created you call
1960 * scm_done_malloc with the size of the object. When your smob free
1961 * function is called, be sure to include this size in the return
1962 * value.
1963 *
1964 * If you can't actually free the memory in the smob free function,
1965 * for whatever reason (like reference counting), you still can (and
1966 * should) report the amount of memory freed when you actually free it.
1967 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1968 * eh? Or even better, call scm_done_free. */
1969
1970 void
1971 scm_done_malloc (long size)
1972 {
1973 scm_mallocated += size;
1974
1975 if (scm_mallocated > scm_mtrigger)
1976 {
1977 scm_igc ("foreign mallocs");
1978 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1979 {
1980 if (scm_mallocated > scm_mtrigger)
1981 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1982 else
1983 scm_mtrigger += scm_mtrigger / 2;
1984 }
1985 }
1986 }
1987
1988 void
1989 scm_done_free (long size)
1990 {
1991 scm_mallocated -= size;
1992 }
1993
1994
1995 \f
1996 /* {Heap Segments}
1997 *
1998 * Each heap segment is an array of objects of a particular size.
1999 * Every segment has an associated (possibly shared) freelist.
2000 * A table of segment records is kept that records the upper and
2001 * lower extents of the segment; this is used during the conservative
2002 * phase of gc to identify probably gc roots (because they point
2003 * into valid segments at reasonable offsets). */
2004
/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* Upper bound (in bytes) on the size of any newly allocated heap
 * segment; alloc_some_heap clamps its request to this.  */
scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Address-sorted table of heap segment records.  scm_n_heap_segs
 * entries are in use; heap_segment_table_size is the allocated
 * capacity (grown by realloc in alloc_some_heap).  */
scm_heap_seg_data_t * scm_heap_table = 0;
static unsigned int heap_segment_table_size = 0;
int scm_n_heap_segs = 0;
2022
2023 /* init_heap_seg
2024 * initializes a new heap segment and returns the number of objects it contains.
2025 *
2026 * The segment origin and segment size in bytes are input parameters.
2027 * The freelist is both input and output.
2028 *
2029 * This function presumes that the scm_heap_table has already been expanded
2030 * to accomodate a new segment record and that the markbit space was reserved
2031 * for all the cards in this segment.
2032 */
2033
/* Initialize a fresh CARD: attach a mark bit vector and, for
 * double-cell segments (SPAN == 2), flag the card accordingly.  */
#define INIT_CARD(card, span) \
    do { \
      SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
      if ((span) == 2) \
        SCM_GC_SET_CARD_DOUBLECELL (card); \
    } while (0)
2040
static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Align the begin ptr up.
   */
  ptr = SCM_GC_CARD_UP (seg_org);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);

  /* Find the right place and insert the segment record.
   * The table is kept sorted by segment base address.
   */
  for (new_seg_index = 0;
       (   (new_seg_index < scm_n_heap_segs)
        && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    /* Shift later records up one slot to make room. */
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;

    NEXT_DATA_CELL (ptr, span);
    while (ptr < seg_end)
      {
        scm_cell *nxt = ptr;
        scm_cell *prv = NULL;
        scm_cell *last_card = NULL;
        int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
        NEXT_DATA_CELL(nxt, span);

        /* Allocate cluster spine
         */
        *clusterp = PTR2SCM (ptr);
        SCM_SETCAR (*clusterp, PTR2SCM (nxt));
        clusterp = SCM_CDRLOC (*clusterp);
        ptr = nxt;

        /* Chain the cluster's data cells together as free cells,
         * initializing each card the first time it is touched.  */
        while (n_data_cells--)
          {
            scm_cell *card = SCM_GC_CELL_CARD (ptr);
            SCM scmptr = PTR2SCM (ptr);
            nxt = ptr;
            NEXT_DATA_CELL (nxt, span);
            prv = ptr;

            if (card != last_card)
              {
                INIT_CARD (card, span);
                last_card = card;
              }

            SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
            SCM_SETCDR (scmptr, PTR2SCM (nxt));

            ptr = nxt;
          }

        /* Terminate the cluster's cell chain. */
        SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
      }

    /* sanity check */
    {
      scm_cell *ref = seg_end;
      NEXT_DATA_CELL (ref, span);
      if (ref != ptr)
        /* [cmm] looks like the segment size doesn't divide cleanly by
           cluster size.  bad cmm! */
        abort();
    }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
2154
2155 static scm_sizet
2156 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2157 {
2158 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2159
2160 return
2161 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2162 + ALIGNMENT_SLACK (freelist);
2163 }
2164
2165 static void
2166 alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
2167 #define FUNC_NAME "alloc_some_heap"
2168 {
2169 SCM_CELLPTR ptr;
2170 long len;
2171
2172 if (scm_gc_heap_lock)
2173 {
2174 /* Critical code sections (such as the garbage collector) aren't
2175 * supposed to add heap segments.
2176 */
2177 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
2178 abort ();
2179 }
2180
2181 if (scm_n_heap_segs == heap_segment_table_size)
2182 {
2183 /* We have to expand the heap segment table to have room for the new
2184 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2185 * init_heap_seg only if the allocation of the segment itself succeeds.
2186 */
2187 unsigned int new_table_size = scm_n_heap_segs + 1;
2188 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2189 scm_heap_seg_data_t * new_heap_table;
2190
2191 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2192 realloc ((char *)scm_heap_table, size)));
2193 if (!new_heap_table)
2194 {
2195 if (error_policy == abort_on_error)
2196 {
2197 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2198 abort ();
2199 }
2200 else
2201 {
2202 return;
2203 }
2204 }
2205 else
2206 {
2207 scm_heap_table = new_heap_table;
2208 heap_segment_table_size = new_table_size;
2209 }
2210 }
2211
2212 /* Pick a size for the new heap segment.
2213 * The rule for picking the size of a segment is explained in
2214 * gc.h
2215 */
2216 {
2217 /* Assure that the new segment is predicted to be large enough.
2218 *
2219 * New yield should at least equal GC fraction of new heap size, i.e.
2220 *
2221 * y + dh > f * (h + dh)
2222 *
2223 * y : yield
2224 * f : min yield fraction
2225 * h : heap size
2226 * dh : size of new heap segment
2227 *
2228 * This gives dh > (f * h - y) / (1 - f)
2229 */
2230 int f = freelist->min_yield_fraction;
2231 long h = SCM_HEAP_SIZE;
2232 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2233 len = SCM_EXPHEAP (freelist->heap_size);
2234 #ifdef DEBUGINFO
2235 fprintf (stderr, "(%d < %d)", len, min_cells);
2236 #endif
2237 if (len < min_cells)
2238 len = min_cells + freelist->cluster_size;
2239 len *= sizeof (scm_cell);
2240 /* force new sampling */
2241 freelist->collected = LONG_MAX;
2242 }
2243
2244 if (len > scm_max_segment_size)
2245 len = scm_max_segment_size;
2246
2247 {
2248 scm_sizet smallest;
2249
2250 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2251
2252 if (len < smallest)
2253 len = smallest;
2254
2255 /* Allocate with decaying ambition. */
2256 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2257 && (len >= smallest))
2258 {
2259 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
2260 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2261 if (ptr)
2262 {
2263 init_heap_seg (ptr, rounded_len, freelist);
2264 return;
2265 }
2266 len /= 2;
2267 }
2268 }
2269
2270 if (error_policy == abort_on_error)
2271 {
2272 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2273 abort ();
2274 }
2275 }
2276 #undef FUNC_NAME
2277
2278
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
	    "Flushes the glocs for @var{name}, or all glocs if @var{name}\n"
	    "is @code{#t}.")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  /* Walk every cell of every heap segment looking for gloc cells.  */
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM cell = PTR2SCM (p);
	  if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
	    {
	      /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
	       * struct cell.  See the corresponding comment in scm_gc_mark.
	       */
	      scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
	      SCM gloc_car = SCM_PACK (word0); /* access as gloc */
	      SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
	      /* Flush this gloc if it matches NAME (or NAME is #t),
		 skipping the 0/1 sentinel vcell values.  */
	      if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
		  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
		{
		  SCM_SET_CELL_OBJECT_0 (cell, name);
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
2320
2321
2322 \f
2323 /* {GC Protection Helper Functions}
2324 */
2325
2326
2327 /*
2328 * If within a function you need to protect one or more scheme objects from
2329 * garbage collection, pass them as parameters to one of the
2330 * scm_remember_upto_here* functions below. These functions don't do
2331 * anything, but since the compiler does not know that they are actually
2332 * no-ops, it will generate code that calls these functions with the given
2333 * parameters. Therefore, you can be sure that the compiler will keep those
2334 * scheme values alive (on the stack or in a register) up to the point where
2335 * scm_remember_upto_here* is called. In other words, place the call to
 * scm_remember_upto_here* _behind_ the last code in your function that
2337 * depends on the scheme object to exist.
2338 *
 * Example:  We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function', because
2341 * otherwise the characters belonging to str would be freed and
2342 * 'some_function' might access freed memory. To make sure that the compiler
2343 * keeps str alive on the stack or in a register such that it is visible to
2344 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2345 * call to 'some_function'. Note that this would not be necessary if str was
2346 * used anyway after the call to 'some_function'.
2347 * char *chars = SCM_STRING_CHARS (str);
2348 * some_function (chars);
2349 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2350 */
2351
void
scm_remember_upto_here_1 (SCM obj)
{
  /* Empty.  Protects a single object from garbage collection.  The call
     itself forces the compiler to keep OBJ live up to this point; see
     the explanatory comment block above.  */
}
2357
void
scm_remember_upto_here_2 (SCM obj1, SCM obj2)
{
  /* Empty.  Protects two objects from garbage collection; same
     mechanism as scm_remember_upto_here_1.  */
}
2363
void
scm_remember_upto_here (SCM obj, ...)
{
  /* Empty.  Protects any number of objects from garbage collection;
     the variadic arguments are never read, only kept live.  */
}
2369
2370
2371 #if (SCM_DEBUG_DEPRECATED == 0)
2372
void
scm_remember (SCM *ptr)
{
  /* empty -- deprecated predecessor of scm_remember_upto_here_*;
     compiled only when SCM_DEBUG_DEPRECATED == 0.  */
}
2378
2379 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2380
2381 /*
2382 These crazy functions prevent garbage collection
2383 of arguments after the first argument by
2384 ensuring they remain live throughout the
2385 function because they are used in the last
2386 line of the code block.
2387 It'd be better to have a nice compiler hint to
2388 aid the conservative stack-scanning GC. --03/09/00 gjb */
/* Return ELT unchanged.  The trailing arguments are kept live on the
   stack up to this call for the benefit of the conservative GC.  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
2394
/* Return the first argument unchanged.  The trailing arguments exist
   only so their values stay live on the stack for the conservative
   stack-scanning GC (see the comment above scm_return_first).  */
int
scm_return_first_int (int i, ...)
{
  int first = i;
  return first;
}
2400
2401
/* Protect OBJ from garbage collection for the rest of the process
   lifetime by consing it onto the permanently-protected list.
   Returns OBJ.  There is no way to undo this.  */
SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2410
2411
2412 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2413 other references are dropped, until the object is unprotected by calling
2414 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2415 i. e. it is possible to protect the same object several times, but it is
2416 necessary to unprotect the object the same number of times to actually get
2417 the object unprotected. It is an error to unprotect an object more often
2418 than it has been protected before. The function scm_protect_object returns
2419 OBJ.
2420 */
2421
2422 /* Implementation note: For every object X, there is a counter which
2423 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2424 */
2425
/* Increment OBJ's protection count in the scm_protects hash table,
   creating the entry (count 0) if this is the first protection.
   Returns OBJ.  See the contract comment above.  */
SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}
2441
2442
2443 /* Remove any protection for OBJ established by a prior call to
2444 scm_protect_object. This function returns OBJ.
2445
2446 See scm_protect_object for more information. */
/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    {
      /* Unprotecting an object that was never protected (or already
	 fully unprotected) is a caller error -- abort loudly.  */
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* count is unsigned; the abort above guarantees the stored count
	 is at least 1, so this decrement cannot wrap.  */
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}
2475
2476 int terminating;
2477
/* called on process termination. */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

/* on_exit variant: STATUS and ARG are required by the on_exit
   callback signature but are not used here.  */
static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  /* Mark shutdown in progress so other code can avoid unsafe work,
     then make sure buffered port output reaches its destination.  */
  terminating = 1;
  scm_flush_all_ports ();
}
2496
2497 \f
/* Allocate and install the initial heap segment for FREELIST, trying
   INIT_HEAP_SIZE first and falling back to SCM_HEAP_SEG_SIZE.
   Returns 0 on success, 1 if both allocations failed.  */
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);

  /* NOTE(review): the malloc result is handed to init_heap_seg without
     a NULL check here -- presumably init_heap_seg rejects NULL and
     returns 0, triggering the fallback below.  Verify against
     init_heap_seg's definition.  */
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    scm_expmem = 1;

  /* Derive the initial minimum-yield threshold from the fraction,
     and decide whether the heap must grow immediately.  */
  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
			   / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
2523
2524 \f
2525 static void
2526 init_freelist (scm_freelist_t *freelist,
2527 int span,
2528 int cluster_size,
2529 int min_yield)
2530 {
2531 freelist->clusters = SCM_EOL;
2532 freelist->cluster_size = cluster_size + 1;
2533 freelist->left_to_collect = 0;
2534 freelist->clusters_allocated = 0;
2535 freelist->min_yield = 0;
2536 freelist->min_yield_fraction = min_yield;
2537 freelist->span = span;
2538 freelist->collected = 0;
2539 freelist->collected_1 = 0;
2540 freelist->heap_size = 0;
2541 }
2542
2543
2544 /* Get an integer from an environment variable. */
2545 static int
2546 scm_i_getenv_int (const char *var, int def)
2547 {
2548 char *end, *val = getenv (var);
2549 long res;
2550 if (!val)
2551 return def;
2552 res = strtol (val, &end, 10);
2553 if (end == val)
2554 return def;
2555 return res;
2556 }
2557
2558
/* One-time initialization of Guile's storage subsystem: the protect
   vector, both master freelists, the heap segment table, the initial
   heap segments, GC hooks, the port table, and a process-exit cleanup
   handler.  Returns 0 on success, 1 if an initial allocation failed.  */
int
scm_init_storage ()
{
  scm_sizet gc_trigger_1;
  scm_sizet gc_trigger_2;
  scm_sizet init_heap_size_1;
  scm_sizet init_heap_size_2;
  scm_sizet j;

  /* Clear the protected-roots vector and block GC until setup done.  */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  /* Freelist parameters are tunable via environment variables.  */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
  init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
  gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
  init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
  scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);

  scm_expmem = 0;

  /* Start with room for the two initial segments in the table.  */
  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  scm_heap_table = ((scm_heap_seg_data_t *)
		    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  /* Allocate one initial heap segment per freelist; sizes are also
     tunable via the environment.  */
  init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
  init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

  /* Register cleanup() to flush ports at process exit, using whichever
     registration mechanism the platform provides.  */
#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  /* Bootstrap a few fundamental shared objects.  */
  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_c_make_vector (0, SCM_UNDEFINED);

#define DEFAULT_SYMHASH_SIZE 277
  scm_symhash = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
  scm_symhash_vars = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);

  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);

  return 0;
}
2637
2638 \f
2639
2640 SCM scm_after_gc_hook;
2641
2642 #if (SCM_DEBUG_DEPRECATED == 0)
2643 static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2644 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2645 static SCM gc_async;
2646
2647
2648 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2649 * is run after the gc, as soon as the asynchronous events are handled by the
2650 * evaluator.
2651 */
/* Run the Scheme-level after-gc-hook.  Scheduled (as an async) by
   mark_gc_async at the end of each GC and executed once the evaluator
   handles pending asyncs.  Always returns SCM_UNSPECIFIED.  */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);

#if (SCM_DEBUG_DEPRECATED == 0)

  /* The following code will be removed in Guile 1.5.  It invokes the
     old-style gc-thunk procedure, if one has been set.  */
  if (SCM_NFALSEP (scm_gc_vcell))
    {
      SCM proc = SCM_CDR (scm_gc_vcell);

      if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
	scm_apply (proc, SCM_EOL, SCM_EOL);
    }

#endif /* SCM_DEBUG_DEPRECATED == 0 */

  return SCM_UNSPECIFIED;
}
2672
2673
2674 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2675 * the garbage collection. The only purpose of this function is to mark the
2676 * gc_async (which will eventually lead to the execution of the
2677 * gc_async_thunk).
2678 */
/* C-hook callback (scm_after_gc_c_hook): mark gc_async so that
   gc_async_thunk runs once the evaluator processes asyncs.  The three
   parameters are required by the hook signature but unused.  */
static void *
mark_gc_async (void * hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}
2685
2686
/* Wire up the after-GC notification chain: create the Scheme-visible
   after-gc-hook, the %gc-thunk subr and its system async, and register
   mark_gc_async on the C-level after-gc hook.  Also snarfs this file's
   generated bindings (gc.x).  */
void
scm_init_gc ()
{
  SCM after_gc_thunk;

  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

#if (SCM_DEBUG_DEPRECATED == 0)
  /* Deprecated gc-thunk variable, consulted by gc_async_thunk.  */
  scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
#endif /* SCM_DEBUG_DEPRECATED == 0 */
  after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
  gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#ifndef SCM_MAGIC_SNARFER
#include "libguile/gc.x"
#endif
}
2706
2707 #endif /*MARK_DEPENDENCIES*/
2708
2709 /*
2710 Local Variables:
2711 c-file-style: "gnu"
2712 End:
2713 */