1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 /* SECTION: This code is compiled once.
48 */
49
50 #ifndef MARK_DEPENDENCIES
51
52 \f
53 #include <stdio.h>
54 #include "libguile/_scm.h"
55 #include "libguile/eval.h"
56 #include "libguile/stime.h"
57 #include "libguile/stackchk.h"
58 #include "libguile/struct.h"
59 #include "libguile/smob.h"
60 #include "libguile/unif.h"
61 #include "libguile/async.h"
62 #include "libguile/ports.h"
63 #include "libguile/root.h"
64 #include "libguile/strings.h"
65 #include "libguile/vectors.h"
66 #include "libguile/weaks.h"
67 #include "libguile/hashtab.h"
68
69 #include "libguile/validate.h"
70 #include "libguile/gc.h"
71
72 #ifdef GUILE_DEBUG_MALLOC
73 #include "libguile/debug-malloc.h"
74 #endif
75
76 #ifdef HAVE_MALLOC_H
77 #include <malloc.h>
78 #endif
79
80 #ifdef HAVE_UNISTD_H
81 #include <unistd.h>
82 #endif
83
84 #ifdef __STDC__
85 #include <stdarg.h>
86 #define var_start(x, y) va_start(x, y)
87 #else
88 #include <varargs.h>
89 #define var_start(x, y) va_start(x)
90 #endif
91
92 \f
93
94 unsigned int scm_gc_running_p = 0;
95
96 \f
97
98 #if (SCM_DEBUG_CELL_ACCESSES == 1)
99
100 unsigned int scm_debug_cell_accesses_p = 0;
101
102
/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves determining whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment, and whether the cell
 * pointed to is not a free cell.
 */
108 void
109 scm_assert_cell_valid (SCM cell)
110 {
111 if (scm_debug_cell_accesses_p)
112 {
113 scm_debug_cell_accesses_p = 0; /* disable to avoid recursion */
114
115 if (!scm_cellp (cell))
116 {
117 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
118 abort ();
119 }
120 else if (!scm_gc_running_p)
121 {
	  /* Dirk::FIXME:: During garbage collection, references to free
	     cells occur.  This is all right during conservative marking,
	     but should not happen otherwise (I think).  The case of free
	     cells accessed during conservative marking is handled in
	     scm_mark_locations.  However, accesses to free cells still
	     occur during gc, and I don't understand why.  If it is a bug
	     and gets fixed, the following test should also work while gc
	     is running.
	   */
131 if (SCM_FREE_CELL_P (cell))
132 {
133 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
134 abort ();
135 }
136 }
137 scm_debug_cell_accesses_p = 1; /* re-enable */
138 }
139 }
140
141
142 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
143 (SCM flag),
144 "If FLAG is #f, cell access checking is disabled.\n"
145 "If FLAG is #t, cell access checking is enabled.\n"
146 "This procedure only exists because the compile-time flag\n"
147 "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
148 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
149 {
150 if (SCM_FALSEP (flag)) {
151 scm_debug_cell_accesses_p = 0;
152 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
153 scm_debug_cell_accesses_p = 1;
154 } else {
155 SCM_WRONG_TYPE_ARG (1, flag);
156 }
157 return SCM_UNSPECIFIED;
158 }
159 #undef FUNC_NAME
160
161 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
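/* Example (illustrative): once Guile has been compiled with
 * SCM_DEBUG_CELL_ACCESSES == 1, the check above can be toggled from
 * Scheme:
 *
 *   (set-debug-cell-accesses! #t)
 */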
162
163 \f
164
/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which scm_cons and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of the heap.  If this much
 * heap is allocated initially, the heap will grow by half its current
 * size each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type scm_sizet.  This
 * code is in scm_init_storage() and alloc_some_heap() in sys.c.
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more
 * heap is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.  See the
 * worked example below.]
 */
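/* A worked example of the malloc-trigger arithmetic (an illustrative
 * sketch; the authoritative logic is in scm_must_malloc below): with
 * SCM_INIT_MALLOC_LIMIT = 100000, the hysteresis is 10000 bytes.
 * Suppose scm_mtrigger is 100000 and, after a triggered GC, the new
 * malloc total would be 95000.  Since 95000 > 100000 - 10000, the
 * collection did not reclaim enough, so the trigger is raised by half
 * to 150000.  This is what damps the near-constant-GC oscillation
 * mentioned above.
 */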
198
/*
 * A heap size of 45000 cells and a 40% min yield give quick startup and
 * no extra heap allocation.  Higher min yield values may lead to large
 * heaps, especially if a program varies its maximum consumption between
 * different freelists.
 */
205
206 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
207 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
208 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
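/* Illustrative arithmetic, assuming a hypothetical card geometry of 250
 * data cells per card: SCM_DATA_CELLS2CARDS (2000) = (2000 + 249) / 250
 * = 8, so SCM_CARDS_PER_CLUSTER would be 8 and SCM_CLUSTER_SIZE_1 would
 * be 8 * 250 = 2000 data cells.  The actual numbers depend on
 * SCM_GC_CARD_N_DATA_CELLS for the platform.
 */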
209 int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
210 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
211 int scm_default_min_yield_1 = 40;
212
213 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
214 int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
215 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application.
 */
219 int scm_default_min_yield_2 = 40;
220
int scm_default_max_segment_size = 2097000L;  /* a little less than 2 MB */
222
223 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
224 #ifdef _QC
225 # define SCM_HEAP_SEG_SIZE 32768L
226 #else
227 # ifdef sequent
228 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
229 # else
230 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
231 # endif
232 #endif
/* Make the heap grow by a factor of 1.5: SCM_EXPHEAP yields the increment (half the current size). */
234 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
235 #define SCM_INIT_MALLOC_LIMIT 100000
236 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
237
238 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
239 aligned inner bounds for allocated storage */
240
241 #ifdef PROT386
/* in 386 protected mode we need only adjust the offset */
243 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
244 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
245 #else
246 # ifdef _UNICOS
247 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
248 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
249 # else
250 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
251 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
252 # endif /* UNICOS */
253 #endif /* PROT386 */
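/* Example of the generic case, assuming sizeof (scm_cell) == 8 and
 * span == 1: CELL_UP rounds up to the next 8-byte cell boundary and
 * CELL_DN rounds down, so CELL_UP (0x1003, 1) == 0x1008 and
 * CELL_DN (0x1003, 1) == 0x1000.  With span == 2 the alignment unit
 * doubles to 16 bytes.
 */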
254
255 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
256 #define CLUSTER_SIZE_IN_BYTES(freelist) \
257 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
258
259 \f
260 /* scm_freelists
261 */
262
263 typedef struct scm_freelist_t {
264 /* collected cells */
265 SCM cells;
266 /* number of cells left to collect before cluster is full */
267 unsigned int left_to_collect;
268 /* number of clusters which have been allocated */
269 unsigned int clusters_allocated;
270 /* a list of freelists, each of size cluster_size,
271 * except the last one which may be shorter
272 */
273 SCM clusters;
274 SCM *clustertail;
275 /* this is the number of objects in each cluster, including the spine cell */
276 int cluster_size;
  /* indicates that we should grow the heap instead of collecting
   */
279 int grow_heap_p;
280 /* minimum yield on this list in order not to grow the heap
281 */
282 long min_yield;
283 /* defines min_yield as percent of total heap size
284 */
285 int min_yield_fraction;
286 /* number of cells per object on this list */
287 int span;
288 /* number of collected cells during last GC */
289 long collected;
290 /* number of collected cells during penultimate GC */
291 long collected_1;
292 /* total number of cells in heap segments
293 * belonging to this list.
294 */
295 long heap_size;
296 } scm_freelist_t;
297
298 SCM scm_freelist = SCM_EOL;
299 scm_freelist_t scm_master_freelist = {
300 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
301 };
302 SCM scm_freelist2 = SCM_EOL;
303 scm_freelist_t scm_master_freelist2 = {
304 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
305 };
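/* How the pieces fit together (a summary, not new mechanism): the
 * master freelists manage clusters -- scm_master_freelist for span-1
 * (single) cells, scm_master_freelist2 for span-2 (double) cells --
 * while scm_freelist and scm_freelist2 are the chains that SCM_NEWCELL
 * actually pops from, falling back on scm_gc_for_newcell when empty
 * (scm_debug_newcell below spells out the same logic).
 */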
306
307 /* scm_mtrigger
308 * is the number of bytes of must_malloc allocation needed to trigger gc.
309 */
310 unsigned long scm_mtrigger;
311
312 /* scm_gc_heap_lock
313 * If set, don't expand the heap. Set only during gc, during which no allocation
314 * is supposed to take place anyway.
315 */
316 int scm_gc_heap_lock = 0;
317
318 /* GC Blocking
319 * Don't pause for collection if this is set -- just
320 * expand the heap.
321 */
322 int scm_block_gc = 1;
323
324 /* During collection, this accumulates objects holding
325 * weak references.
326 */
327 SCM scm_weak_vectors;
328
329 /* During collection, this accumulates structures which are to be freed.
330 */
331 SCM scm_structs_to_free;
332
333 /* GC Statistics Keeping
334 */
335 unsigned long scm_cells_allocated = 0;
336 long scm_mallocated = 0;
337 unsigned long scm_gc_cells_collected;
338 unsigned long scm_gc_yield;
339 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
340 unsigned long scm_gc_malloc_collected;
341 unsigned long scm_gc_ports_collected;
342 unsigned long scm_gc_time_taken = 0;
343 static unsigned long t_before_gc;
344 static unsigned long t_before_sweep;
345 unsigned long scm_gc_mark_time_taken = 0;
346 unsigned long scm_gc_sweep_time_taken = 0;
347 unsigned long scm_gc_times = 0;
348 unsigned long scm_gc_cells_swept = 0;
349 double scm_gc_cells_marked_acc = 0.;
350 double scm_gc_cells_swept_acc = 0.;
351
352 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
353 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
354 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
355 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
356 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
357 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
358 SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
359 SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
360 SCM_SYMBOL (sym_times, "gc-times");
361 SCM_SYMBOL (sym_cells_marked, "cells-marked");
362 SCM_SYMBOL (sym_cells_swept, "cells-swept");
363
364 typedef struct scm_heap_seg_data_t
365 {
366 /* lower and upper bounds of the segment */
367 SCM_CELLPTR bounds[2];
368
369 /* address of the head-of-freelist pointer for this segment's cells.
370 All segments usually point to the same one, scm_freelist. */
371 scm_freelist_t *freelist;
372
373 /* number of cells per object in this segment */
374 int span;
375 } scm_heap_seg_data_t;
376
377
378
379 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
380
381 typedef enum { return_on_error, abort_on_error } policy_on_error;
382 static void alloc_some_heap (scm_freelist_t *, policy_on_error);
383
384
385 #define SCM_HEAP_SIZE \
386 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
387 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
388
389 #define BVEC_GROW_SIZE 256
390 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
391 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
392
393 /* mark space allocation */
394
395 typedef struct scm_mark_space_t
396 {
397 scm_c_bvec_limb_t *bvec_space;
398 struct scm_mark_space_t *next;
399 } scm_mark_space_t;
400
401 static scm_mark_space_t *current_mark_space;
402 static scm_mark_space_t **mark_space_ptr;
403 static int current_mark_space_offset;
404 static scm_mark_space_t *mark_space_head;
405
406 static scm_c_bvec_limb_t *
407 get_bvec ()
408 {
409 scm_c_bvec_limb_t *res;
410
411 if (!current_mark_space)
412 {
413 SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
414 if (!current_mark_space)
415 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
416
417 current_mark_space->bvec_space = NULL;
418 current_mark_space->next = NULL;
419
420 *mark_space_ptr = current_mark_space;
421 mark_space_ptr = &(current_mark_space->next);
422
423 return get_bvec ();
424 }
425
426 if (!(current_mark_space->bvec_space))
427 {
428 SCM_SYSCALL (current_mark_space->bvec_space =
429 (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
430 if (!(current_mark_space->bvec_space))
431 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
432
433 current_mark_space_offset = 0;
434
435 return get_bvec ();
436 }
437
438 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
439 {
440 current_mark_space = NULL;
441
442 return get_bvec ();
443 }
444
445 res = current_mark_space->bvec_space + current_mark_space_offset;
446 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
447
448 return res;
449 }
450
451 static void
452 clear_mark_space ()
453 {
454 scm_mark_space_t *ms;
455
456 for (ms = mark_space_head; ms; ms = ms->next)
457 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
458 }
459
460
461 \f
462 /* Debugging functions. */
463
464 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
465
466 /* Return the number of the heap segment containing CELL. */
467 static int
468 which_seg (SCM cell)
469 {
470 int i;
471
472 for (i = 0; i < scm_n_heap_segs; i++)
473 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
474 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
475 return i;
476 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
477 SCM_UNPACK (cell));
478 abort ();
479 }
480
481
482 static void
483 map_free_list (scm_freelist_t *master, SCM freelist)
484 {
485 int last_seg = -1, count = 0;
486 SCM f;
487
488 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
489 {
490 int this_seg = which_seg (f);
491
492 if (this_seg != last_seg)
493 {
494 if (last_seg != -1)
495 fprintf (stderr, " %5d %d-cells in segment %d\n",
496 count, master->span, last_seg);
497 last_seg = this_seg;
498 count = 0;
499 }
500 count++;
501 }
502 if (last_seg != -1)
503 fprintf (stderr, " %5d %d-cells in segment %d\n",
504 count, master->span, last_seg);
505 }
506
507 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
508 (),
509 "Print debugging information about the free-list.\n"
510 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
511 #define FUNC_NAME s_scm_map_free_list
512 {
513 int i;
514 fprintf (stderr, "%d segments total (%d:%d",
515 scm_n_heap_segs,
516 scm_heap_table[0].span,
517 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
518 for (i = 1; i < scm_n_heap_segs; i++)
519 fprintf (stderr, ", %d:%d",
520 scm_heap_table[i].span,
521 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
522 fprintf (stderr, ")\n");
523 map_free_list (&scm_master_freelist, scm_freelist);
524 map_free_list (&scm_master_freelist2, scm_freelist2);
525 fflush (stderr);
526
527 return SCM_UNSPECIFIED;
528 }
529 #undef FUNC_NAME
530
531 static int last_cluster;
532 static int last_size;
533
534 static int
535 free_list_length (char *title, int i, SCM freelist)
536 {
537 SCM ls;
538 int n = 0;
539 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
540 if (SCM_FREE_CELL_P (ls))
541 ++n;
542 else
543 {
544 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
545 abort ();
546 }
547 if (n != last_size)
548 {
549 if (i > 0)
550 {
551 if (last_cluster == i - 1)
552 fprintf (stderr, "\t%d\n", last_size);
553 else
554 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
555 }
556 if (i >= 0)
557 fprintf (stderr, "%s %d", title, i);
558 else
559 fprintf (stderr, "%s\t%d\n", title, n);
560 last_cluster = i;
561 last_size = n;
562 }
563 return n;
564 }
565
566 static void
567 free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
568 {
569 SCM clusters;
570 int i = 0, len, n = 0;
571 fprintf (stderr, "%s\n\n", title);
572 n += free_list_length ("free list", -1, freelist);
573 for (clusters = master->clusters;
574 SCM_NNULLP (clusters);
575 clusters = SCM_CDR (clusters))
576 {
577 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
578 n += len;
579 }
580 if (last_cluster == i - 1)
581 fprintf (stderr, "\t%d\n", last_size);
582 else
583 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
584 fprintf (stderr, "\ntotal %d objects\n\n", n);
585 }
586
587 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
588 (),
589 "Print debugging information about the free-list.\n"
590 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
591 #define FUNC_NAME s_scm_free_list_length
592 {
593 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
594 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
595 return SCM_UNSPECIFIED;
596 }
597 #undef FUNC_NAME
598
599 #endif
600
601 #ifdef GUILE_DEBUG_FREELIST
602
603 /* Number of calls to SCM_NEWCELL since startup. */
604 static unsigned long scm_newcell_count;
605 static unsigned long scm_newcell2_count;
606
607 /* Search freelist for anything that isn't marked as a free cell.
608 Abort if we find something. */
609 static void
610 scm_check_freelist (SCM freelist)
611 {
612 SCM f;
613 int i = 0;
614
615 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
616 if (!SCM_FREE_CELL_P (f))
617 {
618 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
619 scm_newcell_count, i);
620 abort ();
621 }
622 }
623
624 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
625 (SCM flag),
626 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
627 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
628 "compile-time flag was selected.\n")
629 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
630 {
631 /* [cmm] I did a double-take when I read this code the first time.
632 well, FWIW. */
633 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
634 return SCM_UNSPECIFIED;
635 }
636 #undef FUNC_NAME
637
638
639 SCM
640 scm_debug_newcell (void)
641 {
642 SCM new;
643
644 scm_newcell_count++;
645 if (scm_debug_check_freelist)
646 {
647 scm_check_freelist (scm_freelist);
648 scm_gc();
649 }
650
651 /* The rest of this is supposed to be identical to the SCM_NEWCELL
652 macro. */
653 if (SCM_NULLP (scm_freelist))
654 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
655 else
656 {
657 new = scm_freelist;
658 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
659 }
660
661 return new;
662 }
663
664 SCM
665 scm_debug_newcell2 (void)
666 {
667 SCM new;
668
669 scm_newcell2_count++;
670 if (scm_debug_check_freelist)
671 {
672 scm_check_freelist (scm_freelist2);
673 scm_gc ();
674 }
675
676 /* The rest of this is supposed to be identical to the SCM_NEWCELL
677 macro. */
678 if (SCM_NULLP (scm_freelist2))
679 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
680 else
681 {
682 new = scm_freelist2;
683 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
684 }
685
686 return new;
687 }
688
689 #endif /* GUILE_DEBUG_FREELIST */
690
691 \f
692
693 static unsigned long
694 master_cells_allocated (scm_freelist_t *master)
695 {
  /* the '- 1' below ignores each cluster's spine cell. */
697 int objects = master->clusters_allocated * (master->cluster_size - 1);
698 if (SCM_NULLP (master->clusters))
699 objects -= master->left_to_collect;
700 return master->span * objects;
701 }
702
703 static unsigned long
704 freelist_length (SCM freelist)
705 {
706 int n;
707 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
708 ++n;
709 return n;
710 }
711
712 static unsigned long
713 compute_cells_allocated ()
714 {
715 return (scm_cells_allocated
716 + master_cells_allocated (&scm_master_freelist)
717 + master_cells_allocated (&scm_master_freelist2)
718 - scm_master_freelist.span * freelist_length (scm_freelist)
719 - scm_master_freelist2.span * freelist_length (scm_freelist2));
720 }
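
/* Accounting sketch: the total is the cells swept into use
 * (scm_cells_allocated) plus the cells handed out from clusters on
 * each master, minus the cells still unused on the per-interpreter
 * freelists; every term is scaled by the list's span, so the result
 * is in single-cell units.
 */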
721
722 /* {Scheme Interface to GC}
723 */
724
725 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
726 (),
727 "Returns an association list of statistics about Guile's current use of storage. ")
728 #define FUNC_NAME s_scm_gc_stats
729 {
730 int i;
731 int n;
732 SCM heap_segs;
733 long int local_scm_mtrigger;
734 long int local_scm_mallocated;
735 long int local_scm_heap_size;
736 long int local_scm_cells_allocated;
737 long int local_scm_gc_time_taken;
738 long int local_scm_gc_times;
739 long int local_scm_gc_mark_time_taken;
740 long int local_scm_gc_sweep_time_taken;
741 double local_scm_gc_cells_swept;
742 double local_scm_gc_cells_marked;
743 SCM answer;
744
745 SCM_DEFER_INTS;
746
747 ++scm_block_gc;
748
749 retry:
750 heap_segs = SCM_EOL;
751 n = scm_n_heap_segs;
752 for (i = scm_n_heap_segs; i--; )
753 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
754 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
755 heap_segs);
756 if (scm_n_heap_segs != n)
757 goto retry;
758
759 --scm_block_gc;
760
761 /* Below, we cons to produce the resulting list. We want a snapshot of
762 * the heap situation before consing.
763 */
764 local_scm_mtrigger = scm_mtrigger;
765 local_scm_mallocated = scm_mallocated;
766 local_scm_heap_size = SCM_HEAP_SIZE;
767 local_scm_cells_allocated = compute_cells_allocated ();
768 local_scm_gc_time_taken = scm_gc_time_taken;
769 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
770 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
771 local_scm_gc_times = scm_gc_times;
772 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
773 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
774
775 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
776 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
777 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
778 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
779 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
780 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
781 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
782 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
783 scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
784 scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
785 scm_cons (sym_heap_segments, heap_segs),
786 SCM_UNDEFINED);
787 SCM_ALLOW_INTS;
788 return answer;
789 }
790 #undef FUNC_NAME
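
/* Example use from Scheme (illustrative):
 *
 *   (cdr (assq 'cells-allocated (gc-stats)))
 *
 * The retry loop above re-snapshots the segment bounds if consing the
 * result happens to grow the heap while the list is being built.
 */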
791
792
793 static void
794 gc_start_stats (const char *what)
795 {
796 t_before_gc = scm_c_get_internal_run_time ();
797 scm_gc_cells_swept = 0;
798 scm_gc_cells_collected = 0;
799 scm_gc_yield_1 = scm_gc_yield;
800 scm_gc_yield = (scm_cells_allocated
801 + master_cells_allocated (&scm_master_freelist)
802 + master_cells_allocated (&scm_master_freelist2));
803 scm_gc_malloc_collected = 0;
804 scm_gc_ports_collected = 0;
805 }
806
807
808 static void
809 gc_end_stats ()
810 {
811 unsigned long t = scm_c_get_internal_run_time ();
812 scm_gc_time_taken += (t - t_before_gc);
813 scm_gc_sweep_time_taken += (t - t_before_sweep);
814 ++scm_gc_times;
815
816 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
817 scm_gc_cells_swept_acc += scm_gc_cells_swept;
818 }
819
820
821 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
822 (SCM obj),
823 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
824 "returned by this function for @var{obj}")
825 #define FUNC_NAME s_scm_object_address
826 {
827 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
828 }
829 #undef FUNC_NAME
830
831
832 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
833 (),
834 "Scans all of SCM objects and reclaims for further use those that are\n"
835 "no longer accessible.")
836 #define FUNC_NAME s_scm_gc
837 {
838 SCM_DEFER_INTS;
839 scm_igc ("call");
840 SCM_ALLOW_INTS;
841 return SCM_UNSPECIFIED;
842 }
843 #undef FUNC_NAME
844
845
846 \f
847 /* {C Interface For When GC is Triggered}
848 */
849
850 static void
851 adjust_min_yield (scm_freelist_t *freelist)
852 {
  /* min yield is adjusted upwards so that the next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of the total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which is currently triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by averaging over multiple GCs.)
   */
867 if (freelist->min_yield_fraction)
868 {
869 /* Pick largest of last two yields. */
870 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
871 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
872 #ifdef DEBUGINFO
873 fprintf (stderr, " after GC = %d, delta = %d\n",
874 scm_cells_allocated,
875 delta);
876 #endif
877 if (delta > 0)
878 freelist->min_yield += delta;
879 }
880 }
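
/* Worked example with illustrative numbers: for a total heap of 100000
 * cells and min_yield_fraction = 40, the target yield is 40000 cells.
 * If the larger of the last two yields was only 30000, delta is 10000
 * and min_yield rises by that amount, making it more likely that the
 * next sweep schedules heap growth (see gc_sweep_freelist_finish).
 */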
881
882
/* When we get POSIX threads support, the master will be global and
 * shared, while each thread will have its own freelist.
 */
886
887 SCM
888 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
889 {
890 SCM cell;
891 ++scm_ints_disabled;
892 do
893 {
894 if (SCM_NULLP (master->clusters))
895 {
896 if (master->grow_heap_p || scm_block_gc)
897 {
898 /* In order to reduce gc frequency, try to allocate a new heap
899 * segment first, even if gc might find some free cells. If we
900 * can't obtain a new heap segment, we will try gc later.
901 */
902 master->grow_heap_p = 0;
903 alloc_some_heap (master, return_on_error);
904 }
905 if (SCM_NULLP (master->clusters))
906 {
907 /* The heap was not grown, either because it wasn't scheduled to
908 * grow, or because there was not enough memory available. In
909 * both cases we have to try gc to get some free cells.
910 */
911 #ifdef DEBUGINFO
912 fprintf (stderr, "allocated = %d, ",
913 scm_cells_allocated
914 + master_cells_allocated (&scm_master_freelist)
915 + master_cells_allocated (&scm_master_freelist2));
916 #endif
917 scm_igc ("cells");
918 adjust_min_yield (master);
919 if (SCM_NULLP (master->clusters))
920 {
921 /* gc could not free any cells. Now, we _must_ allocate a
922 * new heap segment, because there is no other possibility
923 * to provide a new cell for the caller.
924 */
925 alloc_some_heap (master, abort_on_error);
926 }
927 }
928 }
929 cell = SCM_CAR (master->clusters);
930 master->clusters = SCM_CDR (master->clusters);
931 ++master->clusters_allocated;
932 }
933 while (SCM_NULLP (cell));
934
935 #ifdef GUILE_DEBUG_FREELIST
936 scm_check_freelist (cell);
937 #endif
938
939 --scm_ints_disabled;
940 *freelist = SCM_FREE_CELL_CDR (cell);
941 return cell;
942 }
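
/* To summarize the strategy above: (1) when growth is scheduled
 * (grow_heap_p) or GC is blocked, first try to grow the heap without
 * aborting on failure; (2) otherwise collect and re-tune min_yield;
 * (3) if even GC yields no cluster, grow the heap or abort.  The outer
 * do-while guards against picking up an empty cluster.
 */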
943
944
945 #if 0
946 /* This is a support routine which can be used to reserve a cluster
947 * for some special use, such as debugging. It won't be useful until
948 * free cells are preserved between garbage collections.
949 */
950
SCM
952 scm_alloc_cluster (scm_freelist_t *master)
953 {
954 SCM freelist, cell;
955 cell = scm_gc_for_newcell (master, &freelist);
956 SCM_SETCDR (cell, freelist);
957 return cell;
958 }
959 #endif
960
961
962 scm_c_hook_t scm_before_gc_c_hook;
963 scm_c_hook_t scm_before_mark_c_hook;
964 scm_c_hook_t scm_before_sweep_c_hook;
965 scm_c_hook_t scm_after_sweep_c_hook;
966 scm_c_hook_t scm_after_gc_c_hook;
967
968
969 void
970 scm_igc (const char *what)
971 {
972 int j;
973
974 ++scm_gc_running_p;
975 scm_c_hook_run (&scm_before_gc_c_hook, 0);
976 #ifdef DEBUGINFO
977 fprintf (stderr,
978 SCM_NULLP (scm_freelist)
979 ? "*"
980 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
981 #endif
982 #ifdef USE_THREADS
983 /* During the critical section, only the current thread may run. */
984 SCM_THREAD_CRITICAL_SECTION_START;
985 #endif
986
987 /* fprintf (stderr, "gc: %s\n", what); */
988
989 if (!scm_stack_base || scm_block_gc)
990 {
991 --scm_gc_running_p;
992 return;
993 }
994
995 gc_start_stats (what);
996
997 if (scm_mallocated < 0)
998 /* The byte count of allocated objects has underflowed. This is
999 probably because you forgot to report the sizes of objects you
1000 have allocated, by calling scm_done_malloc or some such. When
1001 the GC freed them, it subtracted their size from
1002 scm_mallocated, which underflowed. */
1003 abort ();
1004
1005 if (scm_gc_heap_lock)
1006 /* We've invoked the collector while a GC is already in progress.
1007 That should never happen. */
1008 abort ();
1009
1010 ++scm_gc_heap_lock;
1011
1012 /* flush dead entries from the continuation stack */
1013 {
1014 int x;
1015 int bound;
1016 SCM * elts;
1017 elts = SCM_VELTS (scm_continuation_stack);
1018 bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
1019 x = SCM_INUM (scm_continuation_stack_ptr);
1020 while (x < bound)
1021 {
1022 elts[x] = SCM_BOOL_F;
1023 ++x;
1024 }
1025 }
1026
1027 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1028
1029 clear_mark_space ();
1030
1031 #ifndef USE_THREADS
1032
1033 /* Mark objects on the C stack. */
1034 SCM_FLUSH_REGISTER_WINDOWS;
1035 /* This assumes that all registers are saved into the jmp_buf */
1036 setjmp (scm_save_regs_gc_mark);
1037 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
1038 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
1039 sizeof scm_save_regs_gc_mark)
1040 / sizeof (SCM_STACKITEM)));
1041
1042 {
1043 scm_sizet stack_len = scm_stack_size (scm_stack_base);
1044 #ifdef SCM_STACK_GROWS_UP
1045 scm_mark_locations (scm_stack_base, stack_len);
1046 #else
1047 scm_mark_locations (scm_stack_base - stack_len, stack_len);
1048 #endif
1049 }
1050
1051 #else /* USE_THREADS */
1052
1053 /* Mark every thread's stack and registers */
1054 scm_threads_mark_stacks ();
1055
1056 #endif /* USE_THREADS */
1057
1058 j = SCM_NUM_PROTECTS;
1059 while (j--)
1060 scm_gc_mark (scm_sys_protects[j]);
1061
1062 /* FIXME: we should have a means to register C functions to be run
1063 * in different phases of GC
1064 */
1065 scm_mark_subr_table ();
1066
1067 #ifndef USE_THREADS
1068 scm_gc_mark (scm_root->handle);
1069 #endif
1070
1071 t_before_sweep = scm_c_get_internal_run_time ();
1072 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1073
1074 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
1075
1076 scm_gc_sweep ();
1077
1078 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1079
1080 --scm_gc_heap_lock;
1081 gc_end_stats ();
1082
1083 #ifdef USE_THREADS
1084 SCM_THREAD_CRITICAL_SECTION_END;
1085 #endif
1086 scm_c_hook_run (&scm_after_gc_c_hook, 0);
1087 --scm_gc_running_p;
1088 }
1089
1090 \f
1091
1092 /* {Mark/Sweep}
1093 */
1094
1095 #define MARK scm_gc_mark
1096 #define FNAME "scm_gc_mark"
1097
1098 #endif /*!MARK_DEPENDENCIES*/
1099
1100 /* Mark an object precisely.
1101 */
1102 void
1103 MARK (SCM p)
1104 #define FUNC_NAME FNAME
1105 {
1106 register long i;
1107 register SCM ptr;
1108
1109 #ifndef MARK_DEPENDENCIES
1110 # define RECURSE scm_gc_mark
1111 #else
1112 /* go through the usual marking, but not for self-cycles. */
1113 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1114 #endif
1115 ptr = p;
1116
1117 #ifdef MARK_DEPENDENCIES
1118 goto gc_mark_loop_first_time;
1119 #endif
1120
1121 gc_mark_loop:
1122 if (SCM_IMP (ptr))
1123 return;
1124
1125 gc_mark_nimp:
1126
1127 #ifdef MARK_DEPENDENCIES
1128 if (SCM_EQ_P (ptr, p))
1129 return;
1130
1131 scm_gc_mark (ptr);
1132 return;
1133
1134 gc_mark_loop_first_time:
1135 #endif
1136
1137 if (!SCM_CELLP (ptr))
1138 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1139
1140 #if (defined (GUILE_DEBUG_FREELIST))
1141
1142 if (SCM_GC_IN_CARD_HEADERP (SCM2PTR (ptr)))
1143 scm_wta (ptr, "rogue pointer in heap", NULL);
1144
1145 #endif
1146
1147 #ifndef MARK_DEPENDENCIES
1148
1149 if (SCM_GCMARKP (ptr))
1150 return;
1151
1152 SCM_SETGCMARK (ptr);
1153
1154 #endif
1155
1156 switch (SCM_TYP7 (ptr))
1157 {
1158 case scm_tcs_cons_nimcar:
1159 if (SCM_IMP (SCM_CDR (ptr)))
1160 {
1161 ptr = SCM_CAR (ptr);
1162 goto gc_mark_nimp;
1163 }
1164 RECURSE (SCM_CAR (ptr));
1165 ptr = SCM_CDR (ptr);
1166 goto gc_mark_nimp;
1167 case scm_tcs_cons_imcar:
1168 ptr = SCM_CDR (ptr);
1169 goto gc_mark_loop;
1170 case scm_tc7_pws:
1171 RECURSE (SCM_CELL_OBJECT_2 (ptr));
1172 ptr = SCM_CDR (ptr);
1173 goto gc_mark_loop;
1174 case scm_tcs_cons_gloc:
1175 {
	/* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
	 * or a gloc.  If it is a gloc, cell word #0 of ptr is a pointer
	 * to a heap cell.  If it is a struct, cell word #0 of ptr is a
	 * pointer to a struct vtable data region.  The fact that these are
	 * accessed in the same way restricts the possibilities for changing
	 * the data layout of structs or heap cells.
	 */
1183 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1184 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1185 if (vtable_data [scm_vtable_index_vcell] != 0)
1186 {
1187 /* ptr is a gloc */
1188 SCM gloc_car = SCM_PACK (word0);
1189 RECURSE (gloc_car);
1190 ptr = SCM_CDR (ptr);
1191 goto gc_mark_loop;
1192 }
1193 else
1194 {
1195 /* ptr is a struct */
1196 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
1197 int len = SCM_SYMBOL_LENGTH (layout);
1198 char * fields_desc = SCM_SYMBOL_CHARS (layout);
1199 scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);
1200
1201 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1202 {
1203 RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
1204 RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
1205 }
1206 if (len)
1207 {
1208 int x;
1209
1210 for (x = 0; x < len - 2; x += 2, ++struct_data)
1211 if (fields_desc[x] == 'p')
1212 RECURSE (SCM_PACK (*struct_data));
1213 if (fields_desc[x] == 'p')
1214 {
1215 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
1216 for (x = *struct_data++; x; --x, ++struct_data)
1217 RECURSE (SCM_PACK (*struct_data));
1218 else
1219 RECURSE (SCM_PACK (*struct_data));
1220 }
1221 }
1222 /* mark vtable */
1223 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1224 goto gc_mark_loop;
1225 }
1226 }
1227 break;
1228 case scm_tcs_closures:
1229 if (SCM_IMP (SCM_CDR (ptr)))
1230 {
1231 ptr = SCM_CLOSCAR (ptr);
1232 goto gc_mark_nimp;
1233 }
1234 RECURSE (SCM_CLOSCAR (ptr));
1235 ptr = SCM_CDR (ptr);
1236 goto gc_mark_nimp;
1237 case scm_tc7_vector:
1238 i = SCM_VECTOR_LENGTH (ptr);
1239 if (i == 0)
1240 break;
1241 while (--i > 0)
1242 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1243 RECURSE (SCM_VELTS (ptr)[i]);
1244 ptr = SCM_VELTS (ptr)[0];
1245 goto gc_mark_loop;
1246 #ifdef CCLO
1247 case scm_tc7_cclo:
1248 {
1249 unsigned long int i = SCM_CCLO_LENGTH (ptr);
1250 unsigned long int j;
1251 for (j = 1; j != i; ++j)
1252 {
1253 SCM obj = SCM_CCLO_REF (ptr, j);
1254 if (!SCM_IMP (obj))
1255 RECURSE (obj);
1256 }
1257 ptr = SCM_CCLO_REF (ptr, 0);
1258 goto gc_mark_loop;
1259 }
1260 #endif
1261 #ifdef HAVE_ARRAYS
1262 case scm_tc7_bvect:
1263 case scm_tc7_byvect:
1264 case scm_tc7_ivect:
1265 case scm_tc7_uvect:
1266 case scm_tc7_fvect:
1267 case scm_tc7_dvect:
1268 case scm_tc7_cvect:
1269 case scm_tc7_svect:
1270 #ifdef HAVE_LONG_LONGS
1271 case scm_tc7_llvect:
1272 #endif
1273 #endif
1274 case scm_tc7_string:
1275 break;
1276
1277 case scm_tc7_substring:
1278 ptr = SCM_CDR (ptr);
1279 goto gc_mark_loop;
1280
1281 case scm_tc7_wvect:
1282 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1283 scm_weak_vectors = ptr;
1284 if (SCM_IS_WHVEC_ANY (ptr))
1285 {
1286 int x;
1287 int len;
1288 int weak_keys;
1289 int weak_values;
1290
1291 len = SCM_VECTOR_LENGTH (ptr);
1292 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1293 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1294
1295 for (x = 0; x < len; ++x)
1296 {
1297 SCM alist;
1298 alist = SCM_VELTS (ptr)[x];
1299
1300 /* mark everything on the alist except the keys or
1301 * values, according to weak_values and weak_keys. */
1302 while ( SCM_CONSP (alist)
1303 && !SCM_GCMARKP (alist)
1304 && SCM_CONSP (SCM_CAR (alist)))
1305 {
1306 SCM kvpair;
1307 SCM next_alist;
1308
1309 kvpair = SCM_CAR (alist);
1310 next_alist = SCM_CDR (alist);
1311 /*
1312 * Do not do this:
1313 * SCM_SETGCMARK (alist);
1314 * SCM_SETGCMARK (kvpair);
1315 *
1316 * It may be that either the key or value is protected by
1317 * an escaped reference to part of the spine of this alist.
1318 * If we mark the spine here, and only mark one or neither of the
1319 * key and value, they may never be properly marked.
1320 * This leads to a horrible situation in which an alist containing
1321 * freelist cells is exported.
1322 *
		   * So the spines of these alists are marked only at the
		   * very end of marking.
1324 * If somebody confuses us by constructing a weak vector
1325 * with a circular alist then we are hosed, but at least we
1326 * won't prematurely drop table entries.
1327 */
1328 if (!weak_keys)
1329 RECURSE (SCM_CAR (kvpair));
1330 if (!weak_values)
1331 RECURSE (SCM_CDR (kvpair));
1332 alist = next_alist;
1333 }
1334 if (SCM_NIMP (alist))
1335 RECURSE (alist);
1336 }
1337 }
1338 break;
1339
1340 case scm_tc7_symbol:
1341 ptr = SCM_PROP_SLOTS (ptr);
1342 goto gc_mark_loop;
1343 case scm_tcs_subrs:
1344 break;
1345 case scm_tc7_port:
1346 i = SCM_PTOBNUM (ptr);
1347 if (!(i < scm_numptob))
1348 goto def;
1349 if (SCM_PTAB_ENTRY(ptr))
1350 RECURSE (SCM_FILENAME (ptr));
1351 if (scm_ptobs[i].mark)
1352 {
1353 ptr = (scm_ptobs[i].mark) (ptr);
1354 goto gc_mark_loop;
1355 }
1356 else
1357 return;
1358 break;
1359 case scm_tc7_smob:
1360 switch (SCM_TYP16 (ptr))
1361 { /* should be faster than going through scm_smobs */
1362 case scm_tc_free_cell:
1363 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1364 case scm_tc16_big:
1365 case scm_tc16_real:
1366 case scm_tc16_complex:
1367 break;
1368 default:
1369 i = SCM_SMOBNUM (ptr);
1370 if (!(i < scm_numsmob))
1371 goto def;
1372 if (scm_smobs[i].mark)
1373 {
1374 ptr = (scm_smobs[i].mark) (ptr);
1375 goto gc_mark_loop;
1376 }
1377 else
1378 return;
1379 }
1380 break;
1381 default:
1382 def:
1383 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1384 }
1385 #undef RECURSE
1386 }
1387 #undef FUNC_NAME
1388
1389 #ifndef MARK_DEPENDENCIES
1390
1391 #undef MARK
1392 #undef FNAME
1393
1394 /* And here we define `scm_gc_mark_dependencies', by including this
1395 * same file in itself.
1396 */
1397 #define MARK scm_gc_mark_dependencies
1398 #define FNAME "scm_gc_mark_dependencies"
1399 #define MARK_DEPENDENCIES
1400 #include "gc.c"
1401 #undef MARK_DEPENDENCIES
1402 #undef MARK
1403 #undef FNAME
1404
1405
1406 /* Mark a Region Conservatively
1407 */
1408
1409 void
1410 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1411 {
1412 unsigned long m;
1413
1414 for (m = 0; m < n; ++m)
1415 {
1416 SCM obj = * (SCM *) &x[m];
1417 if (SCM_CELLP (obj))
1418 {
1419 SCM_CELLPTR ptr = SCM2PTR (obj);
1420 int i = 0;
1421 int j = scm_n_heap_segs - 1;
1422 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1423 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1424 {
1425 while (i <= j)
1426 {
1427 int seg_id;
1428 seg_id = -1;
1429 if ((i == j)
1430 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1431 seg_id = i;
1432 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1433 seg_id = j;
1434 else
1435 {
1436 int k;
1437 k = (i + j) / 2;
1438 if (k == i)
1439 break;
1440 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1441 {
1442 j = k;
1443 ++i;
1444 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1445 continue;
1446 else
1447 break;
1448 }
1449 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1450 {
1451 i = k;
1452 --j;
1453 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1454 continue;
1455 else
1456 break;
1457 }
1458 }
1459
1460 if (SCM_GC_IN_CARD_HEADERP (ptr))
1461 break;
1462
1463 if (scm_heap_table[seg_id].span == 1
1464 || SCM_DOUBLE_CELLP (obj))
1465 scm_gc_mark (obj);
1466
1467 break;
1468 }
1469 }
1470 }
1471 }
1472 }
1473
1474
1475 /* The function scm_cellp determines whether an SCM value can be regarded as a
1476 * pointer to a cell on the heap. Binary search is used in order to determine
1477 * the heap segment that contains the cell.
1478 */
1479 int
1480 scm_cellp (SCM value)
1481 {
1482 if (SCM_CELLP (value)) {
1483 scm_cell * ptr = SCM2PTR (value);
1484 unsigned int i = 0;
1485 unsigned int j = scm_n_heap_segs - 1;
1486
1487 while (i < j) {
1488 int k = (i + j) / 2;
1489 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1490 j = k;
1491 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1492 i = k + 1;
1493 }
1494 }
1495
1496 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1497 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1498 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))
1499 && !SCM_GC_IN_CARD_HEADERP (ptr)
1500 )
1501 return 1;
1502 else
1503 return 0;
1504 } else
1505 return 0;
1506 }
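
/* Bisection example (illustrative segment bounds): with three segments
 * at [0x1000,0x2000), [0x3000,0x4000) and [0x5000,0x6000), a candidate
 * pointer 0x3800 narrows i to 1, after which the final bounds, span and
 * card-header checks decide whether it really is a live cell.
 */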
1507
1508
1509 static void
1510 gc_sweep_freelist_start (scm_freelist_t *freelist)
1511 {
1512 freelist->cells = SCM_EOL;
1513 freelist->left_to_collect = freelist->cluster_size;
1514 freelist->clusters_allocated = 0;
1515 freelist->clusters = SCM_EOL;
1516 freelist->clustertail = &freelist->clusters;
1517 freelist->collected_1 = freelist->collected;
1518 freelist->collected = 0;
1519 }
1520
1521 static void
1522 gc_sweep_freelist_finish (scm_freelist_t *freelist)
1523 {
1524 int collected;
1525 *freelist->clustertail = freelist->cells;
1526 if (!SCM_NULLP (freelist->cells))
1527 {
1528 SCM c = freelist->cells;
1529 SCM_SETCAR (c, SCM_CDR (c));
1530 SCM_SETCDR (c, SCM_EOL);
1531 freelist->collected +=
1532 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1533 }
1534 scm_gc_cells_collected += freelist->collected;
1535
  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that too low a yield is compensated for by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
1543 collected = SCM_MAX (freelist->collected_1, freelist->collected);
1544 freelist->grow_heap_p = (collected < freelist->min_yield);
1545 }
1546
1547 #define NEXT_DATA_CELL(ptr, span) \
1548 do { \
1549 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1550 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1551 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1552 : nxt__); \
1553 } while (0)
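
/* NEXT_DATA_CELL example: stepping past the last data cell of a card
 * lands inside the next card's header; the macro then skips the
 * SCM_GC_CARD_N_HEADER_CELLS header cells and realigns to the span, so
 * the sweep loop below only ever visits data cells.
 */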
1554
1555 void
1556 scm_gc_sweep ()
1557 #define FUNC_NAME "scm_gc_sweep"
1558 {
1559 register SCM_CELLPTR ptr;
1560 register SCM nfreelist;
1561 register scm_freelist_t *freelist;
1562 register long m;
1563 register int span;
1564 long i;
1565 scm_sizet seg_size;
1566
1567 m = 0;
1568
1569 gc_sweep_freelist_start (&scm_master_freelist);
1570 gc_sweep_freelist_start (&scm_master_freelist2);
1571
1572 for (i = 0; i < scm_n_heap_segs; i++)
1573 {
1574 register unsigned int left_to_collect;
1575 register scm_sizet j;
1576
1577 /* Unmarked cells go onto the front of the freelist this heap
1578 segment points to. Rather than updating the real freelist
1579 pointer as we go along, we accumulate the new head in
1580 nfreelist. Then, if it turns out that the entire segment is
1581 free, we free (i.e., malloc's free) the whole segment, and
1582 simply don't assign nfreelist back into the real freelist. */
1583 freelist = scm_heap_table[i].freelist;
1584 nfreelist = freelist->cells;
1585 left_to_collect = freelist->left_to_collect;
1586 span = scm_heap_table[i].span;
1587
1588 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1589 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1590
1591 /* use only data cells in seg_size */
1592 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1593
1594 scm_gc_cells_swept += seg_size;
1595
1596 for (j = seg_size + span; j -= span; ptr += span)
1597 {
1598 SCM scmptr;
1599
1600 if (SCM_GC_IN_CARD_HEADERP (ptr))
1601 {
1602 SCM_CELLPTR nxt;
1603
	      /* cheat here: hop over the card header to the next data cell */
1605 nxt = ptr;
1606 NEXT_DATA_CELL (nxt, span);
1607 j += span;
1608
1609 ptr = nxt - span;
1610 continue;
1611 }
1612
1613 scmptr = PTR2SCM (ptr);
1614
1615 if (SCM_GCMARKP (scmptr))
1616 continue;
1617
1618 switch SCM_TYP7 (scmptr)
1619 {
1620 case scm_tcs_cons_gloc:
1621 {
1622 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1623 * struct or a gloc. See the corresponding comment in
1624 * scm_gc_mark.
1625 */
1626 scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
1627 - scm_tc3_cons_gloc);
1628 /* access as struct */
1629 scm_bits_t * vtable_data = (scm_bits_t *) word0;
1630 if (vtable_data[scm_vtable_index_vcell] == 0)
1631 {
1632 /* Structs need to be freed in a special order.
1633 * This is handled by GC C hooks in struct.c.
1634 */
1635 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1636 scm_structs_to_free = scmptr;
1637 continue;
1638 }
1639 /* fall through so that scmptr gets collected */
1640 }
1641 break;
1642 case scm_tcs_cons_imcar:
1643 case scm_tcs_cons_nimcar:
1644 case scm_tcs_closures:
1645 case scm_tc7_pws:
1646 break;
1647 case scm_tc7_wvect:
1648 m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
1649 scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
1650 break;
1651 case scm_tc7_vector:
1652 {
1653 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1654 if (length > 0)
1655 {
1656 m += length * sizeof (scm_bits_t);
1657 scm_must_free (SCM_VECTOR_BASE (scmptr));
1658 }
1659 break;
1660 }
1661 #ifdef CCLO
1662 case scm_tc7_cclo:
1663 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
1664 scm_must_free (SCM_CCLO_BASE (scmptr));
1665 break;
1666 #endif
1667 #ifdef HAVE_ARRAYS
1668 case scm_tc7_bvect:
1669 {
1670 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
1671 if (length > 0)
1672 {
1673 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1674 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1675 }
1676 }
1677 break;
1678 case scm_tc7_byvect:
1679 case scm_tc7_ivect:
1680 case scm_tc7_uvect:
1681 case scm_tc7_svect:
1682 #ifdef HAVE_LONG_LONGS
1683 case scm_tc7_llvect:
1684 #endif
1685 case scm_tc7_fvect:
1686 case scm_tc7_dvect:
1687 case scm_tc7_cvect:
1688 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
1689 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1690 break;
1691 #endif
1692 case scm_tc7_substring:
1693 break;
1694 case scm_tc7_string:
1695 m += SCM_STRING_LENGTH (scmptr) + 1;
1696 scm_must_free (SCM_STRING_CHARS (scmptr));
1697 break;
1698 case scm_tc7_symbol:
1699 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
1700 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
1701 break;
1702 case scm_tcs_subrs:
1703 /* the various "subrs" (primitives) are never freed */
1704 continue;
1705 case scm_tc7_port:
1706 if SCM_OPENP (scmptr)
1707 {
1708 int k = SCM_PTOBNUM (scmptr);
1709 if (!(k < scm_numptob))
1710 goto sweeperr;
1711 /* Keep "revealed" ports alive. */
1712 if (scm_revealed_count (scmptr) > 0)
1713 continue;
	      /* Yes, I really do mean scm_ptobs[k].free rather than
		 scm_ptobs[k].close: .close is for an explicit close-port
		 by the user.  */
1717 m += (scm_ptobs[k].free) (scmptr);
1718 SCM_SETSTREAM (scmptr, 0);
1719 scm_remove_from_port_table (scmptr);
1720 scm_gc_ports_collected++;
1721 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1722 }
1723 break;
1724 case scm_tc7_smob:
1725 switch SCM_TYP16 (scmptr)
1726 {
1727 case scm_tc_free_cell:
1728 case scm_tc16_real:
1729 break;
1730 #ifdef SCM_BIGDIG
1731 case scm_tc16_big:
1732 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1733 scm_must_free (SCM_BDIGITS (scmptr));
1734 break;
1735 #endif /* def SCM_BIGDIG */
1736 case scm_tc16_complex:
1737 m += sizeof (scm_complex_t);
1738 scm_must_free (SCM_COMPLEX_MEM (scmptr));
1739 break;
1740 default:
1741 {
1742 int k;
1743 k = SCM_SMOBNUM (scmptr);
1744 if (!(k < scm_numsmob))
1745 goto sweeperr;
1746 m += (scm_smobs[k].free) (scmptr);
1747 break;
1748 }
1749 }
1750 break;
1751 default:
1752 sweeperr:
1753 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1754 }
1755
1756 if (!--left_to_collect)
1757 {
1758 SCM_SETCAR (scmptr, nfreelist);
1759 *freelist->clustertail = scmptr;
1760 freelist->clustertail = SCM_CDRLOC (scmptr);
1761
1762 nfreelist = SCM_EOL;
1763 freelist->collected += span * freelist->cluster_size;
1764 left_to_collect = freelist->cluster_size;
1765 }
1766 else
1767 {
1768 /* Stick the new cell on the front of nfreelist. It's
1769 critical that we mark this cell as freed; otherwise, the
1770 conservative collector might trace it as some other type
1771 of object. */
1772 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1773 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
1774 nfreelist = scmptr;
1775 }
1776 }
1777
1778 #ifdef GC_FREE_SEGMENTS
	  if (n == seg_size)	/* FIXME: stale -- `n' is not computed in
				   this version of the sweep, so this
				   block won't compile if
				   GC_FREE_SEGMENTS is defined.  */
1780 {
1781 register long j;
1782
1783 freelist->heap_size -= seg_size;
1784 free ((char *) scm_heap_table[i].bounds[0]);
1785 scm_heap_table[i].bounds[0] = 0;
1786 for (j = i + 1; j < scm_n_heap_segs; j++)
1787 scm_heap_table[j - 1] = scm_heap_table[j];
1788 scm_n_heap_segs -= 1;
1789 i--; /* We need to scan the segment just moved. */
1790 }
1791 else
1792 #endif /* ifdef GC_FREE_SEGMENTS */
1793 {
1794 /* Update the real freelist pointer to point to the head of
1795 the list of free cells we've built for this segment. */
1796 freelist->cells = nfreelist;
1797 freelist->left_to_collect = left_to_collect;
1798 }
1799
1800 #ifdef GUILE_DEBUG_FREELIST
1801 scm_map_free_list ();
1802 #endif
1803 }
1804
1805 gc_sweep_freelist_finish (&scm_master_freelist);
1806 gc_sweep_freelist_finish (&scm_master_freelist2);
1807
  /* When we move to POSIX threads, private freelists should probably
     be GC-protected instead. */
1810 scm_freelist = SCM_EOL;
1811 scm_freelist2 = SCM_EOL;
1812
1813 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1814 scm_gc_yield -= scm_cells_allocated;
1815 scm_mallocated -= m;
1816 scm_gc_malloc_collected = m;
1817 }
1818 #undef FUNC_NAME
1819
1820
1821 \f
1822 /* {Front end to malloc}
1823 *
1824 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1825 * scm_done_free
1826 *
 * These functions provide services comparable to malloc, realloc, and
 * free.  They are for allocating the malloced parts of Scheme objects.
 * The primary purpose of the front end is to impose calls to gc.  */
1830
1831
1832 /* scm_must_malloc
1833 * Return newly malloced storage or throw an error.
1834 *
1835 * The parameter WHAT is a string for error reporting.
 * If this allocation would exceed the threshold scm_mtrigger,
 * or if the first call to malloc fails,
1838 * garbage collect -- on the presumption that some objects
1839 * using malloced storage may be collected.
1840 *
1841 * The limit scm_mtrigger may be raised by this allocation.
1842 */
1843 void *
1844 scm_must_malloc (scm_sizet size, const char *what)
1845 {
1846 void *ptr;
1847 unsigned long nm = scm_mallocated + size;
1848
1849 if (nm <= scm_mtrigger)
1850 {
1851 SCM_SYSCALL (ptr = malloc (size));
1852 if (NULL != ptr)
1853 {
1854 scm_mallocated = nm;
1855 #ifdef GUILE_DEBUG_MALLOC
1856 scm_malloc_register (ptr, what);
1857 #endif
1858 return ptr;
1859 }
1860 }
1861
1862 scm_igc (what);
1863
1864 nm = scm_mallocated + size;
1865 SCM_SYSCALL (ptr = malloc (size));
1866 if (NULL != ptr)
1867 {
1868 scm_mallocated = nm;
1869 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1870 if (nm > scm_mtrigger)
1871 scm_mtrigger = nm + nm / 2;
1872 else
1873 scm_mtrigger += scm_mtrigger / 2;
1874 }
1875 #ifdef GUILE_DEBUG_MALLOC
1876 scm_malloc_register (ptr, what);
1877 #endif
1878
1879 return ptr;
1880 }
1881
1882 scm_memory_error (what);
1883 }
1884
1885
1886 /* scm_must_realloc
1887 * is similar to scm_must_malloc.
1888 */
1889 void *
1890 scm_must_realloc (void *where,
1891 scm_sizet old_size,
1892 scm_sizet size,
1893 const char *what)
1894 {
1895 void *ptr;
1896 scm_sizet nm = scm_mallocated + size - old_size;
1897
1898 if (nm <= scm_mtrigger)
1899 {
1900 SCM_SYSCALL (ptr = realloc (where, size));
1901 if (NULL != ptr)
1902 {
1903 scm_mallocated = nm;
1904 #ifdef GUILE_DEBUG_MALLOC
1905 scm_malloc_reregister (where, ptr, what);
1906 #endif
1907 return ptr;
1908 }
1909 }
1910
1911 scm_igc (what);
1912
1913 nm = scm_mallocated + size - old_size;
1914 SCM_SYSCALL (ptr = realloc (where, size));
1915 if (NULL != ptr)
1916 {
1917 scm_mallocated = nm;
1918 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1919 if (nm > scm_mtrigger)
1920 scm_mtrigger = nm + nm / 2;
1921 else
1922 scm_mtrigger += scm_mtrigger / 2;
1923 }
1924 #ifdef GUILE_DEBUG_MALLOC
1925 scm_malloc_reregister (where, ptr, what);
1926 #endif
1927 return ptr;
1928 }
1929
1930 scm_memory_error (what);
1931 }
1932
1933
1934 void
1935 scm_must_free (void *obj)
1936 #define FUNC_NAME "scm_must_free"
1937 {
1938 #ifdef GUILE_DEBUG_MALLOC
1939 scm_malloc_unregister (obj);
1940 #endif
1941 if (obj)
1942 free (obj);
1943 else
1944 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
1945 }
1946 #undef FUNC_NAME
1947
1948
/* Announce that there has been some malloc done that will be freed
 * during gc.  A typical use is for a smob that uses some malloced
 * memory but cannot get it from scm_must_malloc (for whatever
 * reason).  When a new object of this smob is created you call
 * scm_done_malloc with the size of the object.  When your smob free
 * function is called, be sure to include this size in the return
 * value.  (A sketch of this contract appears after scm_done_free
 * below.)
 *
 * If you can't actually free the memory in the smob free function,
 * for whatever reason (like reference counting), you still can (and
 * should) report the amount of memory freed when you actually free it.
 * Do it by calling scm_done_malloc with the _negated_ size.  Clever,
 * eh?  Or even better, call scm_done_free. */
1962
1963 void
1964 scm_done_malloc (long size)
1965 {
1966 scm_mallocated += size;
1967
1968 if (scm_mallocated > scm_mtrigger)
1969 {
1970 scm_igc ("foreign mallocs");
1971 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1972 {
1973 if (scm_mallocated > scm_mtrigger)
1974 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1975 else
1976 scm_mtrigger += scm_mtrigger / 2;
1977 }
1978 }
1979 }
1980
1981 void
1982 scm_done_free (long size)
1983 {
1984 scm_mallocated -= size;
1985 }
1986
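/* Accounting sketch (illustrative; `image' and the some_library_*
 * calls are hypothetical): a smob whose data comes from a foreign
 * allocator reports the allocation when the object is created,
 *
 *   image->pixels = some_library_alloc (size);
 *   scm_done_malloc ((long) size);
 *
 * and reports the release when the data is actually freed:
 *
 *   some_library_free (image->pixels);
 *   scm_done_free ((long) size);
 */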
1987
1988 \f
1989 /* {Heap Segments}
1990 *
1991 * Each heap segment is an array of objects of a particular size.
1992 * Every segment has an associated (possibly shared) freelist.
1993 * A table of segment records is kept that records the upper and
1994 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
1996 * into valid segments at reasonable offsets). */
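
/* In outline, the conservative test is a bounds check against this
 * table (a simplified sketch; the real scan also has to validate the
 * pointer's position within its segment and card):
 *
 *   static int
 *   points_into_heap (SCM_CELLPTR p)
 *   {
 *     int i;
 *     for (i = 0; i < scm_n_heap_segs; i++)
 *       if (SCM_PTR_LE (scm_heap_table[i].bounds[0], p)
 *           && !SCM_PTR_LE (scm_heap_table[i].bounds[1], p))
 *         return 1;
 *     return 0;
 *   }
 */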
1997
1998 /* scm_expmem
1999 * is true if the first segment was smaller than INIT_HEAP_SEG.
2000 * If scm_expmem is set to one, subsequent segment allocations will
2001 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2002 */
2003 int scm_expmem = 0;
2004
2005 scm_sizet scm_max_segment_size;
2006
2007 /* scm_heap_org
2008 * is the lowest base address of any heap segment.
2009 */
2010 SCM_CELLPTR scm_heap_org;
2011
2012 scm_heap_seg_data_t * scm_heap_table = 0;
2013 static unsigned int heap_segment_table_size = 0;
2014 int scm_n_heap_segs = 0;
2015
2016 /* init_heap_seg
 * initializes a new heap segment and returns its size in bytes (or 0 if
 * the given segment origin was NULL).
2018 *
2019 * The segment origin and segment size in bytes are input parameters.
2020 * The freelist is both input and output.
2021 *
2022 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record and that the markbit space was reserved
2024 * for all the cards in this segment.
2025 */
2026
2027 #define INIT_CARD(card, span) \
2028 do { \
2029 SCM_GC_CARD_BVEC (card) = get_bvec (); \
2030 if ((span) == 2) \
2031 SCM_GC_SET_CARD_DOUBLECELL (card); \
2032 } while (0)
2033
2034 static scm_sizet
2035 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
2036 {
2037 register SCM_CELLPTR ptr;
2038 SCM_CELLPTR seg_end;
2039 int new_seg_index;
2040 int n_new_cells;
2041 int span = freelist->span;
2042
2043 if (seg_org == NULL)
2044 return 0;
2045
2046 /* Align the begin ptr up.
2047 */
2048 ptr = SCM_GC_CARD_UP (seg_org);
2049
2050 /* Compute the ceiling on valid object pointers w/in this segment.
2051 */
2052 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
2053
  /* Find the right place and insert the segment record.
   */
2057 for (new_seg_index = 0;
2058 ( (new_seg_index < scm_n_heap_segs)
2059 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2060 new_seg_index++)
2061 ;
2062
2063 {
2064 int i;
2065 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2066 scm_heap_table[i] = scm_heap_table[i - 1];
2067 }
2068
2069 ++scm_n_heap_segs;
2070
2071 scm_heap_table[new_seg_index].span = span;
2072 scm_heap_table[new_seg_index].freelist = freelist;
2073 scm_heap_table[new_seg_index].bounds[0] = ptr;
2074 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2075
  /* Compute the number of new cells in this segment. */
2077 n_new_cells = seg_end - ptr;
2078
2079 freelist->heap_size += n_new_cells;
2080
2081 /* Partition objects in this segment into clusters */
2082 {
2083 SCM clusters;
2084 SCM *clusterp = &clusters;
2085
2086 NEXT_DATA_CELL (ptr, span);
2087 while (ptr < seg_end)
2088 {
2089 scm_cell *nxt = ptr;
2090 scm_cell *prv = NULL;
2091 scm_cell *last_card = NULL;
2092 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
        NEXT_DATA_CELL (nxt, span);
2094
2095 /* Allocate cluster spine
2096 */
2097 *clusterp = PTR2SCM (ptr);
2098 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
2099 clusterp = SCM_CDRLOC (*clusterp);
2100 ptr = nxt;
2101
2102 while (n_data_cells--)
2103 {
2104 scm_cell *card = SCM_GC_CELL_CARD (ptr);
2105 SCM scmptr = PTR2SCM (ptr);
2106 nxt = ptr;
2107 NEXT_DATA_CELL (nxt, span);
2108 prv = ptr;
2109
2110 if (card != last_card)
2111 {
2112 INIT_CARD (card, span);
2113 last_card = card;
2114 }
2115
2116 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2117 SCM_SETCDR (scmptr, PTR2SCM (nxt));
2118
2119 ptr = nxt;
2120 }
2121
2122 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
2123 }
2124
2125 /* sanity check */
2126 {
2127 scm_cell *ref = seg_end;
2128 NEXT_DATA_CELL (ref, span);
2129 if (ref != ptr)
2130 /* [cmm] looks like the segment size doesn't divide cleanly by
2131 cluster size. bad cmm! */
2132 abort();
2133 }
2134
2135 /* Patch up the last cluster pointer in the segment
2136 * to join it to the input freelist.
2137 */
2138 *clusterp = freelist->clusters;
2139 freelist->clusters = clusters;
2140 }
2141
2142 #ifdef DEBUGINFO
2143 fprintf (stderr, "H");
2144 #endif
2145 return size;
2146 }
2147
2148 static scm_sizet
2149 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2150 {
2151 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2152
2153 return
2154 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2155 + ALIGNMENT_SLACK (freelist);
2156 }
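
/* For example (with made-up numbers): if CLUSTER_SIZE_IN_BYTES yields
 * 4096 for this freelist, a request of 10000 bytes is rounded up to
 * ceil(10000 / 4096) = 3 clusters, i.e. 12288 bytes, plus the
 * freelist's alignment slack. */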
2157
2158 static void
2159 alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
2160 #define FUNC_NAME "alloc_some_heap"
2161 {
2162 SCM_CELLPTR ptr;
2163 long len;
2164
2165 if (scm_gc_heap_lock)
2166 {
2167 /* Critical code sections (such as the garbage collector) aren't
2168 * supposed to add heap segments.
2169 */
      fprintf (stderr, "alloc_some_heap: Cannot extend locked heap.\n");
2171 abort ();
2172 }
2173
2174 if (scm_n_heap_segs == heap_segment_table_size)
2175 {
2176 /* We have to expand the heap segment table to have room for the new
2177 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2178 * init_heap_seg only if the allocation of the segment itself succeeds.
2179 */
2180 unsigned int new_table_size = scm_n_heap_segs + 1;
2181 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2182 scm_heap_seg_data_t * new_heap_table;
2183
2184 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2185 realloc ((char *)scm_heap_table, size)));
2186 if (!new_heap_table)
2187 {
2188 if (error_policy == abort_on_error)
2189 {
2190 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2191 abort ();
2192 }
2193 else
2194 {
2195 return;
2196 }
2197 }
2198 else
2199 {
2200 scm_heap_table = new_heap_table;
2201 heap_segment_table_size = new_table_size;
2202 }
2203 }
2204
2205 /* Pick a size for the new heap segment.
2206 * The rule for picking the size of a segment is explained in
2207 * gc.h
2208 */
2209 {
    /* Ensure that the new segment is predicted to be large enough.
2211 *
2212 * New yield should at least equal GC fraction of new heap size, i.e.
2213 *
2214 * y + dh > f * (h + dh)
2215 *
2216 * y : yield
2217 * f : min yield fraction
2218 * h : heap size
2219 * dh : size of new heap segment
2220 *
     * This gives dh > (f * h - y) / (1 - f).
     *
     * Below, f is expressed as a percentage and y is scm_gc_yield, so
     * numerator and denominator are scaled by 100:
     * dh > (f * h - 100 * y) / (100 - f). The code divides by (99 - f)
     * instead, which errs slightly towards a larger segment.
     */
2223 int f = freelist->min_yield_fraction;
2224 long h = SCM_HEAP_SIZE;
2225 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2226 len = SCM_EXPHEAP (freelist->heap_size);
2227 #ifdef DEBUGINFO
    fprintf (stderr, "(%ld < %ld)", len, min_cells);
2229 #endif
2230 if (len < min_cells)
2231 len = min_cells + freelist->cluster_size;
2232 len *= sizeof (scm_cell);
2233 /* force new sampling */
2234 freelist->collected = LONG_MAX;
2235 }
2236
2237 if (len > scm_max_segment_size)
2238 len = scm_max_segment_size;
2239
2240 {
2241 scm_sizet smallest;
2242
2243 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2244
2245 if (len < smallest)
2246 len = smallest;
2247
2248 /* Allocate with decaying ambition. */
2249 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2250 && (len >= smallest))
2251 {
2252 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
2253 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2254 if (ptr)
2255 {
2256 init_heap_seg (ptr, rounded_len, freelist);
2257 return;
2258 }
2259 len /= 2;
2260 }
2261 }
2262
2263 if (error_policy == abort_on_error)
2264 {
2265 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2266 abort ();
2267 }
2268 }
2269 #undef FUNC_NAME
2270
2271
2272 SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
2273 (SCM name),
2274 "")
2275 #define FUNC_NAME s_scm_unhash_name
2276 {
2277 int x;
2278 int bound;
2279 SCM_VALIDATE_SYMBOL (1,name);
2280 SCM_DEFER_INTS;
2281 bound = scm_n_heap_segs;
2282 for (x = 0; x < bound; ++x)
2283 {
2284 SCM_CELLPTR p;
2285 SCM_CELLPTR pbound;
2286 p = scm_heap_table[x].bounds[0];
2287 pbound = scm_heap_table[x].bounds[1];
2288 while (p < pbound)
2289 {
2290 SCM cell = PTR2SCM (p);
2291 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
2292 {
2293 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2294 * struct cell. See the corresponding comment in scm_gc_mark.
2295 */
2296 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2297 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2298 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
2299 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
2300 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
2301 {
2302 SCM_SET_CELL_OBJECT_0 (cell, name);
2303 }
2304 }
2305 ++p;
2306 }
2307 }
2308 SCM_ALLOW_INTS;
2309 return name;
2310 }
2311 #undef FUNC_NAME
2312
2313
2314 \f
2315 /* {GC Protection Helper Functions}
2316 */
2317
2318
2319 /*
2320 * If within a function you need to protect one or more scheme objects from
2321 * garbage collection, pass them as parameters to one of the
2322 * scm_remember_upto_here* functions below. These functions don't do
2323 * anything, but since the compiler does not know that they are actually
2324 * no-ops, it will generate code that calls these functions with the given
2325 * parameters. Therefore, you can be sure that the compiler will keep those
2326 * scheme values alive (on the stack or in a register) up to the point where
2327 * scm_remember_upto_here* is called. In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the scheme object's existence.
2330 *
 * Example: We want to make sure that the string object str does not get
2332 * garbage collected during the execution of 'some_function', because
2333 * otherwise the characters belonging to str would be freed and
2334 * 'some_function' might access freed memory. To make sure that the compiler
2335 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc, we add the call to scm_remember_upto_here_1 _after_ the
 * call to 'some_function'. Note that this would not be necessary if str were
2338 * used anyway after the call to 'some_function'.
2339 * char *chars = SCM_STRING_CHARS (str);
2340 * some_function (chars);
2341 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2342 */
2343
2344 void
2345 scm_remember_upto_here_1 (SCM obj)
2346 {
2347 /* Empty. Protects a single object from garbage collection. */
2348 }
2349
2350 void
2351 scm_remember_upto_here_2 (SCM obj1, SCM obj2)
2352 {
2353 /* Empty. Protects two objects from garbage collection. */
2354 }
2355
2356 void
2357 scm_remember_upto_here (SCM obj, ...)
2358 {
2359 /* Empty. Protects any number of objects from garbage collection. */
2360 }
2361
2362
2363 #if (SCM_DEBUG_DEPRECATED == 0)
2364
2365 void
2366 scm_remember (SCM *ptr)
2367 {
2368 /* empty */
2369 }
2370
2371 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2372
/*
  These crazy functions prevent garbage collection of the arguments
  following the first one: because the caller uses them in the call,
  they stay live (visible to the conservative stack-scanning GC)
  until the call is made, typically on the last line of a code block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */
2381 SCM
2382 scm_return_first (SCM elt, ...)
2383 {
2384 return elt;
2385 }
2386
2387 int
2388 scm_return_first_int (int i, ...)
2389 {
2390 return i;
2391 }
2392
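/* Usage sketch (illustrative; do_something is a hypothetical helper):
 *
 *   char *chars = SCM_STRING_CHARS (str);
 *   SCM result = do_something (chars);
 *   return scm_return_first (result, str);
 *
 * Here str stays visible to the conservative gc until after
 * do_something has finished with the string's characters.
 */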
2393
2394 SCM
2395 scm_permanent_object (SCM obj)
2396 {
2397 SCM_REDEFER_INTS;
2398 scm_permobjs = scm_cons (obj, scm_permobjs);
2399 SCM_REALLOW_INTS;
2400 return obj;
2401 }
2402
2403
2404 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2405 other references are dropped, until the object is unprotected by calling
2406 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2407 i. e. it is possible to protect the same object several times, but it is
2408 necessary to unprotect the object the same number of times to actually get
2409 the object unprotected. It is an error to unprotect an object more often
2410 than it has been protected before. The function scm_protect_object returns
2411 OBJ.
2412 */
2413
2414 /* Implementation note: For every object X, there is a counter which
2415 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2416 */
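
/* Usage sketch (illustrative; `callback' is a hypothetical global):
 *
 *   static SCM callback = SCM_BOOL_F;
 *
 *   void
 *   set_callback (SCM proc)
 *   {
 *     if (SCM_NFALSEP (callback))
 *       scm_unprotect_object (callback);
 *     callback = scm_protect_object (proc);
 *   }
 *
 * A global like this is invisible to the conservative stack scan, so
 * the protect table is the only thing keeping proc alive.
 */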
2417
2418 SCM
2419 scm_protect_object (SCM obj)
2420 {
2421 SCM handle;
2422
2423 /* This critical section barrier will be replaced by a mutex. */
2424 SCM_REDEFER_INTS;
2425
2426 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2427 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
2428
2429 SCM_REALLOW_INTS;
2430
2431 return obj;
2432 }
2433
2434
2435 /* Remove any protection for OBJ established by a prior call to
2436 scm_protect_object. This function returns OBJ.
2437
2438 See scm_protect_object for more information. */
2439 SCM
2440 scm_unprotect_object (SCM obj)
2441 {
2442 SCM handle;
2443
2444 /* This critical section barrier will be replaced by a mutex. */
2445 SCM_REDEFER_INTS;
2446
2447 handle = scm_hashq_get_handle (scm_protects, obj);
2448
2449 if (SCM_IMP (handle))
2450 {
2451 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2452 abort ();
2453 }
2454 else
2455 {
2456 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2457 if (count == 0)
2458 scm_hashq_remove_x (scm_protects, obj);
2459 else
2460 SCM_SETCDR (handle, SCM_MAKINUM (count));
2461 }
2462
2463 SCM_REALLOW_INTS;
2464
2465 return obj;
2466 }
2467
2468 int terminating;
2469
2470 /* called on process termination. */
2471 #ifdef HAVE_ATEXIT
2472 static void
2473 cleanup (void)
2474 #else
2475 #ifdef HAVE_ON_EXIT
2476 extern int on_exit (void (*procp) (), int arg);
2477
2478 static void
2479 cleanup (int status, void *arg)
2480 #else
#error Do not know how to set up a cleanup handler on your system.
2482 #endif
2483 #endif
2484 {
2485 terminating = 1;
2486 scm_flush_all_ports ();
2487 }
2488
2489 \f
2490 static int
2491 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
2492 {
2493 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2494
2495 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2496 rounded_size,
2497 freelist))
2498 {
2499 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2500 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2501 rounded_size,
2502 freelist))
2503 return 1;
2504 }
2505 else
2506 scm_expmem = 1;
2507
2508 if (freelist->min_yield_fraction)
2509 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2510 / 100);
2511 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2512
2513 return 0;
2514 }
2515
2516 \f
2517 static void
2518 init_freelist (scm_freelist_t *freelist,
2519 int span,
2520 int cluster_size,
2521 int min_yield)
2522 {
2523 freelist->clusters = SCM_EOL;
2524 freelist->cluster_size = cluster_size + 1;
2525 freelist->left_to_collect = 0;
2526 freelist->clusters_allocated = 0;
2527 freelist->min_yield = 0;
2528 freelist->min_yield_fraction = min_yield;
2529 freelist->span = span;
2530 freelist->collected = 0;
2531 freelist->collected_1 = 0;
2532 freelist->heap_size = 0;
2533 }
2534
2535
2536 /* Get an integer from an environment variable. */
2537 static int
2538 scm_i_getenv_int (const char *var, int def)
2539 {
2540 char *end, *val = getenv (var);
2541 long res;
2542 if (!val)
2543 return def;
2544 res = strtol (val, &end, 10);
2545 if (end == val)
2546 return def;
2547 return res;
2548 }
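
/* For example (values made up): with GUILE_MIN_YIELD_1 set to "50" in
 * the environment,
 *
 *   scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1)
 *
 * returns 50; if the variable is unset or does not parse as a number,
 * the default is returned instead. */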
2549
2550
2551 int
2552 scm_init_storage ()
2553 {
2554 scm_sizet gc_trigger_1;
2555 scm_sizet gc_trigger_2;
2556 scm_sizet init_heap_size_1;
2557 scm_sizet init_heap_size_2;
2558 scm_sizet j;
2559
2560 j = SCM_NUM_PROTECTS;
2561 while (j)
2562 scm_sys_protects[--j] = SCM_BOOL_F;
2563 scm_block_gc = 1;
2564
2565 scm_freelist = SCM_EOL;
2566 scm_freelist2 = SCM_EOL;
2567 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2568 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2569 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2570 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2571 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
2572
2573 scm_expmem = 0;
2574
2575 j = SCM_HEAP_SEG_SIZE;
2576 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2577 scm_heap_table = ((scm_heap_seg_data_t *)
2578 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
2579 heap_segment_table_size = 2;
2580
2581 mark_space_ptr = &mark_space_head;
2582
2583 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2584 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
2585 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2586 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2587 return 1;
2588
2589 /* scm_hplims[0] can change. do not remove scm_heap_org */
2590 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2591
2592 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2593 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2594 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2595 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2596 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2597
  /* Initialize the port table. */
2599 scm_port_table = (scm_port **)
2600 malloc (sizeof (scm_port *) * scm_port_table_room);
2601 if (!scm_port_table)
2602 return 1;
2603
2604 #ifdef HAVE_ATEXIT
2605 atexit (cleanup);
2606 #else
2607 #ifdef HAVE_ON_EXIT
2608 on_exit (cleanup, 0);
2609 #endif
2610 #endif
2611
2612 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2613 SCM_SETCDR (scm_undefineds, scm_undefineds);
2614
2615 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2616 scm_nullstr = scm_makstr (0L, 0);
2617 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
2618
2619 #define DEFAULT_SYMHASH_SIZE 277
2620 scm_symhash = scm_make_vector (SCM_MAKINUM (DEFAULT_SYMHASH_SIZE), SCM_EOL);
2621 scm_symhash_vars = scm_make_vector (SCM_MAKINUM (DEFAULT_SYMHASH_SIZE), SCM_EOL);
2622
2623 scm_stand_in_procs = SCM_EOL;
2624 scm_permobjs = SCM_EOL;
2625 scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
2626
2627 return 0;
2628 }
2629
2630 \f
2631
2632 SCM scm_after_gc_hook;
2633
2634 #if (SCM_DEBUG_DEPRECATED == 0)
2635 static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2636 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2637 static SCM gc_async;
2638
2639
2640 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2641 * is run after the gc, as soon as the asynchronous events are handled by the
2642 * evaluator.
2643 */
2644 static SCM
2645 gc_async_thunk (void)
2646 {
2647 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2648
2649 #if (SCM_DEBUG_DEPRECATED == 0)
2650
2651 /* The following code will be removed in Guile 1.5. */
2652 if (SCM_NFALSEP (scm_gc_vcell))
2653 {
2654 SCM proc = SCM_CDR (scm_gc_vcell);
2655
2656 if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
2657 scm_apply (proc, SCM_EOL, SCM_EOL);
2658 }
2659
2660 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2661
2662 return SCM_UNSPECIFIED;
2663 }
2664
2665
2666 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2667 * the garbage collection. The only purpose of this function is to mark the
2668 * gc_async (which will eventually lead to the execution of the
2669 * gc_async_thunk).
2670 */
2671 static void *
2672 mark_gc_async (void * hook_data, void *func_data, void *data)
2673 {
2674 scm_system_async_mark (gc_async);
2675 return NULL;
2676 }
2677
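/* Other C code can attach to these hooks in the same way (a sketch;
 * my_gc_monitor is hypothetical):
 *
 *   static void *
 *   my_gc_monitor (void *hook_data, void *func_data, void *data)
 *   {
 *     fprintf (stderr, "gc finished\n");
 *     return NULL;
 *   }
 *
 *   scm_c_hook_add (&scm_after_gc_c_hook, my_gc_monitor, NULL, 0);
 */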
2678
2679 void
2680 scm_init_gc ()
2681 {
2682 SCM after_gc_thunk;
2683
2684 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
2685
2686 #if (SCM_DEBUG_DEPRECATED == 0)
2687 scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
2688 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2689 after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
2690 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
2691
2692 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2693
2694 #ifndef SCM_MAGIC_SNARFER
2695 #include "libguile/gc.x"
2696 #endif
2697 }
2698
2699 #endif /*MARK_DEPENDENCIES*/
2700
2701 /*
2702 Local Variables:
2703 c-file-style: "gnu"
2704 End:
2705 */