1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 /* SECTION: This code is compiled once.
48 */
49
50 #ifndef MARK_DEPENDENCIES
51
52 \f
53 #include <stdio.h>
54 #include <errno.h>
55 #include <string.h>
56
57 #include "libguile/_scm.h"
58 #include "libguile/eval.h"
59 #include "libguile/stime.h"
60 #include "libguile/stackchk.h"
61 #include "libguile/struct.h"
62 #include "libguile/smob.h"
63 #include "libguile/unif.h"
64 #include "libguile/async.h"
65 #include "libguile/ports.h"
66 #include "libguile/root.h"
67 #include "libguile/strings.h"
68 #include "libguile/vectors.h"
69 #include "libguile/weaks.h"
70 #include "libguile/hashtab.h"
71 #include "libguile/tags.h"
72
73 #include "libguile/validate.h"
74 #include "libguile/gc.h"
75
76 #ifdef GUILE_DEBUG_MALLOC
77 #include "libguile/debug-malloc.h"
78 #endif
79
80 #ifdef HAVE_MALLOC_H
81 #include <malloc.h>
82 #endif
83
84 #ifdef HAVE_UNISTD_H
85 #include <unistd.h>
86 #endif
87
88 #ifdef __STDC__
89 #include <stdarg.h>
90 #define var_start(x, y) va_start(x, y)
91 #else
92 #include <varargs.h>
93 #define var_start(x, y) va_start(x)
94 #endif
95
96 \f
97
98 unsigned int scm_gc_running_p = 0;
99
100 \f
101
102 #if (SCM_DEBUG_CELL_ACCESSES == 1)
103
104 scm_bits_t scm_tc16_allocated;
105
106 /* Set this to != 0 if every cell that is accessed shall be checked:
107 */
108 unsigned int scm_debug_cell_accesses_p = 1;
109
110
111 /* Assert that the given object is a valid reference to a valid cell.  This
112  * test determines whether the object is a cell pointer, whether this
113  * pointer actually points into a heap segment, and whether the cell
114  * pointed to is not a free cell.
115  */
116 void
117 scm_assert_cell_valid (SCM cell)
118 {
119 static unsigned int already_running = 0;
120
121 if (scm_debug_cell_accesses_p && !already_running)
122 {
123 already_running = 1; /* set to avoid recursion */
124
125 if (!scm_cellp (cell))
126 {
127 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
128 abort ();
129 }
130 else if (!scm_gc_running_p)
131 {
132 /* Dirk::FIXME:: During garbage collection, references to free cells
133    occur.  This is all right during conservative marking, but should
134    not happen otherwise (I think).  The case of free cells accessed
135    during conservative marking is handled in function
136    scm_mark_locations.  However, accesses to free cells still occur
137    during gc.  I don't understand why this happens.  If it is a bug
138    and gets fixed, the following test should also work while gc is
139    running.
140  */
141 if (SCM_FREE_CELL_P (cell))
142 {
143 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
144 abort ();
145 }
146 }
147 already_running = 0; /* re-enable */
148 }
149 }
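/* A minimal usage sketch (not compiled in; `example_check' and its
 * argument are hypothetical): checking can be suspended around code that
 * legitimately touches free cells, and a single object can be validated
 * explicitly with scm_assert_cell_valid.
 */
#if 0
static void
example_check (SCM my_cell)
{
  scm_debug_cell_accesses_p = 0;    /* suspend per-access checking */
  /* ... code that may legitimately touch free cells ... */
  scm_debug_cell_accesses_p = 1;    /* resume checking */
  scm_assert_cell_valid (my_cell);  /* aborts if my_cell is not a live cell */
}
#endif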
150
151
152 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
153 (SCM flag),
154 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
155 "If @var{flag} is @code{#t}, cell access checking is enabled.\n"
156 "This procedure only exists when the compile-time flag\n"
157 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
158 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
159 {
160 if (SCM_FALSEP (flag)) {
161 scm_debug_cell_accesses_p = 0;
162 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
163 scm_debug_cell_accesses_p = 1;
164 } else {
165 SCM_WRONG_TYPE_ARG (1, flag);
166 }
167 return SCM_UNSPECIFIED;
168 }
169 #undef FUNC_NAME
170
171 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
172
173 \f
174
175 /* {heap tuning parameters}
176 *
177 * These are parameters for controlling memory allocation. The heap
178 * is the area out of which scm_cons, and object headers are allocated.
179 *
180 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
181 * 64 bit machine. The units of the _SIZE parameters are bytes.
182 * Cons pairs and object headers occupy one heap cell.
183 *
184 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
185 * allocated initially the heap will grow by half its current size
186 * each subsequent time more heap is needed.
187 *
188 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
189 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
190 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
191 * is in scm_init_storage() and alloc_some_heap() in sys.c
192 *
193 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
194 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
195 *
196 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
197 * is needed.
198 *
199 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
200 * trigger a GC.
201 *
202 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
203 * reclaimed by a GC triggered by must_malloc. If less than this is
204 * reclaimed, the trigger threshold is raised. [I don't know what a
205 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
206 * work around an oscillation that caused almost constant GC.]
207 */
208
209 /*
210 * Heap size 45000 and 40% min yield gives quick startup and no extra
211 * heap allocation.  Higher values of min yield may lead to large
212 * heaps, especially if the code varies its maximum consumption
213 * between different freelists.
214 */
215
216 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
217 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
218 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
219 int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
220 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
221 int scm_default_min_yield_1 = 40;
222
223 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
224 int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
225 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
226 /* The following value may seem large, but note that if we get to GC at
227 * all, this means that we have a numerically intensive application
228 */
229 int scm_default_min_yield_2 = 40;
230
231 int scm_default_max_segment_size = 2097000L;  /* a little less than 2 MB */
232
233 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
234 #ifdef _QC
235 # define SCM_HEAP_SEG_SIZE 32768L
236 #else
237 # ifdef sequent
238 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
239 # else
240 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
241 # endif
242 #endif
243 /* Make the heap grow by a factor of 1.5 each time it is expanded. */
244 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
245 #define SCM_INIT_MALLOC_LIMIT 100000
246 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
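/* A worked example (not compiled in) of the growth rule above: with
 * SCM_EXPHEAP defined as size / 2, each expansion adds half the current
 * heap size, i.e. the heap grows by a factor of 1.5.  The starting size
 * below is hypothetical.
 */
#if 0
static void
example_heap_growth (void)
{
  long size = 1000000L;          /* hypothetical current heap size */
  size += SCM_EXPHEAP (size);    /* 1500000: grew by factor 1.5 */
  size += SCM_EXPHEAP (size);    /* 2250000 */
}
#endif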
247
248 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
249 aligned inner bounds for allocated storage */
250
251 #ifdef PROT386
252 /* in 386 protected mode we must only adjust the offset */
253 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
254 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
255 #else
256 # ifdef _UNICOS
257 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
258 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
259 # else
260 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
261 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
262 # endif /* UNICOS */
263 #endif /* PROT386 */
264
265 #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
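/* An illustration (not compiled in) of the rounding performed by the
 * default CELL_UP/CELL_DN definitions above, assuming a machine where
 * sizeof (scm_cell) == 8 and span == 1: addresses are rounded up/down to
 * the nearest 8-byte cell boundary.  The address is hypothetical.
 */
#if 0
static void
example_cell_alignment (void)
{
  char *p = (char *) 0x1005;           /* hypothetical address */
  SCM_CELLPTR up = CELL_UP (p, 1);     /* rounds up to 0x1008 */
  SCM_CELLPTR dn = CELL_DN (p, 1);     /* rounds down to 0x1000 */
}
#endif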
266
267 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
268 #define CLUSTER_SIZE_IN_BYTES(freelist) \
269 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
270
271 \f
272 /* scm_freelists
273 */
274
275 typedef struct scm_freelist_t {
276 /* collected cells */
277 SCM cells;
278 /* number of cells left to collect before cluster is full */
279 unsigned int left_to_collect;
280 /* number of clusters which have been allocated */
281 unsigned int clusters_allocated;
282 /* a list of freelists, each of size cluster_size,
283 * except the last one which may be shorter
284 */
285 SCM clusters;
286 SCM *clustertail;
287 /* this is the number of objects in each cluster, including the spine cell */
288 int cluster_size;
289 /* indicates that we should grow the heap instead of collecting
290  */
291 int grow_heap_p;
292 /* minimum yield on this list in order not to grow the heap
293 */
294 long min_yield;
295 /* defines min_yield as percent of total heap size
296 */
297 int min_yield_fraction;
298 /* number of cells per object on this list */
299 int span;
300 /* number of collected cells during last GC */
301 long collected;
302 /* number of collected cells during penultimate GC */
303 long collected_1;
304 /* total number of cells in heap segments
305 * belonging to this list.
306 */
307 long heap_size;
308 } scm_freelist_t;
309
310 SCM scm_freelist = SCM_EOL;
311 scm_freelist_t scm_master_freelist = {
312 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
313 };
314 SCM scm_freelist2 = SCM_EOL;
315 scm_freelist_t scm_master_freelist2 = {
316 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
317 };
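/* A sketch (not compiled in) of the data shape set up above:
 * master->clusters is a list of clusters, and each cluster is a chain of
 * free cells linked through their CDRs.  Counting the cells of the first
 * cluster of the master freelist would look like this:
 */
#if 0
static int
example_count_first_cluster (void)
{
  int n = 0;
  SCM cell = SCM_CAR (scm_master_freelist.clusters);  /* first cluster */
  while (!SCM_NULLP (cell))
    {
      ++n;
      cell = SCM_FREE_CELL_CDR (cell);
    }
  return n;
}
#endif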
318
319 /* scm_mtrigger
320 * is the number of bytes of must_malloc allocation needed to trigger gc.
321 */
322 unsigned long scm_mtrigger;
323
324 /* scm_gc_heap_lock
325 * If set, don't expand the heap. Set only during gc, during which no allocation
326 * is supposed to take place anyway.
327 */
328 int scm_gc_heap_lock = 0;
329
330 /* GC Blocking
331 * Don't pause for collection if this is set -- just
332 * expand the heap.
333 */
334 int scm_block_gc = 1;
335
336 /* During collection, this accumulates objects holding
337 * weak references.
338 */
339 SCM scm_weak_vectors;
340
341 /* During collection, this accumulates structures which are to be freed.
342 */
343 SCM scm_structs_to_free;
344
345 /* GC Statistics Keeping
346 */
347 unsigned long scm_cells_allocated = 0;
348 long scm_mallocated = 0;
349 unsigned long scm_gc_cells_collected;
350 unsigned long scm_gc_yield;
351 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
352 unsigned long scm_gc_malloc_collected;
353 unsigned long scm_gc_ports_collected;
354 unsigned long scm_gc_time_taken = 0;
355 static unsigned long t_before_gc;
356 static unsigned long t_before_sweep;
357 unsigned long scm_gc_mark_time_taken = 0;
358 unsigned long scm_gc_sweep_time_taken = 0;
359 unsigned long scm_gc_times = 0;
360 unsigned long scm_gc_cells_swept = 0;
361 double scm_gc_cells_marked_acc = 0.;
362 double scm_gc_cells_swept_acc = 0.;
363
364 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
365 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
366 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
367 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
368 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
369 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
370 SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
371 SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
372 SCM_SYMBOL (sym_times, "gc-times");
373 SCM_SYMBOL (sym_cells_marked, "cells-marked");
374 SCM_SYMBOL (sym_cells_swept, "cells-swept");
375
376 typedef struct scm_heap_seg_data_t
377 {
378 /* lower and upper bounds of the segment */
379 SCM_CELLPTR bounds[2];
380
381 /* address of the head-of-freelist pointer for this segment's cells.
382 All segments usually point to the same one, scm_freelist. */
383 scm_freelist_t *freelist;
384
385 /* number of cells per object in this segment */
386 int span;
387 } scm_heap_seg_data_t;
388
389
390
391 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
392
393 typedef enum { return_on_error, abort_on_error } policy_on_error;
394 static void alloc_some_heap (scm_freelist_t *, policy_on_error);
395
396
397 #define SCM_HEAP_SIZE \
398 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
399 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
400
401 #define BVEC_GROW_SIZE 256
402 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
403 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
404
405 /* mark space allocation */
406
407 typedef struct scm_mark_space_t
408 {
409 scm_c_bvec_limb_t *bvec_space;
410 struct scm_mark_space_t *next;
411 } scm_mark_space_t;
412
413 static scm_mark_space_t *current_mark_space;
414 static scm_mark_space_t **mark_space_ptr;
415 static int current_mark_space_offset;
416 static scm_mark_space_t *mark_space_head;
417
418 static scm_c_bvec_limb_t *
419 get_bvec ()
420 #define FUNC_NAME "get_bvec"
421 {
422 scm_c_bvec_limb_t *res;
423
424 if (!current_mark_space)
425 {
426 SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
427 if (!current_mark_space)
428 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
429
430 current_mark_space->bvec_space = NULL;
431 current_mark_space->next = NULL;
432
433 *mark_space_ptr = current_mark_space;
434 mark_space_ptr = &(current_mark_space->next);
435
436 return get_bvec ();
437 }
438
439 if (!(current_mark_space->bvec_space))
440 {
441 SCM_SYSCALL (current_mark_space->bvec_space =
442 (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
443 if (!(current_mark_space->bvec_space))
444 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
445
446 current_mark_space_offset = 0;
447
448 return get_bvec ();
449 }
450
451 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
452 {
453 current_mark_space = NULL;
454
455 return get_bvec ();
456 }
457
458 res = current_mark_space->bvec_space + current_mark_space_offset;
459 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
460
461 return res;
462 }
463 #undef FUNC_NAME
464
465
466 static void
467 clear_mark_space ()
468 {
469 scm_mark_space_t *ms;
470
471 for (ms = mark_space_head; ms; ms = ms->next)
472 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
473 }
474
475
476 \f
477 /* Debugging functions. */
478
479 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
480
481 /* Return the number of the heap segment containing CELL. */
482 static int
483 which_seg (SCM cell)
484 {
485 int i;
486
487 for (i = 0; i < scm_n_heap_segs; i++)
488 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
489 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
490 return i;
491 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
492 SCM_UNPACK (cell));
493 abort ();
494 }
495
496
497 static void
498 map_free_list (scm_freelist_t *master, SCM freelist)
499 {
500 int last_seg = -1, count = 0;
501 SCM f;
502
503 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
504 {
505 int this_seg = which_seg (f);
506
507 if (this_seg != last_seg)
508 {
509 if (last_seg != -1)
510 fprintf (stderr, " %5d %d-cells in segment %d\n",
511 count, master->span, last_seg);
512 last_seg = this_seg;
513 count = 0;
514 }
515 count++;
516 }
517 if (last_seg != -1)
518 fprintf (stderr, " %5d %d-cells in segment %d\n",
519 count, master->span, last_seg);
520 }
521
522 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
523 (),
524 "Print debugging information about the free-list.\n"
525 "@code{map-free-list} is only included in\n"
526 "@code{--enable-guile-debug} builds of Guile.")
527 #define FUNC_NAME s_scm_map_free_list
528 {
529 int i;
530 fprintf (stderr, "%d segments total (%d:%d",
531 scm_n_heap_segs,
532 scm_heap_table[0].span,
533 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
534 for (i = 1; i < scm_n_heap_segs; i++)
535 fprintf (stderr, ", %d:%d",
536 scm_heap_table[i].span,
537 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
538 fprintf (stderr, ")\n");
539 map_free_list (&scm_master_freelist, scm_freelist);
540 map_free_list (&scm_master_freelist2, scm_freelist2);
541 fflush (stderr);
542
543 return SCM_UNSPECIFIED;
544 }
545 #undef FUNC_NAME
546
547 static int last_cluster;
548 static int last_size;
549
550 static int
551 free_list_length (char *title, int i, SCM freelist)
552 {
553 SCM ls;
554 int n = 0;
555 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
556 if (SCM_FREE_CELL_P (ls))
557 ++n;
558 else
559 {
560 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
561 abort ();
562 }
563 if (n != last_size)
564 {
565 if (i > 0)
566 {
567 if (last_cluster == i - 1)
568 fprintf (stderr, "\t%d\n", last_size);
569 else
570 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
571 }
572 if (i >= 0)
573 fprintf (stderr, "%s %d", title, i);
574 else
575 fprintf (stderr, "%s\t%d\n", title, n);
576 last_cluster = i;
577 last_size = n;
578 }
579 return n;
580 }
581
582 static void
583 free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
584 {
585 SCM clusters;
586 int i = 0, len, n = 0;
587 fprintf (stderr, "%s\n\n", title);
588 n += free_list_length ("free list", -1, freelist);
589 for (clusters = master->clusters;
590 SCM_NNULLP (clusters);
591 clusters = SCM_CDR (clusters))
592 {
593 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
594 n += len;
595 }
596 if (last_cluster == i - 1)
597 fprintf (stderr, "\t%d\n", last_size);
598 else
599 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
600 fprintf (stderr, "\ntotal %d objects\n\n", n);
601 }
602
603 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
604 (),
605 "Print debugging information about the free-list.\n"
606 "@code{free-list-length} is only included in\n"
607 "@code{--enable-guile-debug} builds of Guile.")
608 #define FUNC_NAME s_scm_free_list_length
609 {
610 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
611 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
612 return SCM_UNSPECIFIED;
613 }
614 #undef FUNC_NAME
615
616 #endif
617
618 #ifdef GUILE_DEBUG_FREELIST
619
620 /* Non-zero if freelist debugging is in effect. Set this via
621 `gc-set-debug-check-freelist!'. */
622 static int scm_debug_check_freelist = 0;
623
624 /* Number of calls to SCM_NEWCELL since startup. */
625 static unsigned long scm_newcell_count;
626 static unsigned long scm_newcell2_count;
627
628 /* Search freelist for anything that isn't marked as a free cell.
629 Abort if we find something. */
630 static void
631 scm_check_freelist (SCM freelist)
632 {
633 SCM f;
634 int i = 0;
635
636 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
637 if (!SCM_FREE_CELL_P (f))
638 {
639 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
640 scm_newcell_count, i);
641 abort ();
642 }
643 }
644
645 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
646 (SCM flag),
647 "If @var{flag} is @code{#t}, check the freelist for consistency\n"
648 "on each cell allocation. This procedure only exists when the\n"
649 "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
650 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
651 {
652 /* [cmm] I did a double-take when I read this code the first time.
653 well, FWIW. */
654 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
655 return SCM_UNSPECIFIED;
656 }
657 #undef FUNC_NAME
658
659
660 SCM
661 scm_debug_newcell (void)
662 {
663 SCM new;
664
665 scm_newcell_count++;
666 if (scm_debug_check_freelist)
667 {
668 scm_check_freelist (scm_freelist);
669 scm_gc();
670 }
671
672 /* The rest of this is supposed to be identical to the SCM_NEWCELL
673 macro. */
674 if (SCM_NULLP (scm_freelist))
675 {
676 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
677 SCM_GC_SET_ALLOCATED (new);
678 }
679 else
680 {
681 new = scm_freelist;
682 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
683 SCM_GC_SET_ALLOCATED (new);
684 }
685
686 return new;
687 }
688
689 SCM
690 scm_debug_newcell2 (void)
691 {
692 SCM new;
693
694 scm_newcell2_count++;
695 if (scm_debug_check_freelist)
696 {
697 scm_check_freelist (scm_freelist2);
698 scm_gc ();
699 }
700
701 /* The rest of this is supposed to be identical to the SCM_NEWCELL
702 macro. */
703 if (SCM_NULLP (scm_freelist2))
704 {
705 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
706 SCM_GC_SET_ALLOCATED (new);
707 }
708 else
709 {
710 new = scm_freelist2;
711 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
712 SCM_GC_SET_ALLOCATED (new);
713 }
714
715 return new;
716 }
717
718 #endif /* GUILE_DEBUG_FREELIST */
719
720 \f
721
722 static unsigned long
723 master_cells_allocated (scm_freelist_t *master)
724 {
725 /* the '- 1' below is to ignore the cluster spine cells. */
726 int objects = master->clusters_allocated * (master->cluster_size - 1);
727 if (SCM_NULLP (master->clusters))
728 objects -= master->left_to_collect;
729 return master->span * objects;
730 }
731
732 static unsigned long
733 freelist_length (SCM freelist)
734 {
735 int n;
736 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
737 ++n;
738 return n;
739 }
740
741 static unsigned long
742 compute_cells_allocated ()
743 {
744 return (scm_cells_allocated
745 + master_cells_allocated (&scm_master_freelist)
746 + master_cells_allocated (&scm_master_freelist2)
747 - scm_master_freelist.span * freelist_length (scm_freelist)
748 - scm_master_freelist2.span * freelist_length (scm_freelist2));
749 }
750
751 /* {Scheme Interface to GC}
752 */
753
754 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
755 (),
756 "Return an association list of statistics about Guile's current\n"
757 "use of storage.")
758 #define FUNC_NAME s_scm_gc_stats
759 {
760 int i;
761 int n;
762 SCM heap_segs;
763 long int local_scm_mtrigger;
764 long int local_scm_mallocated;
765 long int local_scm_heap_size;
766 long int local_scm_cells_allocated;
767 long int local_scm_gc_time_taken;
768 long int local_scm_gc_times;
769 long int local_scm_gc_mark_time_taken;
770 long int local_scm_gc_sweep_time_taken;
771 double local_scm_gc_cells_swept;
772 double local_scm_gc_cells_marked;
773 SCM answer;
774
775 SCM_DEFER_INTS;
776
777 ++scm_block_gc;
778
779 retry:
780 heap_segs = SCM_EOL;
781 n = scm_n_heap_segs;
782 for (i = scm_n_heap_segs; i--; )
783 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
784 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
785 heap_segs);
786 if (scm_n_heap_segs != n)
787 goto retry;
788
789 --scm_block_gc;
790
791 /* Below, we cons to produce the resulting list. We want a snapshot of
792 * the heap situation before consing.
793 */
794 local_scm_mtrigger = scm_mtrigger;
795 local_scm_mallocated = scm_mallocated;
796 local_scm_heap_size = SCM_HEAP_SIZE;
797 local_scm_cells_allocated = compute_cells_allocated ();
798 local_scm_gc_time_taken = scm_gc_time_taken;
799 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
800 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
801 local_scm_gc_times = scm_gc_times;
802 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
803 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
804
805 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
806 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
807 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
808 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
809 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
810 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
811 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
812 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
813 scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
814 scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
815 scm_cons (sym_heap_segments, heap_segs),
816 SCM_UNDEFINED);
817 SCM_ALLOW_INTS;
818 return answer;
819 }
820 #undef FUNC_NAME
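/* A usage sketch (not compiled in) for the procedure above when called
 * from C: the result is an alist, so individual entries can be looked up
 * with scm_assq.
 */
#if 0
static void
example_print_heap_size (void)
{
  SCM stats = scm_gc_stats ();
  SCM entry = scm_assq (sym_heap_size, stats);  /* (cell-heap-size . n) */
  if (!SCM_FALSEP (entry))
    scm_display (SCM_CDR (entry), scm_current_output_port ());
}
#endif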
821
822
823 static void
824 gc_start_stats (const char *what)
825 {
826 t_before_gc = scm_c_get_internal_run_time ();
827 scm_gc_cells_swept = 0;
828 scm_gc_cells_collected = 0;
829 scm_gc_yield_1 = scm_gc_yield;
830 scm_gc_yield = (scm_cells_allocated
831 + master_cells_allocated (&scm_master_freelist)
832 + master_cells_allocated (&scm_master_freelist2));
833 scm_gc_malloc_collected = 0;
834 scm_gc_ports_collected = 0;
835 }
836
837
838 static void
839 gc_end_stats ()
840 {
841 unsigned long t = scm_c_get_internal_run_time ();
842 scm_gc_time_taken += (t - t_before_gc);
843 scm_gc_sweep_time_taken += (t - t_before_sweep);
844 ++scm_gc_times;
845
846 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
847 scm_gc_cells_swept_acc += scm_gc_cells_swept;
848 }
849
850
851 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
852 (SCM obj),
853 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
854 "returned by this function for @var{obj}")
855 #define FUNC_NAME s_scm_object_address
856 {
857 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
858 }
859 #undef FUNC_NAME
860
861
862 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
863 (),
864 "Scans all of SCM objects and reclaims for further use those that are\n"
865 "no longer accessible.")
866 #define FUNC_NAME s_scm_gc
867 {
868 SCM_DEFER_INTS;
869 scm_igc ("call");
870 SCM_ALLOW_INTS;
871 return SCM_UNSPECIFIED;
872 }
873 #undef FUNC_NAME
874
875
876 \f
877 /* {C Interface For When GC is Triggered}
878 */
879
880 static void
881 adjust_min_yield (scm_freelist_t *freelist)
882 {
883 /* min yield is adjusted upwards so that next predicted total yield
884 * (allocated cells actually freed by GC) becomes
885 * `min_yield_fraction' of total heap size. Note, however, that
886 * the absolute value of min_yield will correspond to `collected'
887 * on one master (the one which currently is triggering GC).
888 *
889 * The reason why we look at total yield instead of cells collected
890 * on one list is that we want to take other freelists into account.
891 * On this freelist, we know that (local) yield = collected cells,
892 * but that's probably not the case on the other lists.
893 *
894 * (We might consider computing a better prediction, for example
895 * by computing an average over multiple GCs.)
896 */
897 if (freelist->min_yield_fraction)
898 {
899 /* Pick largest of last two yields. */
900 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
901 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
902 #ifdef DEBUGINFO
903 fprintf (stderr, " after GC = %d, delta = %d\n",
904 scm_cells_allocated,
905 delta);
906 #endif
907 if (delta > 0)
908 freelist->min_yield += delta;
909 }
910 }
911
912
913 /* When we get POSIX threads support, the master will be global and
914 * common while the freelist will be individual for each thread.
915 */
916
917 SCM
918 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
919 {
920 SCM cell;
921 ++scm_ints_disabled;
922 do
923 {
924 if (SCM_NULLP (master->clusters))
925 {
926 if (master->grow_heap_p || scm_block_gc)
927 {
928 /* In order to reduce gc frequency, try to allocate a new heap
929 * segment first, even if gc might find some free cells. If we
930 * can't obtain a new heap segment, we will try gc later.
931 */
932 master->grow_heap_p = 0;
933 alloc_some_heap (master, return_on_error);
934 }
935 if (SCM_NULLP (master->clusters))
936 {
937 /* The heap was not grown, either because it wasn't scheduled to
938 * grow, or because there was not enough memory available. In
939 * both cases we have to try gc to get some free cells.
940 */
941 #ifdef DEBUGINFO
942 fprintf (stderr, "allocated = %d, ",
943 scm_cells_allocated
944 + master_cells_allocated (&scm_master_freelist)
945 + master_cells_allocated (&scm_master_freelist2));
946 #endif
947 scm_igc ("cells");
948 adjust_min_yield (master);
949 if (SCM_NULLP (master->clusters))
950 {
951 /* gc could not free any cells. Now, we _must_ allocate a
952 * new heap segment, because there is no other possibility
953 * to provide a new cell for the caller.
954 */
955 alloc_some_heap (master, abort_on_error);
956 }
957 }
958 }
959 cell = SCM_CAR (master->clusters);
960 master->clusters = SCM_CDR (master->clusters);
961 ++master->clusters_allocated;
962 }
963 while (SCM_NULLP (cell));
964
965 #ifdef GUILE_DEBUG_FREELIST
966 scm_check_freelist (cell);
967 #endif
968
969 --scm_ints_disabled;
970 *freelist = SCM_FREE_CELL_CDR (cell);
971 return cell;
972 }
973
974
975 #if 0
976 /* This is a support routine which can be used to reserve a cluster
977 * for some special use, such as debugging. It won't be useful until
978 * free cells are preserved between garbage collections.
979 */
980
981 SCM
982 scm_alloc_cluster (scm_freelist_t *master)
983 {
984 SCM freelist, cell;
985 cell = scm_gc_for_newcell (master, &freelist);
986 SCM_SETCDR (cell, freelist);
987 return cell;
988 }
989 #endif
990
991
992 scm_c_hook_t scm_before_gc_c_hook;
993 scm_c_hook_t scm_before_mark_c_hook;
994 scm_c_hook_t scm_before_sweep_c_hook;
995 scm_c_hook_t scm_after_sweep_c_hook;
996 scm_c_hook_t scm_after_gc_c_hook;
997
998
999 void
1000 scm_igc (const char *what)
1001 {
1002 int j;
1003
1004 ++scm_gc_running_p;
1005 scm_c_hook_run (&scm_before_gc_c_hook, 0);
1006 #ifdef DEBUGINFO
1007 fprintf (stderr,
1008 SCM_NULLP (scm_freelist)
1009 ? "*"
1010 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
1011 #endif
1012 /* During the critical section, only the current thread may run. */
1013 SCM_CRITICAL_SECTION_START;
1014
1015 /* fprintf (stderr, "gc: %s\n", what); */
1016
1017 if (!scm_stack_base || scm_block_gc)
1018 {
1019 --scm_gc_running_p;
1020 return;
1021 }
1022
1023 gc_start_stats (what);
1024
1025 if (scm_mallocated < 0)
1026 /* The byte count of allocated objects has underflowed. This is
1027 probably because you forgot to report the sizes of objects you
1028 have allocated, by calling scm_done_malloc or some such. When
1029 the GC freed them, it subtracted their size from
1030 scm_mallocated, which underflowed. */
1031 abort ();
1032
1033 if (scm_gc_heap_lock)
1034 /* We've invoked the collector while a GC is already in progress.
1035 That should never happen. */
1036 abort ();
1037
1038 ++scm_gc_heap_lock;
1039
1040 /* flush dead entries from the continuation stack */
1041 {
1042 int x;
1043 int bound;
1044 SCM * elts;
1045 elts = SCM_VELTS (scm_continuation_stack);
1046 bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
1047 x = SCM_INUM (scm_continuation_stack_ptr);
1048 while (x < bound)
1049 {
1050 elts[x] = SCM_BOOL_F;
1051 ++x;
1052 }
1053 }
1054
1055 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1056
1057 clear_mark_space ();
1058
1059 #ifndef USE_THREADS
1060
1061 /* Mark objects on the C stack. */
1062 SCM_FLUSH_REGISTER_WINDOWS;
1063 /* This assumes that all registers are saved into the jmp_buf */
1064 setjmp (scm_save_regs_gc_mark);
1065 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
1066 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
1067 sizeof scm_save_regs_gc_mark)
1068 / sizeof (SCM_STACKITEM)));
1069
1070 {
1071 scm_sizet stack_len = scm_stack_size (scm_stack_base);
1072 #ifdef SCM_STACK_GROWS_UP
1073 scm_mark_locations (scm_stack_base, stack_len);
1074 #else
1075 scm_mark_locations (scm_stack_base - stack_len, stack_len);
1076 #endif
1077 }
1078
1079 #else /* USE_THREADS */
1080
1081 /* Mark every thread's stack and registers */
1082 scm_threads_mark_stacks ();
1083
1084 #endif /* USE_THREADS */
1085
1086 j = SCM_NUM_PROTECTS;
1087 while (j--)
1088 scm_gc_mark (scm_sys_protects[j]);
1089
1090 /* FIXME: we should have a means to register C functions to be run
1091 * in different phases of GC
1092 */
1093 scm_mark_subr_table ();
1094
1095 #ifndef USE_THREADS
1096 scm_gc_mark (scm_root->handle);
1097 #endif
1098
1099 t_before_sweep = scm_c_get_internal_run_time ();
1100 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1101
1102 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
1103
1104 scm_gc_sweep ();
1105
1106 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1107
1108 --scm_gc_heap_lock;
1109 gc_end_stats ();
1110
1111 SCM_CRITICAL_SECTION_END;
1112 scm_c_hook_run (&scm_after_gc_c_hook, 0);
1113 --scm_gc_running_p;
1114 }
1115
1116 \f
1117
1118 /* {Mark/Sweep}
1119 */
1120
1121 #define MARK scm_gc_mark
1122 #define FNAME "scm_gc_mark"
1123
1124 #endif /*!MARK_DEPENDENCIES*/
1125
1126 /* Mark an object precisely.
1127 */
1128 void
1129 MARK (SCM p)
1130 #define FUNC_NAME FNAME
1131 {
1132 register long i;
1133 register SCM ptr;
1134 scm_bits_t cell_type;
1135
1136 #ifndef MARK_DEPENDENCIES
1137 # define RECURSE scm_gc_mark
1138 #else
1139 /* go through the usual marking, but not for self-cycles. */
1140 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1141 #endif
1142 ptr = p;
1143
1144 #ifdef MARK_DEPENDENCIES
1145 goto gc_mark_loop_first_time;
1146 #endif
1147
1148 gc_mark_loop:
1149 if (SCM_IMP (ptr))
1150 return;
1151
1152 gc_mark_nimp:
1153
1154 #ifdef MARK_DEPENDENCIES
1155 if (SCM_EQ_P (ptr, p))
1156 return;
1157
1158 scm_gc_mark (ptr);
1159 return;
1160
1161 gc_mark_loop_first_time:
1162 #endif
1163
1164 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1165 /* We are in debug mode. Check the ptr exhaustively. */
1166 if (!scm_cellp (ptr))
1167 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1168 #else
1169 /* In non-debug mode, do at least some cheap testing. */
1170 if (!SCM_CELLP (ptr))
1171 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1172 #endif
1173
1174 #ifndef MARK_DEPENDENCIES
1175
1176 if (SCM_GCMARKP (ptr))
1177 return;
1178
1179 SCM_SETGCMARK (ptr);
1180
1181 #endif
1182
1183 cell_type = SCM_GC_CELL_TYPE (ptr);
1184 switch (SCM_ITAG7 (cell_type))
1185 {
1186 case scm_tcs_cons_nimcar:
1187 if (SCM_IMP (SCM_CDR (ptr)))
1188 {
1189 ptr = SCM_CAR (ptr);
1190 goto gc_mark_nimp;
1191 }
1192 RECURSE (SCM_CAR (ptr));
1193 ptr = SCM_CDR (ptr);
1194 goto gc_mark_nimp;
1195 case scm_tcs_cons_imcar:
1196 ptr = SCM_CDR (ptr);
1197 goto gc_mark_loop;
1198 case scm_tc7_pws:
1199 RECURSE (SCM_SETTER (ptr));
1200 ptr = SCM_PROCEDURE (ptr);
1201 goto gc_mark_loop;
1202 case scm_tcs_cons_gloc:
1203 {
1204 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1205 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1206 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1207 * pointer to a struct vtable data region. The fact that these are
1208 * accessed in the same way restricts the possibilities of changing the
1209 * data layout of structs or heap cells.
1210 */
1211 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1212 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1213 if (vtable_data [scm_vtable_index_vcell] != 0)
1214 {
1215 /* ptr is a gloc */
1216 SCM gloc_car = SCM_PACK (word0);
1217 RECURSE (gloc_car);
1218 ptr = SCM_CDR (ptr);
1219 goto gc_mark_loop;
1220 }
1221 else
1222 {
1223 /* ptr is a struct */
1224 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
1225 int len = SCM_SYMBOL_LENGTH (layout);
1226 char * fields_desc = SCM_SYMBOL_CHARS (layout);
1227 scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);
1228
1229 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1230 {
1231 RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
1232 RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
1233 }
1234 if (len)
1235 {
1236 int x;
1237
1238 for (x = 0; x < len - 2; x += 2, ++struct_data)
1239 if (fields_desc[x] == 'p')
1240 RECURSE (SCM_PACK (*struct_data));
1241 if (fields_desc[x] == 'p')
1242 {
1243 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
1244 for (x = *struct_data++; x; --x, ++struct_data)
1245 RECURSE (SCM_PACK (*struct_data));
1246 else
1247 RECURSE (SCM_PACK (*struct_data));
1248 }
1249 }
1250 /* mark vtable */
1251 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1252 goto gc_mark_loop;
1253 }
1254 }
1255 break;
1256 case scm_tcs_closures:
1257 if (SCM_IMP (SCM_ENV (ptr)))
1258 {
1259 ptr = SCM_CLOSCAR (ptr);
1260 goto gc_mark_nimp;
1261 }
1262 RECURSE (SCM_CLOSCAR (ptr));
1263 ptr = SCM_ENV (ptr);
1264 goto gc_mark_nimp;
1265 case scm_tc7_vector:
1266 i = SCM_VECTOR_LENGTH (ptr);
1267 if (i == 0)
1268 break;
1269 while (--i > 0)
1270 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1271 RECURSE (SCM_VELTS (ptr)[i]);
1272 ptr = SCM_VELTS (ptr)[0];
1273 goto gc_mark_loop;
1274 #ifdef CCLO
1275 case scm_tc7_cclo:
1276 {
1277 unsigned long int i = SCM_CCLO_LENGTH (ptr);
1278 unsigned long int j;
1279 for (j = 1; j != i; ++j)
1280 {
1281 SCM obj = SCM_CCLO_REF (ptr, j);
1282 if (!SCM_IMP (obj))
1283 RECURSE (obj);
1284 }
1285 ptr = SCM_CCLO_REF (ptr, 0);
1286 goto gc_mark_loop;
1287 }
1288 #endif
1289 #ifdef HAVE_ARRAYS
1290 case scm_tc7_bvect:
1291 case scm_tc7_byvect:
1292 case scm_tc7_ivect:
1293 case scm_tc7_uvect:
1294 case scm_tc7_fvect:
1295 case scm_tc7_dvect:
1296 case scm_tc7_cvect:
1297 case scm_tc7_svect:
1298 #ifdef HAVE_LONG_LONGS
1299 case scm_tc7_llvect:
1300 #endif
1301 #endif
1302 case scm_tc7_string:
1303 break;
1304
1305 case scm_tc7_substring:
1306 ptr = SCM_CDR (ptr);
1307 goto gc_mark_loop;
1308
1309 case scm_tc7_wvect:
1310 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1311 scm_weak_vectors = ptr;
1312 if (SCM_IS_WHVEC_ANY (ptr))
1313 {
1314 int x;
1315 int len;
1316 int weak_keys;
1317 int weak_values;
1318
1319 len = SCM_VECTOR_LENGTH (ptr);
1320 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1321 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1322
1323 for (x = 0; x < len; ++x)
1324 {
1325 SCM alist;
1326 alist = SCM_VELTS (ptr)[x];
1327
1328 /* mark everything on the alist except the keys or
1329 * values, according to weak_values and weak_keys. */
1330 while ( SCM_CONSP (alist)
1331 && !SCM_GCMARKP (alist)
1332 && SCM_CONSP (SCM_CAR (alist)))
1333 {
1334 SCM kvpair;
1335 SCM next_alist;
1336
1337 kvpair = SCM_CAR (alist);
1338 next_alist = SCM_CDR (alist);
1339 /*
1340 * Do not do this:
1341 * SCM_SETGCMARK (alist);
1342 * SCM_SETGCMARK (kvpair);
1343 *
1344 * It may be that either the key or value is protected by
1345 * an escaped reference to part of the spine of this alist.
1346 * If we mark the spine here, and only mark one or neither of the
1347 * key and value, they may never be properly marked.
1348 * This leads to a horrible situation in which an alist containing
1349 * freelist cells is exported.
1350 *
1351 * So the spines of these alists are only marked at the very end of marking.
1352 * If somebody confuses us by constructing a weak vector
1353 * with a circular alist then we are hosed, but at least we
1354 * won't prematurely drop table entries.
1355 */
1356 if (!weak_keys)
1357 RECURSE (SCM_CAR (kvpair));
1358 if (!weak_values)
1359 RECURSE (SCM_CDR (kvpair));
1360 alist = next_alist;
1361 }
1362 if (SCM_NIMP (alist))
1363 RECURSE (alist);
1364 }
1365 }
1366 break;
1367
1368 case scm_tc7_symbol:
1369 ptr = SCM_PROP_SLOTS (ptr);
1370 goto gc_mark_loop;
1371 case scm_tcs_subrs:
1372 break;
1373 case scm_tc7_port:
1374 i = SCM_PTOBNUM (ptr);
1375 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1376 if (!(i < scm_numptob))
1377 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1378 #endif
1379 if (SCM_PTAB_ENTRY(ptr))
1380 RECURSE (SCM_FILENAME (ptr));
1381 if (scm_ptobs[i].mark)
1382 {
1383 ptr = (scm_ptobs[i].mark) (ptr);
1384 goto gc_mark_loop;
1385 }
1386 else
1387 return;
1388 break;
1389 case scm_tc7_smob:
1390 switch (SCM_TYP16 (ptr))
1391 { /* should be faster than going through scm_smobs */
1392 case scm_tc_free_cell:
1393 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1394 case scm_tc16_big:
1395 case scm_tc16_real:
1396 case scm_tc16_complex:
1397 break;
1398 default:
1399 i = SCM_SMOBNUM (ptr);
1400 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1401 if (!(i < scm_numsmob))
1402 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1403 #endif
1404 if (scm_smobs[i].mark)
1405 {
1406 ptr = (scm_smobs[i].mark) (ptr);
1407 goto gc_mark_loop;
1408 }
1409 else
1410 return;
1411 }
1412 break;
1413 default:
1414 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1415 }
1416 #undef RECURSE
1417 }
1418 #undef FUNC_NAME
1419
1420 #ifndef MARK_DEPENDENCIES
1421
1422 #undef MARK
1423 #undef FNAME
1424
1425 /* And here we define `scm_gc_mark_dependencies', by including this
1426 * same file in itself.
1427 */
1428 #define MARK scm_gc_mark_dependencies
1429 #define FNAME "scm_gc_mark_dependencies"
1430 #define MARK_DEPENDENCIES
1431 #include "gc.c"
1432 #undef MARK_DEPENDENCIES
1433 #undef MARK
1434 #undef FNAME
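/* The same trick in miniature (hypothetical file "mark.c", not compiled
 * in): a file defines one shared function body under two names by
 * including itself once with a guard macro set.
 */
#if 0
#ifndef SECOND_PASS
# define NAME first_version
#else
# define NAME second_version
#endif
static void NAME (void) { /* shared body */ }
#undef NAME
#ifndef SECOND_PASS
# define SECOND_PASS
# include "mark.c"
#endif
#endif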
1435
1436
1437 /* Mark a Region Conservatively
1438 */
1439
1440 void
1441 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1442 {
1443 unsigned long m;
1444
1445 for (m = 0; m < n; ++m)
1446 {
1447 SCM obj = * (SCM *) &x[m];
1448 if (SCM_CELLP (obj))
1449 {
1450 SCM_CELLPTR ptr = SCM2PTR (obj);
1451 int i = 0;
1452 int j = scm_n_heap_segs - 1;
1453 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1454 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1455 {
1456 while (i <= j)
1457 {
1458 int seg_id;
1459 seg_id = -1;
1460 if ((i == j)
1461 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1462 seg_id = i;
1463 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1464 seg_id = j;
1465 else
1466 {
1467 int k;
1468 k = (i + j) / 2;
1469 if (k == i)
1470 break;
1471 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1472 {
1473 j = k;
1474 ++i;
1475 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1476 continue;
1477 else
1478 break;
1479 }
1480 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1481 {
1482 i = k;
1483 --j;
1484 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1485 continue;
1486 else
1487 break;
1488 }
1489 }
1490
1491 if (SCM_GC_IN_CARD_HEADERP (ptr))
1492 break;
1493
1494 if (scm_heap_table[seg_id].span == 1
1495 || DOUBLECELL_ALIGNED_P (obj))
1496 scm_gc_mark (obj);
1497
1498 break;
1499 }
1500 }
1501 }
1502 }
1503 }
1504
1505
1506 /* The function scm_cellp determines whether an SCM value can be regarded as a
1507 * pointer to a cell on the heap. Binary search is used in order to determine
1508 * the heap segment that contains the cell.
1509 */
1510 int
1511 scm_cellp (SCM value)
1512 {
1513 if (SCM_CELLP (value)) {
1514 scm_cell * ptr = SCM2PTR (value);
1515 unsigned int i = 0;
1516 unsigned int j = scm_n_heap_segs - 1;
1517
1518 if (SCM_GC_IN_CARD_HEADERP (ptr))
1519 return 0;
1520
1521 while (i < j) {
1522 int k = (i + j) / 2;
1523 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1524 j = k;
1525 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1526 i = k + 1;
1527 }
1528 }
1529
1530 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1531 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1532 && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
1533 && !SCM_GC_IN_CARD_HEADERP (ptr)
1534 )
1535 return 1;
1536 else
1537 return 0;
1538 } else
1539 return 0;
1540 }
1541
1542
1543 static void
1544 gc_sweep_freelist_start (scm_freelist_t *freelist)
1545 {
1546 freelist->cells = SCM_EOL;
1547 freelist->left_to_collect = freelist->cluster_size;
1548 freelist->clusters_allocated = 0;
1549 freelist->clusters = SCM_EOL;
1550 freelist->clustertail = &freelist->clusters;
1551 freelist->collected_1 = freelist->collected;
1552 freelist->collected = 0;
1553 }
1554
1555 static void
1556 gc_sweep_freelist_finish (scm_freelist_t *freelist)
1557 {
1558 int collected;
1559 *freelist->clustertail = freelist->cells;
1560 if (!SCM_NULLP (freelist->cells))
1561 {
1562 SCM c = freelist->cells;
1563 SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
1564 SCM_SET_CELL_WORD_1 (c, SCM_EOL);
1565 freelist->collected +=
1566 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1567 }
1568 scm_gc_cells_collected += freelist->collected;
1569
1570 /* Although freelist->min_yield is used to test freelist->collected
1571 * (which is the local GC yield for freelist), it is adjusted so
1572 * that *total* yield is freelist->min_yield_fraction of total heap
1573 * size. This means that a too low yield is compensated by more
1574 * heap on the list which is currently doing most work, which is
1575 * just what we want.
1576 */
1577 collected = SCM_MAX (freelist->collected_1, freelist->collected);
1578 freelist->grow_heap_p = (collected < freelist->min_yield);
1579 }
1580
1581 #define NEXT_DATA_CELL(ptr, span) \
1582 do { \
1583 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1584 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1585 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1586 : nxt__); \
1587 } while (0)
1588
1589 void
1590 scm_gc_sweep ()
1591 #define FUNC_NAME "scm_gc_sweep"
1592 {
1593 register SCM_CELLPTR ptr;
1594 register SCM nfreelist;
1595 register scm_freelist_t *freelist;
1596 register long m;
1597 register int span;
1598 long i;
1599 scm_sizet seg_size;
1600
1601 m = 0;
1602
1603 gc_sweep_freelist_start (&scm_master_freelist);
1604 gc_sweep_freelist_start (&scm_master_freelist2);
1605
1606 for (i = 0; i < scm_n_heap_segs; i++)
1607 {
1608 register unsigned int left_to_collect;
1609 register scm_sizet j;
1610
1611 /* Unmarked cells go onto the front of the freelist this heap
1612 segment points to. Rather than updating the real freelist
1613 pointer as we go along, we accumulate the new head in
1614 nfreelist. Then, if it turns out that the entire segment is
1615 free, we free (i.e., malloc's free) the whole segment, and
1616 simply don't assign nfreelist back into the real freelist. */
1617 freelist = scm_heap_table[i].freelist;
1618 nfreelist = freelist->cells;
1619 left_to_collect = freelist->left_to_collect;
1620 span = scm_heap_table[i].span;
1621
1622 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1623 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1624
1625 /* use only data cells in seg_size */
1626 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1627
1628 scm_gc_cells_swept += seg_size;
1629
1630 for (j = seg_size + span; j -= span; ptr += span)
1631 {
1632 SCM scmptr;
1633
1634 if (SCM_GC_IN_CARD_HEADERP (ptr))
1635 {
1636 SCM_CELLPTR nxt;
1637
1638 /* cheat here */
1639 nxt = ptr;
1640 NEXT_DATA_CELL (nxt, span);
1641 j += span;
1642
1643 ptr = nxt - span;
1644 continue;
1645 }
1646
1647 scmptr = PTR2SCM (ptr);
1648
1649 if (SCM_GCMARKP (scmptr))
1650 continue;
1651
1652 switch SCM_TYP7 (scmptr)
1653 {
1654 case scm_tcs_cons_gloc:
1655 {
1656 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1657 * struct or a gloc. See the corresponding comment in
1658 * scm_gc_mark.
1659 */
1660 scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
1661 - scm_tc3_cons_gloc);
1662 /* access as struct */
1663 scm_bits_t * vtable_data = (scm_bits_t *) word0;
1664 if (vtable_data[scm_vtable_index_vcell] == 0)
1665 {
1666 /* Structs need to be freed in a special order.
1667 * This is handled by GC C hooks in struct.c.
1668 */
1669 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1670 scm_structs_to_free = scmptr;
1671 continue;
1672 }
1673 /* fall through so that scmptr gets collected */
1674 }
1675 break;
1676 case scm_tcs_cons_imcar:
1677 case scm_tcs_cons_nimcar:
1678 case scm_tcs_closures:
1679 case scm_tc7_pws:
1680 break;
1681 case scm_tc7_wvect:
1682 m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
1683 scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
1684 break;
1685 case scm_tc7_vector:
1686 {
1687 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1688 if (length > 0)
1689 {
1690 m += length * sizeof (scm_bits_t);
1691 scm_must_free (SCM_VECTOR_BASE (scmptr));
1692 }
1693 break;
1694 }
1695 #ifdef CCLO
1696 case scm_tc7_cclo:
1697 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
1698 scm_must_free (SCM_CCLO_BASE (scmptr));
1699 break;
1700 #endif
1701 #ifdef HAVE_ARRAYS
1702 case scm_tc7_bvect:
1703 {
1704 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
1705 if (length > 0)
1706 {
1707 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1708 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1709 }
1710 }
1711 break;
1712 case scm_tc7_byvect:
1713 case scm_tc7_ivect:
1714 case scm_tc7_uvect:
1715 case scm_tc7_svect:
1716 #ifdef HAVE_LONG_LONGS
1717 case scm_tc7_llvect:
1718 #endif
1719 case scm_tc7_fvect:
1720 case scm_tc7_dvect:
1721 case scm_tc7_cvect:
1722 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
1723 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1724 break;
1725 #endif
1726 case scm_tc7_substring:
1727 break;
1728 case scm_tc7_string:
1729 m += SCM_STRING_LENGTH (scmptr) + 1;
1730 scm_must_free (SCM_STRING_CHARS (scmptr));
1731 break;
1732 case scm_tc7_symbol:
1733 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
1734 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
1735 break;
1736 case scm_tcs_subrs:
1737 /* the various "subrs" (primitives) are never freed */
1738 continue;
1739 case scm_tc7_port:
1740 if SCM_OPENP (scmptr)
1741 {
1742 int k = SCM_PTOBNUM (scmptr);
1743 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1744 if (!(k < scm_numptob))
1745 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1746 #endif
1747 /* Keep "revealed" ports alive. */
1748 if (scm_revealed_count (scmptr) > 0)
1749 continue;
1750 /* Yes, I really do mean scm_ptobs[k].free */
1751 /* rather than scm_ptobs[k].close.  .close */
1752 /* is for explicit CLOSE-PORT by user */
1753 m += (scm_ptobs[k].free) (scmptr);
1754 SCM_SETSTREAM (scmptr, 0);
1755 scm_remove_from_port_table (scmptr);
1756 scm_gc_ports_collected++;
1757 SCM_CLR_PORT_OPEN_FLAG (scmptr);
1758 }
1759 break;
1760 case scm_tc7_smob:
1761 switch SCM_TYP16 (scmptr)
1762 {
1763 case scm_tc_free_cell:
1764 case scm_tc16_real:
1765 break;
1766 #ifdef SCM_BIGDIG
1767 case scm_tc16_big:
1768 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1769 scm_must_free (SCM_BDIGITS (scmptr));
1770 break;
1771 #endif /* def SCM_BIGDIG */
1772 case scm_tc16_complex:
1773 m += sizeof (scm_complex_t);
1774 scm_must_free (SCM_COMPLEX_MEM (scmptr));
1775 break;
1776 default:
1777 {
1778 int k;
1779 k = SCM_SMOBNUM (scmptr);
1780 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1781 if (!(k < scm_numsmob))
1782 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1783 #endif
1784 if (scm_smobs[k].free)
1785 m += (scm_smobs[k].free) (scmptr);
1786 break;
1787 }
1788 }
1789 break;
1790 default:
1791 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1792 }
1793
1794 if (!--left_to_collect)
1795 {
1796 SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
1797 *freelist->clustertail = scmptr;
1798 freelist->clustertail = SCM_CDRLOC (scmptr);
1799
1800 nfreelist = SCM_EOL;
1801 freelist->collected += span * freelist->cluster_size;
1802 left_to_collect = freelist->cluster_size;
1803 }
1804 else
1805 {
1806 /* Stick the new cell on the front of nfreelist. It's
1807 critical that we mark this cell as freed; otherwise, the
1808 conservative collector might trace it as some other type
1809 of object. */
1810 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1811 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
1812 nfreelist = scmptr;
1813 }
1814 }
1815
1816 #ifdef GC_FREE_SEGMENTS
1817 if (n == seg_size)  /* note: no `n' counter is maintained by this loop, so this #ifdef'd code is stale */
1818 {
1819 register long j;
1820
1821 freelist->heap_size -= seg_size;
1822 free ((char *) scm_heap_table[i].bounds[0]);
1823 scm_heap_table[i].bounds[0] = 0;
1824 for (j = i + 1; j < scm_n_heap_segs; j++)
1825 scm_heap_table[j - 1] = scm_heap_table[j];
1826 scm_n_heap_segs -= 1;
1827 i--; /* We need to scan the segment just moved. */
1828 }
1829 else
1830 #endif /* ifdef GC_FREE_SEGMENTS */
1831 {
1832 /* Update the real freelist pointer to point to the head of
1833 the list of free cells we've built for this segment. */
1834 freelist->cells = nfreelist;
1835 freelist->left_to_collect = left_to_collect;
1836 }
1837
1838 #ifdef GUILE_DEBUG_FREELIST
1839 scm_map_free_list ();
1840 #endif
1841 }
1842
1843 gc_sweep_freelist_finish (&scm_master_freelist);
1844 gc_sweep_freelist_finish (&scm_master_freelist2);
1845
1846 /* When we move to POSIX threads, private freelists should probably
1847 be GC-protected instead. */
1848 scm_freelist = SCM_EOL;
1849 scm_freelist2 = SCM_EOL;
1850
1851 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1852 scm_gc_yield -= scm_cells_allocated;
1853 scm_mallocated -= m;
1854 scm_gc_malloc_collected = m;
1855 }
1856 #undef FUNC_NAME
1857
1858
1859 \f
1860 /* {Front end to malloc}
1861 *
1862 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1863 * scm_done_free
1864 *
1865 * These functions provide services comparable to malloc, realloc, and
1866 * free. They should be used when allocating memory that will be under
1867 * control of the garbage collector, i.e., if the memory may be freed
1868 * during garbage collection.
1869 */
1870
1871 /* scm_must_malloc
1872 * Return newly malloced storage or throw an error.
1873 *
1874 * The parameter WHAT is a string for error reporting.
1875 * If the threshold scm_mtrigger would be exceeded by this
1876 * allocation, or if the first call to malloc fails,
1877 * garbage collect -- on the presumption that some objects
1878 * using malloced storage may be collected.
1879 *
1880 * The limit scm_mtrigger may be raised by this allocation.
1881 */
1882 void *
1883 scm_must_malloc (scm_sizet size, const char *what)
1884 {
1885 void *ptr;
1886 unsigned long nm = scm_mallocated + size;
1887
1888 if (nm <= scm_mtrigger)
1889 {
1890 SCM_SYSCALL (ptr = malloc (size));
1891 if (NULL != ptr)
1892 {
1893 scm_mallocated = nm;
1894 #ifdef GUILE_DEBUG_MALLOC
1895 scm_malloc_register (ptr, what);
1896 #endif
1897 return ptr;
1898 }
1899 }
1900
1901 scm_igc (what);
1902
1903 nm = scm_mallocated + size;
1904 SCM_SYSCALL (ptr = malloc (size));
1905 if (NULL != ptr)
1906 {
1907 scm_mallocated = nm;
1908 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1909 if (nm > scm_mtrigger)
1910 scm_mtrigger = nm + nm / 2;
1911 else
1912 scm_mtrigger += scm_mtrigger / 2;
1913 }
1914 #ifdef GUILE_DEBUG_MALLOC
1915 scm_malloc_register (ptr, what);
1916 #endif
1917
1918 return ptr;
1919 }
1920
1921 scm_memory_error (what);
1922 }
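/* A typical-use sketch (not compiled in; `struct image', `free_image'
 * and the smob involved are hypothetical): memory obtained with
 * scm_must_malloc is released in the smob's free function with
 * scm_must_free, and the size is returned so that the sweep phase can
 * credit it against scm_mallocated.
 */
#if 0
static scm_sizet
free_image (SCM image_smob)
{
  struct image *im = (struct image *) SCM_SMOB_DATA (image_smob);
  scm_must_free (im);
  return sizeof (struct image);  /* reported back to the GC */
}
#endif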
1923
1924
1925 /* scm_must_realloc
1926 * is similar to scm_must_malloc.
1927 */
void *
scm_must_realloc (void *where,
                  scm_sizet old_size,
                  scm_sizet size,
                  const char *what)
{
  void *ptr;
  scm_sizet nm = scm_mallocated + size - old_size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_reregister (where, ptr, what);
#endif
          return ptr;
        }
    }

  scm_igc (what);

  nm = scm_mallocated + size - old_size;
  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
        {
          if (nm > scm_mtrigger)
            scm_mtrigger = nm + nm / 2;
          else
            scm_mtrigger += scm_mtrigger / 2;
        }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  scm_memory_error (what);
}


void
scm_must_free (void *obj)
#define FUNC_NAME "scm_must_free"
{
#ifdef GUILE_DEBUG_MALLOC
  scm_malloc_unregister (obj);
#endif
  if (obj)
    free (obj);
  else
    SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
}
#undef FUNC_NAME


/* Announce that there has been some malloc done that will be freed
 * during gc.  A typical use is for a smob that uses some malloced
 * memory but cannot get it from scm_must_malloc (for whatever
 * reason).  When a new object of this smob is created, you call
 * scm_done_malloc with the size of the object.  When your smob free
 * function is called, be sure to include this size in the return
 * value.
 *
 * If you can't actually free the memory in the smob free function,
 * for whatever reason (like reference counting), you still can (and
 * should) report the amount of memory freed when you actually free it.
 * Do it by calling scm_done_malloc with the _negated_ size.  Clever,
 * eh?  Or even better, call scm_done_free.  (See the sketch after
 * scm_done_free below.) */

void
scm_done_malloc (long size)
{
  scm_mallocated += size;

  if (scm_mallocated > scm_mtrigger)
    {
      scm_igc ("foreign mallocs");
      if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
        {
          if (scm_mallocated > scm_mtrigger)
            scm_mtrigger = scm_mallocated + scm_mallocated / 2;
          else
            scm_mtrigger += scm_mtrigger / 2;
        }
    }
}

void
scm_done_free (long size)
{
  scm_mallocated -= size;
}
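
/* An illustrative sketch of the bookkeeping described above, for a
 * hypothetical smob whose memory comes from a foreign library rather
 * than from scm_must_malloc (`image', `lib_alloc_pixels' and
 * `lib_free_pixels' are all made-up names):
 *
 *   image->pixels = lib_alloc_pixels (w * h);
 *   scm_done_malloc (w * h);            // report the foreign allocation
 *
 * and later, when the memory really goes away:
 *
 *   lib_free_pixels (image->pixels);
 *   scm_done_free (w * h);              // balance the earlier report
 *
 * Alternatively, the smob's free function may simply return w * h.
 * Either route keeps scm_mallocated (and hence the GC trigger) honest. */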


\f
/* {Heap Segments}
 *
 * Each heap segment is an array of objects of a particular size.
 * Every segment has an associated (possibly shared) freelist.
 * A table of segment records is kept that records the upper and
 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
 * into valid segments at reasonable offsets). */
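
/* Concretely, during the mark phase a word W found on the stack is a
 * candidate root roughly when it falls inside some segment's bounds
 * (a sketch of the idea only; the real check also validates alignment
 * and the cell's type):
 *
 *   SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) W)
 *     && !SCM_PTR_LE (scm_heap_table[i].bounds[1], (SCM_CELLPTR) W)
 */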

/* scm_expmem
 * is set to one by make_initial_segment once the first heap segment
 * has been allocated at its requested size.  If scm_expmem is one,
 * subsequent segment allocations will allocate segments of size
 * SCM_EXPHEAP (scm_heap_size).
 */
int scm_expmem = 0;

scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

scm_heap_seg_data_t * scm_heap_table = 0;
static unsigned int heap_segment_table_size = 0;
int scm_n_heap_segs = 0;

/* init_heap_seg
 * initializes a new heap segment and returns the number of bytes it
 * contains (or 0 if the segment origin is NULL).
 *
 * The segment origin and segment size in bytes are input parameters.
 * The freelist is both input and output.
 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record and that the markbit space was
 * reserved for all the cards in this segment.
 */

#define INIT_CARD(card, span) \
  do { \
    SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
    if ((span) == 2) \
      SCM_GC_SET_CARD_DOUBLECELL (card); \
  } while (0)

static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Align the begin ptr up.
   */
  ptr = SCM_GC_CARD_UP (seg_org);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = SCM_GC_CARD_DOWN ((char *) seg_org + size);

  /* Find the right place and insert the segment record.
   */
  for (new_seg_index = 0;
       ((new_seg_index < scm_n_heap_segs)
        && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /* Number of new cells between the aligned segment bounds. */
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;

    NEXT_DATA_CELL (ptr, span);
    while (ptr < seg_end)
      {
        scm_cell *nxt = ptr;
        scm_cell *prv = NULL;
        scm_cell *last_card = NULL;
        int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
        NEXT_DATA_CELL (nxt, span);

        /* Allocate cluster spine
         */
        *clusterp = PTR2SCM (ptr);
        SCM_SETCAR (*clusterp, PTR2SCM (nxt));
        clusterp = SCM_CDRLOC (*clusterp);
        ptr = nxt;

        while (n_data_cells--)
          {
            scm_cell *card = SCM_GC_CELL_CARD (ptr);
            SCM scmptr = PTR2SCM (ptr);
            nxt = ptr;
            NEXT_DATA_CELL (nxt, span);
            prv = ptr;

            if (card != last_card)
              {
                INIT_CARD (card, span);
                last_card = card;
              }

            SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
            SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));

            ptr = nxt;
          }

        SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
      }

    /* sanity check */
    {
      scm_cell *ref = seg_end;
      NEXT_DATA_CELL (ref, span);
      if (ref != ptr)
        /* [cmm] looks like the segment size doesn't divide cleanly by
           cluster size.  bad cmm! */
        abort ();
    }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}

static scm_sizet
round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
{
  scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);

  return
    (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
    + ALIGNMENT_SLACK (freelist);
}
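
/* For example, with CLUSTER_SIZE_IN_BYTES (freelist) == 4096 (a made-up
 * figure), a request of 10000 bytes rounds up to the least multiple of
 * the cluster size, (10000 + 4095) / 4096 * 4096 == 12288, plus
 * ALIGNMENT_SLACK so the segment can be card-aligned within the
 * allocation. */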

static void
alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
#define FUNC_NAME "alloc_some_heap"
{
  SCM_CELLPTR ptr;
  long len;

  if (scm_gc_heap_lock)
    {
      /* Critical code sections (such as the garbage collector) aren't
       * supposed to add heap segments.
       */
      fprintf (stderr, "alloc_some_heap: Cannot extend locked heap.\n");
      abort ();
    }

  if (scm_n_heap_segs == heap_segment_table_size)
    {
      /* We have to expand the heap segment table to have room for the new
       * segment.  Do not yet increment scm_n_heap_segs -- that is done by
       * init_heap_seg only if the allocation of the segment itself succeeds.
       */
      unsigned int new_table_size = scm_n_heap_segs + 1;
      size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
      scm_heap_seg_data_t *new_heap_table;

      SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
                                     realloc ((char *) scm_heap_table, size)));
      if (!new_heap_table)
        {
          if (error_policy == abort_on_error)
            {
              fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
              abort ();
            }
          else
            {
              return;
            }
        }
      else
        {
          scm_heap_table = new_heap_table;
          heap_segment_table_size = new_table_size;
        }
    }

  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  {
    /* Assure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f)
     */
    int f = freelist->min_yield_fraction;
    long h = SCM_HEAP_SIZE;
    long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
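    /* Here f is a percentage (min_yield_fraction) and scm_gc_yield counts
       cells, so the inequality above scales to
         dh > (f * h - 100 * y) / (100 - f);
       dividing by 99 - f instead of 100 - f (presumably) errs on the side
       of a slightly larger segment. */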
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%ld < %ld)", len, min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    scm_sizet smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition. */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
           && (len >= smallest))
      {
        scm_sizet rounded_len = round_to_cluster_size (freelist, len);
        SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
        if (ptr)
          {
            init_heap_seg (ptr, rounded_len, freelist);
            return;
          }
        len /= 2;
      }
  }

  if (error_policy == abort_on_error)
    {
      fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
      abort ();
    }
}
#undef FUNC_NAME


SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
            "Flushes the glocs for @var{name}, or all glocs if @var{name}\n"
            "is @code{#t}.")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  /* The docstring allows NAME to be #t, so only insist on a symbol
     otherwise. */
  if (!SCM_EQ_P (name, SCM_BOOL_T))
    SCM_VALIDATE_SYMBOL (1, name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
        {
          SCM cell = PTR2SCM (p);
          if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
            {
              /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
               * struct cell.  See the corresponding comment in scm_gc_mark.
               */
              scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
              SCM gloc_car = SCM_PACK (word0); /* access as gloc */
              SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
              if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
                  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
                {
                  SCM_SET_CELL_OBJECT_0 (cell, name);
                }
            }
          ++p;
        }
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME


\f
/* {GC Protection Helper Functions}
 */


/*
 * If within a function you need to protect one or more scheme objects from
 * garbage collection, pass them as parameters to one of the
 * scm_remember_upto_here* functions below.  These functions don't do
 * anything, but since the compiler does not know that they are actually
 * no-ops, it will generate code that calls these functions with the given
 * parameters.  Therefore, you can be sure that the compiler will keep those
 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the scheme object's existence.
 *
 * Example:  We want to make sure that the string object str does not get
 * garbage collected during the execution of 'some_function', because
 * otherwise the characters belonging to str would be freed and
 * 'some_function' might access freed memory.  To make sure that the compiler
 * keeps str alive on the stack or in a register such that it is visible to
 * the conservative gc, we add the call to scm_remember_upto_here_1 _after_
 * the call to 'some_function'.  Note that this would not be necessary if str
 * were used anyway after the call to 'some_function'.
 *
 *   char *chars = SCM_STRING_CHARS (str);
 *   some_function (chars);
 *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
 */

void
scm_remember_upto_here_1 (SCM obj)
{
  /* Empty.  Protects a single object from garbage collection. */
}

void
scm_remember_upto_here_2 (SCM obj1, SCM obj2)
{
  /* Empty.  Protects two objects from garbage collection. */
}

void
scm_remember_upto_here (SCM obj, ...)
{
  /* Empty.  Protects any number of objects from garbage collection. */
}


#if (SCM_DEBUG_DEPRECATED == 0)

void
scm_remember (SCM *ptr)
{
  /* empty */
}

#endif  /* SCM_DEBUG_DEPRECATED == 0 */

/*
  These crazy functions prevent garbage collection of their arguments
  after the first one:  because the arguments are used in the last line
  of the caller's code block, they remain live throughout that block.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}

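
/* Typical use (a sketch):  return RESULT from a function while forcing
 * STR to stay visible to the conservative GC through the return:
 *
 *   return scm_return_first (result, str);
 */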


SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}


/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if
   all other references are dropped, until the object is unprotected by
   calling scm_unprotect_object (OBJ).  Calls to scm_protect/unprotect_object
   nest, i.e. it is possible to protect the same object several times, but it
   is necessary to unprotect the object the same number of times to actually
   get the object unprotected.  It is an error to unprotect an object more
   often than it has been protected before.  The function scm_protect_object
   returns OBJ.  (See the sketch below.)
*/

/* Implementation note:  For every object X, there is a counter which
   scm_protect_object (X) increments and scm_unprotect_object (X) decrements.
*/
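
/* For example (a sketch of the nesting rule):  after
 *
 *   scm_protect_object (obj);
 *   scm_protect_object (obj);     // counter is now 2
 *   scm_unprotect_object (obj);   // counter is 1; still protected
 *   scm_unprotect_object (obj);   // counter drops to 0
 *
 * obj is once more subject to ordinary garbage collection. */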

SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}


/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_FALSEP (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}

int terminating;

/* Called on process termination. */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Do not know how to set up a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}

\f
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);

  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                      rounded_size,
                      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                          rounded_size,
                          freelist))
        return 1;
    }
  else
    scm_expmem = 1;

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
                           / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}

\f
static void
init_freelist (scm_freelist_t *freelist,
               int span,
               int cluster_size,
               int min_yield)
{
  freelist->clusters = SCM_EOL;
  freelist->cluster_size = cluster_size + 1;
  freelist->left_to_collect = 0;
  freelist->clusters_allocated = 0;
  freelist->min_yield = 0;
  freelist->min_yield_fraction = min_yield;
  freelist->span = span;
  freelist->collected = 0;
  freelist->collected_1 = 0;
  freelist->heap_size = 0;
}


/* Get an integer from an environment variable. */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *end, *val = getenv (var);
  long res;
  if (!val)
    return def;
  res = strtol (val, &end, 10);
  if (end == val)
    return def;
  return res;
}
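
/* For example, GUILE_MAX_SEGMENT_SIZE=10000000 in the environment makes
 * scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", def) return 10000000,
 * while an unset or non-numeric value falls back to DEF. */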


int
scm_init_storage ()
{
  scm_sizet gc_trigger_1;
  scm_sizet gc_trigger_2;
  scm_sizet init_heap_size_1;
  scm_sizet init_heap_size_2;
  scm_sizet j;

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
  init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
  gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
  init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
  scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  scm_heap_table = ((scm_heap_seg_data_t *)
                    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
  init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change.  Do not remove scm_heap_org. */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports. */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

#define DEFAULT_SYMHASH_SIZE 277
  scm_symhash = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
  scm_symhash_vars = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);

  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);

  return 0;
}


\f

SCM scm_after_gc_hook;

static SCM gc_async;

/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void *hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}
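
/* Other C code can watch collections the same way (a sketch;
 * `my_gc_note' is a made-up function):
 *
 *   static void *
 *   my_gc_note (void *hook_data, void *func_data, void *data)
 *   {
 *     fprintf (stderr, "gc finished\n");
 *     return NULL;
 *   }
 *
 *   scm_c_hook_add (&scm_after_gc_c_hook, my_gc_note, NULL, 0);
 */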


void
scm_init_gc ()
{
  SCM after_gc_thunk;

  /* Dirk:FIXME:: scm_create_hook is strange. */
  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

  after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
  gc_async = scm_system_async (after_gc_thunk);  /* protected via scm_asyncs */

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#ifndef SCM_MAGIC_SNARFER
#include "libguile/gc.x"
#endif
}

#endif  /* MARK_DEPENDENCIES */

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/