* Re-introduce a cheap sanity test for non-debug mode.
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 /* SECTION: This code is compiled once.
48 */
49
50 #ifndef MARK_DEPENDENCIES
51
52 \f
53 #include <stdio.h>
54 #include <errno.h>
55 #include <string.h>
56
57 #include "libguile/_scm.h"
58 #include "libguile/eval.h"
59 #include "libguile/stime.h"
60 #include "libguile/stackchk.h"
61 #include "libguile/struct.h"
62 #include "libguile/smob.h"
63 #include "libguile/unif.h"
64 #include "libguile/async.h"
65 #include "libguile/ports.h"
66 #include "libguile/root.h"
67 #include "libguile/strings.h"
68 #include "libguile/vectors.h"
69 #include "libguile/weaks.h"
70 #include "libguile/hashtab.h"
71 #include "libguile/tags.h"
72
73 #include "libguile/validate.h"
74 #include "libguile/gc.h"
75
76 #ifdef GUILE_DEBUG_MALLOC
77 #include "libguile/debug-malloc.h"
78 #endif
79
80 #ifdef HAVE_MALLOC_H
81 #include <malloc.h>
82 #endif
83
84 #ifdef HAVE_UNISTD_H
85 #include <unistd.h>
86 #endif
87
88 #ifdef __STDC__
89 #include <stdarg.h>
90 #define var_start(x, y) va_start(x, y)
91 #else
92 #include <varargs.h>
93 #define var_start(x, y) va_start(x)
94 #endif
95
96 \f
97
98 unsigned int scm_gc_running_p = 0;
99
100 \f
101
102 #if (SCM_DEBUG_CELL_ACCESSES == 1)
103
104 scm_bits_t scm_tc16_allocated;
105
106 /* Set this to a non-zero value if every cell that is accessed shall be checked:
107 */
108 unsigned int scm_debug_cell_accesses_p = 1;
109
110
111 /* Assert that the given object is a valid reference to a valid cell. This
112 * test involves determining whether the object is a cell pointer, whether
113 * this pointer actually points into a heap segment, and whether the cell
114 * pointed to is not a free cell.
115 */
116 void
117 scm_assert_cell_valid (SCM cell)
118 {
119 static unsigned int already_running = 0;
120
121 if (scm_debug_cell_accesses_p && !already_running)
122 {
123 already_running = 1; /* set to avoid recursion */
124
125 if (!scm_cellp (cell))
126 {
127 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
128 abort ();
129 }
130 else if (!scm_gc_running_p)
131 {
132 /* Dirk::FIXME:: During garbage collection there occur references to
133 free cells. This is all right during conservative marking, but
134 should not happen otherwise (I think). The case of free cells
135 accessed during conservative marking is handled in function
136 scm_mark_locations. However, accesses to free cells still occur
137 during gc. I don't understand why this happens. If it is
138 a bug and gets fixed, the following test should also work while
139 gc is running.
140 */
141 if (SCM_FREE_CELL_P (cell))
142 {
143 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
144 abort ();
145 }
146 }
147 already_running = 0; /* re-enable */
148 }
149 }
150
151
152 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
153 (SCM flag),
154 "If FLAG is #f, cell access checking is disabled.\n"
155 "If FLAG is #t, cell access checking is enabled.\n"
156 "This procedure only exists because the compile-time flag\n"
157 "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
158 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
159 {
160 if (SCM_FALSEP (flag)) {
161 scm_debug_cell_accesses_p = 0;
162 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
163 scm_debug_cell_accesses_p = 1;
164 } else {
165 SCM_WRONG_TYPE_ARG (1, flag);
166 }
167 return SCM_UNSPECIFIED;
168 }
169 #undef FUNC_NAME
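
/* A minimal usage sketch (illustrative, not part of the real API): with
 * SCM_DEBUG_CELL_ACCESSES enabled, code that is about to dereference a
 * cell can validate it first.  The hypothetical helper below just wraps
 * an access in the assertion defined above.
 */
#if 0
static scm_bits_t
checked_cell_word_0 (SCM cell)
{
  scm_assert_cell_valid (cell);  /* aborts on rogue or free cells */
  return SCM_CELL_WORD_0 (cell);
}
#endif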
170
171 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
172
173 \f
174
175 /* {heap tuning parameters}
176 *
177 * These are parameters for controlling memory allocation. The heap
178 * is the area out of which scm_cons, and object headers are allocated.
179 *
180 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
181 * 64 bit machine. The units of the _SIZE parameters are bytes.
182 * Cons pairs and object headers occupy one heap cell.
183 *
184 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
185 * allocated initially the heap will grow by half its current size
186 * each subsequent time more heap is needed.
187 *
188 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
189 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
190 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
191 * is in scm_init_storage() and alloc_some_heap() in sys.c
192 *
193 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
194 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
195 *
196 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
197 * is needed.
198 *
199 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
200 * trigger a GC.
201 *
202 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
203 * reclaimed by a GC triggered by must_malloc. If less than this is
204 * reclaimed, the trigger threshold is raised. [I don't know what a
205 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
206 * work around an oscillation that caused almost constant GC.]
207 */
208
209 /*
210 * Heap size 45000 and 40% min yield give quick startup and no extra
211 * heap allocation. Higher min yield values may lead to large heaps,
212 * especially if the code's maximum consumption varies between
213 * different freelists.
214 */
215
216 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
217 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
218 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
219 int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
220 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
221 int scm_default_min_yield_1 = 40;
222
223 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
224 int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
225 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
226 /* The following value may seem large, but note that if we get to GC at
227 * all, this means that we have a numerically intensive application.
228 */
229 int scm_default_min_yield_2 = 40;
230
231 int scm_default_max_segment_size = 2097000L;  /* a little less (adm) than 2 Mb */
232
233 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
234 #ifdef _QC
235 # define SCM_HEAP_SEG_SIZE 32768L
236 #else
237 # ifdef sequent
238 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
239 # else
240 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
241 # endif
242 #endif
243 /* Make heap grow with factor 1.5 */
244 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
245 #define SCM_INIT_MALLOC_LIMIT 100000
246 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
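
/* Worked example (illustrative numbers only): with a current total heap
 * of 1000000 bytes, SCM_EXPHEAP yields 500000, so a successful expansion
 * grows the heap by a factor of 1.5 to 1500000 bytes.  Likewise, the
 * initial malloc trigger is SCM_INIT_MALLOC_LIMIT = 100000 bytes, and a
 * GC must bring malloc usage at least SCM_MTRIGGER_HYSTERESIS = 10000
 * bytes below the trigger to keep the trigger from being raised (see
 * scm_must_malloc below).
 */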
247
248 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
249 aligned inner bounds for allocated storage */
250
251 #ifdef PROT386
252 /*in 386 protected mode we must only adjust the offset */
253 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
254 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
255 #else
256 # ifdef _UNICOS
257 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
258 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
259 # else
260 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
261 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
262 # endif /* UNICOS */
263 #endif /* PROT386 */
264
265 #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
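
/* Illustration (assuming 8-byte cells and span 1): CELL_UP rounds an
 * address up to the next cell boundary and CELL_DN rounds it down, so
 * for p == 0x1003, CELL_UP (p, 1) == 0x1008 and CELL_DN (p, 1) ==
 * 0x1000.  With span 2 the boundary becomes 16 bytes, which is exactly
 * the alignment that DOUBLECELL_ALIGNED_P tests for.
 */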
266
267 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
268 #define CLUSTER_SIZE_IN_BYTES(freelist) \
269 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
270
271 \f
272 /* scm_freelists
273 */
274
275 typedef struct scm_freelist_t {
276 /* collected cells */
277 SCM cells;
278 /* number of cells left to collect before cluster is full */
279 unsigned int left_to_collect;
280 /* number of clusters which have been allocated */
281 unsigned int clusters_allocated;
282 /* a list of freelists, each of size cluster_size,
283 * except the last one which may be shorter
284 */
285 SCM clusters;
286 SCM *clustertail;
287 /* this is the number of objects in each cluster, including the spine cell */
288 int cluster_size;
289 /* indicates that we should grow the heap instead of GCing
290 */
291 int grow_heap_p;
292 /* minimum yield on this list in order not to grow the heap
293 */
294 long min_yield;
295 /* defines min_yield as percent of total heap size
296 */
297 int min_yield_fraction;
298 /* number of cells per object on this list */
299 int span;
300 /* number of collected cells during last GC */
301 long collected;
302 /* number of collected cells during penultimate GC */
303 long collected_1;
304 /* total number of cells in heap segments
305 * belonging to this list.
306 */
307 long heap_size;
308 } scm_freelist_t;
309
310 SCM scm_freelist = SCM_EOL;
311 scm_freelist_t scm_master_freelist = {
312 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
313 };
314 SCM scm_freelist2 = SCM_EOL;
315 scm_freelist_t scm_master_freelist2 = {
316 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
317 };
318
319 /* scm_mtrigger
320 * is the number of bytes of must_malloc allocation needed to trigger gc.
321 */
322 unsigned long scm_mtrigger;
323
324 /* scm_gc_heap_lock
325 * If set, don't expand the heap. Set only during gc, during which no allocation
326 * is supposed to take place anyway.
327 */
328 int scm_gc_heap_lock = 0;
329
330 /* GC Blocking
331 * Don't pause for collection if this is set -- just
332 * expand the heap.
333 */
334 int scm_block_gc = 1;
335
336 /* During collection, this accumulates objects holding
337 * weak references.
338 */
339 SCM scm_weak_vectors;
340
341 /* During collection, this accumulates structures which are to be freed.
342 */
343 SCM scm_structs_to_free;
344
345 /* GC Statistics Keeping
346 */
347 unsigned long scm_cells_allocated = 0;
348 long scm_mallocated = 0;
349 unsigned long scm_gc_cells_collected;
350 unsigned long scm_gc_yield;
351 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
352 unsigned long scm_gc_malloc_collected;
353 unsigned long scm_gc_ports_collected;
354 unsigned long scm_gc_time_taken = 0;
355 static unsigned long t_before_gc;
356 static unsigned long t_before_sweep;
357 unsigned long scm_gc_mark_time_taken = 0;
358 unsigned long scm_gc_sweep_time_taken = 0;
359 unsigned long scm_gc_times = 0;
360 unsigned long scm_gc_cells_swept = 0;
361 double scm_gc_cells_marked_acc = 0.;
362 double scm_gc_cells_swept_acc = 0.;
363
364 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
365 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
366 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
367 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
368 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
369 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
370 SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
371 SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
372 SCM_SYMBOL (sym_times, "gc-times");
373 SCM_SYMBOL (sym_cells_marked, "cells-marked");
374 SCM_SYMBOL (sym_cells_swept, "cells-swept");
375
376 typedef struct scm_heap_seg_data_t
377 {
378 /* lower and upper bounds of the segment */
379 SCM_CELLPTR bounds[2];
380
381 /* address of the head-of-freelist pointer for this segment's cells.
382 All segments usually point to the same one, scm_freelist. */
383 scm_freelist_t *freelist;
384
385 /* number of cells per object in this segment */
386 int span;
387 } scm_heap_seg_data_t;
388
389
390
391 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
392
393 typedef enum { return_on_error, abort_on_error } policy_on_error;
394 static void alloc_some_heap (scm_freelist_t *, policy_on_error);
395
396
397 #define SCM_HEAP_SIZE \
398 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
399 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
400
401 #define BVEC_GROW_SIZE 256
402 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
403 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
404
405 /* mark space allocation */
406
407 typedef struct scm_mark_space_t
408 {
409 scm_c_bvec_limb_t *bvec_space;
410 struct scm_mark_space_t *next;
411 } scm_mark_space_t;
412
413 static scm_mark_space_t *current_mark_space;
414 static scm_mark_space_t **mark_space_ptr;
415 static int current_mark_space_offset;
416 static scm_mark_space_t *mark_space_head;
417
418 static scm_c_bvec_limb_t *
419 get_bvec ()
420 #define FUNC_NAME "get_bvec"
421 {
422 scm_c_bvec_limb_t *res;
423
424 if (!current_mark_space)
425 {
426 SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
427 if (!current_mark_space)
428 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
429
430 current_mark_space->bvec_space = NULL;
431 current_mark_space->next = NULL;
432
433 *mark_space_ptr = current_mark_space;
434 mark_space_ptr = &(current_mark_space->next);
435
436 return get_bvec ();
437 }
438
439 if (!(current_mark_space->bvec_space))
440 {
441 SCM_SYSCALL (current_mark_space->bvec_space =
442 (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
443 if (!(current_mark_space->bvec_space))
444 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
445
446 current_mark_space_offset = 0;
447
448 return get_bvec ();
449 }
450
451 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
452 {
453 current_mark_space = NULL;
454
455 return get_bvec ();
456 }
457
458 res = current_mark_space->bvec_space + current_mark_space_offset;
459 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
460
461 return res;
462 }
463 #undef FUNC_NAME
464
465
466 static void
467 clear_mark_space ()
468 {
469 scm_mark_space_t *ms;
470
471 for (ms = mark_space_head; ms; ms = ms->next)
472 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
473 }
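
/* Usage sketch: each heap card receives its mark bitvector from
 * get_bvec () when the card is initialized (see INIT_CARD near the end
 * of this file, which does SCM_GC_SET_CARD_BVEC (card, get_bvec ())).
 * get_bvec () hands out SCM_GC_CARD_BVEC_SIZE_IN_LIMBS limbs at a time
 * from blocks of BVEC_GROW_SIZE_IN_BYTES, and clear_mark_space ()
 * zeroes all of those blocks at the start of every collection.
 */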
474
475
476 \f
477 /* Debugging functions. */
478
479 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
480
481 /* Return the number of the heap segment containing CELL. */
482 static int
483 which_seg (SCM cell)
484 {
485 int i;
486
487 for (i = 0; i < scm_n_heap_segs; i++)
488 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
489 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
490 return i;
491 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
492 SCM_UNPACK (cell));
493 abort ();
494 }
495
496
497 static void
498 map_free_list (scm_freelist_t *master, SCM freelist)
499 {
500 int last_seg = -1, count = 0;
501 SCM f;
502
503 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
504 {
505 int this_seg = which_seg (f);
506
507 if (this_seg != last_seg)
508 {
509 if (last_seg != -1)
510 fprintf (stderr, " %5d %d-cells in segment %d\n",
511 count, master->span, last_seg);
512 last_seg = this_seg;
513 count = 0;
514 }
515 count++;
516 }
517 if (last_seg != -1)
518 fprintf (stderr, " %5d %d-cells in segment %d\n",
519 count, master->span, last_seg);
520 }
521
522 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
523 (),
524 "Print debugging information about the free-list.\n"
525 "@code{map-free-list} is only included in\n"
526 "@code{--enable-guile-debug} builds of Guile.")
527 #define FUNC_NAME s_scm_map_free_list
528 {
529 int i;
530 fprintf (stderr, "%d segments total (%d:%d",
531 scm_n_heap_segs,
532 scm_heap_table[0].span,
533 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
534 for (i = 1; i < scm_n_heap_segs; i++)
535 fprintf (stderr, ", %d:%d",
536 scm_heap_table[i].span,
537 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
538 fprintf (stderr, ")\n");
539 map_free_list (&scm_master_freelist, scm_freelist);
540 map_free_list (&scm_master_freelist2, scm_freelist2);
541 fflush (stderr);
542
543 return SCM_UNSPECIFIED;
544 }
545 #undef FUNC_NAME
546
547 static int last_cluster;
548 static int last_size;
549
550 static int
551 free_list_length (char *title, int i, SCM freelist)
552 {
553 SCM ls;
554 int n = 0;
555 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
556 if (SCM_FREE_CELL_P (ls))
557 ++n;
558 else
559 {
560 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
561 abort ();
562 }
563 if (n != last_size)
564 {
565 if (i > 0)
566 {
567 if (last_cluster == i - 1)
568 fprintf (stderr, "\t%d\n", last_size);
569 else
570 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
571 }
572 if (i >= 0)
573 fprintf (stderr, "%s %d", title, i);
574 else
575 fprintf (stderr, "%s\t%d\n", title, n);
576 last_cluster = i;
577 last_size = n;
578 }
579 return n;
580 }
581
582 static void
583 free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
584 {
585 SCM clusters;
586 int i = 0, len, n = 0;
587 fprintf (stderr, "%s\n\n", title);
588 n += free_list_length ("free list", -1, freelist);
589 for (clusters = master->clusters;
590 SCM_NNULLP (clusters);
591 clusters = SCM_CDR (clusters))
592 {
593 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
594 n += len;
595 }
596 if (last_cluster == i - 1)
597 fprintf (stderr, "\t%d\n", last_size);
598 else
599 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
600 fprintf (stderr, "\ntotal %d objects\n\n", n);
601 }
602
603 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
604 (),
605 "Print debugging information about the free-list.\n"
606 "@code{free-list-length} is only included in\n"
607 "@code{--enable-guile-debug} builds of Guile.")
608 #define FUNC_NAME s_scm_free_list_length
609 {
610 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
611 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
612 return SCM_UNSPECIFIED;
613 }
614 #undef FUNC_NAME
615
616 #endif
617
618 #ifdef GUILE_DEBUG_FREELIST
619
620 /* Non-zero if freelist debugging is in effect. Set this via
621 `gc-set-debug-check-freelist!'. */
622 static int scm_debug_check_freelist = 0;
623
624 /* Number of calls to SCM_NEWCELL since startup. */
625 static unsigned long scm_newcell_count;
626 static unsigned long scm_newcell2_count;
627
628 /* Search freelist for anything that isn't marked as a free cell.
629 Abort if we find something. */
630 static void
631 scm_check_freelist (SCM freelist)
632 {
633 SCM f;
634 int i = 0;
635
636 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
637 if (!SCM_FREE_CELL_P (f))
638 {
639 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
640 scm_newcell_count, i);
641 abort ();
642 }
643 }
644
645 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
646 (SCM flag),
647 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
648 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
649 "compile-time flag was selected.\n")
650 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
651 {
652 /* [cmm] I did a double-take when I read this code the first time.
653 well, FWIW. */
654 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
655 return SCM_UNSPECIFIED;
656 }
657 #undef FUNC_NAME
658
659
660 SCM
661 scm_debug_newcell (void)
662 {
663 SCM new;
664
665 scm_newcell_count++;
666 if (scm_debug_check_freelist)
667 {
668 scm_check_freelist (scm_freelist);
669 scm_gc();
670 }
671
672 /* The rest of this is supposed to be identical to the SCM_NEWCELL
673 macro. */
674 if (SCM_NULLP (scm_freelist))
675 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
676 else
677 {
678 new = scm_freelist;
679 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
680 }
681
682 return new;
683 }
684
685 SCM
686 scm_debug_newcell2 (void)
687 {
688 SCM new;
689
690 scm_newcell2_count++;
691 if (scm_debug_check_freelist)
692 {
693 scm_check_freelist (scm_freelist2);
694 scm_gc ();
695 }
696
697 /* The rest of this is supposed to be identical to the SCM_NEWCELL
698 macro. */
699 if (SCM_NULLP (scm_freelist2))
700 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
701 else
702 {
703 new = scm_freelist2;
704 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
705 }
706
707 return new;
708 }
709
710 #endif /* GUILE_DEBUG_FREELIST */
711
712 \f
713
714 static unsigned long
715 master_cells_allocated (scm_freelist_t *master)
716 {
717 /* the '- 1' below is to ignore the cluster spine cells. */
718 int objects = master->clusters_allocated * (master->cluster_size - 1);
719 if (SCM_NULLP (master->clusters))
720 objects -= master->left_to_collect;
721 return master->span * objects;
722 }
723
724 static unsigned long
725 freelist_length (SCM freelist)
726 {
727 int n;
728 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
729 ++n;
730 return n;
731 }
732
733 static unsigned long
734 compute_cells_allocated ()
735 {
736 return (scm_cells_allocated
737 + master_cells_allocated (&scm_master_freelist)
738 + master_cells_allocated (&scm_master_freelist2)
739 - scm_master_freelist.span * freelist_length (scm_freelist)
740 - scm_master_freelist2.span * freelist_length (scm_freelist2));
741 }
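
/* Worked example (illustrative numbers): if scm_cells_allocated is
 * 10000, the two masters account for 3000 and 800 cells, and the
 * private freelists still hold 100 span-1 objects and 40 span-2
 * objects, then 10000 + 3000 + 800 - 1 * 100 - 2 * 40 = 13620 cells
 * are currently in use.
 */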
742
743 /* {Scheme Interface to GC}
744 */
745
746 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
747 (),
748 "Returns an association list of statistics about Guile's current use of storage. ")
749 #define FUNC_NAME s_scm_gc_stats
750 {
751 int i;
752 int n;
753 SCM heap_segs;
754 long int local_scm_mtrigger;
755 long int local_scm_mallocated;
756 long int local_scm_heap_size;
757 long int local_scm_cells_allocated;
758 long int local_scm_gc_time_taken;
759 long int local_scm_gc_times;
760 long int local_scm_gc_mark_time_taken;
761 long int local_scm_gc_sweep_time_taken;
762 double local_scm_gc_cells_swept;
763 double local_scm_gc_cells_marked;
764 SCM answer;
765
766 SCM_DEFER_INTS;
767
768 ++scm_block_gc;
769
770 retry:
771 heap_segs = SCM_EOL;
772 n = scm_n_heap_segs;
773 for (i = scm_n_heap_segs; i--; )
774 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
775 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
776 heap_segs);
777 if (scm_n_heap_segs != n)
778 goto retry;
779
780 --scm_block_gc;
781
782 /* Below, we cons to produce the resulting list. We want a snapshot of
783 * the heap situation before consing.
784 */
785 local_scm_mtrigger = scm_mtrigger;
786 local_scm_mallocated = scm_mallocated;
787 local_scm_heap_size = SCM_HEAP_SIZE;
788 local_scm_cells_allocated = compute_cells_allocated ();
789 local_scm_gc_time_taken = scm_gc_time_taken;
790 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
791 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
792 local_scm_gc_times = scm_gc_times;
793 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
794 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
795
796 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
797 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
798 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
799 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
800 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
801 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
802 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
803 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
804 scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
805 scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
806 scm_cons (sym_heap_segments, heap_segs),
807 SCM_UNDEFINED);
808 SCM_ALLOW_INTS;
809 return answer;
810 }
811 #undef FUNC_NAME
812
813
814 static void
815 gc_start_stats (const char *what)
816 {
817 t_before_gc = scm_c_get_internal_run_time ();
818 scm_gc_cells_swept = 0;
819 scm_gc_cells_collected = 0;
820 scm_gc_yield_1 = scm_gc_yield;
821 scm_gc_yield = (scm_cells_allocated
822 + master_cells_allocated (&scm_master_freelist)
823 + master_cells_allocated (&scm_master_freelist2));
824 scm_gc_malloc_collected = 0;
825 scm_gc_ports_collected = 0;
826 }
827
828
829 static void
830 gc_end_stats ()
831 {
832 unsigned long t = scm_c_get_internal_run_time ();
833 scm_gc_time_taken += (t - t_before_gc);
834 scm_gc_sweep_time_taken += (t - t_before_sweep);
835 ++scm_gc_times;
836
837 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
838 scm_gc_cells_swept_acc += scm_gc_cells_swept;
839 }
840
841
842 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
843 (SCM obj),
844 "Return an integer that, for the lifetime of @var{obj}, is uniquely\n"
845 "returned by this function for @var{obj}.")
846 #define FUNC_NAME s_scm_object_address
847 {
848 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
849 }
850 #undef FUNC_NAME
851
852
853 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
854 (),
855 "Scans all SCM objects and reclaims for further use those that are\n"
856 "no longer accessible.")
857 #define FUNC_NAME s_scm_gc
858 {
859 SCM_DEFER_INTS;
860 scm_igc ("call");
861 SCM_ALLOW_INTS;
862 return SCM_UNSPECIFIED;
863 }
864 #undef FUNC_NAME
865
866
867 \f
868 /* {C Interface For When GC is Triggered}
869 */
870
871 static void
872 adjust_min_yield (scm_freelist_t *freelist)
873 {
874 /* min yield is adjusted upwards so that the next predicted total yield
875 * (allocated cells actually freed by GC) becomes
876 * `min_yield_fraction' of total heap size. Note, however, that
877 * the absolute value of min_yield will correspond to `collected'
878 * on one master (the one which currently is triggering GC).
879 *
880 * The reason why we look at total yield instead of cells collected
881 * on one list is that we want to take other freelists into account.
882 * On this freelist, we know that (local) yield = collected cells,
883 * but that's probably not the case on the other lists.
884 *
885 * (We might consider computing a better prediction, for example
886 * by computing an average over multiple GCs.)
887 */
888 if (freelist->min_yield_fraction)
889 {
890 /* Pick largest of last two yields. */
891 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
892 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
893 #ifdef DEBUGINFO
894 fprintf (stderr, " after GC = %d, delta = %d\n",
895 scm_cells_allocated,
896 delta);
897 #endif
898 if (delta > 0)
899 freelist->min_yield += delta;
900 }
901 }
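
/* Worked example (illustrative numbers): with a total heap of 45000
 * cells and min_yield_fraction == 40, the target yield is
 * 45000 * 40 / 100 = 18000 cells.  If the larger of the last two total
 * yields was only 15000, delta is 3000 and min_yield rises by that
 * amount, making it more likely that the next sweep schedules heap
 * growth (see gc_sweep_freelist_finish) instead of another collection.
 */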
902
903
904 /* When we get POSIX threads support, the master will be global and
905 * common while the freelist will be individual for each thread.
906 */
907
908 SCM
909 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
910 {
911 SCM cell;
912 ++scm_ints_disabled;
913 do
914 {
915 if (SCM_NULLP (master->clusters))
916 {
917 if (master->grow_heap_p || scm_block_gc)
918 {
919 /* In order to reduce gc frequency, try to allocate a new heap
920 * segment first, even if gc might find some free cells. If we
921 * can't obtain a new heap segment, we will try gc later.
922 */
923 master->grow_heap_p = 0;
924 alloc_some_heap (master, return_on_error);
925 }
926 if (SCM_NULLP (master->clusters))
927 {
928 /* The heap was not grown, either because it wasn't scheduled to
929 * grow, or because there was not enough memory available. In
930 * both cases we have to try gc to get some free cells.
931 */
932 #ifdef DEBUGINFO
933 fprintf (stderr, "allocated = %d, ",
934 scm_cells_allocated
935 + master_cells_allocated (&scm_master_freelist)
936 + master_cells_allocated (&scm_master_freelist2));
937 #endif
938 scm_igc ("cells");
939 adjust_min_yield (master);
940 if (SCM_NULLP (master->clusters))
941 {
942 /* gc could not free any cells. Now, we _must_ allocate a
943 * new heap segment, because there is no other possibility
944 * to provide a new cell for the caller.
945 */
946 alloc_some_heap (master, abort_on_error);
947 }
948 }
949 }
950 cell = SCM_CAR (master->clusters);
951 master->clusters = SCM_CDR (master->clusters);
952 ++master->clusters_allocated;
953 }
954 while (SCM_NULLP (cell));
955
956 #ifdef GUILE_DEBUG_FREELIST
957 scm_check_freelist (cell);
958 #endif
959
960 --scm_ints_disabled;
961 *freelist = SCM_FREE_CELL_CDR (cell);
962 return cell;
963 }
964
965
966 #if 0
967 /* This is a support routine which can be used to reserve a cluster
968 * for some special use, such as debugging. It won't be useful until
969 * free cells are preserved between garbage collections.
970 */
971
972 SCM
973 scm_alloc_cluster (scm_freelist_t *master)
974 {
975 SCM freelist, cell;
976 cell = scm_gc_for_newcell (master, &freelist);
977 SCM_SETCDR (cell, freelist);
978 return cell;
979 }
980 #endif
981
982
983 scm_c_hook_t scm_before_gc_c_hook;
984 scm_c_hook_t scm_before_mark_c_hook;
985 scm_c_hook_t scm_before_sweep_c_hook;
986 scm_c_hook_t scm_after_sweep_c_hook;
987 scm_c_hook_t scm_after_gc_c_hook;
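
/* A minimal sketch of hooking into the collector (illustrative; see
 * hooks.h for the exact interface).  A client could log every finished
 * collection by registering a function on scm_after_gc_c_hook:
 */
#if 0
static void *
note_gc (void *hook_data, void *func_data, void *data)
{
  fprintf (stderr, "gc finished\n");
  return NULL;
}

/* somewhere during initialization: */
scm_c_hook_add (&scm_after_gc_c_hook, note_gc, NULL, 0);
#endif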
988
989
990 void
991 scm_igc (const char *what)
992 {
993 int j;
994
995 ++scm_gc_running_p;
996 scm_c_hook_run (&scm_before_gc_c_hook, 0);
997 #ifdef DEBUGINFO
998 fprintf (stderr,
999 SCM_NULLP (scm_freelist)
1000 ? "*"
1001 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
1002 #endif
1003 #ifdef USE_THREADS
1004 /* During the critical section, only the current thread may run. */
1005 SCM_THREAD_CRITICAL_SECTION_START;
1006 #endif
1007
1008 /* fprintf (stderr, "gc: %s\n", what); */
1009
1010 if (!scm_stack_base || scm_block_gc)
1011 {
1012 --scm_gc_running_p;
1013 return;
1014 }
1015
1016 gc_start_stats (what);
1017
1018 if (scm_mallocated < 0)
1019 /* The byte count of allocated objects has underflowed. This is
1020 probably because you forgot to report the sizes of objects you
1021 have allocated, by calling scm_done_malloc or some such. When
1022 the GC freed them, it subtracted their size from
1023 scm_mallocated, which underflowed. */
1024 abort ();
1025
1026 if (scm_gc_heap_lock)
1027 /* We've invoked the collector while a GC is already in progress.
1028 That should never happen. */
1029 abort ();
1030
1031 ++scm_gc_heap_lock;
1032
1033 /* flush dead entries from the continuation stack */
1034 {
1035 int x;
1036 int bound;
1037 SCM * elts;
1038 elts = SCM_VELTS (scm_continuation_stack);
1039 bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
1040 x = SCM_INUM (scm_continuation_stack_ptr);
1041 while (x < bound)
1042 {
1043 elts[x] = SCM_BOOL_F;
1044 ++x;
1045 }
1046 }
1047
1048 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1049
1050 clear_mark_space ();
1051
1052 #ifndef USE_THREADS
1053
1054 /* Mark objects on the C stack. */
1055 SCM_FLUSH_REGISTER_WINDOWS;
1056 /* This assumes that all registers are saved into the jmp_buf */
1057 setjmp (scm_save_regs_gc_mark);
1058 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
1059 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
1060 sizeof scm_save_regs_gc_mark)
1061 / sizeof (SCM_STACKITEM)));
1062
1063 {
1064 scm_sizet stack_len = scm_stack_size (scm_stack_base);
1065 #ifdef SCM_STACK_GROWS_UP
1066 scm_mark_locations (scm_stack_base, stack_len);
1067 #else
1068 scm_mark_locations (scm_stack_base - stack_len, stack_len);
1069 #endif
1070 }
1071
1072 #else /* USE_THREADS */
1073
1074 /* Mark every thread's stack and registers */
1075 scm_threads_mark_stacks ();
1076
1077 #endif /* USE_THREADS */
1078
1079 j = SCM_NUM_PROTECTS;
1080 while (j--)
1081 scm_gc_mark (scm_sys_protects[j]);
1082
1083 /* FIXME: we should have a means to register C functions to be run
1084 * in different phases of GC
1085 */
1086 scm_mark_subr_table ();
1087
1088 #ifndef USE_THREADS
1089 scm_gc_mark (scm_root->handle);
1090 #endif
1091
1092 t_before_sweep = scm_c_get_internal_run_time ();
1093 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1094
1095 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
1096
1097 scm_gc_sweep ();
1098
1099 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1100
1101 --scm_gc_heap_lock;
1102 gc_end_stats ();
1103
1104 #ifdef USE_THREADS
1105 SCM_THREAD_CRITICAL_SECTION_END;
1106 #endif
1107 scm_c_hook_run (&scm_after_gc_c_hook, 0);
1108 --scm_gc_running_p;
1109 }
1110
1111 \f
1112
1113 /* {Mark/Sweep}
1114 */
1115
1116 #define MARK scm_gc_mark
1117 #define FNAME "scm_gc_mark"
1118
1119 #endif /*!MARK_DEPENDENCIES*/
1120
1121 /* Mark an object precisely.
1122 */
1123 void
1124 MARK (SCM p)
1125 #define FUNC_NAME FNAME
1126 {
1127 register long i;
1128 register SCM ptr;
1129 scm_bits_t cell_type;
1130
1131 #ifndef MARK_DEPENDENCIES
1132 # define RECURSE scm_gc_mark
1133 #else
1134 /* go through the usual marking, but not for self-cycles. */
1135 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1136 #endif
1137 ptr = p;
1138
1139 #ifdef MARK_DEPENDENCIES
1140 goto gc_mark_loop_first_time;
1141 #endif
1142
1143 gc_mark_loop:
1144 if (SCM_IMP (ptr))
1145 return;
1146
1147 gc_mark_nimp:
1148
1149 #ifdef MARK_DEPENDENCIES
1150 if (SCM_EQ_P (ptr, p))
1151 return;
1152
1153 scm_gc_mark (ptr);
1154 return;
1155
1156 gc_mark_loop_first_time:
1157 #endif
1158
1159 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1160 /* We are in debug mode. Check the ptr exhaustively. */
1161 if (!scm_cellp (ptr))
1162 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1163 #else
1164 /* In non-debug mode, do at least some cheap testing. */
1165 if (!SCM_CELLP (ptr))
1166 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1167 #endif
1168
1169 #ifndef MARK_DEPENDENCIES
1170
1171 if (SCM_GCMARKP (ptr))
1172 return;
1173
1174 SCM_SETGCMARK (ptr);
1175
1176 #endif
1177
1178 cell_type = SCM_GC_CELL_TYPE (ptr);
1179 switch (SCM_ITAG7 (cell_type))
1180 {
1181 case scm_tcs_cons_nimcar:
1182 if (SCM_IMP (SCM_CDR (ptr)))
1183 {
1184 ptr = SCM_CAR (ptr);
1185 goto gc_mark_nimp;
1186 }
1187 RECURSE (SCM_CAR (ptr));
1188 ptr = SCM_CDR (ptr);
1189 goto gc_mark_nimp;
1190 case scm_tcs_cons_imcar:
1191 ptr = SCM_CDR (ptr);
1192 goto gc_mark_loop;
1193 case scm_tc7_pws:
1194 RECURSE (SCM_SETTER (ptr));
1195 ptr = SCM_PROCEDURE (ptr);
1196 goto gc_mark_loop;
1197 case scm_tcs_cons_gloc:
1198 {
1199 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1200 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1201 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1202 * pointer to a struct vtable data region. The fact that these are
1203 * accessed in the same way restricts the possibilities to change the
1204 * data layout of structs or heap cells.
1205 */
1206 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1207 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1208 if (vtable_data [scm_vtable_index_vcell] != 0)
1209 {
1210 /* ptr is a gloc */
1211 SCM gloc_car = SCM_PACK (word0);
1212 RECURSE (gloc_car);
1213 ptr = SCM_CDR (ptr);
1214 goto gc_mark_loop;
1215 }
1216 else
1217 {
1218 /* ptr is a struct */
1219 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
1220 int len = SCM_SYMBOL_LENGTH (layout);
1221 char * fields_desc = SCM_SYMBOL_CHARS (layout);
1222 scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);
1223
1224 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1225 {
1226 RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
1227 RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
1228 }
1229 if (len)
1230 {
1231 int x;
1232
1233 for (x = 0; x < len - 2; x += 2, ++struct_data)
1234 if (fields_desc[x] == 'p')
1235 RECURSE (SCM_PACK (*struct_data));
1236 if (fields_desc[x] == 'p')
1237 {
1238 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
1239 for (x = *struct_data++; x; --x, ++struct_data)
1240 RECURSE (SCM_PACK (*struct_data));
1241 else
1242 RECURSE (SCM_PACK (*struct_data));
1243 }
1244 }
1245 /* mark vtable */
1246 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1247 goto gc_mark_loop;
1248 }
1249 }
1250 break;
1251 case scm_tcs_closures:
1252 if (SCM_IMP (SCM_ENV (ptr)))
1253 {
1254 ptr = SCM_CLOSCAR (ptr);
1255 goto gc_mark_nimp;
1256 }
1257 RECURSE (SCM_CLOSCAR (ptr));
1258 ptr = SCM_ENV (ptr);
1259 goto gc_mark_nimp;
1260 case scm_tc7_vector:
1261 i = SCM_VECTOR_LENGTH (ptr);
1262 if (i == 0)
1263 break;
1264 while (--i > 0)
1265 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1266 RECURSE (SCM_VELTS (ptr)[i]);
1267 ptr = SCM_VELTS (ptr)[0];
1268 goto gc_mark_loop;
1269 #ifdef CCLO
1270 case scm_tc7_cclo:
1271 {
1272 unsigned long int i = SCM_CCLO_LENGTH (ptr);
1273 unsigned long int j;
1274 for (j = 1; j != i; ++j)
1275 {
1276 SCM obj = SCM_CCLO_REF (ptr, j);
1277 if (!SCM_IMP (obj))
1278 RECURSE (obj);
1279 }
1280 ptr = SCM_CCLO_REF (ptr, 0);
1281 goto gc_mark_loop;
1282 }
1283 #endif
1284 #ifdef HAVE_ARRAYS
1285 case scm_tc7_bvect:
1286 case scm_tc7_byvect:
1287 case scm_tc7_ivect:
1288 case scm_tc7_uvect:
1289 case scm_tc7_fvect:
1290 case scm_tc7_dvect:
1291 case scm_tc7_cvect:
1292 case scm_tc7_svect:
1293 #ifdef HAVE_LONG_LONGS
1294 case scm_tc7_llvect:
1295 #endif
1296 #endif
1297 case scm_tc7_string:
1298 break;
1299
1300 case scm_tc7_substring:
1301 ptr = SCM_CDR (ptr);
1302 goto gc_mark_loop;
1303
1304 case scm_tc7_wvect:
1305 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1306 scm_weak_vectors = ptr;
1307 if (SCM_IS_WHVEC_ANY (ptr))
1308 {
1309 int x;
1310 int len;
1311 int weak_keys;
1312 int weak_values;
1313
1314 len = SCM_VECTOR_LENGTH (ptr);
1315 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1316 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1317
1318 for (x = 0; x < len; ++x)
1319 {
1320 SCM alist;
1321 alist = SCM_VELTS (ptr)[x];
1322
1323 /* mark everything on the alist except the keys or
1324 * values, according to weak_values and weak_keys. */
1325 while ( SCM_CONSP (alist)
1326 && !SCM_GCMARKP (alist)
1327 && SCM_CONSP (SCM_CAR (alist)))
1328 {
1329 SCM kvpair;
1330 SCM next_alist;
1331
1332 kvpair = SCM_CAR (alist);
1333 next_alist = SCM_CDR (alist);
1334 /*
1335 * Do not do this:
1336 * SCM_SETGCMARK (alist);
1337 * SCM_SETGCMARK (kvpair);
1338 *
1339 * It may be that either the key or value is protected by
1340 * an escaped reference to part of the spine of this alist.
1341 * If we mark the spine here, and only mark one or neither of the
1342 * key and value, they may never be properly marked.
1343 * This leads to a horrible situation in which an alist containing
1344 * freelist cells is exported.
1345 *
1346 * So only mark the spines of these arrays last of all marking.
1347 * If somebody confuses us by constructing a weak vector
1348 * with a circular alist then we are hosed, but at least we
1349 * won't prematurely drop table entries.
1350 */
1351 if (!weak_keys)
1352 RECURSE (SCM_CAR (kvpair));
1353 if (!weak_values)
1354 RECURSE (SCM_CDR (kvpair));
1355 alist = next_alist;
1356 }
1357 if (SCM_NIMP (alist))
1358 RECURSE (alist);
1359 }
1360 }
1361 break;
1362
1363 case scm_tc7_symbol:
1364 ptr = SCM_PROP_SLOTS (ptr);
1365 goto gc_mark_loop;
1366 case scm_tcs_subrs:
1367 break;
1368 case scm_tc7_port:
1369 i = SCM_PTOBNUM (ptr);
1370 if (!(i < scm_numptob))
1371 goto def;
1372 if (SCM_PTAB_ENTRY(ptr))
1373 RECURSE (SCM_FILENAME (ptr));
1374 if (scm_ptobs[i].mark)
1375 {
1376 ptr = (scm_ptobs[i].mark) (ptr);
1377 goto gc_mark_loop;
1378 }
1379 else
1380 return;
1381 break;
1382 case scm_tc7_smob:
1383 switch (SCM_TYP16 (ptr))
1384 { /* should be faster than going through scm_smobs */
1385 case scm_tc_free_cell:
1386 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1387 case scm_tc16_big:
1388 case scm_tc16_real:
1389 case scm_tc16_complex:
1390 break;
1391 default:
1392 i = SCM_SMOBNUM (ptr);
1393 if (!(i < scm_numsmob))
1394 goto def;
1395 if (scm_smobs[i].mark)
1396 {
1397 ptr = (scm_smobs[i].mark) (ptr);
1398 goto gc_mark_loop;
1399 }
1400 else
1401 return;
1402 }
1403 break;
1404 default:
1405 def:
1406 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1407 }
1408 #undef RECURSE
1409 }
1410 #undef FUNC_NAME
1411
1412 #ifndef MARK_DEPENDENCIES
1413
1414 #undef MARK
1415 #undef FNAME
1416
1417 /* And here we define `scm_gc_mark_dependencies', by including this
1418 * same file in itself.
1419 */
1420 #define MARK scm_gc_mark_dependencies
1421 #define FNAME "scm_gc_mark_dependencies"
1422 #define MARK_DEPENDENCIES
1423 #include "gc.c"
1424 #undef MARK_DEPENDENCIES
1425 #undef MARK
1426 #undef FNAME
1427
1428
1429 /* Mark a Region Conservatively
1430 */
1431
1432 void
1433 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1434 {
1435 unsigned long m;
1436
1437 for (m = 0; m < n; ++m)
1438 {
1439 SCM obj = * (SCM *) &x[m];
1440 if (SCM_CELLP (obj))
1441 {
1442 SCM_CELLPTR ptr = SCM2PTR (obj);
1443 int i = 0;
1444 int j = scm_n_heap_segs - 1;
1445 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1446 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1447 {
1448 while (i <= j)
1449 {
1450 int seg_id;
1451 seg_id = -1;
1452 if ((i == j)
1453 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1454 seg_id = i;
1455 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1456 seg_id = j;
1457 else
1458 {
1459 int k;
1460 k = (i + j) / 2;
1461 if (k == i)
1462 break;
1463 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1464 {
1465 j = k;
1466 ++i;
1467 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1468 continue;
1469 else
1470 break;
1471 }
1472 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1473 {
1474 i = k;
1475 --j;
1476 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1477 continue;
1478 else
1479 break;
1480 }
1481 }
1482
1483 if (SCM_GC_IN_CARD_HEADERP (ptr))
1484 break;
1485
1486 if (scm_heap_table[seg_id].span == 1
1487 || DOUBLECELL_ALIGNED_P (obj))
1488 scm_gc_mark (obj);
1489
1490 break;
1491 }
1492 }
1493 }
1494 }
1495 }
1496
1497
1498 /* The function scm_cellp determines whether an SCM value can be regarded as a
1499 * pointer to a cell on the heap. Binary search is used in order to determine
1500 * the heap segment that contains the cell.
1501 */
1502 int
1503 scm_cellp (SCM value)
1504 {
1505 if (SCM_CELLP (value)) {
1506 scm_cell * ptr = SCM2PTR (value);
1507 unsigned int i = 0;
1508 unsigned int j = scm_n_heap_segs - 1;
1509
1510 if (SCM_GC_IN_CARD_HEADERP (ptr))
1511 return 0;
1512
1513 while (i < j) {
1514 int k = (i + j) / 2;
1515 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1516 j = k;
1517 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1518 i = k + 1;
1519 }
1520 }
1521
1522 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1523 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1524 && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
1525 && !SCM_GC_IN_CARD_HEADERP (ptr)
1526 )
1527 return 1;
1528 else
1529 return 0;
1530 } else
1531 return 0;
1532 }
1533
1534
1535 static void
1536 gc_sweep_freelist_start (scm_freelist_t *freelist)
1537 {
1538 freelist->cells = SCM_EOL;
1539 freelist->left_to_collect = freelist->cluster_size;
1540 freelist->clusters_allocated = 0;
1541 freelist->clusters = SCM_EOL;
1542 freelist->clustertail = &freelist->clusters;
1543 freelist->collected_1 = freelist->collected;
1544 freelist->collected = 0;
1545 }
1546
1547 static void
1548 gc_sweep_freelist_finish (scm_freelist_t *freelist)
1549 {
1550 int collected;
1551 *freelist->clustertail = freelist->cells;
1552 if (!SCM_NULLP (freelist->cells))
1553 {
1554 SCM c = freelist->cells;
1555 SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
1556 SCM_SET_CELL_WORD_1 (c, SCM_EOL);
1557 freelist->collected +=
1558 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1559 }
1560 scm_gc_cells_collected += freelist->collected;
1561
1562 /* Although freelist->min_yield is used to test freelist->collected
1563 * (which is the local GC yield for freelist), it is adjusted so
1564 * that *total* yield is freelist->min_yield_fraction of total heap
1565 * size. This means that a too low yield is compensated by more
1566 * heap on the list which is currently doing most work, which is
1567 * just what we want.
1568 */
1569 collected = SCM_MAX (freelist->collected_1, freelist->collected);
1570 freelist->grow_heap_p = (collected < freelist->min_yield);
1571 }
1572
1573 #define NEXT_DATA_CELL(ptr, span) \
1574 do { \
1575 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1576 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1577 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1578 : nxt__); \
1579 } while (0)
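
/* Illustration: bumping PTR by one byte and rounding up with CELL_UP
 * yields the next cell boundary.  If that lands inside a card header,
 * NEXT_DATA_CELL skips past the SCM_GC_CARD_N_HEADER_CELLS header cells
 * to the first data cell of that card; otherwise the rounded-up pointer
 * is used as-is.
 */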
1580
1581 void
1582 scm_gc_sweep ()
1583 #define FUNC_NAME "scm_gc_sweep"
1584 {
1585 register SCM_CELLPTR ptr;
1586 register SCM nfreelist;
1587 register scm_freelist_t *freelist;
1588 register long m;
1589 register int span;
1590 long i;
1591 scm_sizet seg_size;
1592
1593 m = 0;
1594
1595 gc_sweep_freelist_start (&scm_master_freelist);
1596 gc_sweep_freelist_start (&scm_master_freelist2);
1597
1598 for (i = 0; i < scm_n_heap_segs; i++)
1599 {
1600 register unsigned int left_to_collect;
1601 register scm_sizet j;
1602
1603 /* Unmarked cells go onto the front of the freelist this heap
1604 segment points to. Rather than updating the real freelist
1605 pointer as we go along, we accumulate the new head in
1606 nfreelist. Then, if it turns out that the entire segment is
1607 free, we free (i.e., malloc's free) the whole segment, and
1608 simply don't assign nfreelist back into the real freelist. */
1609 freelist = scm_heap_table[i].freelist;
1610 nfreelist = freelist->cells;
1611 left_to_collect = freelist->left_to_collect;
1612 span = scm_heap_table[i].span;
1613
1614 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1615 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1616
1617 /* use only data cells in seg_size */
1618 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1619
1620 scm_gc_cells_swept += seg_size;
1621
1622 for (j = seg_size + span; j -= span; ptr += span)
1623 {
1624 SCM scmptr;
1625
1626 if (SCM_GC_IN_CARD_HEADERP (ptr))
1627 {
1628 SCM_CELLPTR nxt;
1629
1630 /* cheat here */
1631 nxt = ptr;
1632 NEXT_DATA_CELL (nxt, span);
1633 j += span;
1634
1635 ptr = nxt - span;
1636 continue;
1637 }
1638
1639 scmptr = PTR2SCM (ptr);
1640
1641 if (SCM_GCMARKP (scmptr))
1642 continue;
1643
1644 switch SCM_TYP7 (scmptr)
1645 {
1646 case scm_tcs_cons_gloc:
1647 {
1648 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1649 * struct or a gloc. See the corresponding comment in
1650 * scm_gc_mark.
1651 */
1652 scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
1653 - scm_tc3_cons_gloc);
1654 /* access as struct */
1655 scm_bits_t * vtable_data = (scm_bits_t *) word0;
1656 if (vtable_data[scm_vtable_index_vcell] == 0)
1657 {
1658 /* Structs need to be freed in a special order.
1659 * This is handled by GC C hooks in struct.c.
1660 */
1661 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1662 scm_structs_to_free = scmptr;
1663 continue;
1664 }
1665 /* fall through so that scmptr gets collected */
1666 }
1667 break;
1668 case scm_tcs_cons_imcar:
1669 case scm_tcs_cons_nimcar:
1670 case scm_tcs_closures:
1671 case scm_tc7_pws:
1672 break;
1673 case scm_tc7_wvect:
1674 m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
1675 scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
1676 break;
1677 case scm_tc7_vector:
1678 {
1679 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1680 if (length > 0)
1681 {
1682 m += length * sizeof (scm_bits_t);
1683 scm_must_free (SCM_VECTOR_BASE (scmptr));
1684 }
1685 break;
1686 }
1687 #ifdef CCLO
1688 case scm_tc7_cclo:
1689 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
1690 scm_must_free (SCM_CCLO_BASE (scmptr));
1691 break;
1692 #endif
1693 #ifdef HAVE_ARRAYS
1694 case scm_tc7_bvect:
1695 {
1696 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
1697 if (length > 0)
1698 {
1699 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1700 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1701 }
1702 }
1703 break;
1704 case scm_tc7_byvect:
1705 case scm_tc7_ivect:
1706 case scm_tc7_uvect:
1707 case scm_tc7_svect:
1708 #ifdef HAVE_LONG_LONGS
1709 case scm_tc7_llvect:
1710 #endif
1711 case scm_tc7_fvect:
1712 case scm_tc7_dvect:
1713 case scm_tc7_cvect:
1714 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
1715 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1716 break;
1717 #endif
1718 case scm_tc7_substring:
1719 break;
1720 case scm_tc7_string:
1721 m += SCM_STRING_LENGTH (scmptr) + 1;
1722 scm_must_free (SCM_STRING_CHARS (scmptr));
1723 break;
1724 case scm_tc7_symbol:
1725 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
1726 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
1727 break;
1728 case scm_tcs_subrs:
1729 /* the various "subrs" (primitives) are never freed */
1730 continue;
1731 case scm_tc7_port:
1732 if SCM_OPENP (scmptr)
1733 {
1734 int k = SCM_PTOBNUM (scmptr);
1735 if (!(k < scm_numptob))
1736 goto sweeperr;
1737 /* Keep "revealed" ports alive. */
1738 if (scm_revealed_count (scmptr) > 0)
1739 continue;
1740 /* Yes, I really do mean scm_ptobs[k].free */
1741 /* rather than scm_ptobs[k].close. .close */
1742 /* is for explicit CLOSE-PORT by user */
1743 m += (scm_ptobs[k].free) (scmptr);
1744 SCM_SETSTREAM (scmptr, 0);
1745 scm_remove_from_port_table (scmptr);
1746 scm_gc_ports_collected++;
1747 SCM_CLR_PORT_OPEN_FLAG (scmptr);
1748 }
1749 break;
1750 case scm_tc7_smob:
1751 switch SCM_TYP16 (scmptr)
1752 {
1753 case scm_tc_free_cell:
1754 case scm_tc16_real:
1755 break;
1756 #ifdef SCM_BIGDIG
1757 case scm_tc16_big:
1758 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1759 scm_must_free (SCM_BDIGITS (scmptr));
1760 break;
1761 #endif /* def SCM_BIGDIG */
1762 case scm_tc16_complex:
1763 m += sizeof (scm_complex_t);
1764 scm_must_free (SCM_COMPLEX_MEM (scmptr));
1765 break;
1766 default:
1767 {
1768 int k;
1769 k = SCM_SMOBNUM (scmptr);
1770 if (!(k < scm_numsmob))
1771 goto sweeperr;
1772 m += (scm_smobs[k].free) (scmptr);
1773 break;
1774 }
1775 }
1776 break;
1777 default:
1778 sweeperr:
1779 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1780 }
1781
1782 if (!--left_to_collect)
1783 {
1784 SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
1785 *freelist->clustertail = scmptr;
1786 freelist->clustertail = SCM_CDRLOC (scmptr);
1787
1788 nfreelist = SCM_EOL;
1789 freelist->collected += span * freelist->cluster_size;
1790 left_to_collect = freelist->cluster_size;
1791 }
1792 else
1793 {
1794 /* Stick the new cell on the front of nfreelist. It's
1795 critical that we mark this cell as freed; otherwise, the
1796 conservative collector might trace it as some other type
1797 of object. */
1798 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1799 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
1800 nfreelist = scmptr;
1801 }
1802 }
1803
1804 #ifdef GC_FREE_SEGMENTS
1805 if (n == seg_size)
1806 {
1807 register long j;
1808
1809 freelist->heap_size -= seg_size;
1810 free ((char *) scm_heap_table[i].bounds[0]);
1811 scm_heap_table[i].bounds[0] = 0;
1812 for (j = i + 1; j < scm_n_heap_segs; j++)
1813 scm_heap_table[j - 1] = scm_heap_table[j];
1814 scm_n_heap_segs -= 1;
1815 i--; /* We need to scan the segment just moved. */
1816 }
1817 else
1818 #endif /* ifdef GC_FREE_SEGMENTS */
1819 {
1820 /* Update the real freelist pointer to point to the head of
1821 the list of free cells we've built for this segment. */
1822 freelist->cells = nfreelist;
1823 freelist->left_to_collect = left_to_collect;
1824 }
1825
1826 #ifdef GUILE_DEBUG_FREELIST
1827 scm_map_free_list ();
1828 #endif
1829 }
1830
1831 gc_sweep_freelist_finish (&scm_master_freelist);
1832 gc_sweep_freelist_finish (&scm_master_freelist2);
1833
1834 /* When we move to POSIX threads, private freelists should probably
1835 be GC-protected instead. */
1836 scm_freelist = SCM_EOL;
1837 scm_freelist2 = SCM_EOL;
1838
1839 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1840 scm_gc_yield -= scm_cells_allocated;
1841 scm_mallocated -= m;
1842 scm_gc_malloc_collected = m;
1843 }
1844 #undef FUNC_NAME
1845
1846
1847 \f
1848 /* {Front end to malloc}
1849 *
1850 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1851 * scm_done_free
1852 *
1853 * These functions provide services comparable to malloc, realloc, and
1854 * free. They should be used when allocating memory that will be under
1855 * control of the garbage collector, i.e., if the memory may be freed
1856 * during garbage collection.
1857 */
1858
1859 /* scm_must_malloc
1860 * Return newly malloced storage or throw an error.
1861 *
1862 * The parameter WHAT is a string for error reporting.
1863 * If the threshold scm_mtrigger will be passed by this
1864 * allocation, or if the first call to malloc fails,
1865 * garbage collect -- on the presumption that some objects
1866 * using malloced storage may be collected.
1867 *
1868 * The limit scm_mtrigger may be raised by this allocation.
1869 */
1870 void *
1871 scm_must_malloc (scm_sizet size, const char *what)
1872 {
1873 void *ptr;
1874 unsigned long nm = scm_mallocated + size;
1875
1876 if (nm <= scm_mtrigger)
1877 {
1878 SCM_SYSCALL (ptr = malloc (size));
1879 if (NULL != ptr)
1880 {
1881 scm_mallocated = nm;
1882 #ifdef GUILE_DEBUG_MALLOC
1883 scm_malloc_register (ptr, what);
1884 #endif
1885 return ptr;
1886 }
1887 }
1888
1889 scm_igc (what);
1890
1891 nm = scm_mallocated + size;
1892 SCM_SYSCALL (ptr = malloc (size));
1893 if (NULL != ptr)
1894 {
1895 scm_mallocated = nm;
1896 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1897 if (nm > scm_mtrigger)
1898 scm_mtrigger = nm + nm / 2;
1899 else
1900 scm_mtrigger += scm_mtrigger / 2;
1901 }
1902 #ifdef GUILE_DEBUG_MALLOC
1903 scm_malloc_register (ptr, what);
1904 #endif
1905
1906 return ptr;
1907 }
1908
1909 scm_memory_error (what);
1910 }
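
/* Worked example (illustrative numbers): suppose scm_mtrigger is 100000
 * and, after the GC above, scm_mallocated + size comes to 95000.  Since
 * 95000 > 100000 - SCM_MTRIGGER_HYSTERESIS, the collection did not
 * reclaim enough, so the trigger is raised: 95000 <= 100000 takes the
 * second branch and scm_mtrigger becomes 150000.  Had the total been
 * 120000 instead, the first branch would set it to 120000 + 60000 =
 * 180000.
 */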
1911
1912
1913 /* scm_must_realloc
1914 * is similar to scm_must_malloc.
1915 */
1916 void *
1917 scm_must_realloc (void *where,
1918 scm_sizet old_size,
1919 scm_sizet size,
1920 const char *what)
1921 {
1922 void *ptr;
1923 scm_sizet nm = scm_mallocated + size - old_size;
1924
1925 if (nm <= scm_mtrigger)
1926 {
1927 SCM_SYSCALL (ptr = realloc (where, size));
1928 if (NULL != ptr)
1929 {
1930 scm_mallocated = nm;
1931 #ifdef GUILE_DEBUG_MALLOC
1932 scm_malloc_reregister (where, ptr, what);
1933 #endif
1934 return ptr;
1935 }
1936 }
1937
1938 scm_igc (what);
1939
1940 nm = scm_mallocated + size - old_size;
1941 SCM_SYSCALL (ptr = realloc (where, size));
1942 if (NULL != ptr)
1943 {
1944 scm_mallocated = nm;
1945 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1946 if (nm > scm_mtrigger)
1947 scm_mtrigger = nm + nm / 2;
1948 else
1949 scm_mtrigger += scm_mtrigger / 2;
1950 }
1951 #ifdef GUILE_DEBUG_MALLOC
1952 scm_malloc_reregister (where, ptr, what);
1953 #endif
1954 return ptr;
1955 }
1956
1957   scm_memory_error (what);  /* does not return */
1958 }
1959
1960
1961 void
1962 scm_must_free (void *obj)
1963 #define FUNC_NAME "scm_must_free"
1964 {
1965 #ifdef GUILE_DEBUG_MALLOC
1966 scm_malloc_unregister (obj);
1967 #endif
1968 if (obj)
1969 free (obj);
1970 else
1971 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
1972 }
1973 #undef FUNC_NAME
1974
1975
1976 /* Announce that there has been some malloc done that will be freed
1977 * during gc. A typical use is for a smob that uses some malloced
1978  * memory but cannot get it from scm_must_malloc (for whatever
1979  * reason).  When a new object of this smob is created, you call
1980 * scm_done_malloc with the size of the object. When your smob free
1981 * function is called, be sure to include this size in the return
1982 * value.
1983 *
1984 * If you can't actually free the memory in the smob free function,
1985 * for whatever reason (like reference counting), you still can (and
1986 * should) report the amount of memory freed when you actually free it.
1987 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1988 * eh? Or even better, call scm_done_free. */
1989
1990 void
1991 scm_done_malloc (long size)
1992 {
1993 scm_mallocated += size;
1994
1995 if (scm_mallocated > scm_mtrigger)
1996 {
1997 scm_igc ("foreign mallocs");
1998 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1999 {
2000 if (scm_mallocated > scm_mtrigger)
2001 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2002 else
2003 scm_mtrigger += scm_mtrigger / 2;
2004 }
2005 }
2006 }
2007
2008 void
2009 scm_done_free (long size)
2010 {
2011 scm_mallocated -= size;
2012 }
2013
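/* Editorial sketch of the reporting pattern described above, for a
 * hypothetical smob type.  example_image_t, example_tc16_image,
 * make_example_image and free_example_image are all invented names,
 * and the SCM_RETURN_NEWSMOB / SCM_SMOB_DATA macros from smob.h are
 * assumed; error checking is omitted.
 */
typedef struct example_image_t
{
  int w, h;
  char *pixels;
} example_image_t;

static scm_bits_t example_tc16_image;  /* would come from scm_make_smob_type */

static SCM
make_example_image (int w, int h)
{
  example_image_t *im =
    (example_image_t *) scm_must_malloc (sizeof (example_image_t), "image");
  im->w = w;
  im->h = h;
  im->pixels = (char *) malloc (w * h);  /* storage the GC cannot see ...   */
  scm_done_malloc (w * h);               /* ... so announce it to the GC.   */
  SCM_RETURN_NEWSMOB (example_tc16_image, im);
}

static scm_sizet
free_example_image (SCM obj)
{
  example_image_t *im = (example_image_t *) SCM_SMOB_DATA (obj);
  scm_sizet freed = sizeof (example_image_t) + im->w * im->h;
  free (im->pixels);
  scm_must_free (im);
  /* Report everything we released, as the comment above requires. */
  return freed;
}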
2014
2015 \f
2016 /* {Heap Segments}
2017 *
2018 * Each heap segment is an array of objects of a particular size.
2019 * Every segment has an associated (possibly shared) freelist.
2020 * A table of segment records is kept that records the upper and
2021  * lower extents of each segment; this is used during the conservative
2022  * phase of gc to identify probable gc roots (because they point
2023  * into valid segments at reasonable offsets).  */
2024
2025 /* scm_expmem
2026 * is true if the first segment was smaller than INIT_HEAP_SEG.
2027 * If scm_expmem is set to one, subsequent segment allocations will
2028 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2029 */
2030 int scm_expmem = 0;
2031
2032 scm_sizet scm_max_segment_size;
2033
2034 /* scm_heap_org
2035 * is the lowest base address of any heap segment.
2036 */
2037 SCM_CELLPTR scm_heap_org;
2038
2039 scm_heap_seg_data_t * scm_heap_table = 0;
2040 static unsigned int heap_segment_table_size = 0;
2041 int scm_n_heap_segs = 0;
2042
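/* Editorial sketch: the essence of the conservative root test that this
 * table supports.  The real test used during marking also checks cell
 * alignment and searches the table more cleverly; this linear version,
 * with the invented name example_points_into_heap, is illustration only.
 */
static int
example_points_into_heap (SCM_CELLPTR ptr)
{
  int i;
  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
        && !SCM_PTR_LE (scm_heap_table[i].bounds[1], ptr))
      return 1;  /* inside a known segment: treat as a probable root */
  return 0;
}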
2043 /* init_heap_seg
2044  * initializes a new heap segment and returns its size in bytes (zero on failure).
2045 *
2046 * The segment origin and segment size in bytes are input parameters.
2047 * The freelist is both input and output.
2048 *
2049 * This function presumes that the scm_heap_table has already been expanded
2050  * to accommodate a new segment record and that the markbit space was reserved
2051 * for all the cards in this segment.
2052 */
2053
2054 #define INIT_CARD(card, span) \
2055 do { \
2056 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2057 if ((span) == 2) \
2058 SCM_GC_SET_CARD_DOUBLECELL (card); \
2059 } while (0)
2060
2061 static scm_sizet
2062 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
2063 {
2064 register SCM_CELLPTR ptr;
2065 SCM_CELLPTR seg_end;
2066 int new_seg_index;
2067 int n_new_cells;
2068 int span = freelist->span;
2069
2070 if (seg_org == NULL)
2071 return 0;
2072
2073   /* Align the start pointer up to a card boundary.
2074 */
2075 ptr = SCM_GC_CARD_UP (seg_org);
2076
2077   /* Compute the ceiling on valid object pointers within this segment.
2078 */
2079 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
2080
2081   /* Find the right place in the address-sorted segment table and
2082    * insert the new segment record there.
2083    */
2084 for (new_seg_index = 0;
2085 ( (new_seg_index < scm_n_heap_segs)
2086 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2087 new_seg_index++)
2088 ;
2089
2090 {
2091 int i;
2092 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2093 scm_heap_table[i] = scm_heap_table[i - 1];
2094 }
2095
2096 ++scm_n_heap_segs;
2097
2098 scm_heap_table[new_seg_index].span = span;
2099 scm_heap_table[new_seg_index].freelist = freelist;
2100 scm_heap_table[new_seg_index].bounds[0] = ptr;
2101 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2102
2103   /* Number of cells between the aligned start and end of the segment. */
2104 n_new_cells = seg_end - ptr;
2105
2106 freelist->heap_size += n_new_cells;
2107
2108 /* Partition objects in this segment into clusters */
2109 {
2110 SCM clusters;
2111 SCM *clusterp = &clusters;
2112
2113 NEXT_DATA_CELL (ptr, span);
2114 while (ptr < seg_end)
2115 {
2116 scm_cell *nxt = ptr;
2117 scm_cell *prv = NULL;
2118 scm_cell *last_card = NULL;
2119 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2120 NEXT_DATA_CELL(nxt, span);
2121
2122 /* Allocate cluster spine
2123 */
2124 *clusterp = PTR2SCM (ptr);
2125 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
2126 clusterp = SCM_CDRLOC (*clusterp);
2127 ptr = nxt;
2128
2129 while (n_data_cells--)
2130 {
2131 scm_cell *card = SCM_GC_CELL_CARD (ptr);
2132 SCM scmptr = PTR2SCM (ptr);
2133 nxt = ptr;
2134 NEXT_DATA_CELL (nxt, span);
2135 prv = ptr;
2136
2137 if (card != last_card)
2138 {
2139 INIT_CARD (card, span);
2140 last_card = card;
2141 }
2142
2143 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2144 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
2145
2146 ptr = nxt;
2147 }
2148
2149 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
2150 }
2151
2152 /* sanity check */
2153 {
2154 scm_cell *ref = seg_end;
2155 NEXT_DATA_CELL (ref, span);
2156 if (ref != ptr)
2157 /* [cmm] looks like the segment size doesn't divide cleanly by
2158 cluster size. bad cmm! */
2159 abort();
2160 }
2161
2162 /* Patch up the last cluster pointer in the segment
2163 * to join it to the input freelist.
2164 */
2165 *clusterp = freelist->clusters;
2166 freelist->clusters = clusters;
2167 }
2168
2169 #ifdef DEBUGINFO
2170 fprintf (stderr, "H");
2171 #endif
2172 return size;
2173 }
2174
2175 static scm_sizet
2176 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2177 {
2178 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2179
2180 return
2181 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2182 + ALIGNMENT_SLACK (freelist);
2183 }
2184
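/* Editorial worked example: with a cluster size of 4096 bytes and
   len = 10000, (10000 + 4095) / 4096 truncates to 3, so the result is
   3 * 4096 = 12288 bytes plus ALIGNMENT_SLACK -- presumably enough
   extra room for init_heap_seg to card-align the segment start. */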
2185 static void
2186 alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
2187 #define FUNC_NAME "alloc_some_heap"
2188 {
2189 SCM_CELLPTR ptr;
2190 long len;
2191
2192 if (scm_gc_heap_lock)
2193 {
2194 /* Critical code sections (such as the garbage collector) aren't
2195 * supposed to add heap segments.
2196 */
2197       fprintf (stderr, "alloc_some_heap: Cannot extend locked heap.\n");
2198 abort ();
2199 }
2200
2201 if (scm_n_heap_segs == heap_segment_table_size)
2202 {
2203 /* We have to expand the heap segment table to have room for the new
2204 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2205 * init_heap_seg only if the allocation of the segment itself succeeds.
2206 */
2207 unsigned int new_table_size = scm_n_heap_segs + 1;
2208 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2209 scm_heap_seg_data_t * new_heap_table;
2210
2211 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2212 realloc ((char *)scm_heap_table, size)));
2213 if (!new_heap_table)
2214 {
2215 if (error_policy == abort_on_error)
2216 {
2217 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2218 abort ();
2219 }
2220 else
2221 {
2222 return;
2223 }
2224 }
2225 else
2226 {
2227 scm_heap_table = new_heap_table;
2228 heap_segment_table_size = new_table_size;
2229 }
2230 }
2231
2232 /* Pick a size for the new heap segment.
2233 * The rule for picking the size of a segment is explained in
2234 * gc.h
2235 */
2236 {
2237     /* Ensure that the new segment is predicted to be large enough.
2238 *
2239 * New yield should at least equal GC fraction of new heap size, i.e.
2240 *
2241 * y + dh > f * (h + dh)
2242 *
2243 * y : yield
2244 * f : min yield fraction
2245 * h : heap size
2246 * dh : size of new heap segment
2247 *
2248 * This gives dh > (f * h - y) / (1 - f)
2249 */
2250 int f = freelist->min_yield_fraction;
2251 long h = SCM_HEAP_SIZE;
2252 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2253 len = SCM_EXPHEAP (freelist->heap_size);
2254 #ifdef DEBUGINFO
2255     fprintf (stderr, "(%ld < %ld)", len, min_cells);
2256 #endif
2257 if (len < min_cells)
2258 len = min_cells + freelist->cluster_size;
2259 len *= sizeof (scm_cell);
2260 /* force new sampling */
2261 freelist->collected = LONG_MAX;
2262 }
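  /* Editorial worked example of the computation above: with f = 40 (a
     40% minimum yield), h = 100000 cells and scm_gc_yield = 20000 cells,
     min_cells = (40 * 100000 - 100 * 20000) / (99 - 40) = 2000000 / 59,
     i.e. about 33898 cells.  Note that the code divides by 99 - f rather
     than 100 - f, which rounds the requirement slightly upward. */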
2263
2264 if (len > scm_max_segment_size)
2265 len = scm_max_segment_size;
2266
2267 {
2268 scm_sizet smallest;
2269
2270 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2271
2272 if (len < smallest)
2273 len = smallest;
2274
2275 /* Allocate with decaying ambition. */
2276 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2277 && (len >= smallest))
2278 {
2279 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
2280 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2281 if (ptr)
2282 {
2283 init_heap_seg (ptr, rounded_len, freelist);
2284 return;
2285 }
2286 len /= 2;
2287 }
2288 }
2289
2290 if (error_policy == abort_on_error)
2291 {
2292 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2293 abort ();
2294 }
2295 }
2296 #undef FUNC_NAME
2297
2298
2299 SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
2300 (SCM name),
2301 "Flushes the glocs for @var{name}, or all glocs if @var{name}\n"
2302 "is @code{#t}.")
2303 #define FUNC_NAME s_scm_unhash_name
2304 {
2305 int x;
2306 int bound;
2307   if (!SCM_EQ_P (name, SCM_BOOL_T)) SCM_VALIDATE_SYMBOL (1, name);
2308 SCM_DEFER_INTS;
2309 bound = scm_n_heap_segs;
2310 for (x = 0; x < bound; ++x)
2311 {
2312 SCM_CELLPTR p;
2313 SCM_CELLPTR pbound;
2314 p = scm_heap_table[x].bounds[0];
2315 pbound = scm_heap_table[x].bounds[1];
2316 while (p < pbound)
2317 {
2318 SCM cell = PTR2SCM (p);
2319 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
2320 {
2321 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2322 * struct cell. See the corresponding comment in scm_gc_mark.
2323 */
2324 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2325 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2326 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
2327 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
2328 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
2329 {
2330 SCM_SET_CELL_OBJECT_0 (cell, name);
2331 }
2332 }
2333 ++p;
2334 }
2335 }
2336 SCM_ALLOW_INTS;
2337 return name;
2338 }
2339 #undef FUNC_NAME
2340
2341
2342 \f
2343 /* {GC Protection Helper Functions}
2344 */
2345
2346
2347 /*
2348  * If within a function you need to protect one or more scheme objects from
2349  * garbage collection, pass them as parameters to one of the
2350  * scm_remember_upto_here* functions below.  These functions don't do
2351  * anything, but since the compiler does not know that they are actually
2352  * no-ops, it will generate code that calls these functions with the given
2353  * parameters.  Therefore, you can be sure that the compiler will keep those
2354  * scheme values alive (on the stack or in a register) up to the point where
2355  * scm_remember_upto_here* is called.  In other words, place the call to
2356  * scm_remember_upto_here* _after_ the last code in your function that
2357  * depends on the scheme object.
2358  *
2359  * Example: We want to make sure that the string object str does not get
2360  * garbage collected during the execution of 'some_function', because
2361  * otherwise the characters belonging to str would be freed and
2362  * 'some_function' might access freed memory.  To make sure that the compiler
2363  * keeps str alive on the stack or in a register such that it is visible to
2364  * the conservative gc, we add the call to scm_remember_upto_here_1 _after_
2365  * the call to 'some_function'.  Note that this would not be necessary if
2366  * str were used anyway after the call to 'some_function'.
2367  *   char *chars = SCM_STRING_CHARS (str);
2368  *   some_function (chars);
2369  *   scm_remember_upto_here_1 (str);  // str will be alive up to this point.
2370  */
2371
2372 void
2373 scm_remember_upto_here_1 (SCM obj)
2374 {
2375 /* Empty. Protects a single object from garbage collection. */
2376 }
2377
2378 void
2379 scm_remember_upto_here_2 (SCM obj1, SCM obj2)
2380 {
2381 /* Empty. Protects two objects from garbage collection. */
2382 }
2383
2384 void
2385 scm_remember_upto_here (SCM obj, ...)
2386 {
2387 /* Empty. Protects any number of objects from garbage collection. */
2388 }
2389
2390
2391 #if (SCM_DEBUG_DEPRECATED == 0)
2392
2393 void
2394 scm_remember (SCM *ptr)
2395 {
2396 /* empty */
2397 }
2398
2399 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2400
2401 /*
2402   These functions prevent garbage collection
2403   of every argument after the first one by
2404   keeping those arguments live throughout the
2405   calling function: since they are used in its
2406   last line of code, the compiler must preserve them.
2407   It'd be better to have a nice compiler hint to
2408   aid the conservative stack-scanning GC. --03/09/00 gjb */
2409 SCM
2410 scm_return_first (SCM elt, ...)
2411 {
2412 return elt;
2413 }
2414
2415 int
2416 scm_return_first_int (int i, ...)
2417 {
2418 return i;
2419 }
2420
2421
2422 SCM
2423 scm_permanent_object (SCM obj)
2424 {
2425 SCM_REDEFER_INTS;
2426 scm_permobjs = scm_cons (obj, scm_permobjs);
2427 SCM_REALLOW_INTS;
2428 return obj;
2429 }
2430
2431
2432 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2433 other references are dropped, until the object is unprotected by calling
2434 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2435    i.e., it is possible to protect the same object several times, but it is
2436 necessary to unprotect the object the same number of times to actually get
2437 the object unprotected. It is an error to unprotect an object more often
2438 than it has been protected before. The function scm_protect_object returns
2439 OBJ.
2440 */
2441
2442 /* Implementation note: For every object X, there is a counter which
2443 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2444 */
2445
2446 SCM
2447 scm_protect_object (SCM obj)
2448 {
2449 SCM handle;
2450
2451 /* This critical section barrier will be replaced by a mutex. */
2452 SCM_REDEFER_INTS;
2453
2454 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2455 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
2456
2457 SCM_REALLOW_INTS;
2458
2459 return obj;
2460 }
2461
2462
2463 /* Remove any protection for OBJ established by a prior call to
2464 scm_protect_object. This function returns OBJ.
2465
2466 See scm_protect_object for more information. */
2467 SCM
2468 scm_unprotect_object (SCM obj)
2469 {
2470 SCM handle;
2471
2472 /* This critical section barrier will be replaced by a mutex. */
2473 SCM_REDEFER_INTS;
2474
2475 handle = scm_hashq_get_handle (scm_protects, obj);
2476
2477 if (SCM_FALSEP (handle))
2478 {
2479 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2480 abort ();
2481 }
2482 else
2483 {
2484 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2485 if (count == 0)
2486 scm_hashq_remove_x (scm_protects, obj);
2487 else
2488 SCM_SETCDR (handle, SCM_MAKINUM (count));
2489 }
2490
2491 SCM_REALLOW_INTS;
2492
2493 return obj;
2494 }
2495
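/* Editorial usage sketch: keeping a Scheme procedure alive while the
 * only reference to it is from C.  example_callback and the two
 * functions around it are invented names for illustration.
 */
static SCM example_callback;

void
example_set_callback (SCM proc)
{
  example_callback = scm_protect_object (proc);  /* now safe from gc */
}

void
example_clear_callback (void)
{
  scm_unprotect_object (example_callback);  /* balances the protect above */
  example_callback = SCM_BOOL_F;
}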
2496 int terminating;
2497
2498 /* called on process termination. */
2499 #ifdef HAVE_ATEXIT
2500 static void
2501 cleanup (void)
2502 #else
2503 #ifdef HAVE_ON_EXIT
2504 extern int on_exit (void (*procp) (), int arg);
2505
2506 static void
2507 cleanup (int status, void *arg)
2508 #else
2509 #error Do not know how to set up a cleanup handler on your system.
2510 #endif
2511 #endif
2512 {
2513 terminating = 1;
2514 scm_flush_all_ports ();
2515 }
2516
2517 \f
2518 static int
2519 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
2520 {
2521 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2522
2523 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2524 rounded_size,
2525 freelist))
2526 {
2527 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2528 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2529 rounded_size,
2530 freelist))
2531 return 1;
2532 }
2533 else
2534 scm_expmem = 1;
2535
2536 if (freelist->min_yield_fraction)
2537 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2538 / 100);
2539 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2540
2541 return 0;
2542 }
2543
2544 \f
2545 static void
2546 init_freelist (scm_freelist_t *freelist,
2547 int span,
2548 int cluster_size,
2549 int min_yield)
2550 {
2551 freelist->clusters = SCM_EOL;
2552 freelist->cluster_size = cluster_size + 1;
2553 freelist->left_to_collect = 0;
2554 freelist->clusters_allocated = 0;
2555 freelist->min_yield = 0;
2556 freelist->min_yield_fraction = min_yield;
2557 freelist->span = span;
2558 freelist->collected = 0;
2559 freelist->collected_1 = 0;
2560 freelist->heap_size = 0;
2561 }
2562
2563
2564 /* Get an integer from an environment variable. */
2565 static int
2566 scm_i_getenv_int (const char *var, int def)
2567 {
2568 char *end, *val = getenv (var);
2569 long res;
2570 if (!val)
2571 return def;
2572 res = strtol (val, &end, 10);
2573 if (end == val)
2574 return def;
2575 return res;
2576 }
2577
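/* Editorial example: with GUILE_MAX_SEGMENT_SIZE=16777216 set in the
   environment, the scm_i_getenv_int call below returns 16777216; an
   unset or non-numeric value falls back to the supplied default. */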
2578
2579 int
2580 scm_init_storage ()
2581 {
2582 scm_sizet gc_trigger_1;
2583 scm_sizet gc_trigger_2;
2584 scm_sizet init_heap_size_1;
2585 scm_sizet init_heap_size_2;
2586 scm_sizet j;
2587
2588 j = SCM_NUM_PROTECTS;
2589 while (j)
2590 scm_sys_protects[--j] = SCM_BOOL_F;
2591 scm_block_gc = 1;
2592
2593 scm_freelist = SCM_EOL;
2594 scm_freelist2 = SCM_EOL;
2595 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2596 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2597 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2598 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2599 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
2600
2601 scm_expmem = 0;
2602
2603 j = SCM_HEAP_SEG_SIZE;
2604 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2605 scm_heap_table = ((scm_heap_seg_data_t *)
2606 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
2607 heap_segment_table_size = 2;
2608
2609 mark_space_ptr = &mark_space_head;
2610
2611 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2612 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
2613 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2614 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2615 return 1;
2616
2617   /* scm_heap_table[0].bounds[0] can change.  Do not remove scm_heap_org. */
2618 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2619
2620 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2621 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2622 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2623 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2624 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2625
2626 /* Initialise the list of ports. */
2627 scm_port_table = (scm_port **)
2628 malloc (sizeof (scm_port *) * scm_port_table_room);
2629 if (!scm_port_table)
2630 return 1;
2631
2632 #ifdef HAVE_ATEXIT
2633 atexit (cleanup);
2634 #else
2635 #ifdef HAVE_ON_EXIT
2636 on_exit (cleanup, 0);
2637 #endif
2638 #endif
2639
2640 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2641 SCM_SETCDR (scm_undefineds, scm_undefineds);
2642
2643 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2644 scm_nullstr = scm_allocate_string (0);
2645 scm_nullvect = scm_c_make_vector (0, SCM_UNDEFINED);
2646
2647 #define DEFAULT_SYMHASH_SIZE 277
2648 scm_symhash = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
2649 scm_symhash_vars = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
2650
2651 scm_stand_in_procs = SCM_EOL;
2652 scm_permobjs = SCM_EOL;
2653 scm_protects = scm_c_make_hash_table (31);
2654
2655 return 0;
2656 }
2657
2658 \f
2659
2660 SCM scm_after_gc_hook;
2661
2662 #if (SCM_DEBUG_DEPRECATED == 0)
2663 static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2664 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2665 static SCM gc_async;
2666
2667
2668 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2669 * is run after the gc, as soon as the asynchronous events are handled by the
2670 * evaluator.
2671 */
2672 static SCM
2673 gc_async_thunk (void)
2674 {
2675 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2676
2677 #if (SCM_DEBUG_DEPRECATED == 0)
2678
2679 /* The following code will be removed in Guile 1.5. */
2680 if (SCM_NFALSEP (scm_gc_vcell))
2681 {
2682 SCM proc = SCM_CDR (scm_gc_vcell);
2683
2684 if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
2685 scm_apply (proc, SCM_EOL, SCM_EOL);
2686 }
2687
2688 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2689
2690 return SCM_UNSPECIFIED;
2691 }
2692
2693
2694 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2695 * the garbage collection. The only purpose of this function is to mark the
2696 * gc_async (which will eventually lead to the execution of the
2697 * gc_async_thunk).
2698 */
2699 static void *
2700 mark_gc_async (void * hook_data, void *func_data, void *data)
2701 {
2702 scm_system_async_mark (gc_async);
2703 return NULL;
2704 }
2705
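/* Editorial sketch: a second client of scm_after_gc_c_hook would follow
 * the same shape as mark_gc_async above.  example_log_gc is an invented
 * name.
 */
static void *
example_log_gc (void *hook_data, void *func_data, void *data)
{
  fprintf (stderr, "gc done\n");
  return NULL;
}
/* ... and would be registered during initialization with:
 *
 *   scm_c_hook_add (&scm_after_gc_c_hook, example_log_gc, NULL, 0);
 */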
2706
2707 void
2708 scm_init_gc ()
2709 {
2710 SCM after_gc_thunk;
2711
2712 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2713 scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
2714 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
2715
2716 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
2717
2718 #if (SCM_DEBUG_DEPRECATED == 0)
2719 scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
2720 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2721 after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
2722 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
2723
2724 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2725
2726 #ifndef SCM_MAGIC_SNARFER
2727 #include "libguile/gc.x"
2728 #endif
2729 }
2730
2731 #endif /*MARK_DEPENDENCIES*/
2732
2733 /*
2734 Local Variables:
2735 c-file-style: "gnu"
2736 End:
2737 */