* Made creation of new smob types thread safe.
libguile/gc.c
/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */

/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
   gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */

/* #define DEBUGINFO */

/* SECTION:  This code is compiled once.
 */

#ifndef MARK_DEPENDENCIES

\f
#include <stdio.h>
#include <errno.h>
#include <string.h>

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/validate.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f

unsigned int scm_gc_running_p = 0;

\f

#if (SCM_DEBUG_CELL_ACCESSES == 1)

scm_bits_t scm_tc16_allocated;

/* Set this to != 0 if every cell that is accessed shall be checked:
 */
unsigned int scm_debug_cell_accesses_p = 1;


/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves determining whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment and whether the cell
 * pointed to is not a free cell.
 */
void
scm_assert_cell_valid (SCM cell)
{
  static unsigned int already_running = 0;

  if (scm_debug_cell_accesses_p && !already_running)
    {
      already_running = 1;  /* set to avoid recursion */

      if (!scm_cellp (cell))
        {
          fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
          abort ();
        }
      else if (!scm_gc_running_p)
        {
          /* Dirk::FIXME:: During garbage collection there occur references to
             free cells.  This is all right during conservative marking, but
             should not happen otherwise (I think).  The case of free cells
             accessed during conservative marking is handled in function
             scm_mark_locations.  However, accesses to free cells still occur
             during gc.  I don't understand why this happens.  If it is a bug
             and gets fixed, the following test should also work while gc is
             running.
           */
          if (SCM_FREE_CELL_P (cell))
            {
              fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
              abort ();
            }
        }
      already_running = 0;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cell access checking is enabled.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (SCM_FALSEP (flag)) {
    scm_debug_cell_accesses_p = 0;
  } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
    scm_debug_cell_accesses_p = 1;
  } else {
    SCM_WRONG_TYPE_ARG (1, flag);
  }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f

/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which cons cells and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of the heap.  If this much heap
 * is allocated initially, the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type scm_sizet.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more
 * heap is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * Heap size 45000 and 40% min yield gives quick startup and no extra
 * heap allocation.  Having higher values on min yield may lead to
 * large heaps, especially if the code's maximum consumption varies
 * between the different freelists.
 */

#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
                                     / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
int scm_default_min_yield_1 = 40;

#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
                                     / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application.
 */
int scm_default_min_yield_2 = 40;

int scm_default_max_segment_size = 2097000L;  /* a little less than 2 MB */

#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make the heap grow by a factor of 1.5 (the increment below is half
 * the current size).  */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT / 10)
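
/* A minimal worked example of the growth rule above, following this
 * file's own #if 0 convention for illustrative code (the numbers are
 * hypothetical, not from a real run):  with a current heap of 600000
 * bytes, SCM_EXPHEAP yields an increment of 300000 bytes, i.e. the
 * heap grows to 1.5 times its previous size.
 */
#if 0
{
  unsigned long heap_size = 600000UL;
  heap_size += SCM_EXPHEAP (heap_size);  /* 600000 + 300000 = 900000 */
}
#endif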

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
   aligned inner bounds for allocated storage */

#ifdef PROT386
/* In 386 protected mode we must only adjust the offset. */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif  /* UNICOS */
#endif  /* PROT386 */

#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)

#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)

\f
/* scm_freelists
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow the heap instead of GC'ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;

SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
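
/* For readability, the positional initializers above map onto the
 * struct fields as follows (a gloss only, not a change in behavior;
 * the trailing heap_size field is implicitly zero):
 *
 *   cells = SCM_EOL, left_to_collect = 0, clusters_allocated = 0,
 *   clusters = SCM_EOL, clustertail = 0, cluster_size = SCM_CLUSTER_SIZE_*,
 *   grow_heap_p = 0, min_yield = 0, min_yield_fraction = 0,
 *   span = 1 or 2, collected = 0, collected_1 = 0.
 */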

/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no
 * allocation is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0;  /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
static unsigned long t_before_sweep;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_sweep_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");

typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;



static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);

typedef enum { return_on_error, abort_on_error } policy_on_error;
static void alloc_some_heap (scm_freelist_t *, policy_on_error);


#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))

/* mark space allocation */

typedef struct scm_mark_space_t
{
  scm_c_bvec_limb_t *bvec_space;
  struct scm_mark_space_t *next;
} scm_mark_space_t;

static scm_mark_space_t *current_mark_space;
static scm_mark_space_t **mark_space_ptr;
static int current_mark_space_offset;
static scm_mark_space_t *mark_space_head;

static scm_c_bvec_limb_t *
get_bvec ()
#define FUNC_NAME "get_bvec"
{
  scm_c_bvec_limb_t *res;

  if (!current_mark_space)
    {
      SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
      if (!current_mark_space)
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space->bvec_space = NULL;
      current_mark_space->next = NULL;

      *mark_space_ptr = current_mark_space;
      mark_space_ptr = &(current_mark_space->next);

      return get_bvec ();
    }

  if (!(current_mark_space->bvec_space))
    {
      SCM_SYSCALL (current_mark_space->bvec_space =
                   (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
      if (!(current_mark_space->bvec_space))
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space_offset = 0;

      return get_bvec ();
    }

  if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
    {
      current_mark_space = NULL;

      return get_bvec ();
    }

  res = current_mark_space->bvec_space + current_mark_space_offset;
  current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;

  return res;
}
#undef FUNC_NAME


static void
clear_mark_space ()
{
  scm_mark_space_t *ms;

  for (ms = mark_space_head; ms; ms = ms->next)
    memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
}


\f
/* Debugging functions.  */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

/* Return the number of the heap segment containing CELL.  */
static int
which_seg (SCM cell)
{
  int i;

  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
      return i;
  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
           SCM_UNPACK (cell));
  abort ();
}


static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, "  %5d %d-cells in segment %d\n",
                     count, master->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, "  %5d %d-cells in segment %d\n",
             count, master->span, last_seg);
}

SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{map-free-list} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;
  fprintf (stderr, "%d segments total (%d:%d",
           scm_n_heap_segs,
           scm_heap_table[0].span,
           scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
             scm_heap_table[i].span,
             scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

static int last_cluster;
static int last_size;

static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %d\n", title, n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%d\n", last_size);
          else
            fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %d", title, i);
      else
        fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}

static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}

SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{free-list-length} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif

#ifdef GUILE_DEBUG_FREELIST

/* Non-zero if freelist debugging is in effect.  Set this via
   `gc-set-debug-check-freelist!'.  */
static int scm_debug_check_freelist = 0;

/* Number of calls to SCM_NEWCELL since startup.  */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  */
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
    if (!SCM_FREE_CELL_P (f))
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        abort ();
      }
}

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#t}, check the freelist for consistency\n"
            "on each cell allocation.  This procedure only exists when the\n"
            "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* [cmm] I did a double-take when I read this code the first time.
     well, FWIW. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
    }

  return new;
}

#endif /* GUILE_DEBUG_FREELIST */

\f

static unsigned long
master_cells_allocated (scm_freelist_t *master)
{
  /* the '- 1' below is to ignore the cluster spine cells. */
  int objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}

static unsigned long
freelist_length (SCM freelist)
{
  int n;
  for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
    ++n;
  return n;
}

static unsigned long
compute_cells_allocated ()
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}

/* {Scheme Interface to GC}
 */

SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  long int local_scm_gc_times;
  long int local_scm_gc_mark_time_taken;
  long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                        scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                        scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                        scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                        scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                        scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                        scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                        scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                        scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
                        scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
                        scm_cons (sym_heap_segments, heap_segs),
                        SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME


static void
gc_start_stats (const char *what)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}


static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that is uniquely associated with @var{obj}\n"
            "for the lifetime of @var{obj}.")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that\n"
            "are no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f
/* {C Interface For When GC is Triggered}
 */

static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that the next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of the total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GCs.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick the largest of the last two yields. */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
               scm_cells_allocated,
               delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}
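
/* A worked example of the adjustment above (illustrative numbers only):
 * with a total heap of 100000 cells and min_yield_fraction = 40, the
 * target total yield is 40000 cells.  If the larger of the last two
 * yields was only 30000 cells, then delta = 10000 > 0 and min_yield
 * rises by 10000 cells, which makes heap growth (grow_heap_p, set in
 * gc_sweep_freelist_finish) more likely on the next collection.
 */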


/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              /* In order to reduce gc frequency, try to allocate a new heap
               * segment first, even if gc might find some free cells.  If we
               * can't obtain a new heap segment, we will try gc later.
               */
              master->grow_heap_p = 0;
              alloc_some_heap (master, return_on_error);
            }
          if (SCM_NULLP (master->clusters))
            {
              /* The heap was not grown, either because it wasn't scheduled to
               * grow, or because there was not enough memory available.  In
               * both cases we have to try gc to get some free cells.
               */
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %d, ",
                       scm_cells_allocated
                       + master_cells_allocated (&scm_master_freelist)
                       + master_cells_allocated (&scm_master_freelist2));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
              if (SCM_NULLP (master->clusters))
                {
                  /* gc could not free any cells.  Now, we _must_ allocate a
                   * new heap segment, because there is no other possibility
                   * to provide a new cell for the caller.
                   */
                  alloc_some_heap (master, abort_on_error);
                }
            }
        }
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}


#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif


scm_c_hook_t scm_before_gc_c_hook;
scm_c_hook_t scm_before_mark_c_hook;
scm_c_hook_t scm_before_sweep_c_hook;
scm_c_hook_t scm_after_sweep_c_hook;
scm_c_hook_t scm_after_gc_c_hook;


void
scm_igc (const char *what)
{
  int j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
  /* During the critical section, only the current thread may run. */
  SCM_CRITICAL_SECTION_START;

  /* fprintf (stderr, "gc: %s\n", what); */

  if (!scm_stack_base || scm_block_gc)
    {
      /* Leave the critical section before bailing out. */
      SCM_CRITICAL_SECTION_END;
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM *elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Mark objects on the C stack. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      ((scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
                                    sizeof scm_save_regs_gc_mark)
                       / sizeof (SCM_STACKITEM)));

  {
    scm_sizet stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

  SCM_CRITICAL_SECTION_END;
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}

\f

/* {Mark/Sweep}
 */

#define MARK scm_gc_mark
#define FNAME "scm_gc_mark"

#endif /*!MARK_DEPENDENCIES*/

/* Mark an object precisely.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;
  scm_bits_t cell_type;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
  /* We are in debug mode.  Check the ptr exhaustively. */
  if (!scm_cellp (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
  /* In non-debug mode, do at least some cheap testing. */
  if (!SCM_CELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif

#ifndef MARK_DEPENDENCIES

  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  cell_type = SCM_GC_CELL_TYPE (ptr);
  switch (SCM_ITAG7 (cell_type))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto gc_mark_nimp;
        }
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      RECURSE (SCM_SETTER (ptr));
      ptr = SCM_PROCEDURE (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      {
        /* Dirk:FIXME:: The following code is super ugly:  ptr may be a struct
         * or a gloc.  If it is a gloc, the cell word #0 of ptr is a pointer
         * to a heap cell.  If it is a struct, the cell word #0 of ptr is a
         * pointer to a struct vtable data region.  The fact that these are
         * accessed in the same way restricts the possibilities of changing
         * the data layout of structs or heap cells.
         */
        scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_bits_t *vtable_data = (scm_bits_t *) word0;  /* access as struct */
        if (vtable_data[scm_vtable_index_vcell] != 0)
          {
            /* ptr is a gloc */
            SCM gloc_car = SCM_PACK (word0);
            RECURSE (gloc_car);
            ptr = SCM_CDR (ptr);
            goto gc_mark_loop;
          }
        else
          {
            /* ptr is a struct */
            SCM layout = SCM_PACK (vtable_data[scm_vtable_index_layout]);
            int len = SCM_SYMBOL_LENGTH (layout);
            char *fields_desc = SCM_SYMBOL_CHARS (layout);
            scm_bits_t *struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);

            if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
              {
                RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
                RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
              }
            if (len)
              {
                int x;

                for (x = 0; x < len - 2; x += 2, ++struct_data)
                  if (fields_desc[x] == 'p')
                    RECURSE (SCM_PACK (*struct_data));
                if (fields_desc[x] == 'p')
                  {
                    if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                      for (x = *struct_data++; x; --x, ++struct_data)
                        RECURSE (SCM_PACK (*struct_data));
                    else
                      RECURSE (SCM_PACK (*struct_data));
                  }
              }
            /* mark vtable */
            ptr = SCM_PACK (vtable_data[scm_vtable_index_vtable]);
            goto gc_mark_loop;
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_ENV (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto gc_mark_nimp;
        }
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_ENV (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
        unsigned long int i = SCM_CCLO_LENGTH (ptr);
        unsigned long int j;
        for (j = 1; j != i; ++j)
          {
            SCM obj = SCM_CCLO_REF (ptr, j);
            if (!SCM_IMP (obj))
              RECURSE (obj);
          }
        ptr = SCM_CCLO_REF (ptr, 0);
        goto gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_substring:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_VECTOR_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of
                   * the key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist
                   * containing freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    RECURSE (SCM_CAR (kvpair));
                  if (!weak_values)
                    RECURSE (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                RECURSE (alist);
            }
        }
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
      if (!(i < scm_numptob))
        SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
      if (SCM_PTAB_ENTRY (ptr))
        RECURSE (SCM_FILENAME (ptr));
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
          if (!(i < scm_numsmob))
            SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME

#ifndef MARK_DEPENDENCIES

#undef MARK
#undef FNAME

/* And here we define `scm_gc_mark_dependencies', by including this
 * same file in itself.
 */
#define MARK scm_gc_mark_dependencies
#define FNAME "scm_gc_mark_dependencies"
#define MARK_DEPENDENCIES
#include "gc.c"
#undef MARK_DEPENDENCIES
#undef MARK
#undef FNAME


/* Mark a Region Conservatively
 */

void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = * (SCM *) &x[m];
      if (SCM_CELLP (obj))
        {
          SCM_CELLPTR ptr = SCM2PTR (obj);
          int i = 0;
          int j = scm_n_heap_segs - 1;
          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
              && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
            {
              while (i <= j)
                {
                  int seg_id;
                  seg_id = -1;
                  if ((i == j)
                      || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                    seg_id = i;
                  else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                    seg_id = j;
                  else
                    {
                      int k;
                      k = (i + j) / 2;
                      if (k == i)
                        break;
                      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                        {
                          j = k;
                          ++i;
                          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                            continue;
                          else
                            break;
                        }
                      else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                        {
                          i = k;
                          --j;
                          if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                            continue;
                          else
                            break;
                        }
                    }

                  if (SCM_GC_IN_CARD_HEADERP (ptr))
                    break;

                  if (scm_heap_table[seg_id].span == 1
                      || DOUBLECELL_ALIGNED_P (obj))
                    scm_gc_mark (obj);

                  break;
                }
            }
        }
    }
}


/* The function scm_cellp determines whether an SCM value can be regarded as a
 * pointer to a cell on the heap.  Binary search is used in order to determine
 * the heap segment that contains the cell.
 */
int
scm_cellp (SCM value)
{
  if (SCM_CELLP (value)) {
    scm_cell *ptr = SCM2PTR (value);
    unsigned int i = 0;
    unsigned int j = scm_n_heap_segs - 1;

    if (SCM_GC_IN_CARD_HEADERP (ptr))
      return 0;

    while (i < j) {
      int k = (i + j) / 2;
      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
        j = k;
      } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
        i = k + 1;
      }
    }

    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
        && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
        && !SCM_GC_IN_CARD_HEADERP (ptr)
        )
      return 1;
    else
      return 0;
  } else
    return 0;
}


static void
gc_sweep_freelist_start (scm_freelist_t *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}

static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  int collected;
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      SCM c = freelist->cells;
      SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
      SCM_SET_CELL_WORD_1 (c, SCM_EOL);
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}

#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
             CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
             : nxt__); \
  } while (0)

void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist.  */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr;

          if (SCM_GC_IN_CARD_HEADERP (ptr))
            {
              SCM_CELLPTR nxt;

              /* cheat here */
              nxt = ptr;
              NEXT_DATA_CELL (nxt, span);
              j += span;

              ptr = nxt - span;
              continue;
            }

          scmptr = PTR2SCM (ptr);

          if (SCM_GCMARKP (scmptr))
            continue;

          switch SCM_TYP7 (scmptr)
            {
            case scm_tcs_cons_gloc:
              {
                /* Dirk:FIXME:: Again, super ugly code:  scmptr may be a
                 * struct or a gloc.  See the corresponding comment in
                 * scm_gc_mark.
                 */
                scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
                                    - scm_tc3_cons_gloc);
                /* access as struct */
                scm_bits_t *vtable_data = (scm_bits_t *) word0;
                if (vtable_data[scm_vtable_index_vcell] == 0)
                  {
                    /* Structs need to be freed in a special order.
                     * This is handled by GC C hooks in struct.c.
                     */
                    SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
                    scm_structs_to_free = scmptr;
                    continue;
                  }
                /* fall through so that scmptr gets collected */
              }
              break;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              break;
            case scm_tc7_wvect:
              m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
              scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
              break;
            case scm_tc7_vector:
              {
                unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += length * sizeof (scm_bits_t);
                    scm_must_free (SCM_VECTOR_BASE (scmptr));
                  }
                break;
              }
#ifdef CCLO
            case scm_tc7_cclo:
              m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
              scm_must_free (SCM_CCLO_BASE (scmptr));
              break;
#endif
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              {
                unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
                    scm_must_free (SCM_BITVECTOR_BASE (scmptr));
                  }
              }
              break;
            case scm_tc7_byvect:
            case scm_tc7_ivect:
            case scm_tc7_uvect:
            case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
#endif
            case scm_tc7_fvect:
            case scm_tc7_dvect:
            case scm_tc7_cvect:
              m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
              scm_must_free (SCM_UVECTOR_BASE (scmptr));
              break;
#endif
            case scm_tc7_substring:
              break;
            case scm_tc7_string:
              m += SCM_STRING_LENGTH (scmptr) + 1;
              scm_must_free (SCM_STRING_CHARS (scmptr));
              break;
            case scm_tc7_symbol:
              m += SCM_SYMBOL_LENGTH (scmptr) + 1;
              scm_must_free (SCM_SYMBOL_CHARS (scmptr));
              break;
            case scm_tcs_subrs:
              /* the various "subrs" (primitives) are never freed */
              continue;
            case scm_tc7_port:
              if SCM_OPENP (scmptr)
                {
                  int k = SCM_PTOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
                  if (!(k < scm_numptob))
                    SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free rather than
                     scm_ptobs[k].close:  .close is for an explicit
                     close-port by the user.  */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_CLR_PORT_OPEN_FLAG (scmptr);
                }
              break;
            case scm_tc7_smob:
              switch SCM_TYP16 (scmptr)
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  scm_must_free (SCM_BDIGITS (scmptr));
                  break;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  m += sizeof (scm_complex_t);
                  scm_must_free (SCM_COMPLEX_MEM (scmptr));
                  break;
                default:
                  {
                    int k;
                    k = SCM_SMOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
                    if (!(k < scm_numsmob))
                      SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
                    if (scm_smobs[k].free)
                      m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
            default:
              SCM_MISC_ERROR ("unknown type", SCM_EOL);
            }

          if (!--left_to_collect)
            {
              SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object.  */
              SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
              SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }
        }

#ifdef GC_FREE_SEGMENTS
      /* Note: `n' (a per-segment count of free cells) is not computed in
         this version of the sweeper, so this block is stale; it is only
         compiled if GC_FREE_SEGMENTS is defined.  */
      if (n == seg_size)
        {
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--;  /* We need to scan the segment just moved.  */
        }
      else
#endif /* ifdef GC_FREE_SEGMENTS */
        {
          /* Update the real freelist pointer to point to the head of
             the list of free cells we've built for this segment.  */
          freelist->cells = nfreelist;
          freelist->left_to_collect = left_to_collect;
        }

#ifdef GUILE_DEBUG_FREELIST
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead.  */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME


\f
/* {Front end to malloc}
 *
 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
 * scm_done_free
 *
 * These functions provide services comparable to malloc, realloc, and
 * free.  They should be used when allocating memory that will be under
 * control of the garbage collector, i.e., if the memory may be freed
 * during garbage collection.
 */

/* scm_must_malloc
 * Return newly malloced storage or throw an error.
 *
 * The parameter WHAT is a string for error reporting.
 * If the threshold scm_mtrigger would be passed by this
 * allocation, or if the first call to malloc fails,
 * garbage collect -- on the presumption that some objects
 * using malloced storage may be collected.
 *
 * The limit scm_mtrigger may be raised by this allocation.
 */
void *
scm_must_malloc (scm_sizet size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_register (ptr, what);
#endif
          return ptr;
        }
    }

  scm_igc (what);

  nm = scm_mallocated + size;
  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  scm_memory_error (what);
}
1915
1916
1917 /* scm_must_realloc
1918 * is similar to scm_must_malloc.
1919 */
1920 void *
1921 scm_must_realloc (void *where,
1922 scm_sizet old_size,
1923 scm_sizet size,
1924 const char *what)
1925 {
1926 void *ptr;
1927 scm_sizet nm = scm_mallocated + size - old_size;
1928
1929 if (nm <= scm_mtrigger)
1930 {
1931 SCM_SYSCALL (ptr = realloc (where, size));
1932 if (NULL != ptr)
1933 {
1934 scm_mallocated = nm;
1935 #ifdef GUILE_DEBUG_MALLOC
1936 scm_malloc_reregister (where, ptr, what);
1937 #endif
1938 return ptr;
1939 }
1940 }
1941
1942 scm_igc (what);
1943
1944 nm = scm_mallocated + size - old_size;
1945 SCM_SYSCALL (ptr = realloc (where, size));
1946 if (NULL != ptr)
1947 {
1948 scm_mallocated = nm;
1949 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1950 if (nm > scm_mtrigger)
1951 scm_mtrigger = nm + nm / 2;
1952 else
1953 scm_mtrigger += scm_mtrigger / 2;
1954 }
1955 #ifdef GUILE_DEBUG_MALLOC
1956 scm_malloc_reregister (where, ptr, what);
1957 #endif
1958 return ptr;
1959 }
1960
1961   scm_memory_error (what);  /* signals an error and does not return */
1962 }
1963
1964
1965 void
1966 scm_must_free (void *obj)
1967 #define FUNC_NAME "scm_must_free"
1968 {
1969 #ifdef GUILE_DEBUG_MALLOC
1970 scm_malloc_unregister (obj);
1971 #endif
1972 if (obj)
1973 free (obj);
1974 else
1975 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
1976 }
1977 #undef FUNC_NAME
1978
1979
1980 /* Announce that there has been some malloc done that will be freed
1981 * during gc. A typical use is for a smob that uses some malloced
1982  * memory but cannot get it from scm_must_malloc (for whatever
1983 * reason). When a new object of this smob is created you call
1984 * scm_done_malloc with the size of the object. When your smob free
1985 * function is called, be sure to include this size in the return
1986 * value.
1987 *
1988 * If you can't actually free the memory in the smob free function,
1989 * for whatever reason (like reference counting), you still can (and
1990 * should) report the amount of memory freed when you actually free it.
1991 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1992 * eh? Or even better, call scm_done_free. */
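/* A sketch of the pattern just described; the foreign_thing type, its
 * functions and its smob tag are hypothetical.  The external library
 * owns the memory, so we merely report its size to the GC.
 */
#if 0
SCM
make_foreign_thing (void)
{
  foreign_thing *t = foreign_thing_new ();   /* malloced outside Guile */
  scm_done_malloc (sizeof (foreign_thing));  /* report it to the GC */
  SCM_RETURN_NEWSMOB (foreign_thing_tag, t);
}

static scm_sizet
free_foreign_thing (SCM obj)
{
  foreign_thing_destroy ((foreign_thing *) SCM_SMOB_DATA (obj));
  /* Include the reported size in the return value; if the memory
     could only be released later, we would instead return 0 here and
     call scm_done_free (sizeof (foreign_thing)) at that later point. */
  return sizeof (foreign_thing);
}
#endif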
1993
1994 void
1995 scm_done_malloc (long size)
1996 {
1997 scm_mallocated += size;
1998
1999 if (scm_mallocated > scm_mtrigger)
2000 {
2001 scm_igc ("foreign mallocs");
2002 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2003 {
2004 if (scm_mallocated > scm_mtrigger)
2005 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2006 else
2007 scm_mtrigger += scm_mtrigger / 2;
2008 }
2009 }
2010 }
2011
2012 void
2013 scm_done_free (long size)
2014 {
2015 scm_mallocated -= size;
2016 }
2017
2018
2019 \f
2020 /* {Heap Segments}
2021 *
2022 * Each heap segment is an array of objects of a particular size.
2023 * Every segment has an associated (possibly shared) freelist.
2024 * A table of segment records is kept that records the upper and
2025 * lower extents of the segment; this is used during the conservative
2026  * phase of gc to identify probable gc roots (because they point
2027 * into valid segments at reasonable offsets). */
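/* A sketch of the bounds test implied above (the helper itself is
 * hypothetical; SCM_PTR_LE and the segment table are from this file):
 * an address is a candidate root if some segment's bounds enclose it.
 */
#if 0
static int
points_into_heap (SCM_CELLPTR ptr)
{
  int i;
  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
        && !SCM_PTR_LE (scm_heap_table[i].bounds[1], ptr))
      return 1;
  return 0;
}
#endif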
2028
2029 /* scm_expmem
2030 * is true if the first segment was smaller than INIT_HEAP_SEG.
2031 * If scm_expmem is set to one, subsequent segment allocations will
2032 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2033 */
2034 int scm_expmem = 0;
2035
2036 scm_sizet scm_max_segment_size;
2037
2038 /* scm_heap_org
2039 * is the lowest base address of any heap segment.
2040 */
2041 SCM_CELLPTR scm_heap_org;
2042
2043 scm_heap_seg_data_t * scm_heap_table = 0;
2044 static unsigned int heap_segment_table_size = 0;
2045 int scm_n_heap_segs = 0;
2046
2047 /* init_heap_seg
2048  * initializes a new heap segment and returns its size in bytes, or 0 on failure.
2049 *
2050 * The segment origin and segment size in bytes are input parameters.
2051 * The freelist is both input and output.
2052 *
2053 * This function presumes that the scm_heap_table has already been expanded
2054  * to accommodate a new segment record and that the markbit space was reserved
2055 * for all the cards in this segment.
2056 */
2057
2058 #define INIT_CARD(card, span) \
2059 do { \
2060 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2061 if ((span) == 2) \
2062 SCM_GC_SET_CARD_DOUBLECELL (card); \
2063 } while (0)
2064
2065 static scm_sizet
2066 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
2067 {
2068 register SCM_CELLPTR ptr;
2069 SCM_CELLPTR seg_end;
2070 int new_seg_index;
2071 int n_new_cells;
2072 int span = freelist->span;
2073
2074 if (seg_org == NULL)
2075 return 0;
2076
2077   /* Align the starting pointer up to a card boundary.
2078 */
2079 ptr = SCM_GC_CARD_UP (seg_org);
2080
2081   /* Compute the ceiling on valid object pointers within this segment.
2082 */
2083 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
2084
2085   /* Find the right place for the new segment record and insert it
2086    * into the table of segment records.
2087    */
2088 for (new_seg_index = 0;
2089 ( (new_seg_index < scm_n_heap_segs)
2090 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2091 new_seg_index++)
2092 ;
2093
2094 {
2095 int i;
2096 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2097 scm_heap_table[i] = scm_heap_table[i - 1];
2098 }
2099
2100 ++scm_n_heap_segs;
2101
2102 scm_heap_table[new_seg_index].span = span;
2103 scm_heap_table[new_seg_index].freelist = freelist;
2104 scm_heap_table[new_seg_index].bounds[0] = ptr;
2105 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2106
2107   /* Number of cells between the aligned segment bounds. */
2108 n_new_cells = seg_end - ptr;
2109
2110 freelist->heap_size += n_new_cells;
2111
2112 /* Partition objects in this segment into clusters */
2113 {
2114 SCM clusters;
2115 SCM *clusterp = &clusters;
2116
2117 NEXT_DATA_CELL (ptr, span);
2118 while (ptr < seg_end)
2119 {
2120 scm_cell *nxt = ptr;
2121 scm_cell *prv = NULL;
2122 scm_cell *last_card = NULL;
2123 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2124 NEXT_DATA_CELL(nxt, span);
2125
2126 /* Allocate cluster spine
2127 */
2128 *clusterp = PTR2SCM (ptr);
2129 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
2130 clusterp = SCM_CDRLOC (*clusterp);
2131 ptr = nxt;
2132
2133 while (n_data_cells--)
2134 {
2135 scm_cell *card = SCM_GC_CELL_CARD (ptr);
2136 SCM scmptr = PTR2SCM (ptr);
2137 nxt = ptr;
2138 NEXT_DATA_CELL (nxt, span);
2139 prv = ptr;
2140
2141 if (card != last_card)
2142 {
2143 INIT_CARD (card, span);
2144 last_card = card;
2145 }
2146
2147 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2148 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
2149
2150 ptr = nxt;
2151 }
2152
2153 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
2154 }
2155
2156 /* sanity check */
2157 {
2158 scm_cell *ref = seg_end;
2159 NEXT_DATA_CELL (ref, span);
2160 if (ref != ptr)
2161 /* [cmm] looks like the segment size doesn't divide cleanly by
2162 cluster size. bad cmm! */
2163 abort();
2164 }
2165
2166 /* Patch up the last cluster pointer in the segment
2167 * to join it to the input freelist.
2168 */
2169 *clusterp = freelist->clusters;
2170 freelist->clusters = clusters;
2171 }
2172
2173 #ifdef DEBUGINFO
2174 fprintf (stderr, "H");
2175 #endif
2176 return size;
2177 }
2178
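/* Round LEN up to a whole number of clusters for FREELIST, plus the
   slack needed for card alignment. */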
2179 static scm_sizet
2180 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2181 {
2182 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2183
2184 return
2185 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2186 + ALIGNMENT_SLACK (freelist);
2187 }
2188
2189 static void
2190 alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
2191 #define FUNC_NAME "alloc_some_heap"
2192 {
2193 SCM_CELLPTR ptr;
2194 long len;
2195
2196 if (scm_gc_heap_lock)
2197 {
2198 /* Critical code sections (such as the garbage collector) aren't
2199 * supposed to add heap segments.
2200 */
2201       fprintf (stderr, "alloc_some_heap: Cannot extend locked heap.\n");
2202 abort ();
2203 }
2204
2205 if (scm_n_heap_segs == heap_segment_table_size)
2206 {
2207 /* We have to expand the heap segment table to have room for the new
2208 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2209 * init_heap_seg only if the allocation of the segment itself succeeds.
2210 */
2211 unsigned int new_table_size = scm_n_heap_segs + 1;
2212 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2213 scm_heap_seg_data_t * new_heap_table;
2214
2215 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2216 realloc ((char *)scm_heap_table, size)));
2217 if (!new_heap_table)
2218 {
2219 if (error_policy == abort_on_error)
2220 {
2221 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2222 abort ();
2223 }
2224 else
2225 {
2226 return;
2227 }
2228 }
2229 else
2230 {
2231 scm_heap_table = new_heap_table;
2232 heap_segment_table_size = new_table_size;
2233 }
2234 }
2235
2236 /* Pick a size for the new heap segment.
2237 * The rule for picking the size of a segment is explained in
2238 * gc.h
2239 */
2240 {
2241     /* Ensure that the new segment is predicted to be large enough.
2242 *
2243 * New yield should at least equal GC fraction of new heap size, i.e.
2244 *
2245 * y + dh > f * (h + dh)
2246 *
2247 * y : yield
2248 * f : min yield fraction
2249 * h : heap size
2250 * dh : size of new heap segment
2251 *
2252 * This gives dh > (f * h - y) / (1 - f)
2253 */
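    /* In the code below, f is expressed as a percentage, so the bound
       becomes dh > (f * h - 100 * y) / (100 - f); the divisor 99 - f
       appears to be chosen to err on the large side.  Worked example
       (illustrative numbers only): with f = 40, h = 100000 cells and
       y = 30000 cells, dh must exceed (40 * 100000 - 100 * 30000) / 60,
       i.e. about 16667 cells. */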
2254 int f = freelist->min_yield_fraction;
2255 long h = SCM_HEAP_SIZE;
2256 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2257 len = SCM_EXPHEAP (freelist->heap_size);
2258 #ifdef DEBUGINFO
2259     fprintf (stderr, "(%ld < %ld)", len, min_cells);
2260 #endif
2261 if (len < min_cells)
2262 len = min_cells + freelist->cluster_size;
2263 len *= sizeof (scm_cell);
2264 /* force new sampling */
2265 freelist->collected = LONG_MAX;
2266 }
2267
2268 if (len > scm_max_segment_size)
2269 len = scm_max_segment_size;
2270
2271 {
2272 scm_sizet smallest;
2273
2274 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2275
2276 if (len < smallest)
2277 len = smallest;
2278
2279 /* Allocate with decaying ambition. */
2280 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2281 && (len >= smallest))
2282 {
2283 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
2284 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2285 if (ptr)
2286 {
2287 init_heap_seg (ptr, rounded_len, freelist);
2288 return;
2289 }
2290 len /= 2;
2291 }
2292 }
2293
2294 if (error_policy == abort_on_error)
2295 {
2296 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2297 abort ();
2298 }
2299 }
2300 #undef FUNC_NAME
2301
2302
2303 SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
2304 (SCM name),
2305 "Flushes the glocs for @var{name}, or all glocs if @var{name}\n"
2306 "is @code{#t}.")
2307 #define FUNC_NAME s_scm_unhash_name
2308 {
2309 int x;
2310 int bound;
2311   if (!SCM_EQ_P (name, SCM_BOOL_T)) SCM_VALIDATE_SYMBOL (1, name);  /* the docstring also permits #t */
2312 SCM_DEFER_INTS;
2313 bound = scm_n_heap_segs;
2314 for (x = 0; x < bound; ++x)
2315 {
2316 SCM_CELLPTR p;
2317 SCM_CELLPTR pbound;
2318 p = scm_heap_table[x].bounds[0];
2319 pbound = scm_heap_table[x].bounds[1];
2320 while (p < pbound)
2321 {
2322 SCM cell = PTR2SCM (p);
2323 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
2324 {
2325 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2326 * struct cell. See the corresponding comment in scm_gc_mark.
2327 */
2328 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2329 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2330 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
2331 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
2332 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
2333 {
2334 SCM_SET_CELL_OBJECT_0 (cell, name);
2335 }
2336 }
2337 ++p;
2338 }
2339 }
2340 SCM_ALLOW_INTS;
2341 return name;
2342 }
2343 #undef FUNC_NAME
2344
2345
2346 \f
2347 /* {GC Protection Helper Functions}
2348 */
2349
2350
2351 /*
2352 * If within a function you need to protect one or more scheme objects from
2353 * garbage collection, pass them as parameters to one of the
2354 * scm_remember_upto_here* functions below. These functions don't do
2355 * anything, but since the compiler does not know that they are actually
2356 * no-ops, it will generate code that calls these functions with the given
2357 * parameters. Therefore, you can be sure that the compiler will keep those
2358 * scheme values alive (on the stack or in a register) up to the point where
2359 * scm_remember_upto_here* is called. In other words, place the call to
2360  * scm_remember_upto_here* _after_ the last piece of code in your function
2361  * that depends on the scheme object being alive.
2362 *
2363  * Example: We want to make sure that the string object str does not get
2364 * garbage collected during the execution of 'some_function', because
2365 * otherwise the characters belonging to str would be freed and
2366 * 'some_function' might access freed memory. To make sure that the compiler
2367 * keeps str alive on the stack or in a register such that it is visible to
2368 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2369  * call to 'some_function'. Note that this would not be necessary if str were
2370 * used anyway after the call to 'some_function'.
2371 * char *chars = SCM_STRING_CHARS (str);
2372 * some_function (chars);
2373 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2374 */
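/* The example above as a complete function (the wrapper and
 * some_function are hypothetical; SCM_STRING_CHARS is real):
 */
#if 0
static void
call_some_function (SCM str)
{
  char *chars = SCM_STRING_CHARS (str);
  some_function (chars);
  scm_remember_upto_here_1 (str);  /* str is alive up to this point */
}
#endif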
2375
2376 void
2377 scm_remember_upto_here_1 (SCM obj)
2378 {
2379 /* Empty. Protects a single object from garbage collection. */
2380 }
2381
2382 void
2383 scm_remember_upto_here_2 (SCM obj1, SCM obj2)
2384 {
2385 /* Empty. Protects two objects from garbage collection. */
2386 }
2387
2388 void
2389 scm_remember_upto_here (SCM obj, ...)
2390 {
2391 /* Empty. Protects any number of objects from garbage collection. */
2392 }
2393
2394
2395 #if (SCM_DEBUG_DEPRECATED == 0)
2396
2397 void
2398 scm_remember (SCM *ptr)
2399 {
2400 /* empty */
2401 }
2402
2403 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2404
2405 /*
2406 These crazy functions prevent garbage collection
2407 of arguments after the first argument by
2408 ensuring they remain live throughout the
2409 function because they are used in the last
2410 line of the code block.
2411 It'd be better to have a nice compiler hint to
2412 aid the conservative stack-scanning GC. --03/09/00 gjb */
2413 SCM
2414 scm_return_first (SCM elt, ...)
2415 {
2416 return elt;
2417 }
2418
2419 int
2420 scm_return_first_int (int i, ...)
2421 {
2422 return i;
2423 }
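/* Illustrative use (do_compute and the wrapper are hypothetical):
 * return a freshly computed value while ensuring str stays live until
 * the call to scm_return_first completes.
 */
#if 0
static SCM
compute_and_return (SCM str)
{
  SCM result = do_compute (str);           /* hypothetical computation */
  return scm_return_first (result, str);   /* str outlives do_compute */
}
#endif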
2424
2425
2426 SCM
2427 scm_permanent_object (SCM obj)
2428 {
2429 SCM_REDEFER_INTS;
2430 scm_permobjs = scm_cons (obj, scm_permobjs);
2431 SCM_REALLOW_INTS;
2432 return obj;
2433 }
2434
2435
2436 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2437 other references are dropped, until the object is unprotected by calling
2438 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2439    i.e. it is possible to protect the same object several times, but it is
2440 necessary to unprotect the object the same number of times to actually get
2441 the object unprotected. It is an error to unprotect an object more often
2442 than it has been protected before. The function scm_protect_object returns
2443 OBJ.
2444 */
2445
2446 /* Implementation note: For every object X, there is a counter which
2447 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2448 */
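/* Minimal usage sketch (hypothetical caller code): protection calls
 * nest, so each scm_protect_object must be matched by one
 * scm_unprotect_object before the object becomes collectable again.
 */
#if 0
{
  SCM obj = scm_cons (SCM_MAKINUM (1), SCM_EOL);
  scm_protect_object (obj);    /* count: 1 */
  scm_protect_object (obj);    /* count: 2 */
  scm_unprotect_object (obj);  /* count: 1 -- still protected */
  scm_unprotect_object (obj);  /* count: 0 -- collectable again */
}
#endif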
2449
2450 SCM
2451 scm_protect_object (SCM obj)
2452 {
2453 SCM handle;
2454
2455 /* This critical section barrier will be replaced by a mutex. */
2456 SCM_REDEFER_INTS;
2457
2458 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2459 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
2460
2461 SCM_REALLOW_INTS;
2462
2463 return obj;
2464 }
2465
2466
2467 /* Remove any protection for OBJ established by a prior call to
2468 scm_protect_object. This function returns OBJ.
2469
2470 See scm_protect_object for more information. */
2471 SCM
2472 scm_unprotect_object (SCM obj)
2473 {
2474 SCM handle;
2475
2476 /* This critical section barrier will be replaced by a mutex. */
2477 SCM_REDEFER_INTS;
2478
2479 handle = scm_hashq_get_handle (scm_protects, obj);
2480
2481 if (SCM_FALSEP (handle))
2482 {
2483 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2484 abort ();
2485 }
2486 else
2487 {
2488 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2489 if (count == 0)
2490 scm_hashq_remove_x (scm_protects, obj);
2491 else
2492 SCM_SETCDR (handle, SCM_MAKINUM (count));
2493 }
2494
2495 SCM_REALLOW_INTS;
2496
2497 return obj;
2498 }
2499
2500 int terminating;
2501
2502 /* Called on process termination. */
2503 #ifdef HAVE_ATEXIT
2504 static void
2505 cleanup (void)
2506 #else
2507 #ifdef HAVE_ON_EXIT
2508 extern int on_exit (void (*procp) (), int arg);
2509
2510 static void
2511 cleanup (int status, void *arg)
2512 #else
2513 #error Do not know how to set up a cleanup handler on your system.
2514 #endif
2515 #endif
2516 {
2517 terminating = 1;
2518 scm_flush_all_ports ();
2519 }
2520
2521 \f
2522 static int
2523 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
2524 {
2525 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2526
2527 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2528 rounded_size,
2529 freelist))
2530 {
2531 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2532 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2533 rounded_size,
2534 freelist))
2535 return 1;
2536 }
2537 else
2538 scm_expmem = 1;
2539
2540 if (freelist->min_yield_fraction)
2541 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2542 / 100);
2543 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2544
2545 return 0;
2546 }
2547
2548 \f
2549 static void
2550 init_freelist (scm_freelist_t *freelist,
2551 int span,
2552 int cluster_size,
2553 int min_yield)
2554 {
2555 freelist->clusters = SCM_EOL;
2556 freelist->cluster_size = cluster_size + 1;
2557 freelist->left_to_collect = 0;
2558 freelist->clusters_allocated = 0;
2559 freelist->min_yield = 0;
2560 freelist->min_yield_fraction = min_yield;
2561 freelist->span = span;
2562 freelist->collected = 0;
2563 freelist->collected_1 = 0;
2564 freelist->heap_size = 0;
2565 }
2566
2567
2568 /* Get an integer from an environment variable, or DEF if it is unset or malformed. */
2569 static int
2570 scm_i_getenv_int (const char *var, int def)
2571 {
2572 char *end, *val = getenv (var);
2573 long res;
2574 if (!val)
2575 return def;
2576 res = strtol (val, &end, 10);
2577 if (end == val)
2578 return def;
2579 return res;
2580 }
2581
2582
2583 int
2584 scm_init_storage ()
2585 {
2586 scm_sizet gc_trigger_1;
2587 scm_sizet gc_trigger_2;
2588 scm_sizet init_heap_size_1;
2589 scm_sizet init_heap_size_2;
2590 scm_sizet j;
2591
2592 j = SCM_NUM_PROTECTS;
2593 while (j)
2594 scm_sys_protects[--j] = SCM_BOOL_F;
2595 scm_block_gc = 1;
2596
2597 scm_freelist = SCM_EOL;
2598 scm_freelist2 = SCM_EOL;
2599 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2600 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2601 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2602 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2603 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
2604
2605 scm_expmem = 0;
2606
2607 j = SCM_HEAP_SEG_SIZE;
2608 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2609 scm_heap_table = ((scm_heap_seg_data_t *)
2610 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
2611 heap_segment_table_size = 2;
2612
2613 mark_space_ptr = &mark_space_head;
2614
2615 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2616 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
2617 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2618 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2619 return 1;
2620
2621   /* scm_heap_table[0].bounds[0] can change; do not remove scm_heap_org. */
2622 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2623
2624 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2625 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2626 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2627 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2628 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2629
2630   /* Initialise the port table. */
2631 scm_port_table = (scm_port **)
2632 malloc (sizeof (scm_port *) * scm_port_table_room);
2633 if (!scm_port_table)
2634 return 1;
2635
2636 #ifdef HAVE_ATEXIT
2637 atexit (cleanup);
2638 #else
2639 #ifdef HAVE_ON_EXIT
2640 on_exit (cleanup, 0);
2641 #endif
2642 #endif
2643
2644 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2645 SCM_SETCDR (scm_undefineds, scm_undefineds);
2646
2647 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2648 scm_nullstr = scm_allocate_string (0);
2649 scm_nullvect = scm_c_make_vector (0, SCM_UNDEFINED);
2650
2651 #define DEFAULT_SYMHASH_SIZE 277
2652 scm_symhash = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
2653 scm_symhash_vars = scm_c_make_hash_table (DEFAULT_SYMHASH_SIZE);
2654
2655 scm_stand_in_procs = SCM_EOL;
2656 scm_permobjs = SCM_EOL;
2657 scm_protects = scm_c_make_hash_table (31);
2658
2659 return 0;
2660 }
2661
2662 \f
2663
2664 SCM scm_after_gc_hook;
2665
2666 #if (SCM_DEBUG_DEPRECATED == 0)
2667 static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2668 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2669 static SCM gc_async;
2670
2671
2672 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2673 * is run after the gc, as soon as the asynchronous events are handled by the
2674 * evaluator.
2675 */
2676 static SCM
2677 gc_async_thunk (void)
2678 {
2679 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2680
2681 #if (SCM_DEBUG_DEPRECATED == 0)
2682
2683 /* The following code will be removed in Guile 1.5. */
2684 if (SCM_NFALSEP (scm_gc_vcell))
2685 {
2686 SCM proc = SCM_CDR (scm_gc_vcell);
2687
2688 if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
2689 scm_apply (proc, SCM_EOL, SCM_EOL);
2690 }
2691
2692 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2693
2694 return SCM_UNSPECIFIED;
2695 }
2696
2697
2698 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2699 * the garbage collection. The only purpose of this function is to mark the
2700 * gc_async (which will eventually lead to the execution of the
2701 * gc_async_thunk).
2702 */
2703 static void *
2704 mark_gc_async (void * hook_data, void *func_data, void *data)
2705 {
2706 scm_system_async_mark (gc_async);
2707 return NULL;
2708 }
2709
2710
2711 void
2712 scm_init_gc ()
2713 {
2714 SCM after_gc_thunk;
2715
2716 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2717 scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
2718 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
2719
2720 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
2721
2722 #if (SCM_DEBUG_DEPRECATED == 0)
2723 scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
2724 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2725 after_gc_thunk = scm_make_subr_opt ("%gc-thunk", scm_tc7_subr_0, gc_async_thunk, 0);
2726 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
2727
2728 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2729
2730 #ifndef SCM_MAGIC_SNARFER
2731 #include "libguile/gc.x"
2732 #endif
2733 }
2734
2735 #endif /*MARK_DEPENDENCIES*/
2736
2737 /*
2738 Local Variables:
2739 c-file-style: "gnu"
2740 End:
2741 */