/* libguile/gc.c — Guile garbage collector core.
   (Extraction residue from a git web view; the associated commit message
   was: "Removed function scm_gc_mark_cell_conservatively.") */
1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42
43 /* #define DEBUGINFO */
44
45 /* SECTION: This code is compiled once.
46 */
47
48 #ifndef MARK_DEPENDENCIES
49
50 \f
51 #include <stdio.h>
52 #include <errno.h>
53 #include <string.h>
54
55 #include "libguile/_scm.h"
56 #include "libguile/eval.h"
57 #include "libguile/stime.h"
58 #include "libguile/stackchk.h"
59 #include "libguile/struct.h"
60 #include "libguile/smob.h"
61 #include "libguile/unif.h"
62 #include "libguile/async.h"
63 #include "libguile/ports.h"
64 #include "libguile/root.h"
65 #include "libguile/strings.h"
66 #include "libguile/vectors.h"
67 #include "libguile/weaks.h"
68 #include "libguile/hashtab.h"
69 #include "libguile/tags.h"
70
71 #include "libguile/validate.h"
72 #include "libguile/deprecation.h"
73 #include "libguile/gc.h"
74
75 #ifdef GUILE_DEBUG_MALLOC
76 #include "libguile/debug-malloc.h"
77 #endif
78
79 #ifdef HAVE_MALLOC_H
80 #include <malloc.h>
81 #endif
82
83 #ifdef HAVE_UNISTD_H
84 #include <unistd.h>
85 #endif
86
87 #ifdef __STDC__
88 #include <stdarg.h>
89 #define var_start(x, y) va_start(x, y)
90 #else
91 #include <varargs.h>
92 #define var_start(x, y) va_start(x)
93 #endif
94
95 \f
96
97 #define CELL_P(x) (SCM_ITAG3 (x) == scm_tc3_cons)
98
99 unsigned int scm_gc_running_p = 0;
100
101 \f
102
103 #if (SCM_DEBUG_CELL_ACCESSES == 1)
104
105 scm_t_bits scm_tc16_allocated;
106
107 /* Set this to != 0 if every cell that is accessed shall be checked:
108 */
109 unsigned int scm_debug_cell_accesses_p = 1;
110
111 /* Set this to 0 if no additional gc's shall be performed, otherwise set it to
112 * the number of cell accesses after which a gc shall be called.
113 */
114 static unsigned int debug_cells_gc_interval = 0;
115
116
117 /* Assert that the given object is a valid reference to a valid cell. This
118 * test involves to determine whether the object is a cell pointer, whether
119 * this pointer actually points into a heap segment and whether the cell
120 * pointed to is not a free cell. Further, additional garbage collections may
121 * get executed after a user defined number of cell accesses. This helps to
122 * find places in the C code where references are dropped for extremely short
123 * periods.
124 */
/* Validate CELL, aborting the process if it is not a live cell.
 * Checks (1) that CELL is a cell pointer into the heap (scm_cellp) and
 * (2) outside of GC, that it is not a free cell.  Optionally forces a
 * GC every `debug_cells_gc_interval' accesses to shake out dropped
 * references.  Compiled only when SCM_DEBUG_CELL_ACCESSES == 1. */
void
scm_assert_cell_valid (SCM cell)
{
  /* Recursion guard: the checks below (and scm_igc) themselves access
   * cells, which would re-enter this function. */
  static unsigned int already_running = 0;

  if (scm_debug_cell_accesses_p && !already_running)
    {
      already_running = 1;  /* set to avoid recursion */

      if (!scm_cellp (cell))
        {
          fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lux\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }
      else if (!scm_gc_running_p)
        {
          /* Dirk::FIXME:: During garbage collection there occur references to
             free cells.  This is allright during conservative marking, but
             should not happen otherwise (I think).  The case of free cells
             accessed during conservative marking is handled in function
             scm_mark_locations.  However, there still occur accesses to free
             cells during gc.  I don't understand why this happens.  If it is
             a bug and gets fixed, the following test should also work while
             gc is running.
           */
          if (SCM_FREE_CELL_P (cell))
            {
              fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lux\n",
                       (unsigned long) SCM_UNPACK (cell));
              abort ();
            }

          /* If desired, perform additional garbage collections after a user
           * defined number of cell accesses.
           */
          if (debug_cells_gc_interval)
            {
              /* Countdown persists across calls; when it hits zero we
               * trigger a GC and re-arm it. */
              static unsigned int counter = 0;

              if (counter != 0)
                {
                  --counter;
                }
              else
                {
                  counter = debug_cells_gc_interval;
                  scm_igc ("scm_assert_cell_valid");
                }
            }
        }
      already_running = 0;  /* re-enable */
    }
}
179
180
/* Scheme-level switch for the cell-access checking above.
 * #f disables checking, #t enables it without periodic GCs, and a
 * positive fixnum enables it with a GC every N accesses. */
SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (SCM_FALSEP (flag)) {
    scm_debug_cell_accesses_p = 0;
  } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
    debug_cells_gc_interval = 0;
    scm_debug_cell_accesses_p = 1;
  } else if (SCM_INUMP (flag)) {
    /* Interval must be strictly positive; 0 is expressed via #t. */
    long int f = SCM_INUM (flag);
    if (f <= 0) SCM_OUT_OF_RANGE (1, flag);
    debug_cells_gc_interval = f;
    scm_debug_cell_accesses_p = 1;
  } else {
    SCM_WRONG_TYPE_ARG (1, flag);
  }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
209
210 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
211
212 \f
213
214 /* {heap tuning parameters}
215 *
216 * These are parameters for controlling memory allocation. The heap
217 * is the area out of which scm_cons, and object headers are allocated.
218 *
219 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
220 * 64 bit machine. The units of the _SIZE parameters are bytes.
221 * Cons pairs and object headers occupy one heap cell.
222 *
223 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
224 * allocated initially the heap will grow by half its current size
225 * each subsequent time more heap is needed.
226 *
227 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
228 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
229 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type size_t. This code
230 * is in scm_init_storage() and alloc_some_heap() in sys.c
231 *
232 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
233 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
234 *
235 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
236 * is needed.
237 *
238 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
239 * trigger a GC.
240 *
241 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
242 * reclaimed by a GC triggered by must_malloc. If less than this is
243 * reclaimed, the trigger threshold is raised. [I don't know what a
244 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
245 * work around a oscillation that caused almost constant GC.]
246 */
247
248 /*
249 * Heap size 45000 and 40% min yield gives quick startup and no extra
250 * heap allocation. Having higher values on min yield may lead to
251 * large heaps, especially if code behaviour is varying its
252 * maximum consumption between different freelists.
253 */
254
255 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
256 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
257 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
258 size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
259 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
260 int scm_default_min_yield_1 = 40;
261
262 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
263 size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
264 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
265 /* The following value may seem large, but note that if we get to GC at
266 * all, this means that we have a numerically intensive application
267 */
268 int scm_default_min_yield_2 = 40;
269
270 size_t scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
271
272 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
273 #ifdef _QC
274 # define SCM_HEAP_SEG_SIZE 32768L
275 #else
276 # ifdef sequent
277 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
278 # else
279 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
280 # endif
281 #endif
282 /* Make heap grow with factor 1.5 */
283 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
284 #define SCM_INIT_MALLOC_LIMIT 100000
285 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
286
287 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
288 aligned inner bounds for allocated storage */
289
290 #ifdef PROT386
291 /*in 386 protected mode we must only adjust the offset */
292 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
293 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
294 #else
295 # ifdef _UNICOS
296 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
297 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
298 # else
299 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
300 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
301 # endif /* UNICOS */
302 #endif /* PROT386 */
303
304 #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
305
306 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
307 #define CLUSTER_SIZE_IN_BYTES(freelist) \
308 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
309
310 \f
311 /* scm_freelists
312 */
313
314 typedef struct scm_t_freelist {
315 /* collected cells */
316 SCM cells;
317 /* number of cells left to collect before cluster is full */
318 unsigned int left_to_collect;
319 /* number of clusters which have been allocated */
320 unsigned int clusters_allocated;
321 /* a list of freelists, each of size cluster_size,
322 * except the last one which may be shorter
323 */
324 SCM clusters;
325 SCM *clustertail;
326 /* this is the number of objects in each cluster, including the spine cell */
327 unsigned int cluster_size;
328 /* indicates that we should grow heap instead of GC:ing
329 */
330 int grow_heap_p;
331 /* minimum yield on this list in order not to grow the heap
332 */
333 long min_yield;
334 /* defines min_yield as percent of total heap size
335 */
336 int min_yield_fraction;
337 /* number of cells per object on this list */
338 int span;
339 /* number of collected cells during last GC */
340 unsigned long collected;
341 /* number of collected cells during penultimate GC */
342 unsigned long collected_1;
343 /* total number of cells in heap segments
344 * belonging to this list.
345 */
346 unsigned long heap_size;
347 } scm_t_freelist;
348
349 SCM scm_freelist = SCM_EOL;
350 scm_t_freelist scm_master_freelist = {
351 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
352 };
353 SCM scm_freelist2 = SCM_EOL;
354 scm_t_freelist scm_master_freelist2 = {
355 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
356 };
357
358 /* scm_mtrigger
359 * is the number of bytes of must_malloc allocation needed to trigger gc.
360 */
361 unsigned long scm_mtrigger;
362
363 /* scm_gc_heap_lock
364 * If set, don't expand the heap. Set only during gc, during which no allocation
365 * is supposed to take place anyway.
366 */
367 int scm_gc_heap_lock = 0;
368
369 /* GC Blocking
370 * Don't pause for collection if this is set -- just
371 * expand the heap.
372 */
373 int scm_block_gc = 1;
374
375 /* During collection, this accumulates objects holding
376 * weak references.
377 */
378 SCM scm_weak_vectors;
379
380 /* During collection, this accumulates structures which are to be freed.
381 */
382 SCM scm_structs_to_free;
383
384 /* GC Statistics Keeping
385 */
386 unsigned long scm_cells_allocated = 0;
387 unsigned long scm_mallocated = 0;
388 unsigned long scm_gc_cells_collected;
389 unsigned long scm_gc_yield;
390 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
391 unsigned long scm_gc_malloc_collected;
392 unsigned long scm_gc_ports_collected;
393 unsigned long scm_gc_time_taken = 0;
394 static unsigned long t_before_gc;
395 static unsigned long t_before_sweep;
396 unsigned long scm_gc_mark_time_taken = 0;
397 unsigned long scm_gc_sweep_time_taken = 0;
398 unsigned long scm_gc_times = 0;
399 unsigned long scm_gc_cells_swept = 0;
400 double scm_gc_cells_marked_acc = 0.;
401 double scm_gc_cells_swept_acc = 0.;
402
403 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
404 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
405 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
406 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
407 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
408 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
409 SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
410 SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
411 SCM_SYMBOL (sym_times, "gc-times");
412 SCM_SYMBOL (sym_cells_marked, "cells-marked");
413 SCM_SYMBOL (sym_cells_swept, "cells-swept");
414
415 typedef struct scm_t_heap_seg_data
416 {
417 /* lower and upper bounds of the segment */
418 SCM_CELLPTR bounds[2];
419
420 /* address of the head-of-freelist pointer for this segment's cells.
421 All segments usually point to the same one, scm_freelist. */
422 scm_t_freelist *freelist;
423
424 /* number of cells per object in this segment */
425 int span;
426 } scm_t_heap_seg_data;
427
428
429
430 static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);
431
432 typedef enum { return_on_error, abort_on_error } policy_on_error;
433 static void alloc_some_heap (scm_t_freelist *, policy_on_error);
434
435
436 #define SCM_HEAP_SIZE \
437 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
438 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
439
440 #define BVEC_GROW_SIZE 256
441 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
442 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))
443
444 /* mark space allocation */
445
446 typedef struct scm_t_mark_space
447 {
448 scm_t_c_bvec_limb *bvec_space;
449 struct scm_t_mark_space *next;
450 } scm_t_mark_space;
451
452 static scm_t_mark_space *current_mark_space;
453 static scm_t_mark_space **mark_space_ptr;
454 static ptrdiff_t current_mark_space_offset;
455 static scm_t_mark_space *mark_space_head;
456
/* Hand out the next mark bitvector (one card's worth of limbs) from
 * the current mark-space chunk, growing the chunk chain on demand.
 * The three guarded cases each fix up one piece of state and then
 * recurse; at most three levels of recursion occur before the final
 * case returns a pointer. */
static scm_t_c_bvec_limb *
get_bvec ()
#define FUNC_NAME "get_bvec"
{
  scm_t_c_bvec_limb *res;

  /* Case 1: no current chunk — allocate a chunk header and link it
   * onto the chain via mark_space_ptr. */
  if (!current_mark_space)
    {
      SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
      if (!current_mark_space)
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space->bvec_space = NULL;
      current_mark_space->next = NULL;

      *mark_space_ptr = current_mark_space;
      mark_space_ptr = &(current_mark_space->next);

      return get_bvec ();
    }

  /* Case 2: chunk header exists but its limb storage hasn't been
   * allocated yet — calloc so the bits start cleared. */
  if (!(current_mark_space->bvec_space))
    {
      SCM_SYSCALL (current_mark_space->bvec_space =
                   (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
      if (!(current_mark_space->bvec_space))
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space_offset = 0;

      return get_bvec ();
    }

  /* Case 3: current chunk is exhausted — drop it and let case 1
   * allocate a fresh one. */
  if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
    {
      current_mark_space = NULL;

      return get_bvec ();
    }

  /* Common case: carve the next card-sized bitvector out of the
   * current chunk. */
  res = current_mark_space->bvec_space + current_mark_space_offset;
  current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;

  return res;
}
#undef FUNC_NAME
503
504
505 static void
506 clear_mark_space ()
507 {
508 scm_t_mark_space *ms;
509
510 for (ms = mark_space_head; ms; ms = ms->next)
511 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
512 }
513
514
515 \f
516 /* Debugging functions. */
517
518 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
519
520 static long int heap_segment (SCM obj); /* forw decl: non-debugging func */
521
/* Debug helper: walk FREELIST and print, per heap segment, how many
 * cells of MASTER's span are on the list.  Aborts if a free cell does
 * not belong to any known segment. */
static void
map_free_list (scm_t_freelist *master, SCM freelist)
{
  long last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    {
      long int this_seg = heap_segment (f);

      if (this_seg == -1)
        {
          fprintf (stderr,
                   "map_free_list: can't find segment containing cell %lux\n",
                   (unsigned long int) SCM_UNPACK (f));
          abort ();
        }
      else if (this_seg != last_seg)
        {
          /* Segment boundary crossed: flush the count for the previous
           * segment (if any) and restart counting. */
          if (last_seg != -1)
            fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
                     (long) count, master->span, (long) last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  /* Flush the trailing segment's count. */
  if (last_seg != -1)
    fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
             (long) count, master->span, (long) last_seg);
}
553
/* Scheme-visible wrapper: print segment table overview, then the
 * per-segment free-list maps for both master freelists. */
SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{map-free-list} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  size_t i;

  /* Header line: segment count followed by span:size for each segment
   * (sizes are in cells, i.e. pointer differences of the bounds). */
  fprintf (stderr, "%ld segments total (%d:%ld",
           (long) scm_n_heap_segs,
           scm_heap_table[0].span,
           (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));

  for (i = 1; i != scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%ld",
             scm_heap_table[i].span,
             (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
  fprintf (stderr, ")\n");
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
580
581 static long last_cluster;
582 static long last_size;
583
/* Count the cells on FREELIST, aborting if any element is not a free
 * cell.  I is the cluster index being reported (-1 for a plain, non-
 * cluster list).  Uses the file-static last_cluster/last_size pair to
 * compress runs of equal-sized clusters into "first-last size" output
 * lines; callers flush the final run themselves.  Returns the count. */
static long
free_list_length (char *title, long i, SCM freelist)
{
  SCM ls;
  long n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n);
        abort ();
      }
  /* Only emit output when the size differs from the previous cluster;
   * equal-sized neighbours are folded into a range on a later call. */
  if (n != last_size)
    {
      if (i > 0)
        {
          /* Close out the previous run: single cluster or a range. */
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%ld\n", (long) last_size);
          else
            fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %ld", title, (long) i);
      else
        fprintf (stderr, "%s\t%ld\n", title, (long) n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}
615
/* Print the length of FREELIST and of every cluster on MASTER's
 * cluster list, followed by the grand total of objects.  Relies on
 * free_list_length's run-compression via last_cluster/last_size and
 * flushes the final run here. */
static void
free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
{
  SCM clusters;
  long i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  /* Flush the last (still pending) run of equal-sized clusters. */
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%ld\n", (long) last_size);
  else
    fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
  fprintf (stderr, "\ntotal %ld objects\n\n", (long) n);
}
636
/* Scheme-visible wrapper printing the free-list length report for
 * both the 1-cell and 2-cell master freelists. */
SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{free-list-length} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
649
650 #endif /* defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) */
651
652 #ifdef GUILE_DEBUG_FREELIST
653
654 /* Non-zero if freelist debugging is in effect. Set this via
655 `gc-set-debug-check-freelist!'. */
656 static int scm_debug_check_freelist = 0;
657
658 /* Number of calls to SCM_NEWCELL since startup. */
659 static unsigned long scm_newcell_count;
660 static unsigned long scm_newcell2_count;
661
662 /* Search freelist for anything that isn't marked as a free cell.
663 Abort if we find something. */
664 static void
665 scm_check_freelist (SCM freelist)
666 {
667 SCM f;
668 long i = 0;
669
670 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
671 if (!SCM_FREE_CELL_P (f))
672 {
673 fprintf (stderr, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
674 (long) scm_newcell_count, (long) i);
675 abort ();
676 }
677 }
678
/* Toggle the per-allocation freelist consistency check (see
 * scm_check_freelist and scm_debug_newcell). */
SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#t}, check the freelist for consistency\n"
            "on each cell allocation.  This procedure only exists when the\n"
            "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* [cmm] I did a double-take when I read this code the first time.
     well, FWIW. */
  /* Validates FLAG as a boolean and copies its C truth value straight
     into the static scm_debug_check_freelist switch. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
692
693
/* Debug replacement for the SCM_NEWCELL macro: optionally verifies the
 * freelist and forces a GC before every allocation, then allocates a
 * single cell exactly as the macro would. */
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro. */
  if (SCM_NULLP (scm_freelist))
    {
      /* Freelist exhausted: GC (or heap growth) must supply a cell. */
      new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
      SCM_GC_SET_ALLOCATED (new);
    }
  else
    {
      /* Pop the head of the freelist. */
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
      SCM_GC_SET_ALLOCATED (new);
    }

  return new;
}
722
/* Same as scm_debug_newcell, but for double cells: debug replacement
 * for the SCM_NEWCELL2 macro, drawing from the 2-cell freelist. */
SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro. */
  if (SCM_NULLP (scm_freelist2))
    {
      /* Freelist exhausted: GC (or heap growth) must supply a cell. */
      new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
      SCM_GC_SET_ALLOCATED (new);
    }
  else
    {
      /* Pop the head of the 2-cell freelist. */
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
      SCM_GC_SET_ALLOCATED (new);
    }

  return new;
}
751
752 #endif /* GUILE_DEBUG_FREELIST */
753
754 \f
755
756 static unsigned long
757 master_cells_allocated (scm_t_freelist *master)
758 {
759 /* the '- 1' below is to ignore the cluster spine cells. */
760 long objects = master->clusters_allocated * (master->cluster_size - 1);
761 if (SCM_NULLP (master->clusters))
762 objects -= master->left_to_collect;
763 return master->span * objects;
764 }
765
766 static unsigned long
767 freelist_length (SCM freelist)
768 {
769 long n;
770 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
771 ++n;
772 return n;
773 }
774
775 static unsigned long
776 compute_cells_allocated ()
777 {
778 return (scm_cells_allocated
779 + master_cells_allocated (&scm_master_freelist)
780 + master_cells_allocated (&scm_master_freelist2)
781 - scm_master_freelist.span * freelist_length (scm_freelist)
782 - scm_master_freelist2.span * freelist_length (scm_freelist2));
783 }
784
785 /* {Scheme Interface to GC}
786 */
787
/* Build an association list of GC statistics.  GC is blocked while the
 * heap-segment list is consed (with a retry if a segment appears
 * mid-walk), and all scalar counters are snapshotted into locals
 * before the result list is consed, so the reported numbers describe
 * the heap state prior to the consing done here. */
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  long i;
  long n;
  SCM heap_segs;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  /* Block GC so heap segments are not reorganized under our feet while
   * we walk the segment table. */
  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
                          heap_segs);
  /* The consing above may itself have grown the heap (adding a
   * segment); if so, start over with the fresh table. */
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                       scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                       scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                       scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                       scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                       scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                       scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                       scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                       scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
                       scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
                       scm_cons (sym_heap_segments, heap_segs),
                       SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
855
856
/* Reset the per-collection statistics counters at the start of a GC
 * and record the start time in t_before_gc.  The gross yield is the
 * cell-allocation total before sweeping reclaims anything. */
static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  /* Keep the previous GC's yield around for adjust_min_yield. */
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
870
871
/* Accumulate the timing and cell-count statistics at the end of a GC.
 * t_before_sweep is presumably set just before the sweep phase by the
 * collector proper (not visible in this block) — the difference gives
 * the sweep time. */
static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  /* Marked cells = swept cells minus those reclaimed (collected). */
  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}
883
884
/* Expose OBJ's raw SCM bit pattern (its address for heap objects) as
 * an unsigned integer. */
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that for the lifetime of @var{obj} is uniquely\n"
            "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME
894
895
/* Scheme-level entry point: run a full collection with interrupts
 * deferred around the call to the internal collector. */
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all of SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
908
909
910 \f
911 /* {C Interface For When GC is Triggered}
912 */
913
914 static void
915 adjust_min_yield (scm_t_freelist *freelist)
916 {
917 /* min yield is adjusted upwards so that next predicted total yield
918 * (allocated cells actually freed by GC) becomes
919 * `min_yield_fraction' of total heap size. Note, however, that
920 * the absolute value of min_yield will correspond to `collected'
921 * on one master (the one which currently is triggering GC).
922 *
923 * The reason why we look at total yield instead of cells collected
924 * on one list is that we want to take other freelists into account.
925 * On this freelist, we know that (local) yield = collected cells,
926 * but that's probably not the case on the other lists.
927 *
928 * (We might consider computing a better prediction, for example
929 * by computing an average over multiple GC:s.)
930 */
931 if (freelist->min_yield_fraction)
932 {
933 /* Pick largest of last two yields. */
934 long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
935 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
936 #ifdef DEBUGINFO
937 fprintf (stderr, " after GC = %lu, delta = %ld\n",
938 (long) scm_cells_allocated,
939 (long) delta);
940 #endif
941 if (delta > 0)
942 freelist->min_yield += delta;
943 }
944 }
945
946
947 /* When we get POSIX threads support, the master will be global and
948 * common while the freelist will be individual for each thread.
949 */
950
/* Obtain a fresh cluster of free cells for a thread freelist when it
 * has run dry.  Tries, in order: growing the heap (if scheduled or GC
 * is blocked), garbage collecting, and finally growing the heap with
 * abort-on-failure.  On success, pops the next cluster off MASTER,
 * stores its tail into *FREELIST, and returns its first cell. */

SCM
scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              /* In order to reduce gc frequency, try to allocate a new heap
               * segment first, even if gc might find some free cells.  If we
               * can't obtain a new heap segment, we will try gc later.
               */
              master->grow_heap_p = 0;
              alloc_some_heap (master, return_on_error);
            }
          if (SCM_NULLP (master->clusters))
            {
              /* The heap was not grown, either because it wasn't scheduled to
               * grow, or because there was not enough memory available.  In
               * both cases we have to try gc to get some free cells.
               */
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %lu, ",
                       (long) (scm_cells_allocated
                               + master_cells_allocated (&scm_master_freelist)
                               + master_cells_allocated (&scm_master_freelist2)));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
              if (SCM_NULLP (master->clusters))
                {
                  /* gc could not free any cells.  Now, we _must_ allocate a
                   * new heap segment, because there is no other possibility
                   * to provide a new cell for the caller.
                   */
                  alloc_some_heap (master, abort_on_error);
                }
            }
        }
      /* Pop the next cluster; its spine cell may be SCM_EOL for an
       * empty cluster, in which case we loop and try again. */
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  /* Hand the rest of the cluster to the caller's freelist and return
   * the first cell for immediate use. */
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}
1007
1008
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 *
 * Returns the head cell of the reserved cluster, whose cdr is re-linked
 * to the remainder of the cluster.
 */

SCM
scm_alloc_cluster (scm_t_freelist *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  /* Bug fix: this function was declared `void' but returns a value,
     which would not compile if this block were ever enabled.  */
  return cell;
}
#endif
1024
1025
/* C-level hooks run by scm_igc around the phases of a collection:
 * before everything, before marking, before sweeping, after sweeping,
 * and after the collection has fully finished.  */
scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;
1031
1032
1033 void
1034 scm_igc (const char *what)
1035 {
1036 long j;
1037
1038 ++scm_gc_running_p;
1039 scm_c_hook_run (&scm_before_gc_c_hook, 0);
1040 #ifdef DEBUGINFO
1041 fprintf (stderr,
1042 SCM_NULLP (scm_freelist)
1043 ? "*"
1044 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
1045 #endif
1046 /* During the critical section, only the current thread may run. */
1047 SCM_CRITICAL_SECTION_START;
1048
1049 if (!scm_stack_base || scm_block_gc)
1050 {
1051 --scm_gc_running_p;
1052 return;
1053 }
1054
1055 gc_start_stats (what);
1056
1057 if (scm_gc_heap_lock)
1058 /* We've invoked the collector while a GC is already in progress.
1059 That should never happen. */
1060 abort ();
1061
1062 ++scm_gc_heap_lock;
1063
1064 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1065
1066 clear_mark_space ();
1067
1068 #ifndef USE_THREADS
1069
1070 /* Mark objects on the C stack. */
1071 SCM_FLUSH_REGISTER_WINDOWS;
1072 /* This assumes that all registers are saved into the jmp_buf */
1073 setjmp (scm_save_regs_gc_mark);
1074 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
1075 ( (size_t) (sizeof (SCM_STACKITEM) - 1 +
1076 sizeof scm_save_regs_gc_mark)
1077 / sizeof (SCM_STACKITEM)));
1078
1079 {
1080 unsigned long stack_len = scm_stack_size (scm_stack_base);
1081 #ifdef SCM_STACK_GROWS_UP
1082 scm_mark_locations (scm_stack_base, stack_len);
1083 #else
1084 scm_mark_locations (scm_stack_base - stack_len, stack_len);
1085 #endif
1086 }
1087
1088 #else /* USE_THREADS */
1089
1090 /* Mark every thread's stack and registers */
1091 scm_threads_mark_stacks ();
1092
1093 #endif /* USE_THREADS */
1094
1095 j = SCM_NUM_PROTECTS;
1096 while (j--)
1097 scm_gc_mark (scm_sys_protects[j]);
1098
1099 /* mark the registered roots */
1100 {
1101 size_t i;
1102 for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) {
1103 SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
1104 for (; !SCM_NULLP (l); l = SCM_CDR (l)) {
1105 SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
1106 scm_gc_mark (*p);
1107 }
1108 }
1109 }
1110
1111 /* FIXME: we should have a means to register C functions to be run
1112 * in different phases of GC
1113 */
1114 scm_mark_subr_table ();
1115
1116 #ifndef USE_THREADS
1117 scm_gc_mark (scm_root->handle);
1118 #endif
1119
1120 t_before_sweep = scm_c_get_internal_run_time ();
1121 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1122
1123 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
1124
1125 scm_gc_sweep ();
1126
1127 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1128
1129 --scm_gc_heap_lock;
1130 gc_end_stats ();
1131
1132 SCM_CRITICAL_SECTION_END;
1133 scm_c_hook_run (&scm_after_gc_c_hook, 0);
1134 --scm_gc_running_p;
1135 }
1136
1137 \f
1138
1139 /* {Mark/Sweep}
1140 */
1141
1142 #define MARK scm_gc_mark
1143 #define FNAME "scm_gc_mark"
1144
1145 #endif /*!MARK_DEPENDENCIES*/
1146
/* Mark an object precisely.
 *
 * This function body is compiled twice (see the self-#include below):
 * once as scm_gc_mark (which sets mark bits) and once as
 * scm_gc_mark_dependencies (MARK_DEPENDENCIES defined, which marks an
 * object's references without marking the object itself).  Tail
 * references are followed iteratively via `ptr' and the gc_mark_loop /
 * gc_mark_nimp labels; non-tail references recurse through RECURSE.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;
  scm_t_bits cell_type;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

/* A simple hack for debugging.  Choose the second branch to get a
   meaningful backtrace for crashes inside the GC.
*/
#if 1
#define goto_gc_mark_loop goto gc_mark_loop
#define goto_gc_mark_nimp goto gc_mark_nimp
#else
#define goto_gc_mark_loop RECURSE(ptr); return
#define goto_gc_mark_nimp RECURSE(ptr); return
#endif

gc_mark_loop:
  /* Immediate values carry no heap storage — nothing to mark.  */
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  if (SCM_EQ_P (ptr, p))
    return;

  /* In the dependencies variant, anything reached after the first
     iteration is marked through the ordinary entry point.  */
  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
  /* We are in debug mode.  Check the ptr exhaustively. */
  if (!scm_cellp (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
  /* In non-debug mode, do at least some cheap testing. */
  if (!CELL_P (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif

#ifndef MARK_DEPENDENCIES

  /* Already marked: this object (and everything it references) has
     been visited.  */
  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  /* Dispatch on the cell's type tag and mark its references.  */
  cell_type = SCM_GC_CELL_TYPE (ptr);
  switch (SCM_ITAG7 (cell_type))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
	{
	  ptr = SCM_CAR (ptr);
	  goto_gc_mark_nimp;
	}
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto_gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;
    case scm_tc7_pws:
      /* Procedure-with-setter: mark setter, tail-follow procedure.  */
      RECURSE (SCM_SETTER (ptr));
      ptr = SCM_PROCEDURE (ptr);
      goto_gc_mark_loop;
    case scm_tcs_struct:
      {
	/* XXX - use less explicit code. */
	scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_struct;
	scm_t_bits * vtable_data = (scm_t_bits *) word0;
	SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
	long len = SCM_SYMBOL_LENGTH (layout);
	char * fields_desc = SCM_SYMBOL_CHARS (layout);
	scm_t_bits * struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);

	if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
	  {
	    RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
	    RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
	  }
	if (len)
	  {
	    long x;

	    /* Walk the layout descriptor two chars per field; 'p'
	       fields hold protected (heap) references.  */
	    for (x = 0; x < len - 2; x += 2, ++struct_data)
	      if (fields_desc[x] == 'p')
		RECURSE (SCM_PACK (*struct_data));
	    if (fields_desc[x] == 'p')
	      {
		if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
		  /* Tail array: first word is the element count.  */
		  for (x = *struct_data++; x; --x, ++struct_data)
		    RECURSE (SCM_PACK (*struct_data));
		else
		  RECURSE (SCM_PACK (*struct_data));
	      }
	  }
	/* mark vtable */
	ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
	goto_gc_mark_loop;
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_ENV (ptr)))
	{
	  ptr = SCM_CLOSCAR (ptr);
	  goto_gc_mark_nimp;
	}
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_ENV (ptr);
      goto_gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
	break;
      /* Recurse on elements 1..n-1, tail-follow element 0.  */
      while (--i > 0)
	if (SCM_NIMP (SCM_VELTS (ptr)[i]))
	  RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto_gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
	size_t i = SCM_CCLO_LENGTH (ptr);
	size_t j;
	for (j = 1; j != i; ++j)
	  {
	    SCM obj = SCM_CCLO_REF (ptr, j);
	    if (!SCM_IMP (obj))
	      RECURSE (obj);
	  }
	ptr = SCM_CCLO_REF (ptr, 0);
	goto_gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      /* Uniform vectors and strings hold no SCM references.  */
      break;

    case scm_tc7_wvect:
      /* Weak vectors are chained for post-mark processing; their
	 weakly-held slots must not keep objects alive.  */
      SCM_SET_WVECT_GC_CHAIN (ptr, scm_weak_vectors);
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
	{
	  long x;
	  long len;
	  int weak_keys;
	  int weak_values;

	  len = SCM_VECTOR_LENGTH (ptr);
	  weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
	  weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

	  for (x = 0; x < len; ++x)
	    {
	      SCM alist;
	      alist = SCM_VELTS (ptr)[x];

	      /* mark everything on the alist except the keys or
	       * values, according to weak_values and weak_keys.  */
	      while (   SCM_CONSP (alist)
		     && !SCM_GCMARKP (alist)
		     && SCM_CONSP (SCM_CAR (alist)))
		{
		  SCM kvpair;
		  SCM next_alist;

		  kvpair = SCM_CAR (alist);
		  next_alist = SCM_CDR (alist);
		  /*
		   * Do not do this:
		   *   SCM_SETGCMARK (alist);
		   *   SCM_SETGCMARK (kvpair);
		   *
		   * It may be that either the key or value is protected by
		   * an escaped reference to part of the spine of this alist.
		   * If we mark the spine here, and only mark one or neither of the
		   * key and value, they may never be properly marked.
		   * This leads to a horrible situation in which an alist containing
		   * freelist cells is exported.
		   *
		   * So only mark the spines of these arrays last of all marking.
		   * If somebody confuses us by constructing a weak vector
		   * with a circular alist then we are hosed, but at least we
		   * won't prematurely drop table entries.
		   */
		  if (!weak_keys)
		    RECURSE (SCM_CAR (kvpair));
		  if (!weak_values)
		    RECURSE (SCM_CDR (kvpair));
		  alist = next_alist;
		}
	      if (SCM_NIMP (alist))
		RECURSE (alist);
	    }
	}
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto_gc_mark_loop;
    case scm_tc7_variable:
      ptr = SCM_CELL_OBJECT_1 (ptr);
      goto_gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
      if (!(i < scm_numptob))
	SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
      if (SCM_PTAB_ENTRY(ptr))
	RECURSE (SCM_FILENAME (ptr));
      if (scm_ptobs[i].mark)
	{
	  /* A port's mark function returns a further object to mark.  */
	  ptr = (scm_ptobs[i].mark) (ptr);
	  goto_gc_mark_loop;
	}
      else
	return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
	{ /* should be faster than going through scm_smobs */
	case scm_tc_free_cell:
	  /* We have detected a free cell.  This can happen if non-object data
	   * on the C stack points into guile's heap and is scanned during
	   * conservative marking.  */
	  break;
	case scm_tc16_big:
	case scm_tc16_real:
	case scm_tc16_complex:
	  break;
	default:
	  i = SCM_SMOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
	  if (!(i < scm_numsmob))
	    SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
	  if (scm_smobs[i].mark)
	    {
	      ptr = (scm_smobs[i].mark) (ptr);
	      goto_gc_mark_loop;
	    }
	  else
	    return;
	}
      break;
    default:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME
1435
1436 #ifndef MARK_DEPENDENCIES
1437
1438 #undef MARK
1439 #undef FNAME
1440
1441 /* And here we define `scm_gc_mark_dependencies', by including this
1442 * same file in itself.
1443 */
1444 #define MARK scm_gc_mark_dependencies
1445 #define FNAME "scm_gc_mark_dependencies"
1446 #define MARK_DEPENDENCIES
1447 #include "gc.c"
1448 #undef MARK_DEPENDENCIES
1449 #undef MARK
1450 #undef FNAME
1451
1452
/* Determine whether the given value does actually represent a cell in some
 * heap segment.  If this is the case, the number of the heap segment is
 * returned.  Otherwise, -1 is returned.  Binary search is used in order to
 * determine the heap segment that contains the cell.*/
/* FIXME: To be used within scm_mark_locations and scm_cellp this function
 * should be an inline function.  */
static long int
heap_segment (SCM obj)
{
  if (!CELL_P (obj))
    return -1;
  else
    {
      SCM_CELLPTR ptr = SCM2PTR (obj);
      unsigned long int i = 0;
      unsigned long int j = scm_n_heap_segs - 1;

      /* Reject pointers outside the overall heap range, then narrow
	 [i, j] until segment i contains ptr.  The table is kept sorted
	 by address (see init_heap_seg).  */
      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
	return -1;
      else if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
	return -1;
      else
	{
	  while (i < j)
	    {
	      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[1]))
		{
		  break;
		}
	      else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
		{
		  i = j;
		  break;
		}
	      else
		{
		  unsigned long int k = (i + j) / 2;

		  /* After bisecting, advance/retreat one endpoint and
		     re-check, so a ptr falling in the gap between two
		     segments is detected as outside the heap.  */
		  if (k == i)
		    return -1;
		  else if (SCM_PTR_LT (ptr, scm_heap_table[k].bounds[1]))
		    {
		      j = k;
		      ++i;
		      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
			return -1;
		    }
		  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
		    {
		      i = k;
		      --j;
		      if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
			return -1;
		    }
		}
	    }

	  /* A double-cell segment only holds cells at even cell offsets;
	     card header cells never hold objects.  */
	  if (!DOUBLECELL_ALIGNED_P (obj) && scm_heap_table[i].span == 2)
	    return -1;
	  else if (SCM_GC_IN_CARD_HEADERP (ptr))
	    return -1;
	  else
	    return i;
	}
    }
}
1519
1520
1521 /* Mark a region conservatively */
1522 void
1523 scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
1524 {
1525 unsigned long m;
1526
1527 for (m = 0; m < n; ++m)
1528 {
1529 SCM obj = * (SCM *) &x[m];
1530 long int segment = heap_segment (obj);
1531 if (segment >= 0)
1532 scm_gc_mark (obj);
1533 }
1534 }
1535
1536
1537 /* The function scm_cellp determines whether an SCM value can be regarded as a
1538 * pointer to a cell on the heap.
1539 */
1540 int
1541 scm_cellp (SCM value)
1542 {
1543 long int segment = heap_segment (value);
1544 return (segment >= 0);
1545 }
1546
1547
1548 static void
1549 gc_sweep_freelist_start (scm_t_freelist *freelist)
1550 {
1551 freelist->cells = SCM_EOL;
1552 freelist->left_to_collect = freelist->cluster_size;
1553 freelist->clusters_allocated = 0;
1554 freelist->clusters = SCM_EOL;
1555 freelist->clustertail = &freelist->clusters;
1556 freelist->collected_1 = freelist->collected;
1557 freelist->collected = 0;
1558 }
1559
/* Finish sweeping FREELIST: terminate the cluster chain with the
 * partially filled last cluster, account its cells in the yield, and
 * decide whether the heap should grow before the next collection.  */
static void
gc_sweep_freelist_finish (scm_t_freelist *freelist)
{
  long collected;
  /* Append the leftover (incomplete) cluster to the cluster chain.  */
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      /* Convert the head free cell into a cluster spine cell: word 0
	 becomes the cell chain, word 1 terminates the cluster list.  */
      SCM c = freelist->cells;
      SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
      SCM_SET_CELL_WORD_1 (c, SCM_EOL);
      freelist->collected +=
	freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}
1585
/* Advance PTR to the next data cell of the given SPAN, skipping over
 * card header cells when the increment lands inside a card header.  */
#define NEXT_DATA_CELL(ptr, span) \
    do { \
      scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
      (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
	       CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
	       : nxt__); \
    } while (0)
1593
/* Sweep every heap segment: free the malloc'd storage of unmarked
 * objects, thread the unmarked cells into per-freelist clusters, and
 * update the global allocation statistics.  `m' accumulates the number
 * of malloc'd bytes released, which is subtracted from scm_mallocated
 * at the end.  */
void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_t_freelist *freelist;
  register unsigned long m;
  register int span;
  size_t i;
  size_t seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register long left_to_collect;
      register size_t j;

      /* Unmarked cells go onto the front of the freelist this heap
	 segment points to.  Rather than updating the real freelist
	 pointer as we go along, we accumulate the new head in
	 nfreelist.  Then, if it turns out that the entire segment is
	 free, we free (i.e., malloc's free) the whole segment, and
	 simply don't assign nfreelist back into the real freelist. */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      for (j = seg_size + span; j -= span; ptr += span)
	{
	  SCM scmptr;

	  if (SCM_GC_IN_CARD_HEADERP (ptr))
	    {
	      SCM_CELLPTR nxt;

	      /* cheat here */
	      nxt = ptr;
	      NEXT_DATA_CELL (nxt, span);
	      j += span;

	      ptr = nxt - span;
	      continue;
	    }

	  scmptr = PTR2SCM (ptr);

	  /* Marked objects survive this collection.  */
	  if (SCM_GCMARKP (scmptr))
	    continue;

	  /* Unmarked: release any malloc'd storage owned by the object,
	     by type.  `continue' skips the freelist-threading below
	     (the cell itself is not reclaimed).  */
	  switch SCM_TYP7 (scmptr)
	    {
	    case scm_tcs_struct:
	      {
		/* Structs need to be freed in a special order.
		 * This is handled by GC C hooks in struct.c.
		 */
		SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
		scm_structs_to_free = scmptr;
	      }
	      continue;
	    case scm_tcs_cons_imcar:
	    case scm_tcs_cons_nimcar:
	    case scm_tcs_closures:
	    case scm_tc7_pws:
	      break;
	    case scm_tc7_wvect:
	    case scm_tc7_vector:
	      {
		unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
		if (length > 0)
		  {
		    m += length * sizeof (scm_t_bits);
		    scm_must_free (SCM_VECTOR_BASE (scmptr));
		  }
		break;
	      }
#ifdef CCLO
	    case scm_tc7_cclo:
	      m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
	      scm_must_free (SCM_CCLO_BASE (scmptr));
	      break;
#endif
#ifdef HAVE_ARRAYS
	    case scm_tc7_bvect:
	      {
		unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
		if (length > 0)
		  {
		    m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
		    scm_must_free (SCM_BITVECTOR_BASE (scmptr));
		  }
	      }
	      break;
	    case scm_tc7_byvect:
	    case scm_tc7_ivect:
	    case scm_tc7_uvect:
	    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
	    case scm_tc7_llvect:
#endif
	    case scm_tc7_fvect:
	    case scm_tc7_dvect:
	    case scm_tc7_cvect:
	      m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
	      scm_must_free (SCM_UVECTOR_BASE (scmptr));
	      break;
#endif
	    case scm_tc7_string:
	      m += SCM_STRING_LENGTH (scmptr) + 1;
	      scm_must_free (SCM_STRING_CHARS (scmptr));
	      break;
	    case scm_tc7_symbol:
	      m += SCM_SYMBOL_LENGTH (scmptr) + 1;
	      scm_must_free (SCM_SYMBOL_CHARS (scmptr));
	      break;
	    case scm_tc7_variable:
	      break;
	    case scm_tcs_subrs:
	      /* the various "subrs" (primitives) are never freed */
	      continue;
	    case scm_tc7_port:
	      if SCM_OPENP (scmptr)
		{
		  int k = SCM_PTOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
		  if (!(k < scm_numptob))
		    SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
		  /* Keep "revealed" ports alive.  */
		  if (scm_revealed_count (scmptr) > 0)
		    continue;
		  /* Yes, I really do mean scm_ptobs[k].free */
		  /* rather than ftobs[k].close.  .close */
		  /* is for explicit CLOSE-PORT by user */
		  m += (scm_ptobs[k].free) (scmptr);
		  SCM_SETSTREAM (scmptr, 0);
		  scm_remove_from_port_table (scmptr);
		  scm_gc_ports_collected++;
		  SCM_CLR_PORT_OPEN_FLAG (scmptr);
		}
	      break;
	    case scm_tc7_smob:
	      switch SCM_TYP16 (scmptr)
		{
		case scm_tc_free_cell:
		case scm_tc16_real:
		  break;
#ifdef SCM_BIGDIG
		case scm_tc16_big:
		  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
		  scm_must_free (SCM_BDIGITS (scmptr));
		  break;
#endif /* def SCM_BIGDIG */
		case scm_tc16_complex:
		  m += sizeof (scm_t_complex);
		  scm_must_free (SCM_COMPLEX_MEM (scmptr));
		  break;
		default:
		  {
		    int k;
		    k = SCM_SMOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
		    if (!(k < scm_numsmob))
		      SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
		    if (scm_smobs[k].free)
		      m += (scm_smobs[k].free) (scmptr);
		    break;
		  }
		}
	      break;
	    default:
	      SCM_MISC_ERROR ("unknown type", SCM_EOL);
	    }

	  /* Thread the reclaimed cell into the current cluster; when a
	     cluster fills up, link it onto the cluster chain and start
	     a fresh one.  */
	  if (!--left_to_collect)
	    {
	      SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
	      *freelist->clustertail = scmptr;
	      freelist->clustertail = SCM_CDRLOC (scmptr);

	      nfreelist = SCM_EOL;
	      freelist->collected += span * freelist->cluster_size;
	      left_to_collect = freelist->cluster_size;
	    }
	  else
	    {
	      /* Stick the new cell on the front of nfreelist.  It's
		 critical that we mark this cell as freed; otherwise, the
		 conservative collector might trace it as some other type
		 of object. */
	      SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	      SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
	      nfreelist = scmptr;
	    }
	}

#ifdef GC_FREE_SEGMENTS
      /* NOTE(review): `n' is not declared anywhere in this function, so
	 this branch cannot compile — it appears to be stale code.
	 Verify (and fix the counter it was meant to test) before ever
	 defining GC_FREE_SEGMENTS.  */
      if (n == seg_size)
	{
	  register long j;

	  freelist->heap_size -= seg_size;
	  free ((char *) scm_heap_table[i].bounds[0]);
	  scm_heap_table[i].bounds[0] = 0;
	  for (j = i + 1; j < scm_n_heap_segs; j++)
	    scm_heap_table[j - 1] = scm_heap_table[j];
	  scm_n_heap_segs -= 1;
	  i--; /* We need to scan the segment just moved.  */
	}
      else
#endif /* ifdef GC_FREE_SEGMENTS */
	{
	  /* Update the real freelist pointer to point to the head of
	     the list of free cells we've built for this segment. */
	  freelist->cells = nfreelist;
	  freelist->left_to_collect = left_to_collect;
	}

#ifdef GUILE_DEBUG_FREELIST
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;

  if (scm_mallocated < m)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME
1855
1856
1857 \f
1858 /* {Front end to malloc}
1859 *
1860 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1861 * scm_done_free
1862 *
1863 * These functions provide services comparable to malloc, realloc, and
1864 * free. They should be used when allocating memory that will be under
1865 * control of the garbage collector, i.e., if the memory may be freed
1866 * during garbage collection.
1867 */
1868
1869 /* scm_must_malloc
1870 * Return newly malloced storage or throw an error.
1871 *
1872 * The parameter WHAT is a string for error reporting.
1873 * If the threshold scm_mtrigger will be passed by this
1874 * allocation, or if the first call to malloc fails,
1875 * garbage collect -- on the presumption that some objects
1876 * using malloced storage may be collected.
1877 *
1878 * The limit scm_mtrigger may be raised by this allocation.
1879 */
void *
scm_must_malloc (size_t size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  if (nm < size)
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  /* Fast path: below the trigger, just try malloc directly.  */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
	{
	  scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
	  scm_malloc_register (ptr, what);
#endif
	  return ptr;
	}
    }

  /* Over the trigger, or malloc failed: collect and retry once.  */
  scm_igc (what);

  /* GC may have changed scm_mallocated — recompute and re-check.  */
  nm = scm_mallocated + size;

  if (nm < size)
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;

      /* Raise the trigger (by ~50%) so the next GC is not immediate;
	 a shrinking trigger would indicate wrap-around.  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
	unsigned long old_trigger = scm_mtrigger;
	if (nm > scm_mtrigger)
	  scm_mtrigger = nm + nm / 2;
	else
	  scm_mtrigger += scm_mtrigger / 2;
	if (scm_mtrigger < old_trigger)
	  abort ();
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  /* Still no memory after GC: signal a Scheme-level error (no return).  */
  scm_memory_error (what);
}
1938
1939
1940 /* scm_must_realloc
1941 * is similar to scm_must_malloc.
1942 */
void *
scm_must_realloc (void *where,
		  size_t old_size,
		  size_t size,
		  const char *what)
{
  void *ptr;
  unsigned long nm;

  /* Shrinking (or equal) requests are satisfied in place.  */
  if (size <= old_size)
    return where;

  nm = scm_mallocated + size - old_size;

  if (nm < (size - old_size))
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  /* Fast path: below the trigger, just try realloc directly.  */
  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
	{
	  scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
	  scm_malloc_reregister (where, ptr, what);
#endif
	  return ptr;
	}
    }

  /* Over the trigger, or realloc failed: collect and retry once.  */
  scm_igc (what);

  /* GC may have changed scm_mallocated — recompute and re-check.  */
  nm = scm_mallocated + size - old_size;

  if (nm < (size - old_size))
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      /* Raise the trigger (by ~50%) so the next GC is not immediate;
	 a shrinking trigger would indicate wrap-around.  */
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
	unsigned long old_trigger = scm_mtrigger;
	if (nm > scm_mtrigger)
	  scm_mtrigger = nm + nm / 2;
	else
	  scm_mtrigger += scm_mtrigger / 2;
	if (scm_mtrigger < old_trigger)
	  abort ();
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  /* Still no memory after GC: signal a Scheme-level error (no return).  */
  scm_memory_error (what);
}
2007
/* Copy the first LENGTH bytes of STR into freshly allocated,
 * NUL-terminated storage obtained from scm_must_malloc.  The caller
 * owns (and must scm_must_free) the result.  */
char *
scm_must_strndup (const char *str, size_t length)
{
  char *copy = scm_must_malloc (length + 1, "scm_must_strndup");

  memcpy (copy, str, length);
  copy[length] = 0;
  return copy;
}
2016
/* Duplicate the NUL-terminated string STR into GC-accounted storage;
 * the caller owns the result.  */
char *
scm_must_strdup (const char *str)
{
  size_t len = strlen (str);

  return scm_must_strndup (str, len);
}
2022
2023 void
2024 scm_must_free (void *obj)
2025 #define FUNC_NAME "scm_must_free"
2026 {
2027 #ifdef GUILE_DEBUG_MALLOC
2028 scm_malloc_unregister (obj);
2029 #endif
2030 if (obj)
2031 free (obj);
2032 else
2033 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
2034 }
2035 #undef FUNC_NAME
2036
2037
2038 /* Announce that there has been some malloc done that will be freed
2039 * during gc. A typical use is for a smob that uses some malloced
2040 * memory but can not get it from scm_must_malloc (for whatever
2041 * reason). When a new object of this smob is created you call
2042 * scm_done_malloc with the size of the object. When your smob free
2043 * function is called, be sure to include this size in the return
2044 * value.
2045 *
2046 * If you can't actually free the memory in the smob free function,
2047 * for whatever reason (like reference counting), you still can (and
2048 * should) report the amount of memory freed when you actually free it.
2049 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2050 * eh? Or even better, call scm_done_free. */
2051
2052 void
2053 scm_done_malloc (long size)
2054 {
2055 if (size < 0) {
2056 if (scm_mallocated < size)
2057 /* The byte count of allocated objects has underflowed. This is
2058 probably because you forgot to report the sizes of objects you
2059 have allocated, by calling scm_done_malloc or some such. When
2060 the GC freed them, it subtracted their size from
2061 scm_mallocated, which underflowed. */
2062 abort ();
2063 } else {
2064 unsigned long nm = scm_mallocated + size;
2065 if (nm < size)
2066 /* The byte count of allocated objects has overflowed. This is
2067 probably because you forgot to report the correct size of freed
2068 memory in some of your smob free methods. */
2069 abort ();
2070 }
2071
2072 scm_mallocated += size;
2073
2074 if (scm_mallocated > scm_mtrigger)
2075 {
2076 scm_igc ("foreign mallocs");
2077 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2078 {
2079 if (scm_mallocated > scm_mtrigger)
2080 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2081 else
2082 scm_mtrigger += scm_mtrigger / 2;
2083 }
2084 }
2085 }
2086
2087 void
2088 scm_done_free (long size)
2089 {
2090 if (size >= 0) {
2091 if (scm_mallocated < size)
2092 /* The byte count of allocated objects has underflowed. This is
2093 probably because you forgot to report the sizes of objects you
2094 have allocated, by calling scm_done_malloc or some such. When
2095 the GC freed them, it subtracted their size from
2096 scm_mallocated, which underflowed. */
2097 abort ();
2098 } else {
2099 unsigned long nm = scm_mallocated - size;
2100 if (nm < size)
2101 /* The byte count of allocated objects has overflowed. This is
2102 probably because you forgot to report the correct size of freed
2103 memory in some of your smob free methods. */
2104 abort ();
2105 }
2106
2107 scm_mallocated -= size;
2108 }
2109
2110
2111 \f
2112 /* {Heap Segments}
2113 *
2114 * Each heap segment is an array of objects of a particular size.
2115 * Every segment has an associated (possibly shared) freelist.
2116 * A table of segment records is kept that records the upper and
2117 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
2119 * into valid segments at reasonable offsets). */
2120
/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* Upper bound on the size of a newly allocated heap segment.
   (Units not evident here — presumably cells; see alloc_some_heap.) */
size_t scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Table of all heap segments, kept sorted by segment base address
   (heap_segment relies on this ordering for its binary search), its
   allocated capacity, and the number of entries in use.  */
scm_t_heap_seg_data * scm_heap_table = 0;
static size_t heap_segment_table_size = 0;
size_t scm_n_heap_segs = 0;
2138
/* init_heap_seg
 * initializes a new heap segment and returns its size in bytes, or 0 if
 * the segment could not be initialized.
 *
 * The segment origin and segment size in bytes are input parameters.
 * The freelist is both input and output.
 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record and that the markbit space was reserved
 * for all the cards in this segment.
 */
2149
/* Attach a freshly obtained mark-bit vector to CARD, and flag the card
 * as holding double cells when SPAN is 2.  Used while carving a new
 * segment into cards in init_heap_seg.
 */
#define INIT_CARD(card, span) \
  do { \
    SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
    if ((span) == 2) \
      SCM_GC_SET_CARD_DOUBLECELL (card); \
  } while (0)
2156
static size_t
init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  size_t new_seg_index;
  ptrdiff_t n_new_cells;
  int span = freelist->span;

  /* A NULL origin means the caller's malloc failed; report zero bytes
     initialized so the caller can retry or give up.  */
  if (seg_org == NULL)
    return 0;

  /* Align the begin ptr up.
   */
  ptr = SCM_GC_CARD_UP (seg_org);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);

  /* Find the right place and insert the segment record.  The table is
   * kept sorted by base address so the conservative scan can locate a
   * candidate pointer's segment.
   */
  new_seg_index = 0;
  while (new_seg_index < scm_n_heap_segs
	 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org))
    new_seg_index++;

  {
    /* Shift later records up one slot to open a hole at new_seg_index.
       The caller guarantees the table has room for one more entry.  */
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters.  Each cluster is
     headed by a spine cell whose car points at the cluster's chain of
     free cells and whose cdr links to the next cluster.  */
  {
    SCM clusters;
    SCM *clusterp = &clusters;

    NEXT_DATA_CELL (ptr, span);
    while (ptr < seg_end)
      {
	scm_cell *nxt = ptr;
	scm_cell *prv = NULL;
	scm_cell *last_card = NULL;
	int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
	NEXT_DATA_CELL(nxt, span);

	/* Allocate cluster spine
	 */
	*clusterp = PTR2SCM (ptr);
	SCM_SETCAR (*clusterp, PTR2SCM (nxt));
	clusterp = SCM_CDRLOC (*clusterp);
	ptr = nxt;

	/* Thread the remaining data cells of the cluster into a free
	   list, initializing each card's mark bvec the first time a
	   cell on it is visited.  */
	while (n_data_cells--)
	  {
	    scm_cell *card = SCM_GC_CELL_CARD (ptr);
	    SCM scmptr = PTR2SCM (ptr);
	    nxt = ptr;
	    NEXT_DATA_CELL (nxt, span);
	    prv = ptr;

	    if (card != last_card)
	      {
		INIT_CARD (card, span);
		last_card = card;
	      }

	    SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	    SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));

	    ptr = nxt;
	  }

	/* Terminate this cluster's free-cell chain.  */
	SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
      }

    /* sanity check: after processing whole clusters, ptr must land
       exactly one data-cell step past seg_end.  */
    {
      scm_cell *ref = seg_end;
      NEXT_DATA_CELL (ref, span);
      if (ref != ptr)
	/* [cmm] looks like the segment size doesn't divide cleanly by
	   cluster size. bad cmm! */
	abort();
    }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
2268
2269 static size_t
2270 round_to_cluster_size (scm_t_freelist *freelist, size_t len)
2271 {
2272 size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2273
2274 return
2275 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2276 + ALIGNMENT_SLACK (freelist);
2277 }
2278
/* Grow the heap by one new segment attached to FREELIST.  On failure,
 * either aborts or returns silently according to ERROR_POLICY.  */
static void
alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
#define FUNC_NAME "alloc_some_heap"
{
  SCM_CELLPTR ptr;
  size_t len;

  if (scm_gc_heap_lock)
    {
      /* Critical code sections (such as the garbage collector) aren't
       * supposed to add heap segments.
       */
      fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
      abort ();
    }

  if (scm_n_heap_segs == heap_segment_table_size)
    {
      /* We have to expand the heap segment table to have room for the new
       * segment.  Do not yet increment scm_n_heap_segs -- that is done by
       * init_heap_seg only if the allocation of the segment itself succeeds.
       */
      size_t new_table_size = scm_n_heap_segs + 1;
      size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
      scm_t_heap_seg_data *new_heap_table;

      /* realloc into a temporary so the old table survives failure.  */
      SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
				     realloc ((char *)scm_heap_table, size)));
      if (!new_heap_table)
	{
	  if (error_policy == abort_on_error)
	    {
	      fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
	      abort ();
	    }
	  else
	    {
	      return;
	    }
	}
      else
	{
	  scm_heap_table = new_heap_table;
	  heap_segment_table_size = new_table_size;
	}
    }

  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  {
    /* Assure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f)
     */
    int f = freelist->min_yield_fraction;
    unsigned long h = SCM_HEAP_SIZE;
    /* f is a percentage, hence the factor 100; the denominator uses
       99 rather than 100, which biases toward slightly larger
       segments -- NOTE(review): presumably intentional rounding
       headroom, confirm against gc.h.  */
    size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    size_t smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition: halve the request until malloc
       succeeds or the request drops below the useful minimum.  */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
	   && (len >= smallest))
      {
	size_t rounded_len = round_to_cluster_size (freelist, len);
	SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
	if (ptr)
	  {
	    init_heap_seg (ptr, rounded_len, freelist);
	    return;
	  }
	len /= 2;
      }
  }

  if (error_policy == abort_on_error)
    {
      fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
      abort ();
    }
}
#undef FUNC_NAME
2391
2392 \f
2393 /* {GC Protection Helper Functions}
2394 */
2395
2396
2397 /*
2398 * If within a function you need to protect one or more scheme objects from
2399 * garbage collection, pass them as parameters to one of the
2400 * scm_remember_upto_here* functions below. These functions don't do
2401 * anything, but since the compiler does not know that they are actually
2402 * no-ops, it will generate code that calls these functions with the given
2403 * parameters. Therefore, you can be sure that the compiler will keep those
2404 * scheme values alive (on the stack or in a register) up to the point where
 * scm_remember_upto_here* is called.  In other words, place the call to
 * scm_remember_upto_here* _after_ the last code in your function that
 * depends on the existence of the scheme object.
2408 *
2409 * Example: We want to make sure that the string object str does not get
2410 * garbage collected during the execution of 'some_function' in the code
2411 * below, because otherwise the characters belonging to str would be freed and
2412 * 'some_function' might access freed memory. To make sure that the compiler
2413 * keeps str alive on the stack or in a register such that it is visible to
2414 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2415 * call to 'some_function'. Note that this would not be necessary if str was
2416 * used anyway after the call to 'some_function'.
2417 * char *chars = SCM_STRING_CHARS (str);
2418 * some_function (chars);
2419 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2420 */
2421
/* Deliberate no-op; see the explanation above.  Calling it keeps OBJ
   live (visible to the conservative GC) up to this point.  */
void
scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
{
  /* Empty. Protects a single object from garbage collection. */
}
2427
/* Deliberate no-op; keeps OBJ1 and OBJ2 live for the conservative GC
   up to this call.  */
void
scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
{
  /* Empty. Protects two objects from garbage collection. */
}
2433
/* Deliberate no-op; variadic form of the scm_remember_upto_here_*
   family.  */
void
scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
{
  /* Empty. Protects any number of objects from garbage collection. */
}
2439
2440 /*
2441 These crazy functions prevent garbage collection
2442 of arguments after the first argument by
2443 ensuring they remain live throughout the
2444 function because they are used in the last
2445 line of the code block.
2446 It'd be better to have a nice compiler hint to
2447 aid the conservative stack-scanning GC. --03/09/00 gjb */
/* Return ELT unchanged.  The ignored trailing arguments remain live up
   to this call, keeping them visible to the conservative GC (see the
   comment above).  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
2453
/* Return I unchanged.  As with scm_return_first, the trailing varargs
 * exist only so callers can keep extra objects alive for the
 * conservative GC until this point.
 */
int
scm_return_first_int (int i, ...)
{
  int first = i;

  return first;
}
2459
2460
/* Permanently protect OBJ from garbage collection by consing it onto
   the global scm_permobjs list.  Returns OBJ.  There is no way to
   undo this protection.  */
SCM
scm_permanent_object (SCM obj)
{
  /* Critical section: scm_permobjs is shared global state.  */
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2469
2470
2471 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2472 other references are dropped, until the object is unprotected by calling
2473 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
2474 i. e. it is possible to protect the same object several times, but it is
2475 necessary to unprotect the object the same number of times to actually get
2476 the object unprotected. It is an error to unprotect an object more often
   than it has been protected before.  The function scm_gc_protect_object
   returns OBJ.
2479 */
2480
2481 /* Implementation note: For every object X, there is a counter which
2482 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
2483 */
2484
SCM
scm_gc_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  /* Find (or create with count 0) OBJ's entry in the scm_protects
     table, then increment its protection count.  */
  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));

  SCM_REALLOW_INTS;

  return obj;
}
2500
2501
/* Remove any protection for OBJ established by a prior call to
   scm_gc_protect_object.  This function returns OBJ.

   See scm_gc_protect_object for more information.  */
SCM
scm_gc_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_FALSEP (handle))
    {
      /* Unbalanced unprotect: this is a caller bug, so die loudly.  */
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* Decrement the protection count; drop the table entry entirely
         once it reaches zero.  */
      SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
      if (SCM_EQ_P (count, SCM_MAKINUM (0)))
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, count);
    }

  SCM_REALLOW_INTS;

  return obj;
}
2534
/* Register the cell location P as a GC root.  Registrations nest: the
   count for P is incremented, keyed by P's address in the
   scm_gc_registered_roots table.  */
void
scm_gc_register_root (SCM *p)
{
  SCM handle;
  SCM key = scm_long2num ((long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
  SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));

  SCM_REALLOW_INTS;
}
2549
/* Undo one scm_gc_register_root (p) registration.  Aborts if P was
   never registered; removes the table entry when its nesting count
   drops to zero.  */
void
scm_gc_unregister_root (SCM *p)
{
  SCM handle;
  SCM key = scm_long2num ((long) p);

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashv_get_handle (scm_gc_registered_roots, key);

  if (SCM_FALSEP (handle))
    {
      fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
      abort ();
    }
  else
    {
      SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
      if (SCM_EQ_P (count, SCM_MAKINUM (0)))
	scm_hashv_remove_x (scm_gc_registered_roots, key);
      else
	SCM_SETCDR (handle, count);
    }

  SCM_REALLOW_INTS;
}
2577
2578 void
2579 scm_gc_register_roots (SCM *b, unsigned long n)
2580 {
2581 SCM *p = b;
2582 for (; p < b + n; ++p)
2583 scm_gc_register_root (p);
2584 }
2585
2586 void
2587 scm_gc_unregister_roots (SCM *b, unsigned long n)
2588 {
2589 SCM *p = b;
2590 for (; p < b + n; ++p)
2591 scm_gc_unregister_root (p);
2592 }
2593
/* Non-zero once process termination has begun; set only by cleanup()
   below.  NOTE(review): presumably consulted elsewhere in libguile to
   suppress work during shutdown -- confirm against other files.  */
int terminating;

/* called on process termination. */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
/* on_exit handlers receive the exit status and a user argument.  */
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  /* Make sure buffered port output reaches its destination before the
     process exits.  */
  scm_flush_all_ports ();
}
2614
2615 \f
/* Allocate and initialize the first heap segment for FREELIST, trying
   INIT_HEAP_SIZE bytes first and falling back to SCM_HEAP_SEG_SIZE.
   Also derives the freelist's initial min_yield and grow_heap_p from
   the resulting heap size.  Returns 0 on success, 1 if no segment
   could be allocated.  */
static int
make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
{
  size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);

  /* init_heap_seg returns 0 when handed a NULL pointer, so a failed
     malloc falls through to the smaller fallback size.  */
  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
		      rounded_size,
		      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
			  rounded_size,
			  freelist))
	return 1;
    }
  else
    scm_expmem = 1;

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
			   / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
2641
2642 \f
2643 static void
2644 init_freelist (scm_t_freelist *freelist,
2645 int span,
2646 long cluster_size,
2647 int min_yield)
2648 {
2649 freelist->clusters = SCM_EOL;
2650 freelist->cluster_size = cluster_size + 1;
2651 freelist->left_to_collect = 0;
2652 freelist->clusters_allocated = 0;
2653 freelist->min_yield = 0;
2654 freelist->min_yield_fraction = min_yield;
2655 freelist->span = span;
2656 freelist->collected = 0;
2657 freelist->collected_1 = 0;
2658 freelist->heap_size = 0;
2659 }
2660
2661
2662 /* Get an integer from an environment variable. */
/* Get an integer from environment variable VAR, falling back to DEF
 * when VAR is unset or does not begin with a decimal number.
 */
static int
scm_i_getenv_int (const char *var, int def)
{
  char *tail;
  long parsed;
  char *val = getenv (var);

  if (val == NULL)
    return def;

  parsed = strtol (val, &tail, 10);
  return (tail == val) ? def : (int) parsed;
}
2675
2676
/* One-time initialization of the GC storage subsystem: protected-object
   slots, master freelists (tuned from GUILE_* environment variables),
   the heap segment table, the two initial heap segments, GC hooks, and
   the port table.  Returns 0 on success, 1 on allocation failure.  */
int
scm_init_storage ()
{
  unsigned long gc_trigger_1;
  unsigned long gc_trigger_2;
  size_t init_heap_size_1;
  size_t init_heap_size_2;
  size_t j;

#if (SCM_DEBUG_CELL_ACCESSES == 1)
  scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

  /* Clear all protected-object slots before GC can ever run.  */
  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
  init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
  gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
  init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
  scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);

  scm_expmem = 0;

  /* NOTE(review): this assignment to j appears to be dead -- j is not
     read again below.  Verify before removing.  */
  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  /* Start with room for the two initial segments created below.  */
  scm_heap_table = ((scm_t_heap_seg_data *)
		    scm_must_malloc (sizeof (scm_t_heap_seg_data) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
  init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change. do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  */
  scm_port_table = (scm_t_port **)
    malloc (sizeof (scm_t_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_c_make_hash_table (31);
  scm_gc_registered_roots = scm_c_make_hash_table (31);

  return 0;
}
2749
2750 \f
2751
/* The Scheme-level hook run after each garbage collection (bound to
   after-gc-hook in scm_init_gc).  */
SCM scm_after_gc_hook;

/* System async whose marking triggers gc_async_thunk; created in
   scm_init_gc.  */
static SCM gc_async;

/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
  return SCM_UNSPECIFIED;
}
2766
2767
2768 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2769 * the garbage collection. The only purpose of this function is to mark the
2770 * gc_async (which will eventually lead to the execution of the
2771 * gc_async_thunk).
2772 */
static void *
mark_gc_async (void * hook_data SCM_UNUSED,
	       void *func_data SCM_UNUSED,
	       void *data SCM_UNUSED)
{
  /* If cell access debugging is enabled, the user may choose to perform
   * additional garbage collections after an arbitrary number of cell
   * accesses.  We don't want the scheme level after-gc-hook to be performed
   * for each of these garbage collections for the following reason: The
   * execution of the after-gc-hook causes cell accesses itself.  Thus, if the
   * after-gc-hook was performed with every gc, and if the gc was performed
   * after a very small number of cell accesses, then the number of cell
   * accesses during the execution of the after-gc-hook will suffice to cause
   * the execution of the next gc.  Then, guile would keep executing the
   * after-gc-hook over and over again, and would never come to do other
   * things.
   *
   * To overcome this problem, if cell access debugging with additional
   * garbage collections is enabled, the after-gc-hook is never run by the
   * garbage collecter.  When running guile with cell access debugging and the
   * execution of the after-gc-hook is desired, then it is necessary to run
   * the hook explicitly from the user code.  This has the effect, that from
   * the scheme level point of view it seems that garbage collection is
   * performed with a much lower frequency than it actually is.  Obviously,
   * this will not work for code that depends on a fixed one to one
   * relationship between the execution counts of the C level garbage
   * collection hooks and the execution count of the scheme level
   * after-gc-hook.
   */
#if (SCM_DEBUG_CELL_ACCESSES == 1)
  if (debug_cells_gc_interval == 0)
    scm_system_async_mark (gc_async);
#else
  scm_system_async_mark (gc_async);
#endif

  /* Registered as a C hook; the return value is unused.  */
  return NULL;
}
2811
2812
/* Register the Scheme-visible parts of the GC: the after-gc-hook, the
   async thunk that runs it, and (via the snarfed gc.x include) all
   Scheme primitives defined in this file.  */
void
scm_init_gc ()
{
  SCM after_gc_thunk;

  scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
  scm_c_define ("after-gc-hook", scm_after_gc_hook);

  after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
				    gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk);	/* protected via scm_asyncs */

  /* Arrange for the async to be marked at the end of every collection.  */
  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#ifndef SCM_MAGIC_SNARFER
#include "libguile/gc.x"
#endif
}
2831
2832 #endif /*MARK_DEPENDENCIES*/
2833
2834 /*
2835 Local Variables:
2836 c-file-style: "gnu"
2837 End:
2838 */