1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42
43 /* #define DEBUGINFO */
44
45 /* SECTION: This code is compiled once.
46 */
47
48 #ifndef MARK_DEPENDENCIES
49
50 \f
51 #include <stdio.h>
52 #include <errno.h>
53 #include <string.h>
54
55 #include "libguile/_scm.h"
56 #include "libguile/eval.h"
57 #include "libguile/stime.h"
58 #include "libguile/stackchk.h"
59 #include "libguile/struct.h"
60 #include "libguile/smob.h"
61 #include "libguile/unif.h"
62 #include "libguile/async.h"
63 #include "libguile/ports.h"
64 #include "libguile/root.h"
65 #include "libguile/strings.h"
66 #include "libguile/vectors.h"
67 #include "libguile/weaks.h"
68 #include "libguile/hashtab.h"
69 #include "libguile/tags.h"
70
71 #include "libguile/validate.h"
72 #include "libguile/deprecation.h"
73 #include "libguile/gc.h"
74
75 #ifdef GUILE_DEBUG_MALLOC
76 #include "libguile/debug-malloc.h"
77 #endif
78
79 #ifdef HAVE_MALLOC_H
80 #include <malloc.h>
81 #endif
82
83 #ifdef HAVE_UNISTD_H
84 #include <unistd.h>
85 #endif
86
87 #ifdef __STDC__
88 #include <stdarg.h>
89 #define var_start(x, y) va_start(x, y)
90 #else
91 #include <varargs.h>
92 #define var_start(x, y) va_start(x)
93 #endif
94
95 \f
96
97 unsigned int scm_gc_running_p = 0;
98
99 \f
100
101 #if (SCM_DEBUG_CELL_ACCESSES == 1)
102
103 scm_t_bits scm_tc16_allocated;
104
105 /* Set this to != 0 if every cell that is accessed shall be checked:
106 */
107 unsigned int scm_debug_cell_accesses_p = 1;
108
/* Set this to 0 if no additional GCs shall be performed, otherwise set it to
 * the number of cell accesses after which a GC shall be called.
 */
112 static unsigned int debug_cells_gc_interval = 0;
113
114
/* If an allocated cell is detected during garbage collection, this means that
 * some code has just obtained the object but was preempted before the
 * initialization of the object was completed.  Consequently, some entries
 * of the allocated cell may already contain SCM objects.  Therefore,
 * allocated cells are scanned conservatively.  */
120 static SCM
121 allocated_mark (SCM allocated)
122 {
123 scm_gc_mark_cell_conservatively (allocated);
124 return SCM_BOOL_F;
125 }
126
127
/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves determining whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment, and whether the cell
 * pointed to is not a free cell.  Further, additional garbage collections may
 * get executed after a user defined number of cell accesses.  This helps to
 * find places in the C code where references are dropped for extremely short
 * periods.
 */
136 void
137 scm_assert_cell_valid (SCM cell)
138 {
139 static unsigned int already_running = 0;
140
141 if (scm_debug_cell_accesses_p && !already_running)
142 {
143 already_running = 1; /* set to avoid recursion */
144
145 if (!scm_cellp (cell))
146 {
          fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n",
                   (unsigned long) SCM_UNPACK (cell));
149 abort ();
150 }
151 else if (!scm_gc_running_p)
152 {
          /* Dirk::FIXME:: During garbage collection, references to free cells
             can occur.  This is all right during conservative marking, but
             should not happen otherwise (I think).  The case of free cells
             accessed during conservative marking is handled in function
             scm_mark_locations.  However, accesses to free cells still occur
             during gc.  I don't understand why this happens.  If it is
             a bug and gets fixed, the following test should also work while
             gc is running.
           */
162 if (SCM_FREE_CELL_P (cell))
163 {
              fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n",
                       (unsigned long) SCM_UNPACK (cell));
166 abort ();
167 }
168
169 /* If desired, perform additional garbage collections after a user
170 * defined number of cell accesses.
171 */
172 if (debug_cells_gc_interval)
173 {
174 static unsigned int counter = 0;
175
176 if (counter != 0)
177 {
178 --counter;
179 }
180 else
181 {
182 counter = debug_cells_gc_interval;
183 scm_igc ("scm_assert_cell_valid");
184 }
185 }
186 }
187 already_running = 0; /* re-enable */
188 }
189 }
190
191
192 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
193 (SCM flag),
194 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
195 "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
196 "but no additional calls to garbage collection are issued.\n"
197 "If @var{flag} is a number, cell access checking is enabled,\n"
198 "with an additional garbage collection after the given\n"
199 "number of cell accesses.\n"
200 "This procedure only exists when the compile-time flag\n"
201 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
202 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
203 {
204 if (SCM_FALSEP (flag)) {
205 scm_debug_cell_accesses_p = 0;
206 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
207 debug_cells_gc_interval = 0;
208 scm_debug_cell_accesses_p = 1;
209 } else if (SCM_INUMP (flag)) {
210 long int f = SCM_INUM (flag);
211 if (f <= 0) SCM_OUT_OF_RANGE (1, flag);
212 debug_cells_gc_interval = f;
213 scm_debug_cell_accesses_p = 1;
214 } else {
215 SCM_WRONG_TYPE_ARG (1, flag);
216 }
217 return SCM_UNSPECIFIED;
218 }
219 #undef FUNC_NAME
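
/* Typical usage of the procedure above from Scheme, in a build configured
 * with SCM_DEBUG_CELL_ACCESSES == 1 (an illustrative sketch only):
 *
 *   (set-debug-cell-accesses! #t)    ; check every cell access
 *   (set-debug-cell-accesses! 1000)  ; also GC after every 1000 accesses
 *   (set-debug-cell-accesses! #f)    ; disable checking again
 */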
220
221 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
222
223 \f
224
/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which cons pairs and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of the heap.  If this much heap
 * is allocated initially, the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type size_t.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c.
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more
 * heap is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */
258
/*
 * Heap size 45000 and 40% min yield gives quick startup and no extra
 * heap allocation.  Higher min yield values may lead to large heaps,
 * especially if a program varies its maximum memory consumption
 * between the different freelists.
 */
265
266 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
267 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
268 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
269 size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
270 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
271 int scm_default_min_yield_1 = 40;
272
273 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
274 size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
275 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application.
 */
279 int scm_default_min_yield_2 = 40;
280
size_t scm_default_max_segment_size = 2097000L; /* a little less than 2 MB */
282
283 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
284 #ifdef _QC
285 # define SCM_HEAP_SEG_SIZE 32768L
286 #else
287 # ifdef sequent
288 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
289 # else
290 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
291 # endif
292 #endif
293 /* Make heap grow with factor 1.5 */
294 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
295 #define SCM_INIT_MALLOC_LIMIT 100000
296 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
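
#if 0
/* A minimal sketch (not compiled) of how the growth parameters above play
 * out; the concrete figures are illustrative assumptions only.  */
static void
example_growth_arithmetic (void)
{
  unsigned long heap = 45000;                     /* cells */
  unsigned long mtrigger = SCM_INIT_MALLOC_LIMIT; /* 100000 bytes */
  unsigned long nm = 150000;                      /* hypothetical malloc total */

  /* Heap growth: SCM_EXPHEAP adds half the current size, i.e. factor 1.5. */
  heap += SCM_EXPHEAP (heap);                     /* now 67500 cells */

  /* Malloc trigger: raised the same way scm_must_malloc below does it. */
  if (nm > mtrigger - SCM_MTRIGGER_HYSTERESIS)
    {
      if (nm > mtrigger)
        mtrigger = nm + nm / 2;                   /* 225000 bytes */
      else
        mtrigger += mtrigger / 2;
    }
}
#endif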
297
298 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
299 aligned inner bounds for allocated storage */
300
301 #ifdef PROT386
302 /*in 386 protected mode we must only adjust the offset */
303 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
304 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
305 #else
306 # ifdef _UNICOS
307 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
308 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
309 # else
310 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
311 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
312 # endif /* UNICOS */
313 #endif /* PROT386 */
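
/* For example, with 8-byte cells and span 1, CELL_UP rounds a pointer up to
 * the next 8-byte boundary and CELL_DN rounds it down:  CELL_UP (0x1003, 1)
 * yields 0x1008 while CELL_DN (0x1003, 1) yields 0x1000; with span 2 the
 * same macros align to 16-byte double-cell boundaries.  (The addresses are
 * illustrative only.)  */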
314
315 #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
316
317 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
318 #define CLUSTER_SIZE_IN_BYTES(freelist) \
319 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
320
321 \f
322 /* scm_freelists
323 */
324
325 typedef struct scm_t_freelist {
326 /* collected cells */
327 SCM cells;
328 /* number of cells left to collect before cluster is full */
329 unsigned int left_to_collect;
330 /* number of clusters which have been allocated */
331 unsigned int clusters_allocated;
332 /* a list of freelists, each of size cluster_size,
333 * except the last one which may be shorter
334 */
335 SCM clusters;
336 SCM *clustertail;
337 /* this is the number of objects in each cluster, including the spine cell */
338 unsigned int cluster_size;
  /* indicates that we should grow the heap instead of running a GC
   */
341 int grow_heap_p;
342 /* minimum yield on this list in order not to grow the heap
343 */
344 long min_yield;
345 /* defines min_yield as percent of total heap size
346 */
347 int min_yield_fraction;
348 /* number of cells per object on this list */
349 int span;
350 /* number of collected cells during last GC */
351 unsigned long collected;
352 /* number of collected cells during penultimate GC */
353 unsigned long collected_1;
354 /* total number of cells in heap segments
355 * belonging to this list.
356 */
357 unsigned long heap_size;
358 } scm_t_freelist;
359
360 SCM scm_freelist = SCM_EOL;
361 scm_t_freelist scm_master_freelist = {
362 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
363 };
364 SCM scm_freelist2 = SCM_EOL;
365 scm_t_freelist scm_master_freelist2 = {
366 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
367 };
368
369 /* scm_mtrigger
370 * is the number of bytes of must_malloc allocation needed to trigger gc.
371 */
372 unsigned long scm_mtrigger;
373
374 /* scm_gc_heap_lock
375 * If set, don't expand the heap. Set only during gc, during which no allocation
376 * is supposed to take place anyway.
377 */
378 int scm_gc_heap_lock = 0;
379
380 /* GC Blocking
381 * Don't pause for collection if this is set -- just
382 * expand the heap.
383 */
384 int scm_block_gc = 1;
385
386 /* During collection, this accumulates objects holding
387 * weak references.
388 */
389 SCM scm_weak_vectors;
390
391 /* During collection, this accumulates structures which are to be freed.
392 */
393 SCM scm_structs_to_free;
394
395 /* GC Statistics Keeping
396 */
397 unsigned long scm_cells_allocated = 0;
398 unsigned long scm_mallocated = 0;
399 unsigned long scm_gc_cells_collected;
400 unsigned long scm_gc_yield;
401 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
402 unsigned long scm_gc_malloc_collected;
403 unsigned long scm_gc_ports_collected;
404 unsigned long scm_gc_time_taken = 0;
405 static unsigned long t_before_gc;
406 static unsigned long t_before_sweep;
407 unsigned long scm_gc_mark_time_taken = 0;
408 unsigned long scm_gc_sweep_time_taken = 0;
409 unsigned long scm_gc_times = 0;
410 unsigned long scm_gc_cells_swept = 0;
411 double scm_gc_cells_marked_acc = 0.;
412 double scm_gc_cells_swept_acc = 0.;
413
414 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
415 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
416 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
417 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
418 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
419 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
420 SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
421 SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
422 SCM_SYMBOL (sym_times, "gc-times");
423 SCM_SYMBOL (sym_cells_marked, "cells-marked");
424 SCM_SYMBOL (sym_cells_swept, "cells-swept");
425
426 typedef struct scm_t_heap_seg_data
427 {
428 /* lower and upper bounds of the segment */
429 SCM_CELLPTR bounds[2];
430
  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_master_freelist.  */
433 scm_t_freelist *freelist;
434
435 /* number of cells per object in this segment */
436 int span;
437 } scm_t_heap_seg_data;
438
439
440
441 static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);
442
443 typedef enum { return_on_error, abort_on_error } policy_on_error;
444 static void alloc_some_heap (scm_t_freelist *, policy_on_error);
445
446
447 #define SCM_HEAP_SIZE \
448 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
449 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
450
451 #define BVEC_GROW_SIZE 256
452 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
453 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))
454
455 /* mark space allocation */
456
457 typedef struct scm_t_mark_space
458 {
459 scm_t_c_bvec_limb *bvec_space;
460 struct scm_t_mark_space *next;
461 } scm_t_mark_space;
462
463 static scm_t_mark_space *current_mark_space;
464 static scm_t_mark_space **mark_space_ptr;
465 static ptrdiff_t current_mark_space_offset;
466 static scm_t_mark_space *mark_space_head;
467
468 static scm_t_c_bvec_limb *
469 get_bvec ()
470 #define FUNC_NAME "get_bvec"
471 {
472 scm_t_c_bvec_limb *res;
473
474 if (!current_mark_space)
475 {
476 SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
477 if (!current_mark_space)
478 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
479
480 current_mark_space->bvec_space = NULL;
481 current_mark_space->next = NULL;
482
483 *mark_space_ptr = current_mark_space;
484 mark_space_ptr = &(current_mark_space->next);
485
486 return get_bvec ();
487 }
488
489 if (!(current_mark_space->bvec_space))
490 {
491 SCM_SYSCALL (current_mark_space->bvec_space =
492 (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
493 if (!(current_mark_space->bvec_space))
494 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
495
496 current_mark_space_offset = 0;
497
498 return get_bvec ();
499 }
500
501 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
502 {
503 current_mark_space = NULL;
504
505 return get_bvec ();
506 }
507
508 res = current_mark_space->bvec_space + current_mark_space_offset;
509 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
510
511 return res;
512 }
513 #undef FUNC_NAME
514
515
516 static void
517 clear_mark_space ()
518 {
519 scm_t_mark_space *ms;
520
521 for (ms = mark_space_head; ms; ms = ms->next)
522 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
523 }
524
525
526 \f
527 /* Debugging functions. */
528
529 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
530
531 static long int heap_segment (SCM obj); /* forw decl: non-debugging func */
532
533 static void
534 map_free_list (scm_t_freelist *master, SCM freelist)
535 {
536 long last_seg = -1, count = 0;
537 SCM f;
538
539 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
540 {
541 long int this_seg = heap_segment (f);
542
543 if (this_seg == -1)
544 {
          fprintf (stderr,
                   "map_free_list: can't find segment containing cell %lx\n",
                   (unsigned long int) SCM_UNPACK (f));
548 abort ();
549 }
550 else if (this_seg != last_seg)
551 {
552 if (last_seg != -1)
553 fprintf (stderr, " %5ld %d-cells in segment %ld\n",
554 (long) count, master->span, (long) last_seg);
555 last_seg = this_seg;
556 count = 0;
557 }
558 count++;
559 }
560 if (last_seg != -1)
561 fprintf (stderr, " %5ld %d-cells in segment %ld\n",
562 (long) count, master->span, (long) last_seg);
563 }
564
565 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
566 (),
567 "Print debugging information about the free-list.\n"
568 "@code{map-free-list} is only included in\n"
569 "@code{--enable-guile-debug} builds of Guile.")
570 #define FUNC_NAME s_scm_map_free_list
571 {
572 size_t i;
573
574 fprintf (stderr, "%ld segments total (%d:%ld",
575 (long) scm_n_heap_segs,
576 scm_heap_table[0].span,
577 (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));
578
579 for (i = 1; i != scm_n_heap_segs; i++)
580 fprintf (stderr, ", %d:%ld",
581 scm_heap_table[i].span,
582 (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
583 fprintf (stderr, ")\n");
584 map_free_list (&scm_master_freelist, scm_freelist);
585 map_free_list (&scm_master_freelist2, scm_freelist2);
586 fflush (stderr);
587
588 return SCM_UNSPECIFIED;
589 }
590 #undef FUNC_NAME
591
592 static long last_cluster;
593 static long last_size;
594
595 static long
596 free_list_length (char *title, long i, SCM freelist)
597 {
598 SCM ls;
599 long n = 0;
600 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
601 if (SCM_FREE_CELL_P (ls))
602 ++n;
603 else
604 {
605 fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n);
606 abort ();
607 }
608 if (n != last_size)
609 {
610 if (i > 0)
611 {
612 if (last_cluster == i - 1)
613 fprintf (stderr, "\t%ld\n", (long) last_size);
614 else
615 fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
616 }
617 if (i >= 0)
618 fprintf (stderr, "%s %ld", title, (long) i);
619 else
620 fprintf (stderr, "%s\t%ld\n", title, (long) n);
621 last_cluster = i;
622 last_size = n;
623 }
624 return n;
625 }
626
627 static void
628 free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
629 {
630 SCM clusters;
631 long i = 0, len, n = 0;
632 fprintf (stderr, "%s\n\n", title);
633 n += free_list_length ("free list", -1, freelist);
634 for (clusters = master->clusters;
635 SCM_NNULLP (clusters);
636 clusters = SCM_CDR (clusters))
637 {
638 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
639 n += len;
640 }
641 if (last_cluster == i - 1)
642 fprintf (stderr, "\t%ld\n", (long) last_size);
643 else
644 fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
645 fprintf (stderr, "\ntotal %ld objects\n\n", (long) n);
646 }
647
648 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
649 (),
650 "Print debugging information about the free-list.\n"
651 "@code{free-list-length} is only included in\n"
652 "@code{--enable-guile-debug} builds of Guile.")
653 #define FUNC_NAME s_scm_free_list_length
654 {
655 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
656 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
657 return SCM_UNSPECIFIED;
658 }
659 #undef FUNC_NAME
660
661 #endif /* defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) */
662
663 #ifdef GUILE_DEBUG_FREELIST
664
665 /* Non-zero if freelist debugging is in effect. Set this via
666 `gc-set-debug-check-freelist!'. */
667 static int scm_debug_check_freelist = 0;
668
669 /* Number of calls to SCM_NEWCELL since startup. */
670 static unsigned long scm_newcell_count;
671 static unsigned long scm_newcell2_count;
672
673 /* Search freelist for anything that isn't marked as a free cell.
674 Abort if we find something. */
675 static void
676 scm_check_freelist (SCM freelist)
677 {
678 SCM f;
679 long i = 0;
680
681 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
682 if (!SCM_FREE_CELL_P (f))
683 {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
                 (unsigned long) scm_newcell_count, (unsigned long) i);
686 abort ();
687 }
688 }
689
690 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
691 (SCM flag),
692 "If @var{flag} is @code{#t}, check the freelist for consistency\n"
693 "on each cell allocation. This procedure only exists when the\n"
694 "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
695 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
696 {
697 /* [cmm] I did a double-take when I read this code the first time.
698 well, FWIW. */
699 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
700 return SCM_UNSPECIFIED;
701 }
702 #undef FUNC_NAME
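
/* Typical usage from Scheme, in a build configured with
 * GUILE_DEBUG_FREELIST (an illustrative sketch only):
 *
 *   (gc-set-debug-check-freelist! #t)  ; verify the freelist on each newcell
 *   (gc-set-debug-check-freelist! #f)  ; restore normal allocation speed
 */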
703
704
705 SCM
706 scm_debug_newcell (void)
707 {
708 SCM new;
709
710 scm_newcell_count++;
711 if (scm_debug_check_freelist)
712 {
713 scm_check_freelist (scm_freelist);
714 scm_gc();
715 }
716
717 /* The rest of this is supposed to be identical to the SCM_NEWCELL
718 macro. */
719 if (SCM_NULLP (scm_freelist))
720 {
721 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
722 SCM_GC_SET_ALLOCATED (new);
723 }
724 else
725 {
726 new = scm_freelist;
727 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
728 SCM_GC_SET_ALLOCATED (new);
729 }
730
731 return new;
732 }
733
734 SCM
735 scm_debug_newcell2 (void)
736 {
737 SCM new;
738
739 scm_newcell2_count++;
740 if (scm_debug_check_freelist)
741 {
742 scm_check_freelist (scm_freelist2);
743 scm_gc ();
744 }
745
746 /* The rest of this is supposed to be identical to the SCM_NEWCELL
747 macro. */
748 if (SCM_NULLP (scm_freelist2))
749 {
750 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
751 SCM_GC_SET_ALLOCATED (new);
752 }
753 else
754 {
755 new = scm_freelist2;
756 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
757 SCM_GC_SET_ALLOCATED (new);
758 }
759
760 return new;
761 }
762
763 #endif /* GUILE_DEBUG_FREELIST */
764
765 \f
766
767 static unsigned long
768 master_cells_allocated (scm_t_freelist *master)
769 {
770 /* the '- 1' below is to ignore the cluster spine cells. */
771 long objects = master->clusters_allocated * (master->cluster_size - 1);
772 if (SCM_NULLP (master->clusters))
773 objects -= master->left_to_collect;
774 return master->span * objects;
775 }
776
777 static unsigned long
778 freelist_length (SCM freelist)
779 {
780 long n;
781 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
782 ++n;
783 return n;
784 }
785
786 static unsigned long
787 compute_cells_allocated ()
788 {
789 return (scm_cells_allocated
790 + master_cells_allocated (&scm_master_freelist)
791 + master_cells_allocated (&scm_master_freelist2)
792 - scm_master_freelist.span * freelist_length (scm_freelist)
793 - scm_master_freelist2.span * freelist_length (scm_freelist2));
794 }
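
/* An illustrative example of the bookkeeping above (hypothetical figures):
 * if scm_cells_allocated stands at 5000, the masters have handed out
 * clusters worth 2000 further cells, and cells totalling 300 still sit
 * unused on the local freelists, the snapshot computed here is
 * 5000 + 2000 - 300 = 6700 cells.  */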
795
796 /* {Scheme Interface to GC}
797 */
798
799 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
800 (),
801 "Return an association list of statistics about Guile's current\n"
802 "use of storage.")
803 #define FUNC_NAME s_scm_gc_stats
804 {
805 long i;
806 long n;
807 SCM heap_segs;
808 unsigned long int local_scm_mtrigger;
809 unsigned long int local_scm_mallocated;
810 unsigned long int local_scm_heap_size;
811 unsigned long int local_scm_cells_allocated;
812 unsigned long int local_scm_gc_time_taken;
813 unsigned long int local_scm_gc_times;
814 unsigned long int local_scm_gc_mark_time_taken;
815 unsigned long int local_scm_gc_sweep_time_taken;
816 double local_scm_gc_cells_swept;
817 double local_scm_gc_cells_marked;
818 SCM answer;
819
820 SCM_DEFER_INTS;
821
822 ++scm_block_gc;
823
824 retry:
825 heap_segs = SCM_EOL;
826 n = scm_n_heap_segs;
827 for (i = scm_n_heap_segs; i--; )
828 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
829 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
830 heap_segs);
831 if (scm_n_heap_segs != n)
832 goto retry;
833
834 --scm_block_gc;
835
836 /* Below, we cons to produce the resulting list. We want a snapshot of
837 * the heap situation before consing.
838 */
839 local_scm_mtrigger = scm_mtrigger;
840 local_scm_mallocated = scm_mallocated;
841 local_scm_heap_size = SCM_HEAP_SIZE;
842 local_scm_cells_allocated = compute_cells_allocated ();
843 local_scm_gc_time_taken = scm_gc_time_taken;
844 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
845 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
846 local_scm_gc_times = scm_gc_times;
847 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
848 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
849
850 answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
851 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
852 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
853 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
854 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
855 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
856 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
857 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
858 scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
859 scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
860 scm_cons (sym_heap_segments, heap_segs),
861 SCM_UNDEFINED);
862 SCM_ALLOW_INTS;
863 return answer;
864 }
865 #undef FUNC_NAME
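
/* Example of reading one entry from the alist that gc-stats returns
 * (an illustrative sketch only):
 *
 *   (cdr (assq 'cells-allocated (gc-stats)))
 *
 * The same figure is available from C via compute_cells_allocated above.  */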
866
867
868 static void
869 gc_start_stats (const char *what SCM_UNUSED)
870 {
871 t_before_gc = scm_c_get_internal_run_time ();
872 scm_gc_cells_swept = 0;
873 scm_gc_cells_collected = 0;
874 scm_gc_yield_1 = scm_gc_yield;
875 scm_gc_yield = (scm_cells_allocated
876 + master_cells_allocated (&scm_master_freelist)
877 + master_cells_allocated (&scm_master_freelist2));
878 scm_gc_malloc_collected = 0;
879 scm_gc_ports_collected = 0;
880 }
881
882
883 static void
884 gc_end_stats ()
885 {
886 unsigned long t = scm_c_get_internal_run_time ();
887 scm_gc_time_taken += (t - t_before_gc);
888 scm_gc_sweep_time_taken += (t - t_before_sweep);
889 ++scm_gc_times;
890
891 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
892 scm_gc_cells_swept_acc += scm_gc_cells_swept;
893 }
894
895
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
	    "Return an integer that is unique to @var{obj} for the\n"
	    "lifetime of @var{obj}.")
900 #define FUNC_NAME s_scm_object_address
901 {
902 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
903 }
904 #undef FUNC_NAME
905
906
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
	    (),
	    "Scans all SCM objects and reclaims for further use those that are\n"
	    "no longer accessible.")
911 #define FUNC_NAME s_scm_gc
912 {
913 SCM_DEFER_INTS;
914 scm_igc ("call");
915 SCM_ALLOW_INTS;
916 return SCM_UNSPECIFIED;
917 }
918 #undef FUNC_NAME
919
920
921 \f
922 /* {C Interface For When GC is Triggered}
923 */
924
925 static void
926 adjust_min_yield (scm_t_freelist *freelist)
927 {
928 /* min yield is adjusted upwards so that next predicted total yield
929 * (allocated cells actually freed by GC) becomes
930 * `min_yield_fraction' of total heap size. Note, however, that
931 * the absolute value of min_yield will correspond to `collected'
932 * on one master (the one which currently is triggering GC).
933 *
934 * The reason why we look at total yield instead of cells collected
935 * on one list is that we want to take other freelists into account.
936 * On this freelist, we know that (local) yield = collected cells,
937 * but that's probably not the case on the other lists.
938 *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GCs.)
   */
942 if (freelist->min_yield_fraction)
943 {
944 /* Pick largest of last two yields. */
945 long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
946 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
947 #ifdef DEBUGINFO
      fprintf (stderr, " after GC = %lu, delta = %ld\n",
               (unsigned long) scm_cells_allocated,
               (long) delta);
951 #endif
952 if (delta > 0)
953 freelist->min_yield += delta;
954 }
955 }
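
/* A numeric sketch of the adjustment above (illustrative figures only):
 * with a total heap of 100000 cells and min_yield_fraction 40, the target
 * yield is 40000 cells.  If the better of the last two GCs yielded only
 * 30000 cells, delta is 10000 and min_yield rises by that amount, making
 * it more likely that the next allocation grows the heap instead of
 * triggering yet another GC.  */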
956
957
958 /* When we get POSIX threads support, the master will be global and
959 * common while the freelist will be individual for each thread.
960 */
961
962 SCM
963 scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
964 {
965 SCM cell;
966 ++scm_ints_disabled;
967 do
968 {
969 if (SCM_NULLP (master->clusters))
970 {
971 if (master->grow_heap_p || scm_block_gc)
972 {
973 /* In order to reduce gc frequency, try to allocate a new heap
974 * segment first, even if gc might find some free cells. If we
975 * can't obtain a new heap segment, we will try gc later.
976 */
977 master->grow_heap_p = 0;
978 alloc_some_heap (master, return_on_error);
979 }
980 if (SCM_NULLP (master->clusters))
981 {
982 /* The heap was not grown, either because it wasn't scheduled to
983 * grow, or because there was not enough memory available. In
984 * both cases we have to try gc to get some free cells.
985 */
986 #ifdef DEBUGINFO
          fprintf (stderr, "allocated = %lu, ",
                   (unsigned long) (scm_cells_allocated
                                    + master_cells_allocated (&scm_master_freelist)
                                    + master_cells_allocated (&scm_master_freelist2)));
991 #endif
992 scm_igc ("cells");
993 adjust_min_yield (master);
994 if (SCM_NULLP (master->clusters))
995 {
996 /* gc could not free any cells. Now, we _must_ allocate a
997 * new heap segment, because there is no other possibility
998 * to provide a new cell for the caller.
999 */
1000 alloc_some_heap (master, abort_on_error);
1001 }
1002 }
1003 }
1004 cell = SCM_CAR (master->clusters);
1005 master->clusters = SCM_CDR (master->clusters);
1006 ++master->clusters_allocated;
1007 }
1008 while (SCM_NULLP (cell));
1009
1010 #ifdef GUILE_DEBUG_FREELIST
1011 scm_check_freelist (cell);
1012 #endif
1013
1014 --scm_ints_disabled;
1015 *freelist = SCM_FREE_CELL_CDR (cell);
1016 return cell;
1017 }
1018
1019
1020 #if 0
1021 /* This is a support routine which can be used to reserve a cluster
1022 * for some special use, such as debugging. It won't be useful until
1023 * free cells are preserved between garbage collections.
1024 */
1025
SCM
scm_alloc_cluster (scm_t_freelist *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
1035
1036
1037 scm_t_c_hook scm_before_gc_c_hook;
1038 scm_t_c_hook scm_before_mark_c_hook;
1039 scm_t_c_hook scm_before_sweep_c_hook;
1040 scm_t_c_hook scm_after_sweep_c_hook;
1041 scm_t_c_hook scm_after_gc_c_hook;
1042
1043
1044 void
1045 scm_igc (const char *what)
1046 {
1047 long j;
1048
1049 ++scm_gc_running_p;
1050 scm_c_hook_run (&scm_before_gc_c_hook, 0);
1051 #ifdef DEBUGINFO
1052 fprintf (stderr,
1053 SCM_NULLP (scm_freelist)
1054 ? "*"
1055 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
1056 #endif
1057 /* During the critical section, only the current thread may run. */
1058 SCM_CRITICAL_SECTION_START;
1059
1060 /* fprintf (stderr, "gc: %s\n", what); */
1061
1062 if (!scm_stack_base || scm_block_gc)
1063 {
1064 --scm_gc_running_p;
1065 return;
1066 }
1067
1068 gc_start_stats (what);
1069
1070 if (scm_gc_heap_lock)
1071 /* We've invoked the collector while a GC is already in progress.
1072 That should never happen. */
1073 abort ();
1074
1075 ++scm_gc_heap_lock;
1076
1077 /* flush dead entries from the continuation stack */
1078 {
1079 long x;
1080 long bound;
1081 SCM * elts;
1082 elts = SCM_VELTS (scm_continuation_stack);
1083 bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
1084 x = SCM_INUM (scm_continuation_stack_ptr);
1085 while (x < bound)
1086 {
1087 elts[x] = SCM_BOOL_F;
1088 ++x;
1089 }
1090 }
1091
1092 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1093
1094 clear_mark_space ();
1095
1096 #ifndef USE_THREADS
1097
1098 /* Mark objects on the C stack. */
1099 SCM_FLUSH_REGISTER_WINDOWS;
1100 /* This assumes that all registers are saved into the jmp_buf */
1101 setjmp (scm_save_regs_gc_mark);
1102 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
1103 ( (size_t) (sizeof (SCM_STACKITEM) - 1 +
1104 sizeof scm_save_regs_gc_mark)
1105 / sizeof (SCM_STACKITEM)));
1106
1107 {
1108 unsigned long stack_len = scm_stack_size (scm_stack_base);
1109 #ifdef SCM_STACK_GROWS_UP
1110 scm_mark_locations (scm_stack_base, stack_len);
1111 #else
1112 scm_mark_locations (scm_stack_base - stack_len, stack_len);
1113 #endif
1114 }
1115
1116 #else /* USE_THREADS */
1117
1118 /* Mark every thread's stack and registers */
1119 scm_threads_mark_stacks ();
1120
1121 #endif /* USE_THREADS */
1122
1123 j = SCM_NUM_PROTECTS;
1124 while (j--)
1125 scm_gc_mark (scm_sys_protects[j]);
1126
1127 /* mark the registered roots */
1128 {
1129 size_t i;
1130 for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) {
1131 SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
1132 for (; !SCM_NULLP (l); l = SCM_CDR (l)) {
1133 SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
1134 scm_gc_mark (*p);
1135 }
1136 }
1137 }
1138
1139 /* FIXME: we should have a means to register C functions to be run
1140 * in different phases of GC
1141 */
1142 scm_mark_subr_table ();
1143
1144 #ifndef USE_THREADS
1145 scm_gc_mark (scm_root->handle);
1146 #endif
1147
1148 t_before_sweep = scm_c_get_internal_run_time ();
1149 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1150
1151 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
1152
1153 scm_gc_sweep ();
1154
1155 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1156
1157 --scm_gc_heap_lock;
1158 gc_end_stats ();
1159
1160 SCM_CRITICAL_SECTION_END;
1161 scm_c_hook_run (&scm_after_gc_c_hook, 0);
1162 --scm_gc_running_p;
1163 }
1164
1165 \f
1166
1167 /* {Mark/Sweep}
1168 */
1169
1170 #define MARK scm_gc_mark
1171 #define FNAME "scm_gc_mark"
1172
1173 #endif /*!MARK_DEPENDENCIES*/
1174
1175 /* Mark an object precisely.
1176 */
1177 void
1178 MARK (SCM p)
1179 #define FUNC_NAME FNAME
1180 {
1181 register long i;
1182 register SCM ptr;
1183 scm_t_bits cell_type;
1184
1185 #ifndef MARK_DEPENDENCIES
1186 # define RECURSE scm_gc_mark
1187 #else
1188 /* go through the usual marking, but not for self-cycles. */
1189 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1190 #endif
1191 ptr = p;
1192
1193 #ifdef MARK_DEPENDENCIES
1194 goto gc_mark_loop_first_time;
1195 #endif
1196
/* A simple hack for debugging.  Choose the second branch to get a
   meaningful backtrace for crashes inside the GC.
*/
1200 #if 1
1201 #define goto_gc_mark_loop goto gc_mark_loop
1202 #define goto_gc_mark_nimp goto gc_mark_nimp
1203 #else
1204 #define goto_gc_mark_loop RECURSE(ptr); return
1205 #define goto_gc_mark_nimp RECURSE(ptr); return
1206 #endif
1207
1208 gc_mark_loop:
1209 if (SCM_IMP (ptr))
1210 return;
1211
1212 gc_mark_nimp:
1213
1214 #ifdef MARK_DEPENDENCIES
1215 if (SCM_EQ_P (ptr, p))
1216 return;
1217
1218 scm_gc_mark (ptr);
1219 return;
1220
1221 gc_mark_loop_first_time:
1222 #endif
1223
1224 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1225 /* We are in debug mode. Check the ptr exhaustively. */
1226 if (!scm_cellp (ptr))
1227 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1228 #else
1229 /* In non-debug mode, do at least some cheap testing. */
1230 if (!SCM_CELLP (ptr))
1231 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1232 #endif
1233
1234 #ifndef MARK_DEPENDENCIES
1235
1236 if (SCM_GCMARKP (ptr))
1237 return;
1238
1239 SCM_SETGCMARK (ptr);
1240
1241 #endif
1242
1243 cell_type = SCM_GC_CELL_TYPE (ptr);
1244 switch (SCM_ITAG7 (cell_type))
1245 {
1246 case scm_tcs_cons_nimcar:
1247 if (SCM_IMP (SCM_CDR (ptr)))
1248 {
1249 ptr = SCM_CAR (ptr);
1250 goto_gc_mark_nimp;
1251 }
1252 RECURSE (SCM_CAR (ptr));
1253 ptr = SCM_CDR (ptr);
1254 goto_gc_mark_nimp;
1255 case scm_tcs_cons_imcar:
1256 ptr = SCM_CDR (ptr);
1257 goto_gc_mark_loop;
1258 case scm_tc7_pws:
1259 RECURSE (SCM_SETTER (ptr));
1260 ptr = SCM_PROCEDURE (ptr);
1261 goto_gc_mark_loop;
1262 case scm_tcs_struct:
1263 {
1264 /* XXX - use less explicit code. */
1265 scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_struct;
1266 scm_t_bits * vtable_data = (scm_t_bits *) word0;
1267 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
1268 long len = SCM_SYMBOL_LENGTH (layout);
1269 char * fields_desc = SCM_SYMBOL_CHARS (layout);
1270 scm_t_bits * struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);
1271
1272 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1273 {
1274 RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
1275 RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
1276 }
1277 if (len)
1278 {
1279 long x;
1280
1281 for (x = 0; x < len - 2; x += 2, ++struct_data)
1282 if (fields_desc[x] == 'p')
1283 RECURSE (SCM_PACK (*struct_data));
1284 if (fields_desc[x] == 'p')
1285 {
1286 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
1287 for (x = *struct_data++; x; --x, ++struct_data)
1288 RECURSE (SCM_PACK (*struct_data));
1289 else
1290 RECURSE (SCM_PACK (*struct_data));
1291 }
1292 }
1293 /* mark vtable */
1294 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1295 goto_gc_mark_loop;
1296 }
1297 break;
1298 case scm_tcs_closures:
1299 if (SCM_IMP (SCM_ENV (ptr)))
1300 {
1301 ptr = SCM_CLOSCAR (ptr);
1302 goto_gc_mark_nimp;
1303 }
1304 RECURSE (SCM_CLOSCAR (ptr));
1305 ptr = SCM_ENV (ptr);
1306 goto_gc_mark_nimp;
1307 case scm_tc7_vector:
1308 i = SCM_VECTOR_LENGTH (ptr);
1309 if (i == 0)
1310 break;
1311 while (--i > 0)
1312 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1313 RECURSE (SCM_VELTS (ptr)[i]);
1314 ptr = SCM_VELTS (ptr)[0];
1315 goto_gc_mark_loop;
1316 #ifdef CCLO
1317 case scm_tc7_cclo:
1318 {
1319 size_t i = SCM_CCLO_LENGTH (ptr);
1320 size_t j;
1321 for (j = 1; j != i; ++j)
1322 {
1323 SCM obj = SCM_CCLO_REF (ptr, j);
1324 if (!SCM_IMP (obj))
1325 RECURSE (obj);
1326 }
1327 ptr = SCM_CCLO_REF (ptr, 0);
1328 goto_gc_mark_loop;
1329 }
1330 #endif
1331 #ifdef HAVE_ARRAYS
1332 case scm_tc7_bvect:
1333 case scm_tc7_byvect:
1334 case scm_tc7_ivect:
1335 case scm_tc7_uvect:
1336 case scm_tc7_fvect:
1337 case scm_tc7_dvect:
1338 case scm_tc7_cvect:
1339 case scm_tc7_svect:
1340 #ifdef HAVE_LONG_LONGS
1341 case scm_tc7_llvect:
1342 #endif
1343 #endif
1344 case scm_tc7_string:
1345 break;
1346
1347 case scm_tc7_substring:
1348 ptr = SCM_CDR (ptr);
1349 goto_gc_mark_loop;
1350
1351 case scm_tc7_wvect:
1352 SCM_SET_WVECT_GC_CHAIN (ptr, scm_weak_vectors);
1353 scm_weak_vectors = ptr;
1354 if (SCM_IS_WHVEC_ANY (ptr))
1355 {
1356 long x;
1357 long len;
1358 int weak_keys;
1359 int weak_values;
1360
1361 len = SCM_VECTOR_LENGTH (ptr);
1362 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1363 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1364
1365 for (x = 0; x < len; ++x)
1366 {
1367 SCM alist;
1368 alist = SCM_VELTS (ptr)[x];
1369
1370 /* mark everything on the alist except the keys or
1371 * values, according to weak_values and weak_keys. */
1372 while ( SCM_CONSP (alist)
1373 && !SCM_GCMARKP (alist)
1374 && SCM_CONSP (SCM_CAR (alist)))
1375 {
1376 SCM kvpair;
1377 SCM next_alist;
1378
1379 kvpair = SCM_CAR (alist);
1380 next_alist = SCM_CDR (alist);
1381 /*
1382 * Do not do this:
1383 * SCM_SETGCMARK (alist);
1384 * SCM_SETGCMARK (kvpair);
1385 *
1386 * It may be that either the key or value is protected by
1387 * an escaped reference to part of the spine of this alist.
1388 * If we mark the spine here, and only mark one or neither of the
1389 * key and value, they may never be properly marked.
1390 * This leads to a horrible situation in which an alist containing
1391 * freelist cells is exported.
1392 *
		   * So we only mark the spines of these alists at the very
		   * end of marking.  If somebody confuses us by constructing
		   * a weak vector with a circular alist then we are hosed,
		   * but at least we won't prematurely drop table entries.
		   */
1398 if (!weak_keys)
1399 RECURSE (SCM_CAR (kvpair));
1400 if (!weak_values)
1401 RECURSE (SCM_CDR (kvpair));
1402 alist = next_alist;
1403 }
1404 if (SCM_NIMP (alist))
1405 RECURSE (alist);
1406 }
1407 }
1408 break;
1409
1410 case scm_tc7_symbol:
1411 ptr = SCM_PROP_SLOTS (ptr);
1412 goto_gc_mark_loop;
1413 case scm_tc7_variable:
1414 ptr = SCM_CELL_OBJECT_1 (ptr);
1415 goto_gc_mark_loop;
1416 case scm_tcs_subrs:
1417 break;
1418 case scm_tc7_port:
1419 i = SCM_PTOBNUM (ptr);
1420 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1421 if (!(i < scm_numptob))
1422 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1423 #endif
1424 if (SCM_PTAB_ENTRY(ptr))
1425 RECURSE (SCM_FILENAME (ptr));
1426 if (scm_ptobs[i].mark)
1427 {
1428 ptr = (scm_ptobs[i].mark) (ptr);
1429 goto_gc_mark_loop;
1430 }
1431 else
1432 return;
1433 break;
1434 case scm_tc7_smob:
1435 switch (SCM_TYP16 (ptr))
1436 { /* should be faster than going through scm_smobs */
1437 case scm_tc_free_cell:
	  /* We have detected a free cell.  This can happen if non-object data
	   * on the C stack points into Guile's heap and is scanned during
	   * conservative marking.  */
1441 #if (SCM_DEBUG_CELL_ACCESSES == 0)
1442 /* If cell debugging is disabled, there is a second situation in
1443 * which a free cell can be encountered, namely if with preemptive
1444 * threading one thread has just obtained a fresh cell and was
1445 * preempted before the cell initialization was completed. In this
1446 * case, some entries of the cell may already contain objects.
1447 * Thus, if cell debugging is disabled, free cells are scanned
1448 * conservatively. */
1449 scm_gc_mark_cell_conservatively (ptr);
1450 #else /* SCM_DEBUG_CELL_ACCESSES == 1 */
1451 /* With cell debugging enabled, a freshly obtained but not fully
1452 * initialized cell is guaranteed to be of type scm_tc16_allocated.
1453 * Thus, no conservative scanning for free cells is necessary, but
1454 * instead cells of type scm_tc16_allocated have to be scanned
1455 * conservatively. This is done in the mark function of the
1456 * scm_tc16_allocated smob type. */
1457 #endif
1458 break;
1459 case scm_tc16_big:
1460 case scm_tc16_real:
1461 case scm_tc16_complex:
1462 break;
1463 default:
1464 i = SCM_SMOBNUM (ptr);
1465 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1466 if (!(i < scm_numsmob))
1467 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1468 #endif
1469 if (scm_smobs[i].mark)
1470 {
1471 ptr = (scm_smobs[i].mark) (ptr);
1472 goto_gc_mark_loop;
1473 }
1474 else
1475 return;
1476 }
1477 break;
1478 default:
1479 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1480 }
1481 #undef RECURSE
1482 }
1483 #undef FUNC_NAME
1484
1485 #ifndef MARK_DEPENDENCIES
1486
1487 #undef MARK
1488 #undef FNAME
1489
1490 /* And here we define `scm_gc_mark_dependencies', by including this
1491 * same file in itself.
1492 */
1493 #define MARK scm_gc_mark_dependencies
1494 #define FNAME "scm_gc_mark_dependencies"
1495 #define MARK_DEPENDENCIES
1496 #include "gc.c"
1497 #undef MARK_DEPENDENCIES
1498 #undef MARK
1499 #undef FNAME
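
#if 0
/* A minimal sketch of the self-inclusion trick used above, as it would
 * look in a hypothetical file "walk.c" (illustrative only):  one function
 * body is compiled twice under two different names.  */
#ifndef SECOND_PASS
# define WALK walk_all
#else
# define WALK walk_marked_only
#endif

void
WALK (int x)
{
  /* shared body; code can test SECOND_PASS to vary its behavior */
}

#ifndef SECOND_PASS
# undef WALK
# define SECOND_PASS
# include "walk.c"
#endif
#endif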
1500
1501
/* Determine whether the given value does actually represent a cell in some
 * heap segment.  If this is the case, the number of the heap segment is
 * returned.  Otherwise, -1 is returned.  Binary search is used in order to
 * determine the heap segment that contains the cell. */
/* FIXME: To be used within scm_gc_mark_cell_conservatively,
 * scm_mark_locations and scm_cellp, this function should be an inline
 * function. */
1509 static long int
1510 heap_segment (SCM obj)
1511 {
1512 if (!SCM_CELLP (obj))
1513 return -1;
1514 else
1515 {
1516 SCM_CELLPTR ptr = SCM2PTR (obj);
1517 unsigned long int i = 0;
1518 unsigned long int j = scm_n_heap_segs - 1;
1519
1520 if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
1521 return -1;
1522 else if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
1523 return -1;
1524 else
1525 {
1526 while (i < j)
1527 {
1528 if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[1]))
1529 {
1530 break;
1531 }
1532 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1533 {
1534 i = j;
1535 break;
1536 }
1537 else
1538 {
1539 unsigned long int k = (i + j) / 2;
1540
1541 if (k == i)
1542 return -1;
1543 else if (SCM_PTR_LT (ptr, scm_heap_table[k].bounds[1]))
1544 {
1545 j = k;
1546 ++i;
1547 if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
1548 return -1;
1549 }
1550 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1551 {
1552 i = k;
1553 --j;
1554 if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
1555 return -1;
1556 }
1557 }
1558 }
1559
1560 if (!DOUBLECELL_ALIGNED_P (obj) && scm_heap_table[i].span == 2)
1561 return -1;
1562 else if (SCM_GC_IN_CARD_HEADERP (ptr))
1563 return -1;
1564 else
1565 return i;
1566 }
1567 }
1568 }
1569
1570
/* Mark the entries of a cell conservatively.  The given cell is known to be
 * on the heap.  Still we have to determine its heap segment in order to
 * figure out whether it is a single or a double cell.  Then, each of the
 * cell's elements is checked and potentially marked. */
1575 void
1576 scm_gc_mark_cell_conservatively (SCM cell)
1577 {
1578 unsigned long int cell_segment = heap_segment (cell);
1579 unsigned int span = scm_heap_table[cell_segment].span;
1580 unsigned int i;
1581
1582 for (i = 1; i != span * 2; ++i)
1583 {
1584 SCM obj = SCM_CELL_OBJECT (cell, i);
1585 long int obj_segment = heap_segment (obj);
1586 if (obj_segment >= 0)
1587 scm_gc_mark (obj);
1588 }
1589 }
1590
1591
1592 /* Mark a region conservatively */
1593 void
1594 scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
1595 {
1596 unsigned long m;
1597
1598 for (m = 0; m < n; ++m)
1599 {
1600 SCM obj = * (SCM *) &x[m];
1601 long int segment = heap_segment (obj);
1602 if (segment >= 0)
1603 scm_gc_mark (obj);
1604 }
1605 }
1606
1607
1608 /* The function scm_cellp determines whether an SCM value can be regarded as a
1609 * pointer to a cell on the heap.
1610 */
1611 int
1612 scm_cellp (SCM value)
1613 {
1614 long int segment = heap_segment (value);
1615 return (segment >= 0);
1616 }
1617
1618
1619 static void
1620 gc_sweep_freelist_start (scm_t_freelist *freelist)
1621 {
1622 freelist->cells = SCM_EOL;
1623 freelist->left_to_collect = freelist->cluster_size;
1624 freelist->clusters_allocated = 0;
1625 freelist->clusters = SCM_EOL;
1626 freelist->clustertail = &freelist->clusters;
1627 freelist->collected_1 = freelist->collected;
1628 freelist->collected = 0;
1629 }
1630
1631 static void
1632 gc_sweep_freelist_finish (scm_t_freelist *freelist)
1633 {
1634 long collected;
1635 *freelist->clustertail = freelist->cells;
1636 if (!SCM_NULLP (freelist->cells))
1637 {
1638 SCM c = freelist->cells;
1639 SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
1640 SCM_SET_CELL_WORD_1 (c, SCM_EOL);
1641 freelist->collected +=
1642 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1643 }
1644 scm_gc_cells_collected += freelist->collected;
1645
1646 /* Although freelist->min_yield is used to test freelist->collected
1647 * (which is the local GC yield for freelist), it is adjusted so
1648 * that *total* yield is freelist->min_yield_fraction of total heap
1649 * size. This means that a too low yield is compensated by more
1650 * heap on the list which is currently doing most work, which is
1651 * just what we want.
1652 */
1653 collected = SCM_MAX (freelist->collected_1, freelist->collected);
1654 freelist->grow_heap_p = (collected < freelist->min_yield);
1655 }
1656
1657 #define NEXT_DATA_CELL(ptr, span) \
1658 do { \
1659 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1660 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1661 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1662 : nxt__); \
1663 } while (0)
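
/* Illustration: if ptr points at the last data cell of a card, the CELL_UP
 * in the macro lands in the following card's header; the test then advances
 * ptr past that card's SCM_GC_CARD_N_HEADER_CELLS header cells to its first
 * data cell.  */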
1664
1665 void
1666 scm_gc_sweep ()
1667 #define FUNC_NAME "scm_gc_sweep"
1668 {
1669 register SCM_CELLPTR ptr;
1670 register SCM nfreelist;
1671 register scm_t_freelist *freelist;
1672 register unsigned long m;
1673 register int span;
1674 size_t i;
1675 size_t seg_size;
1676
1677 m = 0;
1678
1679 gc_sweep_freelist_start (&scm_master_freelist);
1680 gc_sweep_freelist_start (&scm_master_freelist2);
1681
1682 for (i = 0; i < scm_n_heap_segs; i++)
1683 {
1684 register long left_to_collect;
1685 register size_t j;
1686
1687 /* Unmarked cells go onto the front of the freelist this heap
1688 segment points to. Rather than updating the real freelist
1689 pointer as we go along, we accumulate the new head in
1690 nfreelist. Then, if it turns out that the entire segment is
1691 free, we free (i.e., malloc's free) the whole segment, and
1692 simply don't assign nfreelist back into the real freelist. */
1693 freelist = scm_heap_table[i].freelist;
1694 nfreelist = freelist->cells;
1695 left_to_collect = freelist->left_to_collect;
1696 span = scm_heap_table[i].span;
1697
1698 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1699 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1700
1701 /* use only data cells in seg_size */
1702 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1703
1704 scm_gc_cells_swept += seg_size;
1705
1706 for (j = seg_size + span; j -= span; ptr += span)
1707 {
1708 SCM scmptr;
1709
1710 if (SCM_GC_IN_CARD_HEADERP (ptr))
1711 {
1712 SCM_CELLPTR nxt;
1713
1714 /* cheat here */
1715 nxt = ptr;
1716 NEXT_DATA_CELL (nxt, span);
1717 j += span;
1718
1719 ptr = nxt - span;
1720 continue;
1721 }
1722
1723 scmptr = PTR2SCM (ptr);
1724
1725 if (SCM_GCMARKP (scmptr))
1726 continue;
1727
1728 switch SCM_TYP7 (scmptr)
1729 {
1730 case scm_tcs_struct:
1731 {
1732 /* Structs need to be freed in a special order.
1733 * This is handled by GC C hooks in struct.c.
1734 */
1735 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1736 scm_structs_to_free = scmptr;
1737 }
1738 continue;
1739 case scm_tcs_cons_imcar:
1740 case scm_tcs_cons_nimcar:
1741 case scm_tcs_closures:
1742 case scm_tc7_pws:
1743 break;
1744 case scm_tc7_wvect:
1745 case scm_tc7_vector:
1746 {
1747 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1748 if (length > 0)
1749 {
1750 m += length * sizeof (scm_t_bits);
1751 scm_must_free (SCM_VECTOR_BASE (scmptr));
1752 }
1753 break;
1754 }
1755 #ifdef CCLO
1756 case scm_tc7_cclo:
1757 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
1758 scm_must_free (SCM_CCLO_BASE (scmptr));
1759 break;
1760 #endif
1761 #ifdef HAVE_ARRAYS
1762 case scm_tc7_bvect:
1763 {
1764 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
1765 if (length > 0)
1766 {
1767 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1768 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1769 }
1770 }
1771 break;
1772 case scm_tc7_byvect:
1773 case scm_tc7_ivect:
1774 case scm_tc7_uvect:
1775 case scm_tc7_svect:
1776 #ifdef HAVE_LONG_LONGS
1777 case scm_tc7_llvect:
1778 #endif
1779 case scm_tc7_fvect:
1780 case scm_tc7_dvect:
1781 case scm_tc7_cvect:
1782 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
1783 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1784 break;
1785 #endif
1786 case scm_tc7_substring:
1787 break;
1788 case scm_tc7_string:
1789 m += SCM_STRING_LENGTH (scmptr) + 1;
1790 scm_must_free (SCM_STRING_CHARS (scmptr));
1791 break;
1792 case scm_tc7_symbol:
1793 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
1794 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
1795 break;
1796 case scm_tc7_variable:
1797 break;
1798 case scm_tcs_subrs:
1799 /* the various "subrs" (primitives) are never freed */
1800 continue;
1801 case scm_tc7_port:
1802 if SCM_OPENP (scmptr)
1803 {
1804 int k = SCM_PTOBNUM (scmptr);
1805 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1806 if (!(k < scm_numptob))
1807 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1808 #endif
1809 /* Keep "revealed" ports alive. */
1810 if (scm_revealed_count (scmptr) > 0)
1811 continue;
	      /* Yes, I really do mean scm_ptobs[k].free rather than
		 scm_ptobs[k].close:  .close is for an explicit
		 CLOSE-PORT by the user.  */
1815 m += (scm_ptobs[k].free) (scmptr);
1816 SCM_SETSTREAM (scmptr, 0);
1817 scm_remove_from_port_table (scmptr);
1818 scm_gc_ports_collected++;
1819 SCM_CLR_PORT_OPEN_FLAG (scmptr);
1820 }
1821 break;
1822 case scm_tc7_smob:
1823 switch SCM_TYP16 (scmptr)
1824 {
1825 case scm_tc_free_cell:
1826 case scm_tc16_real:
1827 break;
1828 #ifdef SCM_BIGDIG
1829 case scm_tc16_big:
1830 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1831 scm_must_free (SCM_BDIGITS (scmptr));
1832 break;
1833 #endif /* def SCM_BIGDIG */
1834 case scm_tc16_complex:
1835 m += sizeof (scm_t_complex);
1836 scm_must_free (SCM_COMPLEX_MEM (scmptr));
1837 break;
1838 default:
1839 {
1840 int k;
1841 k = SCM_SMOBNUM (scmptr);
1842 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1843 if (!(k < scm_numsmob))
1844 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1845 #endif
1846 if (scm_smobs[k].free)
1847 m += (scm_smobs[k].free) (scmptr);
1848 break;
1849 }
1850 }
1851 break;
1852 default:
1853 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1854 }
1855
1856 if (!--left_to_collect)
1857 {
1858 SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
1859 *freelist->clustertail = scmptr;
1860 freelist->clustertail = SCM_CDRLOC (scmptr);
1861
1862 nfreelist = SCM_EOL;
1863 freelist->collected += span * freelist->cluster_size;
1864 left_to_collect = freelist->cluster_size;
1865 }
1866 else
1867 {
1868 /* Stick the new cell on the front of nfreelist. It's
1869 critical that we mark this cell as freed; otherwise, the
1870 conservative collector might trace it as some other type
1871 of object. */
1872 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1873 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
1874 nfreelist = scmptr;
1875 }
1876 }
1877
1878 #ifdef GC_FREE_SEGMENTS
1879 if (n == seg_size)
1880 {
1881 register long j;
1882
1883 freelist->heap_size -= seg_size;
1884 free ((char *) scm_heap_table[i].bounds[0]);
1885 scm_heap_table[i].bounds[0] = 0;
1886 for (j = i + 1; j < scm_n_heap_segs; j++)
1887 scm_heap_table[j - 1] = scm_heap_table[j];
1888 scm_n_heap_segs -= 1;
1889 i--; /* We need to scan the segment just moved. */
1890 }
1891 else
1892 #endif /* ifdef GC_FREE_SEGMENTS */
1893 {
1894 /* Update the real freelist pointer to point to the head of
1895 the list of free cells we've built for this segment. */
1896 freelist->cells = nfreelist;
1897 freelist->left_to_collect = left_to_collect;
1898 }
1899
1900 #ifdef GUILE_DEBUG_FREELIST
1901 scm_map_free_list ();
1902 #endif
1903 }
1904
1905 gc_sweep_freelist_finish (&scm_master_freelist);
1906 gc_sweep_freelist_finish (&scm_master_freelist2);
1907
1908 /* When we move to POSIX threads, private freelists should probably
1909 be GC-protected instead. */
1910 scm_freelist = SCM_EOL;
1911 scm_freelist2 = SCM_EOL;
1912
1913 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1914 scm_gc_yield -= scm_cells_allocated;
1915
1916 if (scm_mallocated < m)
1917 /* The byte count of allocated objects has underflowed. This is
1918 probably because you forgot to report the sizes of objects you
1919 have allocated, by calling scm_done_malloc or some such. When
1920 the GC freed them, it subtracted their size from
1921 scm_mallocated, which underflowed. */
1922 abort ();
1923
1924 scm_mallocated -= m;
1925 scm_gc_malloc_collected = m;
1926 }
1927 #undef FUNC_NAME
1928
1929
1930 \f
1931 /* {Front end to malloc}
1932 *
1933 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1934 * scm_done_free
1935 *
1936 * These functions provide services comparable to malloc, realloc, and
1937 * free. They should be used when allocating memory that will be under
1938 * control of the garbage collector, i.e., if the memory may be freed
1939 * during garbage collection.
1940 */
1941
1942 /* scm_must_malloc
1943 * Return newly malloced storage or throw an error.
1944 *
1945 * The parameter WHAT is a string for error reporting.
1946 * If this allocation would push the total past the threshold
1947 * scm_mtrigger, or if the first call to malloc fails,
1948 * run the garbage collector -- on the presumption that some
1949 * objects using malloced storage may be collected.
1950 *
1951 * The limit scm_mtrigger may be raised by this allocation.
1952 */
1953 void *
1954 scm_must_malloc (size_t size, const char *what)
1955 {
1956 void *ptr;
1957 unsigned long nm = scm_mallocated + size;
1958
1959 if (nm < size)
1960 /* The byte count of allocated objects has overflowed. This is
1961 probably because you forgot to report the correct size of freed
1962 memory in some of your smob free methods. */
1963 abort ();
1964
1965 if (nm <= scm_mtrigger)
1966 {
1967 SCM_SYSCALL (ptr = malloc (size));
1968 if (NULL != ptr)
1969 {
1970 scm_mallocated = nm;
1971 #ifdef GUILE_DEBUG_MALLOC
1972 scm_malloc_register (ptr, what);
1973 #endif
1974 return ptr;
1975 }
1976 }
1977
1978 scm_igc (what);
1979
1980 nm = scm_mallocated + size;
1981
1982 if (nm < size)
1983 /* The byte count of allocated objects has overflowed. This is
1984 probably because you forgot to report the correct size of freed
1985 memory in some of your smob free methods. */
1986 abort ();
1987
1988 SCM_SYSCALL (ptr = malloc (size));
1989 if (NULL != ptr)
1990 {
1991 scm_mallocated = nm;
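      /* nm is within SCM_MTRIGGER_HYSTERESIS bytes of scm_mtrigger (or
         already past it): raise the trigger by about half again so that
         this allocation does not leave us on the verge of another gc. */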
1992 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1993 if (nm > scm_mtrigger)
1994 scm_mtrigger = nm + nm / 2;
1995 else
1996 scm_mtrigger += scm_mtrigger / 2;
1997 }
1998 #ifdef GUILE_DEBUG_MALLOC
1999 scm_malloc_register (ptr, what);
2000 #endif
2001
2002 return ptr;
2003 }
2004
2005 scm_memory_error (what);
2006 }
2007
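/* A minimal usage sketch (illustrative; not part of the original file):
 *
 *   char *buf = scm_must_malloc (len + 1, "my-buffer");
 *
 * Here `len' and the WHAT string "my-buffer" are hypothetical; the WHAT
 * string is only used for error reporting if the allocation still fails
 * after a garbage collection has been tried.
 */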
2008
2009 /* scm_must_realloc
2010 * is similar to scm_must_malloc.
2011 */
2012 void *
2013 scm_must_realloc (void *where,
2014 size_t old_size,
2015 size_t size,
2016 const char *what)
2017 {
2018 void *ptr;
2019 unsigned long nm;
2020
2021 if (size <= old_size)
2022 return where;
2023
2024 nm = scm_mallocated + size - old_size;
2025
2026 if (nm < (size - old_size))
2027 /* The byte count of allocated objects has overflowed. This is
2028 probably because you forgot to report the correct size of freed
2029 memory in some of your smob free methods. */
2030 abort ();
2031
2032 if (nm <= scm_mtrigger)
2033 {
2034 SCM_SYSCALL (ptr = realloc (where, size));
2035 if (NULL != ptr)
2036 {
2037 scm_mallocated = nm;
2038 #ifdef GUILE_DEBUG_MALLOC
2039 scm_malloc_reregister (where, ptr, what);
2040 #endif
2041 return ptr;
2042 }
2043 }
2044
2045 scm_igc (what);
2046
2047 nm = scm_mallocated + size - old_size;
2048
2049 if (nm < (size - old_size))
2050 /* The byte count of allocated objects has overflowed. This is
2051 probably because you forgot to report the correct size of freed
2052 memory in some of your smob free methods. */
2053 abort ();
2054
2055 SCM_SYSCALL (ptr = realloc (where, size));
2056 if (NULL != ptr)
2057 {
2058 scm_mallocated = nm;
2059 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2060 if (nm > scm_mtrigger)
2061 scm_mtrigger = nm + nm / 2;
2062 else
2063 scm_mtrigger += scm_mtrigger / 2;
2064 }
2065 #ifdef GUILE_DEBUG_MALLOC
2066 scm_malloc_reregister (where, ptr, what);
2067 #endif
2068 return ptr;
2069 }
2070
2071 scm_memory_error (what);
2072 }
2073
2074 char *
2075 scm_must_strndup (const char *str, size_t length)
2076 {
2077 char * dst = scm_must_malloc (length + 1, "scm_must_strndup");
2078 memcpy (dst, str, length);
2079 dst[length] = 0;
2080 return dst;
2081 }
2082
2083 char *
2084 scm_must_strdup (const char *str)
2085 {
2086 return scm_must_strndup (str, strlen (str));
2087 }
2088
2089 void
2090 scm_must_free (void *obj)
2091 #define FUNC_NAME "scm_must_free"
2092 {
2093 #ifdef GUILE_DEBUG_MALLOC
2094 scm_malloc_unregister (obj);
2095 #endif
2096 if (obj)
2097 free (obj);
2098 else
2099 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
2100 }
2101 #undef FUNC_NAME
2102
2103
2104 /* Announce that there has been some malloc done that will be freed
2105 * during gc. A typical use is for a smob that uses some malloced
2106 * memory but cannot get it from scm_must_malloc (for whatever
2107 * reason). When a new object of this smob is created, call
2108 * scm_done_malloc with the size of the object. When your smob free
2109 * function is called, be sure to include this size in the return
2110 * value.
2111 *
2112 * If you can't actually free the memory in the smob free function,
2113 * for whatever reason (like reference counting), you still can (and
2114 * should) report the amount of memory freed when you actually free it.
2115 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2116 * eh? Or even better, call scm_done_free. */
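
/* An illustrative sketch of the accounting described above, for a
 * hypothetical smob whose data is allocated with plain malloc:
 *
 *   data = malloc (sizeof (struct my_data));
 *   scm_done_malloc (sizeof (struct my_data));   // announce it to the gc
 *
 * and, in the smob's free function:
 *
 *   static size_t
 *   my_smob_free (SCM obj)
 *   {
 *     free ((void *) SCM_SMOB_DATA (obj));
 *     return sizeof (struct my_data);   // include the size in the return value
 *   }
 */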
2117
2118 void
2119 scm_done_malloc (long size)
2120 {
2121 if (size < 0) {
2122 if (scm_mallocated < (unsigned long) -size)
2123 /* The byte count of allocated objects has underflowed. This is
2124 probably because you forgot to report the sizes of objects you
2125 have allocated, by calling scm_done_malloc or some such. When
2126 the GC freed them, it subtracted their size from
2127 scm_mallocated, which underflowed. */
2128 abort ();
2129 } else {
2130 unsigned long nm = scm_mallocated + size;
2131 if (nm < size)
2132 /* The byte count of allocated objects has overflowed. This is
2133 probably because you forgot to report the correct size of freed
2134 memory in some of your smob free methods. */
2135 abort ();
2136 }
2137
2138 scm_mallocated += size;
2139
2140 if (scm_mallocated > scm_mtrigger)
2141 {
2142 scm_igc ("foreign mallocs");
2143 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2144 {
2145 if (scm_mallocated > scm_mtrigger)
2146 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2147 else
2148 scm_mtrigger += scm_mtrigger / 2;
2149 }
2150 }
2151 }
2152
2153 void
2154 scm_done_free (long size)
2155 {
2156 if (size >= 0) {
2157 if (scm_mallocated < size)
2158 /* The byte count of allocated objects has underflowed. This is
2159 probably because you forgot to report the sizes of objects you
2160 have allocated, by calling scm_done_malloc or some such. When
2161 the GC freed them, it subtracted their size from
2162 scm_mallocated, which underflowed. */
2163 abort ();
2164 } else {
2165 unsigned long nm = scm_mallocated - size;
2166 if (nm < scm_mallocated)
2167 /* The byte count of allocated objects has overflowed. This is
2168 probably because you forgot to report the correct size of freed
2169 memory in some of your smob free methods. */
2170 abort ();
2171 }
2172
2173 scm_mallocated -= size;
2174 }
2175
2176
2177 \f
2178 /* {Heap Segments}
2179 *
2180 * Each heap segment is an array of objects of a particular size.
2181 * Every segment has an associated (possibly shared) freelist.
2182 * A table of segment records is kept that records the upper and
2183 * lower extents of the segment; this is used during the conservative
2184 * phase of gc to identify probable gc roots (because they point
2185 * into valid segments at reasonable offsets). */
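
/* As an illustration (a sketch, not code from this file), deciding
 * whether a word w found on the stack might be such a root amounts to a
 * bounds check against this table, roughly:
 *
 *   size_t i;
 *   for (i = 0; i < scm_n_heap_segs; i++)
 *     if (SCM_PTR_LE (scm_heap_table[i].bounds[0], (SCM_CELLPTR) w)
 *         && !SCM_PTR_LE (scm_heap_table[i].bounds[1], (SCM_CELLPTR) w))
 *       mark_probable_root (w);   // hypothetical helper
 */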
2186
2187 /* scm_expmem
2188 * is true if the first segment was smaller than INIT_HEAP_SEG.
2189 * If scm_expmem is set to one, subsequent segment allocations will
2190 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2191 */
2192 int scm_expmem = 0;
2193
2194 size_t scm_max_segment_size;
2195
2196 /* scm_heap_org
2197 * is the lowest base address of any heap segment.
2198 */
2199 SCM_CELLPTR scm_heap_org;
2200
2201 scm_t_heap_seg_data * scm_heap_table = 0;
2202 static size_t heap_segment_table_size = 0;
2203 size_t scm_n_heap_segs = 0;
2204
2205 /* init_heap_seg
2206 * initializes a new heap segment and returns its size in bytes, or 0 on failure.
2207 *
2208 * The segment origin and segment size in bytes are input parameters.
2209 * The freelist is both input and output.
2210 *
2211 * This function presumes that the scm_heap_table has already been expanded
2212 * to accommodate a new segment record and that the markbit space was reserved
2213 * for all the cards in this segment.
2214 */
2215
2216 #define INIT_CARD(card, span) \
2217 do { \
2218 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2219 if ((span) == 2) \
2220 SCM_GC_SET_CARD_DOUBLECELL (card); \
2221 } while (0)
2222
2223 static size_t
2224 init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
2225 {
2226 register SCM_CELLPTR ptr;
2227 SCM_CELLPTR seg_end;
2228 size_t new_seg_index;
2229 ptrdiff_t n_new_cells;
2230 int span = freelist->span;
2231
2232 if (seg_org == NULL)
2233 return 0;
2234
2235 /* Align the begin ptr up.
2236 */
2237 ptr = SCM_GC_CARD_UP (seg_org);
2238
2239 /* Compute the ceiling on valid object pointers w/in this segment.
2240 */
2241 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
2242
2243 /* Find the right place and insert the segment record.
2244 */
2245 new_seg_index = 0;
2246 while (new_seg_index < scm_n_heap_segs
2247 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org))
2248 new_seg_index++;
2249
2250 {
2251 int i;
2252 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2253 scm_heap_table[i] = scm_heap_table[i - 1];
2254 }
2255
2256 ++scm_n_heap_segs;
2257
2258 scm_heap_table[new_seg_index].span = span;
2259 scm_heap_table[new_seg_index].freelist = freelist;
2260 scm_heap_table[new_seg_index].bounds[0] = ptr;
2261 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2262
2263 /*n_new_cells*/
2264 n_new_cells = seg_end - ptr;
2265
2266 freelist->heap_size += n_new_cells;
2267
2268 /* Partition objects in this segment into clusters */
2269 {
2270 SCM clusters;
2271 SCM *clusterp = &clusters;
2272
2273 NEXT_DATA_CELL (ptr, span);
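    /* Each cluster occupies a contiguous run of cells: a spine cell,
       whose CAR points at the cluster's first data cell and whose CDR
       links to the next cluster, followed by the free data cells
       themselves, chained through their CDRs. */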
2274 while (ptr < seg_end)
2275 {
2276 scm_cell *nxt = ptr;
2277 scm_cell *prv = NULL;
2278 scm_cell *last_card = NULL;
2279 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2280 NEXT_DATA_CELL(nxt, span);
2281
2282 /* Allocate cluster spine
2283 */
2284 *clusterp = PTR2SCM (ptr);
2285 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
2286 clusterp = SCM_CDRLOC (*clusterp);
2287 ptr = nxt;
2288
2289 while (n_data_cells--)
2290 {
2291 scm_cell *card = SCM_GC_CELL_CARD (ptr);
2292 SCM scmptr = PTR2SCM (ptr);
2293 nxt = ptr;
2294 NEXT_DATA_CELL (nxt, span);
2295 prv = ptr;
2296
2297 if (card != last_card)
2298 {
2299 INIT_CARD (card, span);
2300 last_card = card;
2301 }
2302
2303 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2304 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
2305
2306 ptr = nxt;
2307 }
2308
2309 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
2310 }
2311
2312 /* sanity check */
2313 {
2314 scm_cell *ref = seg_end;
2315 NEXT_DATA_CELL (ref, span);
2316 if (ref != ptr)
2317 /* [cmm] looks like the segment size doesn't divide cleanly by
2318 cluster size. bad cmm! */
2319 abort();
2320 }
2321
2322 /* Patch up the last cluster pointer in the segment
2323 * to join it to the input freelist.
2324 */
2325 *clusterp = freelist->clusters;
2326 freelist->clusters = clusters;
2327 }
2328
2329 #ifdef DEBUGINFO
2330 fprintf (stderr, "H");
2331 #endif
2332 return size;
2333 }
2334
2335 static size_t
2336 round_to_cluster_size (scm_t_freelist *freelist, size_t len)
2337 {
2338 size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2339
2340 return
2341 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2342 + ALIGNMENT_SLACK (freelist);
2343 }
2344
2345 static void
2346 alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
2347 #define FUNC_NAME "alloc_some_heap"
2348 {
2349 SCM_CELLPTR ptr;
2350 size_t len;
2351
2352 if (scm_gc_heap_lock)
2353 {
2354 /* Critical code sections (such as the garbage collector) aren't
2355 * supposed to add heap segments.
2356 */
2357 fprintf (stderr, "alloc_some_heap: Cannot extend locked heap.\n");
2358 abort ();
2359 }
2360
2361 if (scm_n_heap_segs == heap_segment_table_size)
2362 {
2363 /* We have to expand the heap segment table to have room for the new
2364 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2365 * init_heap_seg only if the allocation of the segment itself succeeds.
2366 */
2367 size_t new_table_size = scm_n_heap_segs + 1;
2368 size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
2369 scm_t_heap_seg_data *new_heap_table;
2370
2371 SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
2372 realloc ((char *)scm_heap_table, size)));
2373 if (!new_heap_table)
2374 {
2375 if (error_policy == abort_on_error)
2376 {
2377 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2378 abort ();
2379 }
2380 else
2381 {
2382 return;
2383 }
2384 }
2385 else
2386 {
2387 scm_heap_table = new_heap_table;
2388 heap_segment_table_size = new_table_size;
2389 }
2390 }
2391
2392 /* Pick a size for the new heap segment.
2393 * The rule for picking the size of a segment is explained in
2394 * gc.h
2395 */
2396 {
2397 /* Ensure that the new segment is predicted to be large enough.
2398 *
2399 * New yield should at least equal GC fraction of new heap size, i.e.
2400 *
2401 * y + dh > f * (h + dh)
2402 *
2403 * y : yield
2404 * f : min yield fraction
2405 * h : heap size
2406 * dh : size of new heap segment
2407 *
2408 * This gives dh > (f * h - y) / (1 - f)
2409 */
2410 int f = freelist->min_yield_fraction;
2411 unsigned long h = SCM_HEAP_SIZE;
2412 size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2413 len = SCM_EXPHEAP (freelist->heap_size);
2414 #ifdef DEBUGINFO
2415 fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
2416 #endif
2417 if (len < min_cells)
2418 len = min_cells + freelist->cluster_size;
2419 len *= sizeof (scm_cell);
2420 /* force new sampling */
2421 freelist->collected = LONG_MAX;
2422 }
2423
2424 if (len > scm_max_segment_size)
2425 len = scm_max_segment_size;
2426
2427 {
2428 size_t smallest;
2429
2430 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2431
2432 if (len < smallest)
2433 len = smallest;
2434
2435 /* Allocate with decaying ambition. */
2436 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2437 && (len >= smallest))
2438 {
2439 size_t rounded_len = round_to_cluster_size (freelist, len);
2440 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2441 if (ptr)
2442 {
2443 init_heap_seg (ptr, rounded_len, freelist);
2444 return;
2445 }
2446 len /= 2;
2447 }
2448 }
2449
2450 if (error_policy == abort_on_error)
2451 {
2452 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2453 abort ();
2454 }
2455 }
2456 #undef FUNC_NAME
2457
2458 \f
2459 /* {GC Protection Helper Functions}
2460 */
2461
2462
2463 /*
2464 * If within a function you need to protect one or more scheme objects from
2465 * garbage collection, pass them as parameters to one of the
2466 * scm_remember_upto_here* functions below. These functions don't do
2467 * anything, but since the compiler does not know that they are actually
2468 * no-ops, it will generate code that calls them with the given
2469 * parameters. Therefore, you can be sure that the compiler will keep those
2470 * scheme values alive (on the stack or in a register) up to the point where
2471 * scm_remember_upto_here* is called. In other words, place the call to
2472 * scm_remember_upto_here* _after_ the last code in your function that
2473 * depends on the scheme object existing.
2474 *
2475 * Example: We want to make sure that the string object str does not get
2476 * garbage collected during the execution of 'some_function', because
2477 * otherwise the characters belonging to str would be freed and
2478 * 'some_function' might access freed memory. To make sure that the compiler
2479 * keeps str alive on the stack or in a register, such that it is visible to
2480 * the conservative gc, we add the call to scm_remember_upto_here_1 _after_
2481 * the call to 'some_function'. Note that this would not be necessary if str
2482 * were used anyway after the call to 'some_function'.
2483 * char *chars = SCM_STRING_CHARS (str);
2484 * some_function (chars);
2485 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2486 */
2487
2488 void
2489 scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
2490 {
2491 /* Empty. Protects a single object from garbage collection. */
2492 }
2493
2494 void
2495 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
2496 {
2497 /* Empty. Protects two objects from garbage collection. */
2498 }
2499
2500 void
2501 scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
2502 {
2503 /* Empty. Protects any number of objects from garbage collection. */
2504 }
2505
2506
2507 #if (SCM_DEBUG_DEPRECATED == 0)
2508
2509 void
2510 scm_remember (SCM *ptr)
2511 {
2512 scm_c_issue_deprecation_warning ("`scm_remember' is deprecated. "
2513 "Use the `scm_remember_upto_here*' family of functions instead.");
2514 }
2515
2516 SCM
2517 scm_protect_object (SCM obj)
2518 {
2519 scm_c_issue_deprecation_warning ("`scm_protect_object' is deprecated. "
2520 "Use `scm_gc_protect_object' instead.");
2521 return scm_gc_protect_object (obj);
2522 }
2523
2524 SCM
2525 scm_unprotect_object (SCM obj)
2526 {
2527 scm_c_issue_deprecation_warning ("`scm_unprotect_object' is deprecated. "
2528 "Use `scm_gc_unprotect_object' instead.");
2529 return scm_gc_unprotect_object (obj);
2530 }
2531
2532 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2533
2534 /*
2535 These crazy functions prevent garbage collection
2536 of arguments after the first argument by
2537 ensuring they remain live throughout the
2538 function because they are used in the last
2539 line of the code block.
2540 It'd be better to have a nice compiler hint to
2541 aid the conservative stack-scanning GC. --03/09/00 gjb */
2542 SCM
2543 scm_return_first (SCM elt, ...)
2544 {
2545 return elt;
2546 }
2547
2548 int
2549 scm_return_first_int (int i, ...)
2550 {
2551 return i;
2552 }
2553
2554
2555 SCM
2556 scm_permanent_object (SCM obj)
2557 {
2558 SCM_REDEFER_INTS;
2559 scm_permobjs = scm_cons (obj, scm_permobjs);
2560 SCM_REALLOW_INTS;
2561 return obj;
2562 }
2563
2564
2565 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2566 other references are dropped, until the object is unprotected by calling
2567 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
2568 i.e., it is possible to protect the same object several times, but it is
2569 necessary to unprotect the object the same number of times to actually get
2570 the object unprotected. It is an error to unprotect an object more often
2571 than it has been protected before. The function scm_gc_protect_object
2572 returns OBJ.
2573 */
2574
2575 /* Implementation note: For every object X, there is a counter which
2576 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
2577 */
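
/* Example (illustrative): since protection calls nest, an object stays
 * protected until its count drops back to zero:
 *
 *   SCM obj = scm_cons (SCM_MAKINUM (1), SCM_EOL);
 *   scm_gc_protect_object (obj);     // count: 1
 *   scm_gc_protect_object (obj);     // count: 2
 *   scm_gc_unprotect_object (obj);   // count: 1 -- still protected
 *   scm_gc_unprotect_object (obj);   // count: 0 -- may be collected again
 */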
2578
2579 SCM
2580 scm_gc_protect_object (SCM obj)
2581 {
2582 SCM handle;
2583
2584 /* This critical section barrier will be replaced by a mutex. */
2585 SCM_REDEFER_INTS;
2586
2587 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2588 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2589
2590 SCM_REALLOW_INTS;
2591
2592 return obj;
2593 }
2594
2595
2596 /* Remove any protection for OBJ established by a prior call to
2597 scm_gc_protect_object. This function returns OBJ.
2598
2599 See scm_gc_protect_object for more information. */
2600 SCM
2601 scm_gc_unprotect_object (SCM obj)
2602 {
2603 SCM handle;
2604
2605 /* This critical section barrier will be replaced by a mutex. */
2606 SCM_REDEFER_INTS;
2607
2608 handle = scm_hashq_get_handle (scm_protects, obj);
2609
2610 if (SCM_FALSEP (handle))
2611 {
2612 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2613 abort ();
2614 }
2615 else
2616 {
2617 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2618 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2619 scm_hashq_remove_x (scm_protects, obj);
2620 else
2621 SCM_SETCDR (handle, count);
2622 }
2623
2624 SCM_REALLOW_INTS;
2625
2626 return obj;
2627 }
2628
2629 void
2630 scm_gc_register_root (SCM *p)
2631 {
2632 SCM handle;
2633 SCM key = scm_long2num ((long) p);
2634
2635 /* This critical section barrier will be replaced by a mutex. */
2636 SCM_REDEFER_INTS;
2637
2638 handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
2639 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2640
2641 SCM_REALLOW_INTS;
2642 }
2643
2644 void
2645 scm_gc_unregister_root (SCM *p)
2646 {
2647 SCM handle;
2648 SCM key = scm_long2num ((long) p);
2649
2650 /* This critical section barrier will be replaced by a mutex. */
2651 SCM_REDEFER_INTS;
2652
2653 handle = scm_hashv_get_handle (scm_gc_registered_roots, key);
2654
2655 if (SCM_FALSEP (handle))
2656 {
2657 fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
2658 abort ();
2659 }
2660 else
2661 {
2662 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2663 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2664 scm_hashv_remove_x (scm_gc_registered_roots, key);
2665 else
2666 SCM_SETCDR (handle, count);
2667 }
2668
2669 SCM_REALLOW_INTS;
2670 }
2671
2672 void
2673 scm_gc_register_roots (SCM *b, unsigned long n)
2674 {
2675 SCM *p = b;
2676 for (; p < b + n; ++p)
2677 scm_gc_register_root (p);
2678 }
2679
2680 void
2681 scm_gc_unregister_roots (SCM *b, unsigned long n)
2682 {
2683 SCM *p = b;
2684 for (; p < b + n; ++p)
2685 scm_gc_unregister_root (p);
2686 }
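
/* Illustrative use (hypothetical variable name): making a global C
 * variable that holds an SCM value visible to the collector:
 *
 *   static SCM my_global = SCM_BOOL_F;
 *   ...
 *   scm_gc_register_root (&my_global);     // contents are now traced
 *   my_global = scm_cons (SCM_MAKINUM (1), SCM_EOL);
 *   ...
 *   scm_gc_unregister_root (&my_global);   // contents may be collected
 */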
2687
2688 int terminating;
2689
2690 /* called on process termination. */
2691 #ifdef HAVE_ATEXIT
2692 static void
2693 cleanup (void)
2694 #else
2695 #ifdef HAVE_ON_EXIT
2696 extern int on_exit (void (*procp) (), int arg);
2697
2698 static void
2699 cleanup (int status, void *arg)
2700 #else
2701 #error Dont know how to set up a cleanup handler on your system.
2702 #endif
2703 #endif
2704 {
2705 terminating = 1;
2706 scm_flush_all_ports ();
2707 }
2708
2709 \f
2710 static int
2711 make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
2712 {
2713 size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);
2714
2715 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2716 rounded_size,
2717 freelist))
2718 {
2719 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2720 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2721 rounded_size,
2722 freelist))
2723 return 1;
2724 }
2725 else
2726 scm_expmem = 1;
2727
2728 if (freelist->min_yield_fraction)
2729 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2730 / 100);
2731 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2732
2733 return 0;
2734 }
2735
2736 \f
2737 static void
2738 init_freelist (scm_t_freelist *freelist,
2739 int span,
2740 long cluster_size,
2741 int min_yield)
2742 {
2743 freelist->clusters = SCM_EOL;
2744 freelist->cluster_size = cluster_size + 1;
2745 freelist->left_to_collect = 0;
2746 freelist->clusters_allocated = 0;
2747 freelist->min_yield = 0;
2748 freelist->min_yield_fraction = min_yield;
2749 freelist->span = span;
2750 freelist->collected = 0;
2751 freelist->collected_1 = 0;
2752 freelist->heap_size = 0;
2753 }
2754
2755
2756 /* Get an integer from an environment variable. */
2757 static int
2758 scm_i_getenv_int (const char *var, int def)
2759 {
2760 char *end, *val = getenv (var);
2761 long res;
2762 if (!val)
2763 return def;
2764 res = strtol (val, &end, 10);
2765 if (end == val)
2766 return def;
2767 return res;
2768 }
2769
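/* The GUILE_* environment variables read in scm_init_storage below can
 * be used to tune the collector at startup; the values here are only
 * examples:
 *
 *   GUILE_INIT_SEGMENT_SIZE_1=131072 GUILE_MIN_YIELD_1=60 guile
 *
 * A variable that is unset, or that does not parse as an integer, falls
 * back to the compiled-in default (see scm_i_getenv_int above).
 */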
2770
2771 int
2772 scm_init_storage ()
2773 {
2774 unsigned long gc_trigger_1;
2775 unsigned long gc_trigger_2;
2776 size_t init_heap_size_1;
2777 size_t init_heap_size_2;
2778 size_t j;
2779
2780 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2781 scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
2782 scm_set_smob_mark (scm_tc16_allocated, allocated_mark);
2783 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
2784
2785 j = SCM_NUM_PROTECTS;
2786 while (j)
2787 scm_sys_protects[--j] = SCM_BOOL_F;
2788 scm_block_gc = 1;
2789
2790 scm_freelist = SCM_EOL;
2791 scm_freelist2 = SCM_EOL;
2792 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2793 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2794 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2795 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2796 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
2797
2798 scm_expmem = 0;
2799
2800 j = SCM_HEAP_SEG_SIZE;
2801 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2802 scm_heap_table = ((scm_t_heap_seg_data *)
2803 scm_must_malloc (sizeof (scm_t_heap_seg_data) * 2, "hplims"));
2804 heap_segment_table_size = 2;
2805
2806 mark_space_ptr = &mark_space_head;
2807
2808 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2809 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
2810 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2811 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2812 return 1;
2813
2814 /* scm_heap_table[0].bounds[0] can change. Do not remove scm_heap_org. */
2815 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2816
2817 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2818 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2819 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2820 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2821 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2822
2823 /* Initialise the list of ports. */
2824 scm_port_table = (scm_t_port **)
2825 malloc (sizeof (scm_t_port *) * scm_port_table_room);
2826 if (!scm_port_table)
2827 return 1;
2828
2829 #ifdef HAVE_ATEXIT
2830 atexit (cleanup);
2831 #else
2832 #ifdef HAVE_ON_EXIT
2833 on_exit (cleanup, 0);
2834 #endif
2835 #endif
2836
2837 scm_stand_in_procs = SCM_EOL;
2838 scm_permobjs = SCM_EOL;
2839 scm_protects = scm_c_make_hash_table (31);
2840 scm_gc_registered_roots = scm_c_make_hash_table (31);
2841
2842 return 0;
2843 }
2844
2845 \f
2846
2847 SCM scm_after_gc_hook;
2848
2849 static SCM gc_async;
2850
2851 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2852 * is run after the gc, as soon as the asynchronous events are handled by the
2853 * evaluator.
2854 */
2855 static SCM
2856 gc_async_thunk (void)
2857 {
2858 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2859 return SCM_UNSPECIFIED;
2860 }
2861
2862
2863 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2864 * the garbage collection. The only purpose of this function is to mark the
2865 * gc_async (which will eventually lead to the execution of the
2866 * gc_async_thunk).
2867 */
2868 static void *
2869 mark_gc_async (void * hook_data SCM_UNUSED,
2870 void *func_data SCM_UNUSED,
2871 void *data SCM_UNUSED)
2872 {
2873 /* If cell access debugging is enabled, the user may choose to perform
2874 * additional garbage collections after an arbitrary number of cell
2875 * accesses. We don't want the scheme level after-gc-hook to be performed
2876 * for each of these garbage collections, for the following reason: the
2877 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
2878 * after-gc-hook were performed with every gc, and if the gc were performed
2879 * after a very small number of cell accesses, then the number of cell
2880 * accesses during the execution of the after-gc-hook would suffice to cause
2881 * the execution of the next gc. Guile would then keep executing the
2882 * after-gc-hook over and over again, and would never get around to doing
2883 * other things.
2884 *
2885 * To overcome this problem, if cell access debugging with additional
2886 * garbage collections is enabled, the after-gc-hook is never run by the
2887 * garbage collector. When running guile with cell access debugging and the
2888 * execution of the after-gc-hook is desired, it is necessary to run the
2889 * hook explicitly from user code. The effect is that, from the scheme
2890 * level point of view, garbage collection seems to be performed with a
2891 * much lower frequency than it actually is. Obviously, this will not work
2892 * for code that depends on a fixed one-to-one relationship between the
2893 * execution counts of the C level garbage collection hooks and the
2894 * execution count of the scheme level
2895 * after-gc-hook.
2896 */
2897 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2898 if (debug_cells_gc_interval == 0)
2899 scm_system_async_mark (gc_async);
2900 #else
2901 scm_system_async_mark (gc_async);
2902 #endif
2903
2904 return NULL;
2905 }
2906
2907
2908 void
2909 scm_init_gc ()
2910 {
2911 SCM after_gc_thunk;
2912
2913 scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
2914 scm_c_define ("after-gc-hook", scm_after_gc_hook);
2915
2916 after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
2917 gc_async_thunk);
2918 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
2919
2920 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2921
2922 #ifndef SCM_MAGIC_SNARFER
2923 #include "libguile/gc.x"
2924 #endif
2925 }
2926
2927 #endif /*MARK_DEPENDENCIES*/
2928
2929 /*
2930 Local Variables:
2931 c-file-style: "gnu"
2932 End:
2933 */