/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice. */


/* #define DEBUGINFO */

/* SECTION: This code is compiled once.
 */

#ifndef MARK_DEPENDENCIES

\f
#include <stdio.h>
#include <errno.h>
#include <string.h>

#ifdef __ia64__
#include <ucontext.h>
extern unsigned long * __libc_ia64_register_backing_store_base;
#endif

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f

#define CELL_P(x)  (SCM_ITAG3 (x) == scm_tc3_cons)

unsigned int scm_gc_running_p = 0;

\f

#if (SCM_DEBUG_CELL_ACCESSES == 1)

scm_t_bits scm_tc16_allocated;

/* Set this to a non-zero value if every cell that is accessed shall be
 * checked:
 */
unsigned int scm_debug_cell_accesses_p = 1;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
static unsigned int debug_cells_gc_interval = 0;


/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves determining whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment, and whether the cell
 * pointed to is not a free cell.  Further, additional garbage collections
 * may be executed after a user-defined number of cell accesses.  This helps
 * to find places in the C code where references are dropped for extremely
 * short periods.
 */
void
scm_assert_cell_valid (SCM cell)
{
  static unsigned int already_running = 0;

  if (scm_debug_cell_accesses_p && !already_running)
    {
      already_running = 1;  /* set to avoid recursion */

      if (!scm_cellp (cell))
        {
          fprintf (stderr, "scm_assert_cell_valid: Not a cell object: 0x%lx\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }
      else if (!scm_gc_running_p)
        {
          /* Dirk::FIXME:: During garbage collection, references to free
             cells occur.  This is all right during conservative marking,
             but should not happen otherwise (I think).  The case of free
             cells accessed during conservative marking is handled in
             function scm_mark_locations.  However, there still occur
             accesses to free cells during gc.  I don't understand why this
             happens.  If it is a bug and gets fixed, the following test
             should also work while gc is running.
           */
          if (SCM_FREE_CELL_P (cell))
            {
              fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: 0x%lx\n",
                       (unsigned long) SCM_UNPACK (cell));
              abort ();
            }

          /* If desired, perform additional garbage collections after a
           * user-defined number of cell accesses.
           */
          if (debug_cells_gc_interval)
            {
              static unsigned int counter = 0;

              if (counter != 0)
                {
                  --counter;
                }
              else
                {
                  counter = debug_cells_gc_interval;
                  scm_igc ("scm_assert_cell_valid");
                }
            }
        }
      already_running = 0;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (SCM_FALSEP (flag)) {
    scm_debug_cell_accesses_p = 0;
  } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
    debug_cells_gc_interval = 0;
    scm_debug_cell_accesses_p = 1;
  } else if (SCM_INUMP (flag)) {
    long int f = SCM_INUM (flag);
    if (f <= 0)
      SCM_OUT_OF_RANGE (1, flag);
    debug_cells_gc_interval = f;
    scm_debug_cell_accesses_p = 1;
  } else {
    SCM_WRONG_TYPE_ARG (1, flag);
  }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
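
/* Illustrative usage from Scheme (hypothetical session; the procedure is
 * only bound when SCM_DEBUG_CELL_ACCESSES is 1, per the docstring above):
 *
 *   (set-debug-cell-accesses! #t)    ; check every cell access
 *   (set-debug-cell-accesses! 1000)  ; additionally run GC every 1000 accesses
 *   (set-debug-cell-accesses! #f)    ; disable checking again
 */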

#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f

/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which cons cells and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of heap.  If this much heap is
 * allocated initially the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type size_t.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more
 * heap is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * A heap size of 45000 cells and a 40% min yield give quick startup and no
 * extra heap allocation.  Higher values for min yield may lead to large
 * heaps, especially if the code varies its maximum consumption between
 * different freelists.
 */

#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
                                        / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
int scm_default_min_yield_1 = 40;

#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
                                        / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application.
 */
int scm_default_min_yield_2 = 40;

size_t scm_default_max_segment_size = 2097000L;  /* a little less than 2 MB */

#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make the heap grow by a factor of 1.5 */
#define SCM_EXPHEAP(scm_heap_size) ((scm_heap_size) / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT / 10)
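
/* A worked example of the policy above (illustrative numbers only): with a
 * current heap of 90000 cells, SCM_EXPHEAP yields 90000 / 2 = 45000, so the
 * next segment request asks for half the current heap, i.e. growth by a
 * factor of 1.5.  Likewise, with SCM_INIT_MALLOC_LIMIT = 100000 the
 * hysteresis is 10000 bytes: a GC triggered by must_malloc has to reclaim
 * at least that much malloc storage, or scm_must_malloc raises scm_mtrigger
 * (see the trigger update near the end of scm_must_malloc below).
 */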

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find
   (scm_cell * span)-aligned inner bounds for allocated storage */

#ifdef PROT386
/* In 386 protected mode we must only adjust the offset. */
# define CELL_UP(p, span) MK_FP (FP_SEG (p), ~(8 * (span) - 1) & (FP_OFF (p) + 8 * (span) - 1))
# define CELL_DN(p, span) MK_FP (FP_SEG (p), ~(8 * (span) - 1) & FP_OFF (p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR) (~(span) & ((long) (p) + (span)))
#  define CELL_DN(p, span) (SCM_CELLPTR) (~(span) & (long) (p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR) (~(sizeof (scm_cell) * (span) - 1L) & ((long) (p) + sizeof (scm_cell) * (span) - 1L))
#  define CELL_DN(p, span) (SCM_CELLPTR) (~(sizeof (scm_cell) * (span) - 1L) & (long) (p))
# endif  /* UNICOS */
#endif  /* PROT386 */
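
/* Example of the default CELL_UP/CELL_DN arithmetic (assuming
 * sizeof (scm_cell) == 8 and span == 1, i.e. an alignment of 8 bytes):
 * for p == 0x1005, CELL_UP yields (0x1005 + 7) & ~7 == 0x1008, while
 * CELL_DN yields 0x1005 & ~7 == 0x1000 -- the usual mask-rounding trick.
 */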

#define DOUBLECELL_ALIGNED_P(x)  (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)

#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)

\f
/* scm_freelists
 */

typedef struct scm_t_freelist {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  unsigned int cluster_size;
  /* indicates that we should grow the heap instead of running GC
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  unsigned long collected;
  /* number of collected cells during penultimate GC */
  unsigned long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  unsigned long heap_size;
} scm_t_freelist;
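
/* A sketch of the structure described above (informal, not code):
 *
 *   master->clusters --> [spine] --> [spine] --> ... --> SCM_EOL
 *                           |           |
 *                           v           v
 *                       free cells  free cells
 *
 * The spine cells are chained through their CDRs; each spine cell's CAR
 * heads a chain of free cells (cluster_size - 1 of them, except possibly
 * in the last cluster).  See scm_gc_for_newcell and
 * gc_sweep_freelist_finish for how the chains are consumed and rebuilt.
 */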

SCM scm_freelist = SCM_EOL;
scm_t_freelist scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_t_freelist scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
};

/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no
 * allocation is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0;  /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
static unsigned long t_before_sweep;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_sweep_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");

typedef struct scm_t_heap_seg_data
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_t_freelist *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_t_heap_seg_data;



static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);

typedef enum { return_on_error, abort_on_error } policy_on_error;
static void alloc_some_heap (scm_t_freelist *, policy_on_error);


#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))

/* mark space allocation */

typedef struct scm_t_mark_space
{
  scm_t_c_bvec_limb *bvec_space;
  struct scm_t_mark_space *next;
} scm_t_mark_space;

static scm_t_mark_space *current_mark_space;
static scm_t_mark_space **mark_space_ptr;
static ptrdiff_t current_mark_space_offset;
static scm_t_mark_space *mark_space_head;

static scm_t_c_bvec_limb *
get_bvec ()
#define FUNC_NAME "get_bvec"
{
  scm_t_c_bvec_limb *res;

  if (!current_mark_space)
    {
      SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
      if (!current_mark_space)
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space->bvec_space = NULL;
      current_mark_space->next = NULL;

      *mark_space_ptr = current_mark_space;
      mark_space_ptr = &(current_mark_space->next);

      return get_bvec ();
    }

  if (!(current_mark_space->bvec_space))
    {
      SCM_SYSCALL (current_mark_space->bvec_space =
                   (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
      if (!(current_mark_space->bvec_space))
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space_offset = 0;

      return get_bvec ();
    }

  if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
    {
      current_mark_space = NULL;

      return get_bvec ();
    }

  res = current_mark_space->bvec_space + current_mark_space_offset;
  current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;

  return res;
}
#undef FUNC_NAME


static void
clear_mark_space ()
{
  scm_t_mark_space *ms;

  for (ms = mark_space_head; ms; ms = ms->next)
    memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
}


\f
/* Debugging functions.  */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

static long int heap_segment (SCM obj);  /* forward declaration: non-debugging function */

static void
map_free_list (scm_t_freelist *master, SCM freelist)
{
  long last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    {
      long int this_seg = heap_segment (f);

      if (this_seg == -1)
        {
          fprintf (stderr,
                   "map_free_list: can't find segment containing cell 0x%lx\n",
                   (unsigned long int) SCM_UNPACK (f));
          abort ();
        }
      else if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
                     (long) count, master->span, (long) last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
             (long) count, master->span, (long) last_seg);
}

SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{map-free-list} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  size_t i;

  fprintf (stderr, "%ld segments total (%d:%ld",
           (long) scm_n_heap_segs,
           scm_heap_table[0].span,
           (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));

  for (i = 1; i != scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%ld",
             scm_heap_table[i].span,
             (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
  fprintf (stderr, ")\n");
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

static long last_cluster;
static long last_size;

static long
free_list_length (char *title, long i, SCM freelist)
{
  SCM ls;
  long n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%ld\n", (long) last_size);
          else
            fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %ld", title, (long) i);
      else
        fprintf (stderr, "%s\t%ld\n", title, (long) n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}

static void
free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
{
  SCM clusters;
  long i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%ld\n", (long) last_size);
  else
    fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
  fprintf (stderr, "\ntotal %ld objects\n\n", (long) n);
}

SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{free-list-length} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif  /* defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) */
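
/* Usage sketch for the two debugging procedures above, from the REPL of an
 * --enable-guile-debug build (the output format is whatever the fprintf
 * calls above produce):
 *
 *   guile> (map-free-list)
 *   guile> (free-list-length)
 *
 * Both print to stderr and return an unspecified value.
 */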

#ifdef GUILE_DEBUG_FREELIST

/* Non-zero if freelist debugging is in effect.  Set this via
   `gc-set-debug-check-freelist!'.  */
static int scm_debug_check_freelist = 0;

/* Number of calls to SCM_NEWCELL since startup.  */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  */
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  long i = 0;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
    if (!SCM_FREE_CELL_P (f))
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
                 (unsigned long) scm_newcell_count, (unsigned long) i);
        abort ();
      }
}

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#t}, check the freelist for consistency\n"
            "on each cell allocation.  This procedure only exists when the\n"
            "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* [cmm] I did a double-take when I read this code the first time.
     well, FWIW. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist))
    {
      new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
      SCM_GC_SET_ALLOCATED (new);
    }
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
      SCM_GC_SET_ALLOCATED (new);
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist2))
    {
      new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
      SCM_GC_SET_ALLOCATED (new);
    }
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
      SCM_GC_SET_ALLOCATED (new);
    }

  return new;
}

#endif /* GUILE_DEBUG_FREELIST */

\f

static unsigned long
master_cells_allocated (scm_t_freelist *master)
{
  /* the '- 1' below is to ignore the cluster spine cells. */
  long objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}

static unsigned long
freelist_length (SCM freelist)
{
  long n;
  for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
    ++n;
  return n;
}

static unsigned long
compute_cells_allocated ()
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}
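
/* In other words, the count returned above satisfies, informally:
 *
 *   allocated = swept in-use cells
 *             + cells handed out from the master clusters
 *             - cells still sitting on the thread freelists
 *
 * where the last two terms are weighted by the span (cells per object)
 * of the corresponding freelist.
 */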

/* {Scheme Interface to GC}
 */

SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  long i;
  long n;
  SCM heap_segs;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                       scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                       scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                       scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                       scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                       scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                       scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                       scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                       scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
                       scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
                       scm_cons (sym_heap_segments, heap_segs),
                       SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
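
/* A typical interaction with gc-stats from Scheme (the numbers shown are
 * made up; actual values depend on the installation):
 *
 *   guile> (assq 'cells-allocated (gc-stats))
 *   (cells-allocated . 97536)
 *
 * Since the result is an association list, individual entries are best
 * extracted with assq as shown.
 */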


static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}


static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that is uniquely associated with @var{obj}\n"
            "for the lifetime of @var{obj}.")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f
/* {C Interface For When GC is Triggered}
 */

static void
adjust_min_yield (scm_t_freelist *freelist)
{
  /* min yield is adjusted upwards so that the next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GC:s.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick the largest of the last two yields. */
      long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                    - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %lu, delta = %ld\n",
               (unsigned long) scm_cells_allocated,
               (long) delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}
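
/* Numeric illustration of adjust_min_yield (made-up numbers): with a total
 * heap of 100000 cells and min_yield_fraction = 40, the target yield is
 * 40000 cells.  If the larger of the last two yields was only 30000, then
 * delta = 10000 > 0 and min_yield is raised by that amount, so the next
 * sweep is more likely to schedule heap growth instead of yet another
 * collection.
 */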

/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              /* In order to reduce gc frequency, try to allocate a new heap
               * segment first, even if gc might find some free cells.  If we
               * can't obtain a new heap segment, we will try gc later.
               */
              master->grow_heap_p = 0;
              alloc_some_heap (master, return_on_error);
            }
          if (SCM_NULLP (master->clusters))
            {
              /* The heap was not grown, either because it wasn't scheduled to
               * grow, or because there was not enough memory available.  In
               * both cases we have to try gc to get some free cells.
               */
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %lu, ",
                       (unsigned long) (scm_cells_allocated
                                        + master_cells_allocated (&scm_master_freelist)
                                        + master_cells_allocated (&scm_master_freelist2)));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
              if (SCM_NULLP (master->clusters))
                {
                  /* gc could not free any cells.  Now, we _must_ allocate a
                   * new heap segment, because there is no other possibility
                   * to provide a new cell for the caller.
                   */
                  alloc_some_heap (master, abort_on_error);
                }
            }
        }
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}


#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

void
scm_alloc_cluster (scm_t_freelist *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif


scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;

#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                  \
    ucontext_t ctx;                                                     \
    SCM_STACKITEM * top, * bot;                                         \
    getcontext (&ctx);                                                  \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,             \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext)   \
       / sizeof (SCM_STACKITEM)));                                      \
    bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base;    \
    top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp;                  \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif

void
scm_igc (const char *what)
{
  long j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
  /* During the critical section, only the current thread may run. */
  SCM_CRITICAL_SECTION_START;

  if (!scm_stack_base || scm_block_gc)
    {
      /* Release the critical section before bailing out; the early
         return must not leave it held.  */
      SCM_CRITICAL_SECTION_END;
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Mark objects on the C stack. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      ((size_t) (sizeof (SCM_STACKITEM) - 1 +
                                 sizeof scm_save_regs_gc_mark)
                       / sizeof (SCM_STACKITEM)));

  {
    unsigned long stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }
  SCM_MARK_BACKING_STORE ();

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* mark the registered roots */
  {
    size_t i;
    for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i)
      {
        SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
        for (; !SCM_NULLP (l); l = SCM_CDR (l))
          {
            SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
            scm_gc_mark (*p);
          }
      }
  }

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

  SCM_CRITICAL_SECTION_END;
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}

\f

/* {Mark/Sweep}
 */

#define MARK scm_gc_mark
#define FNAME "scm_gc_mark"

#endif /*!MARK_DEPENDENCIES*/

/* Mark an object precisely.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;
  scm_t_bits cell_type;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

/* A simple hack for debugging.  Choose the second branch to get a
   meaningful backtrace for crashes inside the GC.
*/
#if 1
#define goto_gc_mark_loop goto gc_mark_loop
#define goto_gc_mark_nimp goto gc_mark_nimp
#else
#define goto_gc_mark_loop RECURSE(ptr); return
#define goto_gc_mark_nimp RECURSE(ptr); return
#endif

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
  /* We are in debug mode.  Check the ptr exhaustively. */
  if (!scm_cellp (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
  /* In non-debug mode, do at least some cheap testing. */
  if (!CELL_P (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif

#ifndef MARK_DEPENDENCIES

  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  cell_type = SCM_GC_CELL_TYPE (ptr);
  switch (SCM_ITAG7 (cell_type))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto_gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;
    case scm_tc7_pws:
      RECURSE (SCM_SETTER (ptr));
      ptr = SCM_PROCEDURE (ptr);
      goto_gc_mark_loop;
    case scm_tcs_struct:
      {
        /* XXX - use less explicit code. */
        scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_struct;
        scm_t_bits * vtable_data = (scm_t_bits *) word0;
        SCM layout = SCM_PACK (vtable_data[scm_vtable_index_layout]);
        long len = SCM_SYMBOL_LENGTH (layout);
        char * fields_desc = SCM_SYMBOL_CHARS (layout);
        scm_t_bits * struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);

        if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
          {
            RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
            RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
          }
        if (len)
          {
            long x;

            for (x = 0; x < len - 2; x += 2, ++struct_data)
              if (fields_desc[x] == 'p')
                RECURSE (SCM_PACK (*struct_data));
            if (fields_desc[x] == 'p')
              {
                if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                  for (x = *struct_data++; x; --x, ++struct_data)
                    RECURSE (SCM_PACK (*struct_data));
                else
                  RECURSE (SCM_PACK (*struct_data));
              }
          }
        /* mark vtable */
        ptr = SCM_PACK (vtable_data[scm_vtable_index_vtable]);
        goto_gc_mark_loop;
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_ENV (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_ENV (ptr);
      goto_gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto_gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
        size_t i = SCM_CCLO_LENGTH (ptr);
        size_t j;
        for (j = 1; j != i; ++j)
          {
            SCM obj = SCM_CCLO_REF (ptr, j);
            if (!SCM_IMP (obj))
              RECURSE (obj);
          }
        ptr = SCM_CCLO_REF (ptr, 0);
        goto_gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_wvect:
      SCM_SET_WVECT_GC_CHAIN (ptr, scm_weak_vectors);
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          long x;
          long len;
          int weak_keys;
          int weak_values;

          len = SCM_VECTOR_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    RECURSE (SCM_CAR (kvpair));
                  if (!weak_values)
                    RECURSE (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                RECURSE (alist);
            }
        }
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto_gc_mark_loop;
    case scm_tc7_variable:
      ptr = SCM_CELL_OBJECT_1 (ptr);
      goto_gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
      if (!(i < scm_numptob))
        SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
      if (SCM_PTAB_ENTRY (ptr))
        RECURSE (SCM_FILENAME (ptr));
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto_gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* We have detected a free cell.  This can happen if non-object data
           * on the C stack points into guile's heap and is scanned during
           * conservative marking.  */
          break;
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
          if (!(i < scm_numsmob))
            SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto_gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME

#ifndef MARK_DEPENDENCIES

#undef MARK
#undef FNAME

/* And here we define `scm_gc_mark_dependencies', by including this
 * same file in itself.
 */
#define MARK scm_gc_mark_dependencies
#define FNAME "scm_gc_mark_dependencies"
#define MARK_DEPENDENCIES
#include "gc.c"
#undef MARK_DEPENDENCIES
#undef MARK
#undef FNAME
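
/* The net effect of the self-inclusion above: the body of MARK is compiled
 * twice.  The first pass (MARK_DEPENDENCIES undefined) defines scm_gc_mark,
 * which sets mark bits and recurses via scm_gc_mark.  The second pass
 * defines scm_gc_mark_dependencies, which skips the initial mark-bit test
 * and marks only the objects reachable *from* p, avoiding self-cycles
 * through the RECURSE macro.
 */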


/* Determine whether the given value does actually represent a cell in some
 * heap segment.  If this is the case, the number of the heap segment is
 * returned.  Otherwise, -1 is returned.  Binary search is used in order to
 * determine the heap segment that contains the cell. */
/* FIXME: To be used within scm_mark_locations and scm_cellp this function
 * should be an inline function.  */
static long int
heap_segment (SCM obj)
{
  if (!CELL_P (obj))
    return -1;
  else
    {
      SCM_CELLPTR ptr = SCM2PTR (obj);
      unsigned long int i = 0;
      unsigned long int j = scm_n_heap_segs - 1;

      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
        return -1;
      else if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
        return -1;
      else
        {
          while (i < j)
            {
              if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[1]))
                {
                  break;
                }
              else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                {
                  i = j;
                  break;
                }
              else
                {
                  unsigned long int k = (i + j) / 2;

                  if (k == i)
                    return -1;
                  else if (SCM_PTR_LT (ptr, scm_heap_table[k].bounds[1]))
                    {
                      j = k;
                      ++i;
                      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
                        return -1;
                    }
                  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                    {
                      i = k;
                      --j;
                      if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
                        return -1;
                    }
                }
            }

          if (!DOUBLECELL_ALIGNED_P (obj) && scm_heap_table[i].span == 2)
            return -1;
          else if (SCM_GC_IN_CARD_HEADERP (ptr))
            return -1;
          else
            return i;
        }
    }
}


/* Mark a region conservatively */
void
scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = * (SCM *) &x[m];
      long int segment = heap_segment (obj);
      if (segment >= 0)
        scm_gc_mark (obj);
    }
}


/* The function scm_cellp determines whether an SCM value can be regarded as a
 * pointer to a cell on the heap.
 */
int
scm_cellp (SCM value)
{
  long int segment = heap_segment (value);
  return (segment >= 0);
}


static void
gc_sweep_freelist_start (scm_t_freelist *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}

static void
gc_sweep_freelist_finish (scm_t_freelist *freelist)
{
  long collected;
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      SCM c = freelist->cells;
      SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
      SCM_SET_CELL_WORD_1 (c, SCM_EOL);
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}

#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
             CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
             : nxt__); \
  } while (0)
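
/* Illustration of NEXT_DATA_CELL: if ptr points at the last data cell of a
 * card, CELL_UP ((char *) ptr + 1, span) lands on the following card, which
 * begins with SCM_GC_CARD_N_HEADER_CELLS header cells; the macro then skips
 * past those headers to that card's first data cell.  Otherwise it simply
 * advances to the next span-aligned cell.
 */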
1613
1614 void
1615 scm_gc_sweep ()
1616 #define FUNC_NAME "scm_gc_sweep"
1617 {
1618 register SCM_CELLPTR ptr;
1619 register SCM nfreelist;
1620 register scm_t_freelist *freelist;
1621 register unsigned long m;
1622 register int span;
1623 size_t i;
1624 size_t seg_size;
1625
1626 m = 0;
1627
1628 gc_sweep_freelist_start (&scm_master_freelist);
1629 gc_sweep_freelist_start (&scm_master_freelist2);
1630
1631 for (i = 0; i < scm_n_heap_segs; i++)
1632 {
1633 register long left_to_collect;
1634 register size_t j;
1635
1636 /* Unmarked cells go onto the front of the freelist this heap
1637 segment points to. Rather than updating the real freelist
1638 pointer as we go along, we accumulate the new head in
1639 nfreelist. Then, if it turns out that the entire segment is
1640 free, we free (i.e., malloc's free) the whole segment, and
1641 simply don't assign nfreelist back into the real freelist. */
1642 freelist = scm_heap_table[i].freelist;
1643 nfreelist = freelist->cells;
1644 left_to_collect = freelist->left_to_collect;
1645 span = scm_heap_table[i].span;
1646
1647 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1648 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1649
1650 /* use only data cells in seg_size */
1651 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1652
1653 scm_gc_cells_swept += seg_size;
1654
1655 for (j = seg_size + span; j -= span; ptr += span)
1656 {
1657 SCM scmptr;
1658
1659 if (SCM_GC_IN_CARD_HEADERP (ptr))
1660 {
1661 SCM_CELLPTR nxt;
1662
1663 /* cheat here */
1664 nxt = ptr;
1665 NEXT_DATA_CELL (nxt, span);
1666 j += span;
1667
1668 ptr = nxt - span;
1669 continue;
1670 }
1671
1672 scmptr = PTR2SCM (ptr);
1673
1674 if (SCM_GCMARKP (scmptr))
1675 continue;
1676
1677 switch SCM_TYP7 (scmptr)
1678 {
1679 case scm_tcs_struct:
1680 {
1681 /* Structs need to be freed in a special order.
1682 * This is handled by GC C hooks in struct.c.
1683 */
1684 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1685 scm_structs_to_free = scmptr;
1686 }
1687 continue;
1688 case scm_tcs_cons_imcar:
1689 case scm_tcs_cons_nimcar:
1690 case scm_tcs_closures:
1691 case scm_tc7_pws:
1692 break;
1693 case scm_tc7_wvect:
1694 case scm_tc7_vector:
1695 {
1696 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1697 if (length > 0)
1698 {
1699 m += length * sizeof (scm_t_bits);
1700 scm_must_free (SCM_VECTOR_BASE (scmptr));
1701 }
1702 break;
1703 }
1704 #ifdef CCLO
1705 case scm_tc7_cclo:
1706 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
1707 scm_must_free (SCM_CCLO_BASE (scmptr));
1708 break;
1709 #endif
1710 #ifdef HAVE_ARRAYS
1711 case scm_tc7_bvect:
1712 {
1713 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
1714 if (length > 0)
1715 {
1716 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1717 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1718 }
1719 }
1720 break;
1721 case scm_tc7_byvect:
1722 case scm_tc7_ivect:
1723 case scm_tc7_uvect:
1724 case scm_tc7_svect:
1725 #ifdef HAVE_LONG_LONGS
1726 case scm_tc7_llvect:
1727 #endif
1728 case scm_tc7_fvect:
1729 case scm_tc7_dvect:
1730 case scm_tc7_cvect:
1731 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
1732 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1733 break;
1734 #endif
1735 case scm_tc7_string:
1736 m += SCM_STRING_LENGTH (scmptr) + 1;
1737 scm_must_free (SCM_STRING_CHARS (scmptr));
1738 break;
1739 case scm_tc7_symbol:
1740 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
1741 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
1742 break;
1743 case scm_tc7_variable:
1744 break;
1745 case scm_tcs_subrs:
1746 /* the various "subrs" (primitives) are never freed */
1747 continue;
1748 case scm_tc7_port:
1749 if SCM_OPENP (scmptr)
1750 {
1751 int k = SCM_PTOBNUM (scmptr);
1752 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1753 if (!(k < scm_numptob))
1754 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1755 #endif
1756 /* Keep "revealed" ports alive. */
1757 if (scm_revealed_count (scmptr) > 0)
1758 continue;
1759 /* Yes, I really do mean scm_ptobs[k].free */
1760 /* rather than ftobs[k].close. .close */
1761 /* is for explicit CLOSE-PORT by user */
1762 m += (scm_ptobs[k].free) (scmptr);
1763 SCM_SETSTREAM (scmptr, 0);
1764 scm_remove_from_port_table (scmptr);
1765 scm_gc_ports_collected++;
1766 SCM_CLR_PORT_OPEN_FLAG (scmptr);
1767 }
1768 break;
1769 case scm_tc7_smob:
1770 switch SCM_TYP16 (scmptr)
1771 {
1772 case scm_tc_free_cell:
1773 case scm_tc16_real:
1774 break;
1775 #ifdef SCM_BIGDIG
1776 case scm_tc16_big:
1777 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1778 scm_must_free (SCM_BDIGITS (scmptr));
1779 break;
1780 #endif /* def SCM_BIGDIG */
1781 case scm_tc16_complex:
1782 m += sizeof (scm_t_complex);
1783 scm_must_free (SCM_COMPLEX_MEM (scmptr));
1784 break;
1785 default:
1786 {
1787 int k;
1788 k = SCM_SMOBNUM (scmptr);
1789 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1790 if (!(k < scm_numsmob))
1791 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1792 #endif
1793 if (scm_smobs[k].free)
1794 m += (scm_smobs[k].free) (scmptr);
1795 break;
1796 }
1797 }
1798 break;
1799 default:
1800 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1801 }
1802
1803 if (!--left_to_collect)
1804 {
1805 SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
1806 *freelist->clustertail = scmptr;
1807 freelist->clustertail = SCM_CDRLOC (scmptr);
1808
1809 nfreelist = SCM_EOL;
1810 freelist->collected += span * freelist->cluster_size;
1811 left_to_collect = freelist->cluster_size;
1812 }
1813 else
1814 {
1815 /* Stick the new cell on the front of nfreelist. It's
1816 critical that we mark this cell as freed; otherwise, the
1817 conservative collector might trace it as some other type
1818 of object. */
1819 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1820 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
1821 nfreelist = scmptr;
1822 }
1823 }
1824
1825 #ifdef GC_FREE_SEGMENTS
1826 if (n == seg_size)
1827 {
1828 register long j;
1829
1830 freelist->heap_size -= seg_size;
1831 free ((char *) scm_heap_table[i].bounds[0]);
1832 scm_heap_table[i].bounds[0] = 0;
1833 for (j = i + 1; j < scm_n_heap_segs; j++)
1834 scm_heap_table[j - 1] = scm_heap_table[j];
1835 scm_n_heap_segs -= 1;
1836 i--; /* We need to scan the segment just moved. */
1837 }
1838 else
1839 #endif /* ifdef GC_FREE_SEGMENTS */
1840 {
1841 /* Update the real freelist pointer to point to the head of
1842 the list of free cells we've built for this segment. */
1843 freelist->cells = nfreelist;
1844 freelist->left_to_collect = left_to_collect;
1845 }
1846
1847 #ifdef GUILE_DEBUG_FREELIST
1848 scm_map_free_list ();
1849 #endif
1850 }
1851
1852 gc_sweep_freelist_finish (&scm_master_freelist);
1853 gc_sweep_freelist_finish (&scm_master_freelist2);
1854
1855 /* When we move to POSIX threads private freelists should probably
1856 be GC-protected instead. */
1857 scm_freelist = SCM_EOL;
1858 scm_freelist2 = SCM_EOL;
1859
1860 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1861 scm_gc_yield -= scm_cells_allocated;
1862
1863 if (scm_mallocated < m)
1864 /* The byte count of allocated objects has underflowed. This is
1865 probably because you forgot to report the sizes of objects you
1866 have allocated, by calling scm_done_malloc or some such. When
1867 the GC freed them, it subtracted their size from
1868 scm_mallocated, which underflowed. */
1869 abort ();
1870
1871 scm_mallocated -= m;
1872 scm_gc_malloc_collected = m;
1873 }
1874 #undef FUNC_NAME
1875
1876
1877 \f
1878 /* {Front end to malloc}
1879 *
1880 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1881 * scm_done_free
1882 *
1883 * These functions provide services comparable to malloc, realloc, and
1884 * free. They should be used when allocating memory that will be under
1885 * control of the garbage collector, i.e., if the memory may be freed
1886 * during garbage collection.
1887 */
1888
1889 /* scm_must_malloc
1890 * Return newly malloced storage or throw an error.
1891 *
1892 * The parameter WHAT is a string for error reporting.
1893 * If this allocation would push the total past the threshold
1894 * scm_mtrigger, or if the first call to malloc fails,
1895 * garbage collect -- on the presumption that some objects
1896 * using malloced storage may be collected.
1897 *
1898 * The limit scm_mtrigger may be raised by this allocation.
1899 */
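/* A usage sketch (buffer name and size hypothetical):
 *
 *   char *buf = scm_must_malloc (len, "my-buffer");
 *   ...
 *   scm_must_free (buf);
 *
 * Note that scm_must_malloc never returns NULL: it either succeeds
 * (possibly after a forced GC) or signals an error via
 * scm_memory_error.  */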
1900 void *
1901 scm_must_malloc (size_t size, const char *what)
1902 {
1903 void *ptr;
1904 unsigned long nm = scm_mallocated + size;
1905
1906 if (nm < size)
1907 /* The byte count of allocated objects has overflowed. This is
1908 probably because you forgot to report the correct size of freed
1909 memory in some of your smob free methods. */
1910 abort ();
1911
1912 if (nm <= scm_mtrigger)
1913 {
1914 SCM_SYSCALL (ptr = malloc (size));
1915 if (NULL != ptr)
1916 {
1917 scm_mallocated = nm;
1918 #ifdef GUILE_DEBUG_MALLOC
1919 scm_malloc_register (ptr, what);
1920 #endif
1921 return ptr;
1922 }
1923 }
1924
1925 scm_igc (what);
1926
1927 nm = scm_mallocated + size;
1928
1929 if (nm < size)
1930 /* The byte count of allocated objects has overflowed. This is
1931 probably because you forgot to report the correct size of freed
1932 memory in some of your smob free methods. */
1933 abort ();
1934
1935 SCM_SYSCALL (ptr = malloc (size));
1936 if (NULL != ptr)
1937 {
1938 scm_mallocated = nm;
1939
1940 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1941 unsigned long old_trigger = scm_mtrigger;
1942 if (nm > scm_mtrigger)
1943 scm_mtrigger = nm + nm / 2;
1944 else
1945 scm_mtrigger += scm_mtrigger / 2;
1946 if (scm_mtrigger < old_trigger)
1947 abort ();
1948 }
1949 #ifdef GUILE_DEBUG_MALLOC
1950 scm_malloc_register (ptr, what);
1951 #endif
1952
1953 return ptr;
1954 }
1955
1956 scm_memory_error (what);
1957 }
1958
1959
1960 /* scm_must_realloc
1961 * is similar to scm_must_malloc.
1962 */
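/* A minimal sketch (names hypothetical):
 *
 *   buf = scm_must_realloc (buf, old_len, new_len, "my-buffer");
 *
 * Report OLD_SIZE accurately: only the difference between the new and
 * old sizes is added to scm_mallocated below.  */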
1963 void *
1964 scm_must_realloc (void *where,
1965 size_t old_size,
1966 size_t size,
1967 const char *what)
1968 {
1969 void *ptr;
1970 unsigned long nm;
1971
1972 if (size <= old_size)
1973 return where;
1974
1975 nm = scm_mallocated + size - old_size;
1976
1977 if (nm < (size - old_size))
1978 /* The byte count of allocated objects has overflowed. This is
1979 probably because you forgot to report the correct size of freed
1980 memory in some of your smob free methods. */
1981 abort ();
1982
1983 if (nm <= scm_mtrigger)
1984 {
1985 SCM_SYSCALL (ptr = realloc (where, size));
1986 if (NULL != ptr)
1987 {
1988 scm_mallocated = nm;
1989 #ifdef GUILE_DEBUG_MALLOC
1990 scm_malloc_reregister (where, ptr, what);
1991 #endif
1992 return ptr;
1993 }
1994 }
1995
1996 scm_igc (what);
1997
1998 nm = scm_mallocated + size - old_size;
1999
2000 if (nm < (size - old_size))
2001 /* The byte count of allocated objects has overflowed. This is
2002 probably because you forgot to report the correct size of freed
2003 memory in some of your smob free methods. */
2004 abort ();
2005
2006 SCM_SYSCALL (ptr = realloc (where, size));
2007 if (NULL != ptr)
2008 {
2009 scm_mallocated = nm;
2010 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2011 unsigned long old_trigger = scm_mtrigger;
2012 if (nm > scm_mtrigger)
2013 scm_mtrigger = nm + nm / 2;
2014 else
2015 scm_mtrigger += scm_mtrigger / 2;
2016 if (scm_mtrigger < old_trigger)
2017 abort ();
2018 }
2019 #ifdef GUILE_DEBUG_MALLOC
2020 scm_malloc_reregister (where, ptr, what);
2021 #endif
2022 return ptr;
2023 }
2024
2025 scm_memory_error (what);
2026 }
2027
2028 char *
2029 scm_must_strndup (const char *str, size_t length)
2030 {
2031 char * dst = scm_must_malloc (length + 1, "scm_must_strndup");
2032 memcpy (dst, str, length);
2033 dst[length] = 0;
2034 return dst;
2035 }
2036
2037 char *
2038 scm_must_strdup (const char *str)
2039 {
2040 return scm_must_strndup (str, strlen (str));
2041 }
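/* For example (names hypothetical):
 *
 *   char *copy = scm_must_strdup (name);
 *   ...
 *   scm_must_free (copy);
 */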
2042
2043 void
2044 scm_must_free (void *obj)
2045 #define FUNC_NAME "scm_must_free"
2046 {
2047 #ifdef GUILE_DEBUG_MALLOC
2048 scm_malloc_unregister (obj);
2049 #endif
2050 if (obj)
2051 free (obj);
2052 else
2053 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
2054 }
2055 #undef FUNC_NAME
2056
2057
2058 /* Announce that there has been some malloc done that will be freed
2059 * during gc. A typical use is for a smob that uses some malloced
2060 * memory but cannot get it from scm_must_malloc (for whatever
2061 * reason). When a new object of this smob is created you call
2062 * scm_done_malloc with the size of the object. When your smob free
2063 * function is called, be sure to include this size in the return
2064 * value.
2065 *
2066 * If you can't actually free the memory in the smob free function,
2067 * for whatever reason (like reference counting), you still can (and
2068 * should) report the amount of memory freed when you actually free it.
2069 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2070 * eh? Or even better, call scm_done_free. */
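/* A sketch of the pattern described above, for a hypothetical smob
 * that keeps an externally malloced pixel buffer:
 *
 *   img->pixels = malloc (n);
 *   scm_done_malloc (n);            // report the foreign allocation
 *
 *   static size_t
 *   image_free (SCM obj)
 *   {
 *     free (IMAGE_PIXELS (obj));
 *     return n;                     // include the size in the return value
 *   }
 */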
2071
2072 void
2073 scm_done_malloc (long size)
2074 {
2075 if (size < 0) {
2076 if (scm_mallocated < (unsigned long) -size)
2077 /* The byte count of allocated objects has underflowed. This is
2078 probably because you forgot to report the sizes of objects you
2079 have allocated, by calling scm_done_malloc or some such. When
2080 the GC freed them, it subtracted their size from
2081 scm_mallocated, which underflowed. */
2082 abort ();
2083 } else {
2084 unsigned long nm = scm_mallocated + size;
2085 if (nm < size)
2086 /* The byte count of allocated objects has overflowed. This is
2087 probably because you forgot to report the correct size of freed
2088 memory in some of your smob free methods. */
2089 abort ();
2090 }
2091
2092 scm_mallocated += size;
2093
2094 if (scm_mallocated > scm_mtrigger)
2095 {
2096 scm_igc ("foreign mallocs");
2097 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2098 {
2099 if (scm_mallocated > scm_mtrigger)
2100 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2101 else
2102 scm_mtrigger += scm_mtrigger / 2;
2103 }
2104 }
2105 }
2106
2107 void
2108 scm_done_free (long size)
2109 {
2110 if (size >= 0) {
2111 if (scm_mallocated < size)
2112 /* The byte count of allocated objects has underflowed. This is
2113 probably because you forgot to report the sizes of objects you
2114 have allocated, by calling scm_done_malloc or some such. When
2115 the GC freed them, it subtracted their size from
2116 scm_mallocated, which underflowed. */
2117 abort ();
2118 } else {
2119 unsigned long nm = scm_mallocated - size;
2120 if (nm < (unsigned long) -size)
2121 /* The byte count of allocated objects has overflowed. This is
2122 probably because you forgot to report the correct size of freed
2123 memory in some of your smob free methods. */
2124 abort ();
2125 }
2126
2127 scm_mallocated -= size;
2128 }
2129
2130
2131 \f
2132 /* {Heap Segments}
2133 *
2134 * Each heap segment is an array of objects of a particular size.
2135 * Every segment has an associated (possibly shared) freelist.
2136 * A table of segment records is kept that records the upper and
2137 * lower extents of the segment; this is used during the conservative
2138 * phase of gc to identify probable gc roots (because they point
2139 * into valid segments at reasonable offsets). */
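/* As a sketch (not necessarily the exact test the mark phase uses), a
 * word W found on the stack counts as a possible root when some
 * segment i satisfies:
 *
 *   SCM_PTR_LE (scm_heap_table[i].bounds[0], W)
 *     && SCM_PTR_GT (scm_heap_table[i].bounds[1], W)
 */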
2140
2141 /* scm_expmem
2142 * is true if the first segment was smaller than INIT_HEAP_SEG.
2143 * If scm_expmem is set to one, subsequent segment allocations will
2144 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2145 */
2146 int scm_expmem = 0;
2147
2148 size_t scm_max_segment_size;
2149
2150 /* scm_heap_org
2151 * is the lowest base address of any heap segment.
2152 */
2153 SCM_CELLPTR scm_heap_org;
2154
2155 scm_t_heap_seg_data * scm_heap_table = 0;
2156 static size_t heap_segment_table_size = 0;
2157 size_t scm_n_heap_segs = 0;
2158
2159 /* init_heap_seg
2160 * initializes a new heap segment and returns its size in bytes (zero if seg_org is NULL).
2161 *
2162 * The segment origin and segment size in bytes are input parameters.
2163 * The freelist is both input and output.
2164 *
2165 * This function presumes that the scm_heap_table has already been expanded
2166 * to accommodate a new segment record and that the markbit space was reserved
2167 * for all the cards in this segment.
2168 */
2169
2170 #define INIT_CARD(card, span) \
2171 do { \
2172 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2173 if ((span) == 2) \
2174 SCM_GC_SET_CARD_DOUBLECELL (card); \
2175 } while (0)
2176
2177 static size_t
2178 init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
2179 {
2180 register SCM_CELLPTR ptr;
2181 SCM_CELLPTR seg_end;
2182 size_t new_seg_index;
2183 ptrdiff_t n_new_cells;
2184 int span = freelist->span;
2185
2186 if (seg_org == NULL)
2187 return 0;
2188
2189 /* Align the begin ptr up.
2190 */
2191 ptr = SCM_GC_CARD_UP (seg_org);
2192
2193 /* Compute the ceiling on valid object pointers within this segment.
2194 */
2195 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
2196
2197 /* Find the right place and insert the segment record.
2198 */
2199 new_seg_index = 0;
2200 while (new_seg_index < scm_n_heap_segs
2201 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org))
2202 new_seg_index++;
2203
2204 {
2205 int i;
2206 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2207 scm_heap_table[i] = scm_heap_table[i - 1];
2208 }
2209
2210 ++scm_n_heap_segs;
2211
2212 scm_heap_table[new_seg_index].span = span;
2213 scm_heap_table[new_seg_index].freelist = freelist;
2214 scm_heap_table[new_seg_index].bounds[0] = ptr;
2215 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2216
2217 /* n_new_cells */
2218 n_new_cells = seg_end - ptr;
2219
2220 freelist->heap_size += n_new_cells;
2221
2222 /* Partition objects in this segment into clusters */
2223 {
2224 SCM clusters;
2225 SCM *clusterp = &clusters;
2226
2227 NEXT_DATA_CELL (ptr, span);
2228 while (ptr < seg_end)
2229 {
2230 scm_cell *nxt = ptr;
2231 scm_cell *prv = NULL;
2232 scm_cell *last_card = NULL;
2233 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2234 NEXT_DATA_CELL(nxt, span);
2235
2236 /* Allocate cluster spine
2237 */
2238 *clusterp = PTR2SCM (ptr);
2239 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
2240 clusterp = SCM_CDRLOC (*clusterp);
2241 ptr = nxt;
2242
2243 while (n_data_cells--)
2244 {
2245 scm_cell *card = SCM_GC_CELL_CARD (ptr);
2246 SCM scmptr = PTR2SCM (ptr);
2247 nxt = ptr;
2248 NEXT_DATA_CELL (nxt, span);
2249 prv = ptr;
2250
2251 if (card != last_card)
2252 {
2253 INIT_CARD (card, span);
2254 last_card = card;
2255 }
2256
2257 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2258 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
2259
2260 ptr = nxt;
2261 }
2262
2263 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
2264 }
2265
2266 /* sanity check */
2267 {
2268 scm_cell *ref = seg_end;
2269 NEXT_DATA_CELL (ref, span);
2270 if (ref != ptr)
2271 /* [cmm] looks like the segment size doesn't divide cleanly by
2272 cluster size. bad cmm! */
2273 abort();
2274 }
2275
2276 /* Patch up the last cluster pointer in the segment
2277 * to join it to the input freelist.
2278 */
2279 *clusterp = freelist->clusters;
2280 freelist->clusters = clusters;
2281 }
2282
2283 #ifdef DEBUGINFO
2284 fprintf (stderr, "H");
2285 #endif
2286 return size;
2287 }
2288
2289 static size_t
2290 round_to_cluster_size (scm_t_freelist *freelist, size_t len)
2291 {
2292 size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2293
2294 return
2295 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2296 + ALIGNMENT_SLACK (freelist);
2297 }
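/* E.g., with a hypothetical cluster size of 4096 bytes, a request of
 * 10000 bytes is rounded up to 3 * 4096 = 12288 bytes, plus the
 * freelist's alignment slack (ALIGNMENT_SLACK).  */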
2298
2299 static void
2300 alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
2301 #define FUNC_NAME "alloc_some_heap"
2302 {
2303 SCM_CELLPTR ptr;
2304 size_t len;
2305
2306 if (scm_gc_heap_lock)
2307 {
2308 /* Critical code sections (such as the garbage collector) aren't
2309 * supposed to add heap segments.
2310 */
2311 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
2312 abort ();
2313 }
2314
2315 if (scm_n_heap_segs == heap_segment_table_size)
2316 {
2317 /* We have to expand the heap segment table to have room for the new
2318 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2319 * init_heap_seg only if the allocation of the segment itself succeeds.
2320 */
2321 size_t new_table_size = scm_n_heap_segs + 1;
2322 size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
2323 scm_t_heap_seg_data *new_heap_table;
2324
2325 SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
2326 realloc ((char *)scm_heap_table, size)));
2327 if (!new_heap_table)
2328 {
2329 if (error_policy == abort_on_error)
2330 {
2331 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2332 abort ();
2333 }
2334 else
2335 {
2336 return;
2337 }
2338 }
2339 else
2340 {
2341 scm_heap_table = new_heap_table;
2342 heap_segment_table_size = new_table_size;
2343 }
2344 }
2345
2346 /* Pick a size for the new heap segment.
2347 * The rule for picking the size of a segment is explained in
2348 * gc.h
2349 */
2350 {
2351 /* Ensure that the new segment is predicted to be large enough.
2352 *
2353 * New yield should at least equal GC fraction of new heap size, i.e.
2354 *
2355 * y + dh > f * (h + dh)
2356 *
2357 * y : yield
2358 * f : min yield fraction
2359 * h : heap size
2360 * dh : size of new heap segment
2361 *
2362 * This gives dh > (f * h - y) / (1 - f)
2363 */
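/* Since f is a percentage, scaled to integer arithmetic the bound
 * reads: min_cells > (f * h - 100 * y) / (100 - f).  The code below
 * divides by (99 - f) instead, which appears to bias the estimate
 * slightly upward.  */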
2364 int f = freelist->min_yield_fraction;
2365 unsigned long h = SCM_HEAP_SIZE;
2366 size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2367 len = SCM_EXPHEAP (freelist->heap_size);
2368 #ifdef DEBUGINFO
2369 fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
2370 #endif
2371 if (len < min_cells)
2372 len = min_cells + freelist->cluster_size;
2373 len *= sizeof (scm_cell);
2374 /* force new sampling */
2375 freelist->collected = LONG_MAX;
2376 }
2377
2378 if (len > scm_max_segment_size)
2379 len = scm_max_segment_size;
2380
2381 {
2382 size_t smallest;
2383
2384 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2385
2386 if (len < smallest)
2387 len = smallest;
2388
2389 /* Allocate with decaying ambition. */
2390 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2391 && (len >= smallest))
2392 {
2393 size_t rounded_len = round_to_cluster_size (freelist, len);
2394 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2395 if (ptr)
2396 {
2397 init_heap_seg (ptr, rounded_len, freelist);
2398 return;
2399 }
2400 len /= 2;
2401 }
2402 }
2403
2404 if (error_policy == abort_on_error)
2405 {
2406 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2407 abort ();
2408 }
2409 }
2410 #undef FUNC_NAME
2411
2412 \f
2413 /* {GC Protection Helper Functions}
2414 */
2415
2416
2417 /*
2418 * If within a function you need to protect one or more scheme objects from
2419 * garbage collection, pass them as parameters to one of the
2420 * scm_remember_upto_here* functions below. These functions don't do
2421 * anything, but since the compiler does not know that they are actually
2422 * no-ops, it will generate code that calls these functions with the given
2423 * parameters. Therefore, you can be sure that the compiler will keep those
2424 * scheme values alive (on the stack or in a register) up to the point where
2425 * scm_remember_upto_here* is called. In other words, place the call to
2426 * scm_remember_upto_here* _after_ the last code in your function that
2427 * depends on the scheme object's existence.
2428 *
2429 * Example: We want to make sure that the string object str does not get
2430 * garbage collected during the execution of 'some_function' in the code
2431 * below, because otherwise the characters belonging to str would be freed and
2432 * 'some_function' might access freed memory. To make sure that the compiler
2433 * keeps str alive on the stack or in a register such that it is visible to
2434 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2435 * call to 'some_function'. Note that this would not be necessary if str were
2436 * used anyway after the call to 'some_function'.
2437 * char *chars = SCM_STRING_CHARS (str);
2438 * some_function (chars);
2439 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2440 */
2441
2442 void
2443 scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
2444 {
2445 /* Empty. Protects a single object from garbage collection. */
2446 }
2447
2448 void
2449 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
2450 {
2451 /* Empty. Protects two objects from garbage collection. */
2452 }
2453
2454 void
2455 scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
2456 {
2457 /* Empty. Protects any number of objects from garbage collection. */
2458 }
2459
2460 /*
2461 These crazy functions prevent the garbage collection of the
2462 arguments after the first one: because every argument is used
2463 in the call itself, the compiler must keep them all live (on
2464 the stack or in registers) up to that point.
2465 It'd be better to have a nice compiler hint to
2466 aid the conservative stack-scanning GC. --03/09/00 gjb */
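/* A usage sketch (names hypothetical):
 *
 *   return scm_return_first (ans, str);
 *
 * This keeps str alive up to the point where ans is returned.  */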
2468 SCM
2469 scm_return_first (SCM elt, ...)
2470 {
2471 return elt;
2472 }
2473
2474 int
2475 scm_return_first_int (int i, ...)
2476 {
2477 return i;
2478 }
2479
2480
2481 SCM
2482 scm_permanent_object (SCM obj)
2483 {
2484 SCM_REDEFER_INTS;
2485 scm_permobjs = scm_cons (obj, scm_permobjs);
2486 SCM_REALLOW_INTS;
2487 return obj;
2488 }
2489
2490
2491 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2492 other references are dropped, until the object is unprotected by calling
2493 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
2494 i.e. it is possible to protect the same object several times, but it is
2495 necessary to unprotect the object the same number of times to actually get
2496 the object unprotected. It is an error to unprotect an object more often
2497 than it has been protected before. The function scm_gc_protect_object
2498 returns OBJ.
2499 */
2500
2501 /* Implementation note: For every object X, there is a counter which
2502 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
2503 */
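/* For example (hypothetical object obj):
 *
 *   scm_gc_protect_object (obj);      // count: 1
 *   scm_gc_protect_object (obj);      // count: 2
 *   scm_gc_unprotect_object (obj);    // count: 1, still protected
 *   scm_gc_unprotect_object (obj);    // count: 0, protection dropped
 */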
2504
2505 SCM
2506 scm_gc_protect_object (SCM obj)
2507 {
2508 SCM handle;
2509
2510 /* This critical section barrier will be replaced by a mutex. */
2511 SCM_REDEFER_INTS;
2512
2513 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2514 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2515
2516 SCM_REALLOW_INTS;
2517
2518 return obj;
2519 }
2520
2521
2522 /* Remove any protection for OBJ established by a prior call to
2523 scm_gc_protect_object. This function returns OBJ.
2524
2525 See scm_gc_protect_object for more information. */
2526 SCM
2527 scm_gc_unprotect_object (SCM obj)
2528 {
2529 SCM handle;
2530
2531 /* This critical section barrier will be replaced by a mutex. */
2532 SCM_REDEFER_INTS;
2533
2534 handle = scm_hashq_get_handle (scm_protects, obj);
2535
2536 if (SCM_FALSEP (handle))
2537 {
2538 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2539 abort ();
2540 }
2541 else
2542 {
2543 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2544 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2545 scm_hashq_remove_x (scm_protects, obj);
2546 else
2547 SCM_SETCDR (handle, count);
2548 }
2549
2550 SCM_REALLOW_INTS;
2551
2552 return obj;
2553 }
2554
2555 void
2556 scm_gc_register_root (SCM *p)
2557 {
2558 SCM handle;
2559 SCM key = scm_long2num ((long) p);
2560
2561 /* This critical section barrier will be replaced by a mutex. */
2562 SCM_REDEFER_INTS;
2563
2564 handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
2565 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2566
2567 SCM_REALLOW_INTS;
2568 }
2569
2570 void
2571 scm_gc_unregister_root (SCM *p)
2572 {
2573 SCM handle;
2574 SCM key = scm_long2num ((long) p);
2575
2576 /* This critical section barrier will be replaced by a mutex. */
2577 SCM_REDEFER_INTS;
2578
2579 handle = scm_hashv_get_handle (scm_gc_registered_roots, key);
2580
2581 if (SCM_FALSEP (handle))
2582 {
2583 fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
2584 abort ();
2585 }
2586 else
2587 {
2588 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2589 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2590 scm_hashv_remove_x (scm_gc_registered_roots, key);
2591 else
2592 SCM_SETCDR (handle, count);
2593 }
2594
2595 SCM_REALLOW_INTS;
2596 }
2597
2598 void
2599 scm_gc_register_roots (SCM *b, unsigned long n)
2600 {
2601 SCM *p = b;
2602 for (; p < b + n; ++p)
2603 scm_gc_register_root (p);
2604 }
2605
2606 void
2607 scm_gc_unregister_roots (SCM *b, unsigned long n)
2608 {
2609 SCM *p = b;
2610 for (; p < b + n; ++p)
2611 scm_gc_unregister_root (p);
2612 }
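/* A sketch of the intended use, keeping a C-level global alive across
 * collections (variable name hypothetical):
 *
 *   static SCM my_global = SCM_BOOL_F;
 *   ...
 *   scm_gc_register_root (&my_global);
 */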
2613
2614 int terminating;
2615
2616 /* called on process termination. */
2617 #ifdef HAVE_ATEXIT
2618 static void
2619 cleanup (void)
2620 #else
2621 #ifdef HAVE_ON_EXIT
2622 extern int on_exit (void (*procp) (), int arg);
2623
2624 static void
2625 cleanup (int status, void *arg)
2626 #else
2627 #error Dont know how to set up a cleanup handler on your system.
2628 #endif
2629 #endif
2630 {
2631 terminating = 1;
2632 scm_flush_all_ports ();
2633 }
2634
2635 \f
2636 static int
2637 make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
2638 {
2639 size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);
2640
2641 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2642 rounded_size,
2643 freelist))
2644 {
2645 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2646 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2647 rounded_size,
2648 freelist))
2649 return 1;
2650 }
2651 else
2652 scm_expmem = 1;
2653
2654 if (freelist->min_yield_fraction)
2655 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2656 / 100);
2657 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2658
2659 return 0;
2660 }
2661
2662 \f
2663 static void
2664 init_freelist (scm_t_freelist *freelist,
2665 int span,
2666 long cluster_size,
2667 int min_yield)
2668 {
2669 freelist->clusters = SCM_EOL;
2670 freelist->cluster_size = cluster_size + 1;
2671 freelist->left_to_collect = 0;
2672 freelist->clusters_allocated = 0;
2673 freelist->min_yield = 0;
2674 freelist->min_yield_fraction = min_yield;
2675 freelist->span = span;
2676 freelist->collected = 0;
2677 freelist->collected_1 = 0;
2678 freelist->heap_size = 0;
2679 }
2680
2681
2682 /* Get an integer from an environment variable. */
2683 static int
2684 scm_i_getenv_int (const char *var, int def)
2685 {
2686 char *end, *val = getenv (var);
2687 long res;
2688 if (!val)
2689 return def;
2690 res = strtol (val, &end, 10);
2691 if (end == val)
2692 return def;
2693 return res;
2694 }
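/* E.g., setting GUILE_MAX_SEGMENT_SIZE=16777216 in the environment
 * (value hypothetical) overrides scm_default_max_segment_size below;
 * values that do not parse as an integer fall back to the default.  */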
2695
2696
2697 int
2698 scm_init_storage ()
2699 {
2700 unsigned long gc_trigger_1;
2701 unsigned long gc_trigger_2;
2702 size_t init_heap_size_1;
2703 size_t init_heap_size_2;
2704 size_t j;
2705
2706 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2707 scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
2708 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
2709
2710 j = SCM_NUM_PROTECTS;
2711 while (j)
2712 scm_sys_protects[--j] = SCM_BOOL_F;
2713 scm_block_gc = 1;
2714
2715 scm_freelist = SCM_EOL;
2716 scm_freelist2 = SCM_EOL;
2717 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2718 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2719 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2720 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2721 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
2722
2723 scm_expmem = 0;
2724
2725 j = SCM_HEAP_SEG_SIZE;
2726 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2727 scm_heap_table = ((scm_t_heap_seg_data *)
2728 scm_must_malloc (sizeof (scm_t_heap_seg_data) * 2, "hplims"));
2729 heap_segment_table_size = 2;
2730
2731 mark_space_ptr = &mark_space_head;
2732
2733 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2734 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
2735 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2736 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2737 return 1;
2738
2739 /* scm_hplims[0] can change. do not remove scm_heap_org */
2740 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2741
2742 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2743 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2744 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2745 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2746 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2747
2748 /* Initialise the list of ports. */
2749 scm_port_table = (scm_t_port **)
2750 malloc (sizeof (scm_t_port *) * scm_port_table_room);
2751 if (!scm_port_table)
2752 return 1;
2753
2754 #ifdef HAVE_ATEXIT
2755 atexit (cleanup);
2756 #else
2757 #ifdef HAVE_ON_EXIT
2758 on_exit (cleanup, 0);
2759 #endif
2760 #endif
2761
2762 scm_stand_in_procs = SCM_EOL;
2763 scm_permobjs = SCM_EOL;
2764 scm_protects = scm_c_make_hash_table (31);
2765 scm_gc_registered_roots = scm_c_make_hash_table (31);
2766
2767 return 0;
2768 }
2769
2770 \f
2771
2772 SCM scm_after_gc_hook;
2773
2774 static SCM gc_async;
2775
2776 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2777 * is run after the gc, as soon as the asynchronous events are handled by the
2778 * evaluator.
2779 */
2780 static SCM
2781 gc_async_thunk (void)
2782 {
2783 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2784 return SCM_UNSPECIFIED;
2785 }
2786
2787
2788 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2789 * the garbage collection. The only purpose of this function is to mark the
2790 * gc_async (which will eventually lead to the execution of the
2791 * gc_async_thunk).
2792 */
2793 static void *
2794 mark_gc_async (void * hook_data SCM_UNUSED,
2795 void *func_data SCM_UNUSED,
2796 void *data SCM_UNUSED)
2797 {
2798 /* If cell access debugging is enabled, the user may choose to perform
2799 * additional garbage collections after an arbitrary number of cell
2800 * accesses. We don't want the scheme level after-gc-hook to be performed
2801 * for each of these garbage collections for the following reason: The
2802 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
2803 * after-gc-hook was performed with every gc, and if the gc was performed
2804 * after a very small number of cell accesses, then the number of cell
2805 * accesses during the execution of the after-gc-hook will suffice to cause
2806 * the execution of the next gc. Then, guile would keep executing the
2807 * after-gc-hook over and over again, and would never come to do other
2808 * things.
2809 *
2810 * To overcome this problem, if cell access debugging with additional
2811 * garbage collections is enabled, the after-gc-hook is never run by the
2812 * garbage collector. When running guile with cell access debugging, if
2813 * execution of the after-gc-hook is desired, it is necessary to run
2814 * the hook explicitly from the user code. This has the effect that, from
2815 * the scheme level point of view, garbage collection seems to be
2816 * performed much less frequently than it actually is. Obviously,
2817 * this will not work for code that depends on a fixed one to one
2818 * relationship between the execution counts of the C level garbage
2819 * collection hooks and the execution count of the scheme level
2820 * after-gc-hook.
2821 */
2822 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2823 if (debug_cells_gc_interval == 0)
2824 scm_system_async_mark (gc_async);
2825 #else
2826 scm_system_async_mark (gc_async);
2827 #endif
2828
2829 return NULL;
2830 }
2831
2832
2833 void
2834 scm_init_gc ()
2835 {
2836 SCM after_gc_thunk;
2837
2838 scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
2839 scm_c_define ("after-gc-hook", scm_after_gc_hook);
2840
2841 after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
2842 gc_async_thunk);
2843 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
2844
2845 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2846
2847 #ifndef SCM_MAGIC_SNARFER
2848 #include "libguile/gc.x"
2849 #endif
2850 }
2851
2852 #endif /*MARK_DEPENDENCIES*/
2853
2854 /*
2855 Local Variables:
2856 c-file-style: "gnu"
2857 End:
2858 */