/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */


/* #define DEBUGINFO */

/* SECTION: This code is compiled once.
 */

#ifndef MARK_DEPENDENCIES

\f
#include <stdio.h>
#include <errno.h>
#include <string.h>

#ifdef __ia64__
#include <ucontext.h>
extern unsigned long * __libc_ia64_register_backing_store_base;
#endif

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f

#define CELL_P(x) (SCM_ITAG3 (x) == scm_tc3_cons)

unsigned int scm_gc_running_p = 0;

\f

#if (SCM_DEBUG_CELL_ACCESSES == 1)

/* Set this to a non-zero value if every cell that is accessed shall be checked:
 */
unsigned int scm_debug_cell_accesses_p = 1;

/* Set this to 0 if no additional GCs shall be performed, otherwise set it to
 * the number of cell accesses after which a GC shall be called.
 */
static unsigned int debug_cells_gc_interval = 0;


/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves determining whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment and whether the cell
 * pointed to is not a free cell.  Further, additional garbage collections may
 * get executed after a user defined number of cell accesses.  This helps to
 * find places in the C code where references are dropped for extremely short
 * periods.
 */
void
scm_assert_cell_valid (SCM cell)
{
  static unsigned int already_running = 0;

  if (scm_debug_cell_accesses_p && !already_running)
    {
      already_running = 1;  /* set to avoid recursion */

      if (!scm_cellp (cell))
        {
          fprintf (stderr, "scm_assert_cell_valid: Not a cell object: 0x%lx\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }
      else if (!scm_gc_running_p)
        {
          /* Dirk::FIXME:: During garbage collection there occur references to
             free cells.  This is all right during conservative marking, but
             should not happen otherwise (I think).  The case of free cells
             accessed during conservative marking is handled in function
             scm_mark_locations.  However, there still occur accesses to free
             cells during gc.  I don't understand why this happens.  If it is
             a bug and gets fixed, the following test should also work while
             gc is running.
           */
          if (SCM_FREE_CELL_P (cell))
            {
              fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: 0x%lx\n",
                       (unsigned long) SCM_UNPACK (cell));
              abort ();
            }

          /* If desired, perform additional garbage collections after a user
           * defined number of cell accesses.
           */
          if (debug_cells_gc_interval)
            {
              static unsigned int counter = 0;

              if (counter != 0)
                {
                  --counter;
                }
              else
                {
                  counter = debug_cells_gc_interval;
                  scm_igc ("scm_assert_cell_valid");
                }
            }
        }
      already_running = 0;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (SCM_FALSEP (flag)) {
    scm_debug_cell_accesses_p = 0;
  } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
    debug_cells_gc_interval = 0;
    scm_debug_cell_accesses_p = 1;
  } else if (SCM_INUMP (flag)) {
    long int f = SCM_INUM (flag);
    if (f <= 0) SCM_OUT_OF_RANGE (1, flag);
    debug_cells_gc_interval = f;
    scm_debug_cell_accesses_p = 1;
  } else {
    SCM_WRONG_TYPE_ARG (1, flag);
  }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
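
#if 0
/* Usage sketch (not compiled, illustrative only): requesting a debugging
   collection after every 1000 cell accesses from C.  This mirrors calling
   (set-debug-cell-accesses! 1000) from Scheme; SCM_MAKINUM builds an
   immediate integer. */
scm_set_debug_cell_accesses_x (SCM_MAKINUM (1000));
#endif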

#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f

/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which cons cells and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of heap.  If this much heap is
 * allocated initially the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type size_t.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more
 * heap is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */
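
#if 0
/* Illustrative sketch (not compiled): how the growth policies described
   above play out.  `current' and the helper name are hypothetical;
   SCM_EXPHEAP (defined below) yields the increment, i.e. half the current
   size, so each successful expansion multiplies the heap by 1.5
   (e.g. 45000 -> 67500 -> 101250). */
static unsigned long
next_heap_size (unsigned long current, int initial_alloc_succeeded)
{
  if (initial_alloc_succeeded)
    return current + SCM_EXPHEAP (current);  /* grow by factor 1.5 */
  else
    return current + SCM_HEAP_SEG_SIZE;      /* fixed-size fallback steps */
}
#endif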

/*
 * Heap size 45000 and 40% min yield gives quick startup and no extra
 * heap allocation.  Having higher values on min yield may lead to
 * large heaps, especially if a program varies its maximum memory
 * consumption between different freelists.
 */

#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
                                        / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
int scm_default_min_yield_1 = 40;

#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
                                        / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application.
 */
int scm_default_min_yield_2 = 40;

size_t scm_default_max_segment_size = 2097000L;  /* a little less (adm) than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make the heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT / 10)

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
   aligned inner bounds for allocated storage */

#ifdef PROT386
/* in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif /* UNICOS */
#endif /* PROT386 */
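
#if 0
/* Illustrative sketch (not compiled): what CELL_UP/CELL_DN compute on the
   common (non-PROT386, non-_UNICOS) branch.  With 8-byte single cells and
   span 1, address 0x1003 rounds up to 0x1008 and down to 0x1000; CELL_UP
   never moves an already aligned pointer.  The function name is made up. */
static void
alignment_example (void)
{
  char *p = (char *) 0x1003;
  SCM_CELLPTR up = CELL_UP (p, 1);  /* 0x1008: next cell boundary */
  SCM_CELLPTR dn = CELL_DN (p, 1);  /* 0x1000: previous cell boundary */
  (void) up; (void) dn;
}
#endif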

#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)

#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)

\f
/* scm_freelists
 */

typedef struct scm_t_freelist {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  unsigned int cluster_size;
  /* indicates that we should grow the heap instead of collecting
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  unsigned long collected;
  /* number of collected cells during penultimate GC */
  unsigned long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  unsigned long heap_size;
} scm_t_freelist;

SCM scm_freelist = SCM_EOL;
scm_t_freelist scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_t_freelist scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
};

/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no
 * allocation is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0;  /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
static unsigned long t_before_sweep;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_sweep_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");

typedef struct scm_t_heap_seg_data
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_t_freelist *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_t_heap_seg_data;



static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);

typedef enum { return_on_error, abort_on_error } policy_on_error;
static void alloc_some_heap (scm_t_freelist *, policy_on_error);


#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))

/* mark space allocation */

typedef struct scm_t_mark_space
{
  scm_t_c_bvec_limb *bvec_space;
  struct scm_t_mark_space *next;
} scm_t_mark_space;

static scm_t_mark_space *current_mark_space;
static scm_t_mark_space **mark_space_ptr;
static ptrdiff_t current_mark_space_offset;
static scm_t_mark_space *mark_space_head;

static scm_t_c_bvec_limb *
get_bvec ()
#define FUNC_NAME "get_bvec"
{
  scm_t_c_bvec_limb *res;

  if (!current_mark_space)
    {
      SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
      if (!current_mark_space)
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space->bvec_space = NULL;
      current_mark_space->next = NULL;

      *mark_space_ptr = current_mark_space;
      mark_space_ptr = &(current_mark_space->next);

      return get_bvec ();
    }

  if (!(current_mark_space->bvec_space))
    {
      SCM_SYSCALL (current_mark_space->bvec_space =
                   (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
      if (!(current_mark_space->bvec_space))
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space_offset = 0;

      return get_bvec ();
    }

  if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
    {
      current_mark_space = NULL;

      return get_bvec ();
    }

  res = current_mark_space->bvec_space + current_mark_space_offset;
  current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;

  return res;
}
#undef FUNC_NAME


static void
clear_mark_space ()
{
  scm_t_mark_space *ms;

  for (ms = mark_space_head; ms; ms = ms->next)
    memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
}


\f
/* Debugging functions.  */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

static long int heap_segment (SCM obj);  /* forward decl: non-debugging func */

static void
map_free_list (scm_t_freelist *master, SCM freelist)
{
  long last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    {
      long int this_seg = heap_segment (f);

      if (this_seg == -1)
        {
          fprintf (stderr,
                   "map_free_list: can't find segment containing cell 0x%lx\n",
                   (unsigned long int) SCM_UNPACK (f));
          abort ();
        }
      else if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
                     (long) count, master->span, (long) last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
             (long) count, master->span, (long) last_seg);
}

SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{map-free-list} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  size_t i;

  fprintf (stderr, "%ld segments total (%d:%ld",
           (long) scm_n_heap_segs,
           scm_heap_table[0].span,
           (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));

  for (i = 1; i != scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%ld",
             scm_heap_table[i].span,
             (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
  fprintf (stderr, ")\n");
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

static long last_cluster;
static long last_size;

static long
free_list_length (char *title, long i, SCM freelist)
{
  SCM ls;
  long n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%ld\n", (long) last_size);
          else
            fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %ld", title, (long) i);
      else
        fprintf (stderr, "%s\t%ld\n", title, (long) n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}

static void
free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
{
  SCM clusters;
  long i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%ld\n", (long) last_size);
  else
    fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
  fprintf (stderr, "\ntotal %ld objects\n\n", (long) n);
}

SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{free-list-length} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif /* defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST) */

#ifdef GUILE_DEBUG_FREELIST

/* Non-zero if freelist debugging is in effect.  Set this via
   `gc-set-debug-check-freelist!'. */
static int scm_debug_check_freelist = 0;

/* Number of calls to SCM_NEWCELL since startup.  */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  */
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  long i = 0;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
    if (!SCM_FREE_CELL_P (f))
      {
        fprintf (stderr, "Bad cell in freelist on newcell %ld: %ld'th elt\n",
                 (long) scm_newcell_count, (long) i);
        abort ();
      }
}

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#t}, check the freelist for consistency\n"
            "on each cell allocation.  This procedure only exists when the\n"
            "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* [cmm] I did a double-take when I read this code the first time.
     well, FWIW. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif /* GUILE_DEBUG_FREELIST */

\f

static unsigned long
master_cells_allocated (scm_t_freelist *master)
{
  /* the '- 1' below is to ignore the cluster spine cells. */
  long objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}

static unsigned long
freelist_length (SCM freelist)
{
  long n;
  for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
    ++n;
  return n;
}

static unsigned long
compute_cells_allocated ()
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}

/* {Scheme Interface to GC}
 */

SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  long i;
  long n;
  SCM heap_segs;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                       scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                       scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                       scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                       scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                       scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                       scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                       scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                       scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
                       scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
                       scm_cons (sym_heap_segments, heap_segs),
                       SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
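
#if 0
/* Usage sketch (not compiled): pulling one figure out of the alist that
   gc-stats returns, from C.  scm_assq, scm_str2symbol and scm_num2ulong
   are existing libguile calls; the helper name is illustrative. */
static unsigned long
current_cell_heap_size (void)
{
  SCM stats = scm_gc_stats ();
  SCM pair = scm_assq (scm_str2symbol ("cell-heap-size"), stats);
  return scm_num2ulong (SCM_CDR (pair), 0, "current_cell_heap_size");
}
#endif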


static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}


static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that, for the lifetime of @var{obj}, is\n"
            "uniquely returned by this function for @var{obj}.")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scans all SCM objects and reclaims for further use those that\n"
            "are no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f
/* {C Interface For When GC is Triggered}
 */

static void
adjust_min_yield (scm_t_freelist *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GCs.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                    - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %ld, delta = %ld\n",
               (long) scm_cells_allocated,
               (long) delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}
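
/* Worked example for the adjustment above (illustrative numbers): with a
 * total heap of 100000 cells and min_yield_fraction = 40, the target
 * yield is 100000 * 40 / 100 = 40000 cells.  If the larger of the last
 * two total yields was only 30000 cells, delta = 10000 and min_yield
 * rises by that amount, making it more likely that the next sweep
 * schedules heap growth instead of another collection. */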


/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              /* In order to reduce gc frequency, try to allocate a new heap
               * segment first, even if gc might find some free cells.  If we
               * can't obtain a new heap segment, we will try gc later.
               */
              master->grow_heap_p = 0;
              alloc_some_heap (master, return_on_error);
            }
          if (SCM_NULLP (master->clusters))
            {
              /* The heap was not grown, either because it wasn't scheduled to
               * grow, or because there was not enough memory available.  In
               * both cases we have to try gc to get some free cells.
               */
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %ld, ",
                       (long) (scm_cells_allocated
                               + master_cells_allocated (&scm_master_freelist)
                               + master_cells_allocated (&scm_master_freelist2)));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
              if (SCM_NULLP (master->clusters))
                {
                  /* gc could not free any cells.  Now, we _must_ allocate a
                   * new heap segment, because there is no other possibility
                   * to provide a new cell for the caller.
                   */
                  alloc_some_heap (master, abort_on_error);
                }
            }
        }
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}


#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

SCM
scm_alloc_cluster (scm_t_freelist *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif


scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;

#ifdef __ia64__
# define SCM_MARK_BACKING_STORE() do {                                   \
    ucontext_t ctx;                                                      \
    SCM_STACKITEM * top, * bot;                                          \
    getcontext (&ctx);                                                   \
    scm_mark_locations ((SCM_STACKITEM *) &ctx.uc_mcontext,              \
      ((size_t) (sizeof (SCM_STACKITEM) - 1 + sizeof ctx.uc_mcontext)    \
       / sizeof (SCM_STACKITEM)));                                       \
    bot = (SCM_STACKITEM *) __libc_ia64_register_backing_store_base;     \
    top = (SCM_STACKITEM *) ctx.uc_mcontext.sc_ar_bsp;                   \
    scm_mark_locations (bot, top - bot); } while (0)
#else
# define SCM_MARK_BACKING_STORE()
#endif

void
scm_igc (const char *what)
{
  long j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
  /* During the critical section, only the current thread may run. */
  SCM_CRITICAL_SECTION_START;

  if (!scm_stack_base || scm_block_gc)
    {
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Mark objects on the C stack. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      ((size_t) (sizeof (SCM_STACKITEM) - 1 +
                                 sizeof scm_save_regs_gc_mark)
                       / sizeof (SCM_STACKITEM)));

  {
    unsigned long stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }
  SCM_MARK_BACKING_STORE ();

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* mark the registered roots */
  {
    size_t i;
    for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) {
      SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
      for (; !SCM_NULLP (l); l = SCM_CDR (l)) {
        SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
        scm_gc_mark (*p);
      }
    }
  }

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

  SCM_CRITICAL_SECTION_END;
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}

\f

/* {Mark/Sweep}
 */

#define MARK scm_gc_mark
#define FNAME "scm_gc_mark"

#endif /*!MARK_DEPENDENCIES*/

/* Mark an object precisely.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;
  scm_t_bits cell_type;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

/* A simple hack for debugging.  Choose the second branch to get a
   meaningful backtrace for crashes inside the GC.
*/
#if 1
#define goto_gc_mark_loop goto gc_mark_loop
#define goto_gc_mark_nimp goto gc_mark_nimp
#else
#define goto_gc_mark_loop RECURSE(ptr); return
#define goto_gc_mark_nimp RECURSE(ptr); return
#endif

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
  /* We are in debug mode.  Check the ptr exhaustively. */
  if (!scm_cellp (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
  /* In non-debug mode, do at least some cheap testing. */
  if (!CELL_P (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif

#ifndef MARK_DEPENDENCIES

  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  cell_type = SCM_GC_CELL_TYPE (ptr);
  switch (SCM_ITAG7 (cell_type))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto_gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;
    case scm_tc7_pws:
      RECURSE (SCM_SETTER (ptr));
      ptr = SCM_PROCEDURE (ptr);
      goto_gc_mark_loop;
    case scm_tcs_struct:
      {
        /* XXX - use less explicit code. */
        scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_struct;
        scm_t_bits * vtable_data = (scm_t_bits *) word0;
        SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
        long len = SCM_SYMBOL_LENGTH (layout);
        char * fields_desc = SCM_SYMBOL_CHARS (layout);
        scm_t_bits * struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);

        if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
          {
            RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
            RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
          }
        if (len)
          {
            long x;

            for (x = 0; x < len - 2; x += 2, ++struct_data)
              if (fields_desc[x] == 'p')
                RECURSE (SCM_PACK (*struct_data));
            if (fields_desc[x] == 'p')
              {
                if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                  for (x = *struct_data++; x; --x, ++struct_data)
                    RECURSE (SCM_PACK (*struct_data));
                else
                  RECURSE (SCM_PACK (*struct_data));
              }
          }
        /* mark vtable */
        ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
        goto_gc_mark_loop;
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_ENV (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_ENV (ptr);
      goto_gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto_gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
        size_t i = SCM_CCLO_LENGTH (ptr);
        size_t j;
        for (j = 1; j != i; ++j)
          {
            SCM obj = SCM_CCLO_REF (ptr, j);
            if (!SCM_IMP (obj))
              RECURSE (obj);
          }
        ptr = SCM_CCLO_REF (ptr, 0);
        goto_gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_wvect:
      SCM_SET_WVECT_GC_CHAIN (ptr, scm_weak_vectors);
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          long x;
          long len;
          int weak_keys;
          int weak_values;

          len = SCM_VECTOR_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither
                   * of the key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist
                   * containing freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    RECURSE (SCM_CAR (kvpair));
                  if (!weak_values)
                    RECURSE (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                RECURSE (alist);
            }
        }
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto_gc_mark_loop;
    case scm_tc7_variable:
      ptr = SCM_CELL_OBJECT_1 (ptr);
      goto_gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
      if (!(i < scm_numptob))
        SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
      if (SCM_PTAB_ENTRY (ptr))
        RECURSE (SCM_FILENAME (ptr));
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto_gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* We have detected a free cell.  This can happen if non-object data
           * on the C stack points into guile's heap and is scanned during
           * conservative marking.  */
          break;
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
          if (!(i < scm_numsmob))
            SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto_gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME

#ifndef MARK_DEPENDENCIES

#undef MARK
#undef FNAME

/* And here we define `scm_gc_mark_dependencies', by including this
 * same file in itself.
 */
#define MARK scm_gc_mark_dependencies
#define FNAME "scm_gc_mark_dependencies"
#define MARK_DEPENDENCIES
#include "gc.c"
#undef MARK_DEPENDENCIES
#undef MARK
#undef FNAME


/* Determine whether the given value does actually represent a cell in some
 * heap segment.  If this is the case, the number of the heap segment is
 * returned.  Otherwise, -1 is returned.  Binary search is used in order to
 * determine the heap segment that contains the cell.  */
/* FIXME: To be used within scm_mark_locations and scm_cellp, this function
 * should be inlined.  */
static long int
heap_segment (SCM obj)
{
  if (!CELL_P (obj))
    return -1;
  else
    {
      SCM_CELLPTR ptr = SCM2PTR (obj);
      unsigned long int i = 0;
      unsigned long int j = scm_n_heap_segs - 1;

      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
        return -1;
      else if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
        return -1;
      else
        {
          while (i < j)
            {
              if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[1]))
                {
                  break;
                }
              else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                {
                  i = j;
                  break;
                }
              else
                {
                  unsigned long int k = (i + j) / 2;

                  if (k == i)
                    return -1;
                  else if (SCM_PTR_LT (ptr, scm_heap_table[k].bounds[1]))
                    {
                      j = k;
                      ++i;
                      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
                        return -1;
                    }
                  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                    {
                      i = k;
                      --j;
                      if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
                        return -1;
                    }
                }
            }

          if (!DOUBLECELL_ALIGNED_P (obj) && scm_heap_table[i].span == 2)
            return -1;
          else if (SCM_GC_IN_CARD_HEADERP (ptr))
            return -1;
          else
            return i;
        }
    }
}


/* Mark a region conservatively */
void
scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = * (SCM *) &x[m];
      long int segment = heap_segment (obj);
      if (segment >= 0)
        scm_gc_mark (obj);
    }
}


/* The function scm_cellp determines whether an SCM value can be regarded as a
 * pointer to a cell on the heap.
 */
int
scm_cellp (SCM value)
{
  long int segment = heap_segment (value);
  return (segment >= 0);
}


static void
gc_sweep_freelist_start (scm_t_freelist *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}

static void
gc_sweep_freelist_finish (scm_t_freelist *freelist)
{
  long collected;
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      SCM c = freelist->cells;
      SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
      SCM_SET_CELL_WORD_1 (c, SCM_EOL);
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that too low a yield is compensated for by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}

#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
             CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
             : nxt__); \
  } while (0)

void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_t_freelist *freelist;
  register unsigned long m;
  register int span;
  size_t i;
  size_t seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register long left_to_collect;
      register size_t j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist.  */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr;

          if (SCM_GC_IN_CARD_HEADERP (ptr))
            {
              SCM_CELLPTR nxt;

              /* cheat here */
              nxt = ptr;
              NEXT_DATA_CELL (nxt, span);
              j += span;

              ptr = nxt - span;
              continue;
            }

          scmptr = PTR2SCM (ptr);

          if (SCM_GCMARKP (scmptr))
            continue;

          switch (SCM_TYP7 (scmptr))
            {
            case scm_tcs_struct:
              {
                /* Structs need to be freed in a special order.
                 * This is handled by GC C hooks in struct.c.
                 */
                SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
                scm_structs_to_free = scmptr;
              }
              continue;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              break;
            case scm_tc7_wvect:
            case scm_tc7_vector:
              {
                unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += length * sizeof (scm_t_bits);
                    scm_must_free (SCM_VECTOR_BASE (scmptr));
                  }
                break;
              }
#ifdef CCLO
            case scm_tc7_cclo:
              m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
              scm_must_free (SCM_CCLO_BASE (scmptr));
              break;
#endif
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              {
                unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
                    scm_must_free (SCM_BITVECTOR_BASE (scmptr));
                  }
              }
              break;
            case scm_tc7_byvect:
            case scm_tc7_ivect:
            case scm_tc7_uvect:
            case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
#endif
            case scm_tc7_fvect:
            case scm_tc7_dvect:
            case scm_tc7_cvect:
              m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
              scm_must_free (SCM_UVECTOR_BASE (scmptr));
              break;
#endif
            case scm_tc7_string:
              m += SCM_STRING_LENGTH (scmptr) + 1;
              scm_must_free (SCM_STRING_CHARS (scmptr));
              break;
            case scm_tc7_symbol:
              m += SCM_SYMBOL_LENGTH (scmptr) + 1;
              scm_must_free (SCM_SYMBOL_CHARS (scmptr));
              break;
            case scm_tc7_variable:
              break;
            case scm_tcs_subrs:
              /* the various "subrs" (primitives) are never freed */
              continue;
            case scm_tc7_port:
              if (SCM_OPENP (scmptr))
                {
                  int k = SCM_PTOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
                  if (!(k < scm_numptob))
                    SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free rather than
                     scm_ptobs[k].close: .close is for an explicit
                     close-port by the user.  */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_CLR_PORT_OPEN_FLAG (scmptr);
                }
              break;
            case scm_tc7_smob:
              switch (SCM_TYP16 (scmptr))
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  scm_must_free (SCM_BDIGITS (scmptr));
                  break;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  m += sizeof (scm_t_complex);
                  scm_must_free (SCM_COMPLEX_MEM (scmptr));
                  break;
                default:
                  {
                    int k;
                    k = SCM_SMOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
                    if (!(k < scm_numsmob))
                      SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
                    if (scm_smobs[k].free)
                      m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
            default:
              SCM_MISC_ERROR ("unknown type", SCM_EOL);
            }

          if (!--left_to_collect)
            {
              SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object.  */
              SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
              SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }
        }

#ifdef GC_FREE_SEGMENTS
      /* NOTE: this block is normally compiled out.  As written it tests a
         free-cell count `n' that is not defined in this revision; enabling
         GC_FREE_SEGMENTS would require counting the cells actually freed
         in the segment above.  */
      if (n == seg_size)
        {
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--;  /* We need to scan the segment just moved.  */
        }
      else
#endif /* ifdef GC_FREE_SEGMENTS */
        {
          /* Update the real freelist pointer to point to the head of
             the list of free cells we've built for this segment.  */
          freelist->cells = nfreelist;
          freelist->left_to_collect = left_to_collect;
        }

#ifdef GUILE_DEBUG_FREELIST
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead.  */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;

  if (scm_mallocated < m)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME


\f
/* {Front end to malloc}
 *
 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
 * scm_done_free
 *
 * These functions provide services comparable to malloc, realloc, and
 * free.  They should be used when allocating memory that will be under
 * control of the garbage collector, i.e., if the memory may be freed
 * during garbage collection.
 */

/* scm_must_malloc
 * Return newly malloced storage or throw an error.
 *
 * The parameter WHAT is a string for error reporting.
 * If the threshold scm_mtrigger would be exceeded by this
 * allocation, or if the first call to malloc fails,
 * garbage collect -- on the presumption that some objects
 * using malloced storage may be collected.
 *
 * The limit scm_mtrigger may be raised by this allocation.
 */
void *
scm_must_malloc (size_t size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  if (nm < size)
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_register (ptr, what);
#endif
          return ptr;
        }
    }

  scm_igc (what);

  nm = scm_mallocated + size;

  if (nm < size)
    /* The byte count of allocated objects has overflowed.  This is
       probably because you forgot to report the correct size of freed
       memory in some of your smob free methods. */
    abort ();

  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;

      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        unsigned long old_trigger = scm_mtrigger;
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
        if (scm_mtrigger < old_trigger)
          abort ();
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  scm_memory_error (what);
}
1897
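/* Example (editorial sketch, not part of the original file): a typical
 * pattern for pairing scm_must_malloc with scm_must_free.  The `image'
 * type and its fields are invented for illustration; the block is kept
 * inside #if 0 so it does not affect compilation.
 */
#if 0
struct image
{
  size_t size;
  unsigned char *pixels;
};

static struct image *
make_image (size_t size)
{
  /* Both allocations are added to the GC's byte count and may trigger
     a collection via scm_mtrigger.  */
  struct image *img
    = (struct image *) scm_must_malloc (sizeof (struct image), "image");
  img->size = size;
  img->pixels = (unsigned char *) scm_must_malloc (size, "image pixels");
  return img;
}

static void
free_image (struct image *img)
{
  /* scm_must_free signals an error when handed NULL, so free only what
     was actually allocated.  */
  scm_must_free (img->pixels);
  scm_must_free (img);
}
#endif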
1898
1899 /* scm_must_realloc
1900 * is similar to scm_must_malloc.
1901 */
1902 void *
1903 scm_must_realloc (void *where,
1904 size_t old_size,
1905 size_t size,
1906 const char *what)
1907 {
1908 void *ptr;
1909 unsigned long nm;
1910
1911 if (size <= old_size)
1912 return where;
1913
1914 nm = scm_mallocated + size - old_size;
1915
1916 if (nm < (size - old_size))
1917 /* The byte count of allocated objects has overflowed. This is
1918 probably because you forgot to report the correct size of freed
1919 memory in some of your smob free methods. */
1920 abort ();
1921
1922 if (nm <= scm_mtrigger)
1923 {
1924 SCM_SYSCALL (ptr = realloc (where, size));
1925 if (NULL != ptr)
1926 {
1927 scm_mallocated = nm;
1928 #ifdef GUILE_DEBUG_MALLOC
1929 scm_malloc_reregister (where, ptr, what);
1930 #endif
1931 return ptr;
1932 }
1933 }
1934
1935 scm_igc (what);
1936
1937 nm = scm_mallocated + size - old_size;
1938
1939 if (nm < (size - old_size))
1940 /* The byte count of allocated objects has overflowed. This is
1941 probably because you forgot to report the correct size of freed
1942 memory in some of your smob free methods. */
1943 abort ();
1944
1945 SCM_SYSCALL (ptr = realloc (where, size));
1946 if (NULL != ptr)
1947 {
1948 scm_mallocated = nm;
1949 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1950 unsigned long old_trigger = scm_mtrigger;
1951 if (nm > scm_mtrigger)
1952 scm_mtrigger = nm + nm / 2;
1953 else
1954 scm_mtrigger += scm_mtrigger / 2;
1955 if (scm_mtrigger < old_trigger)
1956 abort ();
1957 }
1958 #ifdef GUILE_DEBUG_MALLOC
1959 scm_malloc_reregister (where, ptr, what);
1960 #endif
1961 return ptr;
1962 }
1963
1964 scm_memory_error (what);
1965 }
1966
1967 char *
1968 scm_must_strndup (const char *str, size_t length)
1969 {
1970 char * dst = scm_must_malloc (length + 1, "scm_must_strndup");
1971 memcpy (dst, str, length);
1972 dst[length] = 0;
1973 return dst;
1974 }
1975
1976 char *
1977 scm_must_strdup (const char *str)
1978 {
1979 return scm_must_strndup (str, strlen (str));
1980 }
1981
1982 void
1983 scm_must_free (void *obj)
1984 #define FUNC_NAME "scm_must_free"
1985 {
1986 #ifdef GUILE_DEBUG_MALLOC
1987 scm_malloc_unregister (obj);
1988 #endif
1989 if (obj)
1990 free (obj);
1991 else
1992 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
1993 }
1994 #undef FUNC_NAME
1995
1996
1997 /* Announce that there has been some malloc done that will be freed
1998 * during gc. A typical use is for a smob that uses some malloced
1999 * memory but can not get it from scm_must_malloc (for whatever
2000 * reason). When a new object of this smob is created you call
2001 * scm_done_malloc with the size of the object. When your smob free
2002 * function is called, be sure to include this size in the return
2003 * value.
2004 *
2005 * If you can't actually free the memory in the smob free function,
2006 * for whatever reason (like reference counting), you still can (and
2007 * should) report the amount of memory freed when you actually free it.
2008 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2009 * eh? Or even better, call scm_done_free. */
2010
2011 void
2012 scm_done_malloc (long size)
2013 {
2014 if (size < 0) {
2015 if (scm_mallocated < (unsigned long) -size)
2016 /* The byte count of allocated objects has underflowed. This is
2017 probably because you forgot to report the sizes of objects you
2018 have allocated, by calling scm_done_malloc or some such. When
2019 the GC freed them, it subtracted their size from
2020 scm_mallocated, which underflowed. */
2021 abort ();
2022 } else {
2023 unsigned long nm = scm_mallocated + size;
2024 if (nm < size)
2025 /* The byte count of allocated objects has overflowed. This is
2026 probably because you forgot to report the correct size of freed
2027 memory in some of your smob free methods. */
2028 abort ();
2029 }
2030
2031 scm_mallocated += size;
2032
2033 if (scm_mallocated > scm_mtrigger)
2034 {
2035 scm_igc ("foreign mallocs");
2036 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2037 {
2038 if (scm_mallocated > scm_mtrigger)
2039 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2040 else
2041 scm_mtrigger += scm_mtrigger / 2;
2042 }
2043 }
2044 }
2045
2046 void
2047 scm_done_free (long size)
2048 {
2049 if (size >= 0) {
2050 if (scm_mallocated < size)
2051 /* The byte count of allocated objects has underflowed. This is
2052 probably because you forgot to report the sizes of objects you
2053 have allocated, by calling scm_done_malloc or some such. When
2054 the GC freed them, it subtracted their size from
2055 scm_mallocated, which underflowed. */
2056 abort ();
2057 } else {
2058 unsigned long nm = scm_mallocated - size;
2059 if (nm < (unsigned long) -size)
2060 /* The byte count of allocated objects has overflowed. This is
2061 probably because you forgot to report the correct size of freed
2062 memory in some of your smob free methods. */
2063 abort ();
2064 }
2065
2066 scm_mallocated -= size;
2067 }
2068
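/* Example (editorial sketch): a hypothetical smob whose payload comes
 * from plain malloc, e.g. because a foreign library hands it over.  The
 * bytes are announced with scm_done_malloc at creation time, and the
 * same count is returned from the smob free function so scm_mallocated
 * stays balanced.  `struct buffer', `buffer_tag' and the function names
 * are invented; the block is kept inside #if 0 so it does not affect
 * compilation.
 */
#if 0
struct buffer
{
  size_t len;
  char *data;
};

static scm_t_bits buffer_tag;

static SCM
make_buffer (char *foreign_data, size_t len)
{
  struct buffer *b = (struct buffer *) malloc (sizeof (struct buffer));
  if (b == NULL)
    scm_memory_error ("make-buffer");
  b->len = len;
  b->data = foreign_data;
  /* Tell the GC about memory it did not allocate itself.  */
  scm_done_malloc ((long) (sizeof (struct buffer) + len));
  SCM_RETURN_NEWSMOB (buffer_tag, b);
}

static size_t
free_buffer (SCM obj)
{
  struct buffer *b = (struct buffer *) SCM_SMOB_DATA (obj);
  size_t reported = sizeof (struct buffer) + b->len;
  free (b->data);
  free (b);
  return reported;   /* must match what scm_done_malloc was told */
}
#endif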
2069
2070 \f
2071 /* {Heap Segments}
2072 *
2073 * Each heap segment is an array of objects of a particular size.
2074 * Every segment has an associated (possibly shared) freelist.
2075 * A table of segment records is kept that records the upper and
2076 * lower extents of the segment; this is used during the conservative
2077 * phase of gc to identify probable gc roots (because they point
2078 * into valid segments at reasonable offsets). */
2079
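/* Editorial sketch: given the bounds recorded in scm_heap_table, the
 * conservative root test has roughly this shape (the real scan also
 * checks cell alignment and the card/span layout before marking).
 * `points_into_heap' and `maybe_root' are invented names; the block is
 * kept inside #if 0 so it does not affect compilation.
 */
#if 0
static int
points_into_heap (SCM_CELLPTR maybe_root)
{
  size_t i;
  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], maybe_root)
        && !SCM_PTR_LE (scm_heap_table[i].bounds[1], maybe_root))
      return 1;   /* probable root: points into a valid segment */
  return 0;
}
#endif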
2080 /* scm_expmem
2081 * is true if the first segment was smaller than INIT_HEAP_SEG.
2082 * If scm_expmem is set to one, subsequent segment allocations will
2083 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2084 */
2085 int scm_expmem = 0;
2086
2087 size_t scm_max_segment_size;
2088
2089 /* scm_heap_org
2090 * is the lowest base address of any heap segment.
2091 */
2092 SCM_CELLPTR scm_heap_org;
2093
2094 scm_t_heap_seg_data * scm_heap_table = 0;
2095 static size_t heap_segment_table_size = 0;
2096 size_t scm_n_heap_segs = 0;
2097
2098 /* init_heap_seg
2099 * initializes a new heap segment and returns its size in bytes, or 0 on failure.
2100 *
2101 * The segment origin and segment size in bytes are input parameters.
2102 * The freelist is both input and output.
2103 *
2104 * This function presumes that the scm_heap_table has already been expanded
2105 * to accommodate a new segment record and that the markbit space was reserved
2106 * for all the cards in this segment.
2107 */
2108
2109 #define INIT_CARD(card, span) \
2110 do { \
2111 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2112 if ((span) == 2) \
2113 SCM_GC_SET_CARD_DOUBLECELL (card); \
2114 } while (0)
2115
2116 static size_t
2117 init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
2118 {
2119 register SCM_CELLPTR ptr;
2120 SCM_CELLPTR seg_end;
2121 size_t new_seg_index;
2122 ptrdiff_t n_new_cells;
2123 int span = freelist->span;
2124
2125 if (seg_org == NULL)
2126 return 0;
2127
2128 /* Align the begin ptr up.
2129 */
2130 ptr = SCM_GC_CARD_UP (seg_org);
2131
2132 /* Compute the ceiling on valid object pointers w/in this segment.
2133 */
2134 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
2135
2136 /* Find the right place and insert the segment record.
2137 */
2138 new_seg_index = 0;
2139 while (new_seg_index < scm_n_heap_segs
2140 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org))
2141 new_seg_index++;
2142
2143 {
2144 int i;
2145 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2146 scm_heap_table[i] = scm_heap_table[i - 1];
2147 }
2148
2149 ++scm_n_heap_segs;
2150
2151 scm_heap_table[new_seg_index].span = span;
2152 scm_heap_table[new_seg_index].freelist = freelist;
2153 scm_heap_table[new_seg_index].bounds[0] = ptr;
2154 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2155
2156 /*n_new_cells*/
2157 n_new_cells = seg_end - ptr;
2158
2159 freelist->heap_size += n_new_cells;
2160
2161 /* Partition objects in this segment into clusters */
2162 {
2163 SCM clusters;
2164 SCM *clusterp = &clusters;
2165
2166 NEXT_DATA_CELL (ptr, span);
2167 while (ptr < seg_end)
2168 {
2169 scm_cell *nxt = ptr;
2170 scm_cell *prv = NULL;
2171 scm_cell *last_card = NULL;
2172 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2173 NEXT_DATA_CELL(nxt, span);
2174
2175 /* Allocate cluster spine
2176 */
2177 *clusterp = PTR2SCM (ptr);
2178 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
2179 clusterp = SCM_CDRLOC (*clusterp);
2180 ptr = nxt;
2181
2182 while (n_data_cells--)
2183 {
2184 scm_cell *card = SCM_GC_CELL_CARD (ptr);
2185 SCM scmptr = PTR2SCM (ptr);
2186 nxt = ptr;
2187 NEXT_DATA_CELL (nxt, span);
2188 prv = ptr;
2189
2190 if (card != last_card)
2191 {
2192 INIT_CARD (card, span);
2193 last_card = card;
2194 }
2195
2196 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2197 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
2198
2199 ptr = nxt;
2200 }
2201
2202 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
2203 }
2204
2205 /* sanity check */
2206 {
2207 scm_cell *ref = seg_end;
2208 NEXT_DATA_CELL (ref, span);
2209 if (ref != ptr)
2210 /* [cmm] looks like the segment size doesn't divide cleanly by
2211 cluster size. bad cmm! */
2212 abort();
2213 }
2214
2215 /* Patch up the last cluster pointer in the segment
2216 * to join it to the input freelist.
2217 */
2218 *clusterp = freelist->clusters;
2219 freelist->clusters = clusters;
2220 }
2221
2222 #ifdef DEBUGINFO
2223 fprintf (stderr, "H");
2224 #endif
2225 return size;
2226 }
2227
2228 static size_t
2229 round_to_cluster_size (scm_t_freelist *freelist, size_t len)
2230 {
2231 size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2232
2233 return
2234 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2235 + ALIGNMENT_SLACK (freelist);
2236 }
2237
2238 static void
2239 alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
2240 #define FUNC_NAME "alloc_some_heap"
2241 {
2242 SCM_CELLPTR ptr;
2243 size_t len;
2244
2245 if (scm_gc_heap_lock)
2246 {
2247 /* Critical code sections (such as the garbage collector) aren't
2248 * supposed to add heap segments.
2249 */
2250 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
2251 abort ();
2252 }
2253
2254 if (scm_n_heap_segs == heap_segment_table_size)
2255 {
2256 /* We have to expand the heap segment table to have room for the new
2257 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2258 * init_heap_seg only if the allocation of the segment itself succeeds.
2259 */
2260 size_t new_table_size = scm_n_heap_segs + 1;
2261 size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
2262 scm_t_heap_seg_data *new_heap_table;
2263
2264 SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
2265 realloc ((char *)scm_heap_table, size)));
2266 if (!new_heap_table)
2267 {
2268 if (error_policy == abort_on_error)
2269 {
2270 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2271 abort ();
2272 }
2273 else
2274 {
2275 return;
2276 }
2277 }
2278 else
2279 {
2280 scm_heap_table = new_heap_table;
2281 heap_segment_table_size = new_table_size;
2282 }
2283 }
2284
2285 /* Pick a size for the new heap segment.
2286 * The rule for picking the size of a segment is explained in
2287 * gc.h
2288 */
2289 {
2290 /* Ensure that the new segment is predicted to be large enough.
2291 *
2292 * New yield should at least equal GC fraction of new heap size, i.e.
2293 *
2294 * y + dh > f * (h + dh)
2295 *
2296 * y : yield
2297 * f : min yield fraction
2298 * h : heap size
2299 * dh : size of new heap segment
2300 *
2301 * This gives dh > (f * h - y) / (1 - f); with f expressed as a percentage,
2302 * the code below computes (f * h - 100 * y) / (99 - f), rounding up slightly.  */
2303 int f = freelist->min_yield_fraction;
2304 unsigned long h = SCM_HEAP_SIZE;
2305 size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2306 len = SCM_EXPHEAP (freelist->heap_size);
2307 #ifdef DEBUGINFO
2308 fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
2309 #endif
2310 if (len < min_cells)
2311 len = min_cells + freelist->cluster_size;
2312 len *= sizeof (scm_cell);
2313 /* force new sampling */
2314 freelist->collected = LONG_MAX;
2315 }
2316
2317 if (len > scm_max_segment_size)
2318 len = scm_max_segment_size;
2319
2320 {
2321 size_t smallest;
2322
2323 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2324
2325 if (len < smallest)
2326 len = smallest;
2327
2328 /* Allocate with decaying ambition. */
2329 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2330 && (len >= smallest))
2331 {
2332 size_t rounded_len = round_to_cluster_size (freelist, len);
2333 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2334 if (ptr)
2335 {
2336 init_heap_seg (ptr, rounded_len, freelist);
2337 return;
2338 }
2339 len /= 2;
2340 }
2341 }
2342
2343 if (error_policy == abort_on_error)
2344 {
2345 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2346 abort ();
2347 }
2348 }
2349 #undef FUNC_NAME
2350
2351 \f
2352 /* {GC Protection Helper Functions}
2353 */
2354
2355
2356 /*
2357 * If within a function you need to protect one or more scheme objects from
2358 * garbage collection, pass them as parameters to one of the
2359 * scm_remember_upto_here* functions below. These functions don't do
2360 * anything, but since the compiler does not know that they are actually
2361 * no-ops, it will generate code that calls these functions with the given
2362 * parameters. Therefore, you can be sure that the compiler will keep those
2363 * scheme values alive (on the stack or in a register) up to the point where
2364 * scm_remember_upto_here* is called. In other words, place the call to
2365 * scm_remember_upto_here* _after_ the last code in your function that
2366 * depends on the scheme object's existence.
2367 *
2368 * Example: We want to make sure that the string object str does not get
2369 * garbage collected during the execution of 'some_function' in the code
2370 * below, because otherwise the characters belonging to str would be freed and
2371 * 'some_function' might access freed memory. To make sure that the compiler
2372 * keeps str alive on the stack or in a register such that it is visible to
2373 * the conservative gc we add the call to scm_remember_upto_here_1 _after_ the
2374 * call to 'some_function'. Note that this would not be necessary if str were
2375 * used anyway after the call to 'some_function'.
2376 * char *chars = SCM_STRING_CHARS (str);
2377 * some_function (chars);
2378 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2379 */
2380
2381 void
2382 scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
2383 {
2384 /* Empty. Protects a single object from garbage collection. */
2385 }
2386
2387 void
2388 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
2389 {
2390 /* Empty. Protects two objects from garbage collection. */
2391 }
2392
2393 void
2394 scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
2395 {
2396 /* Empty. Protects any number of objects from garbage collection. */
2397 }
2398
2399 /*
2400 These crazy functions prevent garbage collection
2401 of the arguments after the first argument by
2402 ensuring they remain live throughout the
2403 function: they appear in its final statement,
2404 so the compiler must keep them around.
2405 It'd be better to have a nice compiler hint to
2406 aid the conservative stack-scanning GC. --03/09/00 gjb */
2407 SCM
2408 scm_return_first (SCM elt, ...)
2409 {
2410 return elt;
2411 }
2412
2413 int
2414 scm_return_first_int (int i, ...)
2415 {
2416 return i;
2417 }
2418
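/* Editorial sketch: a typical use of scm_return_first_int.  The raw
 * pointer obtained from SCM_STRING_CHARS is only valid while `str' is
 * alive, so `str' is passed as a trailing argument to keep it visible
 * to the conservative GC until the very end.  `first_byte' is an
 * invented name; the block is kept inside #if 0 so it does not affect
 * compilation.
 */
#if 0
static int
first_byte (SCM str)
{
  unsigned char *p = (unsigned char *) SCM_STRING_CHARS (str);
  /* str is an argument of the returning call, hence still live here.  */
  return scm_return_first_int (p[0], str);
}
#endif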
2419
2420 SCM
2421 scm_permanent_object (SCM obj)
2422 {
2423 SCM_REDEFER_INTS;
2424 scm_permobjs = scm_cons (obj, scm_permobjs);
2425 SCM_REALLOW_INTS;
2426 return obj;
2427 }
2428
2429
2430 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2431 other references are dropped, until the object is unprotected by calling
2432 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
2433 i.e., it is possible to protect the same object several times, but it is
2434 necessary to unprotect the object the same number of times to actually get
2435 the object unprotected. It is an error to unprotect an object more often
2436 than it has been protected before. The function scm_gc_protect_object
2437 returns OBJ.
2438 */
2439
2440 /* Implementation note: For every object X, there is a counter which
2441 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
2442 */
2443
2444 SCM
2445 scm_gc_protect_object (SCM obj)
2446 {
2447 SCM handle;
2448
2449 /* This critical section barrier will be replaced by a mutex. */
2450 SCM_REDEFER_INTS;
2451
2452 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2453 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2454
2455 SCM_REALLOW_INTS;
2456
2457 return obj;
2458 }
2459
2460
2461 /* Remove any protection for OBJ established by a prior call to
2462 scm_gc_protect_object. This function returns OBJ.
2463
2464 See scm_gc_protect_object for more information. */
2465 SCM
2466 scm_gc_unprotect_object (SCM obj)
2467 {
2468 SCM handle;
2469
2470 /* This critical section barrier will be replaced by a mutex. */
2471 SCM_REDEFER_INTS;
2472
2473 handle = scm_hashq_get_handle (scm_protects, obj);
2474
2475 if (SCM_FALSEP (handle))
2476 {
2477 fprintf (stderr, "scm_gc_unprotect_object called on unprotected object\n");
2478 abort ();
2479 }
2480 else
2481 {
2482 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2483 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2484 scm_hashq_remove_x (scm_protects, obj);
2485 else
2486 SCM_SETCDR (handle, count);
2487 }
2488
2489 SCM_REALLOW_INTS;
2490
2491 return obj;
2492 }
2493
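/* Editorial sketch: protecting an object that is referenced only from a
 * C global.  Protection counts nest, so each protect is paired with
 * exactly one unprotect.  `current_handler' and `set_handler' are
 * invented names, and current_handler is assumed to have been
 * initialized to SCM_BOOL_F; the block is kept inside #if 0 so it does
 * not affect compilation.
 */
#if 0
static SCM current_handler;   /* assumed initialized to SCM_BOOL_F */

static void
set_handler (SCM proc)
{
  /* Drop the protection on the old handler, then protect the new one.  */
  if (!SCM_FALSEP (current_handler))
    scm_gc_unprotect_object (current_handler);
  current_handler = scm_gc_protect_object (proc);
}
#endif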
2494 void
2495 scm_gc_register_root (SCM *p)
2496 {
2497 SCM handle;
2498 SCM key = scm_long2num ((long) p);
2499
2500 /* This critical section barrier will be replaced by a mutex. */
2501 SCM_REDEFER_INTS;
2502
2503 handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
2504 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2505
2506 SCM_REALLOW_INTS;
2507 }
2508
2509 void
2510 scm_gc_unregister_root (SCM *p)
2511 {
2512 SCM handle;
2513 SCM key = scm_long2num ((long) p);
2514
2515 /* This critical section barrier will be replaced by a mutex. */
2516 SCM_REDEFER_INTS;
2517
2518 handle = scm_hashv_get_handle (scm_gc_registered_roots, key);
2519
2520 if (SCM_FALSEP (handle))
2521 {
2522 fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
2523 abort ();
2524 }
2525 else
2526 {
2527 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2528 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2529 scm_hashv_remove_x (scm_gc_registered_roots, key);
2530 else
2531 SCM_SETCDR (handle, count);
2532 }
2533
2534 SCM_REALLOW_INTS;
2535 }
2536
2537 void
2538 scm_gc_register_roots (SCM *b, unsigned long n)
2539 {
2540 SCM *p = b;
2541 for (; p < b + n; ++p)
2542 scm_gc_register_root (p);
2543 }
2544
2545 void
2546 scm_gc_unregister_roots (SCM *b, unsigned long n)
2547 {
2548 SCM *p = b;
2549 for (; p < b + n; ++p)
2550 scm_gc_unregister_root (p);
2551 }
2552
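/* Editorial sketch: scm_gc_register_root makes the GC treat a C
 * location holding an SCM as a root, which suits globals whose value
 * changes over time.  `my_global' and `my_module_init' are invented
 * names; the block is kept inside #if 0 so it does not affect
 * compilation.
 */
#if 0
static SCM my_global;

static void
my_module_init (void)
{
  my_global = SCM_BOOL_F;
  scm_gc_register_root (&my_global);      /* location now scanned by GC */
  my_global = scm_c_make_hash_table (31); /* stays alive across GCs */
}
#endif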
2553 int terminating;
2554
2555 /* called on process termination. */
2556 #ifdef HAVE_ATEXIT
2557 static void
2558 cleanup (void)
2559 #else
2560 #ifdef HAVE_ON_EXIT
2561 extern int on_exit (void (*procp) (), int arg);
2562
2563 static void
2564 cleanup (int status, void *arg)
2565 #else
2566 #error Dont know how to set up a cleanup handler on your system.
2567 #endif
2568 #endif
2569 {
2570 terminating = 1;
2571 scm_flush_all_ports ();
2572 }
2573
2574 \f
2575 static int
2576 make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
2577 {
2578 size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);
2579
2580 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2581 rounded_size,
2582 freelist))
2583 {
2584 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2585 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2586 rounded_size,
2587 freelist))
2588 return 1;
2589 }
2590 else
2591 scm_expmem = 1;
2592
2593 if (freelist->min_yield_fraction)
2594 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2595 / 100);
2596 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2597
2598 return 0;
2599 }
2600
2601 \f
2602 static void
2603 init_freelist (scm_t_freelist *freelist,
2604 int span,
2605 long cluster_size,
2606 int min_yield)
2607 {
2608 freelist->clusters = SCM_EOL;
2609 freelist->cluster_size = cluster_size + 1;
2610 freelist->left_to_collect = 0;
2611 freelist->clusters_allocated = 0;
2612 freelist->min_yield = 0;
2613 freelist->min_yield_fraction = min_yield;
2614 freelist->span = span;
2615 freelist->collected = 0;
2616 freelist->collected_1 = 0;
2617 freelist->heap_size = 0;
2618 }
2619
2620
2621 /* Get an integer from an environment variable. */
2622 static int
2623 scm_i_getenv_int (const char *var, int def)
2624 {
2625 char *end, *val = getenv (var);
2626 long res;
2627 if (!val)
2628 return def;
2629 res = strtol (val, &end, 10);
2630 if (end == val)
2631 return def;
2632 return res;
2633 }
2634
2635
2636 int
2637 scm_init_storage ()
2638 {
2639 unsigned long gc_trigger_1;
2640 unsigned long gc_trigger_2;
2641 size_t init_heap_size_1;
2642 size_t init_heap_size_2;
2643 size_t j;
2644
2645 j = SCM_NUM_PROTECTS;
2646 while (j)
2647 scm_sys_protects[--j] = SCM_BOOL_F;
2648 scm_block_gc = 1;
2649
2650 scm_freelist = SCM_EOL;
2651 scm_freelist2 = SCM_EOL;
2652 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2653 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2654 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2655 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2656 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
2657
2658 scm_expmem = 0;
2659
2660 j = SCM_HEAP_SEG_SIZE;
2661 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2662 scm_heap_table = ((scm_t_heap_seg_data *)
2663 scm_must_malloc (sizeof (scm_t_heap_seg_data) * 2, "hplims"));
2664 heap_segment_table_size = 2;
2665
2666 mark_space_ptr = &mark_space_head;
2667
2668 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2669 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
2670 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2671 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2672 return 1;
2673
2674 /* scm_hplims[0] can change. Do not remove scm_heap_org. */
2675 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2676
2677 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2678 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2679 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2680 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2681 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2682
2683 /* Initialise the list of ports. */
2684 scm_port_table = (scm_t_port **)
2685 malloc (sizeof (scm_t_port *) * scm_port_table_room);
2686 if (!scm_port_table)
2687 return 1;
2688
2689 #ifdef HAVE_ATEXIT
2690 atexit (cleanup);
2691 #else
2692 #ifdef HAVE_ON_EXIT
2693 on_exit (cleanup, 0);
2694 #endif
2695 #endif
2696
2697 scm_stand_in_procs = SCM_EOL;
2698 scm_permobjs = SCM_EOL;
2699 scm_protects = scm_c_make_hash_table (31);
2700 scm_gc_registered_roots = scm_c_make_hash_table (31);
2701
2702 return 0;
2703 }
2704
2705 \f
2706
2707 SCM scm_after_gc_hook;
2708
2709 static SCM gc_async;
2710
2711 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2712 * is run after the gc, as soon as the asynchronous events are handled by the
2713 * evaluator.
2714 */
2715 static SCM
2716 gc_async_thunk (void)
2717 {
2718 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2719 return SCM_UNSPECIFIED;
2720 }
2721
2722
2723 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2724 * the garbage collection. The only purpose of this function is to mark the
2725 * gc_async (which will eventually lead to the execution of the
2726 * gc_async_thunk).
2727 */
2728 static void *
2729 mark_gc_async (void * hook_data SCM_UNUSED,
2730 void *func_data SCM_UNUSED,
2731 void *data SCM_UNUSED)
2732 {
2733 /* If cell access debugging is enabled, the user may choose to perform
2734 * additional garbage collections after an arbitrary number of cell
2735 * accesses. We don't want the scheme level after-gc-hook to be performed
2736 * for each of these garbage collections for the following reason: The
2737 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
2738 * after-gc-hook was performed with every gc, and if the gc was performed
2739 * after a very small number of cell accesses, then the number of cell
2740 * accesses during the execution of the after-gc-hook will suffice to cause
2741 * the execution of the next gc. Guile would then keep executing the
2742 * after-gc-hook over and over again and would never get around to
2743 * doing anything else.
2744 *
2745 * To overcome this problem, if cell access debugging with additional
2746 * garbage collections is enabled, the after-gc-hook is never run by the
2747 * garbage collector. When running guile with cell access debugging and the
2748 * execution of the after-gc-hook is desired, it is necessary to run the
2749 * hook explicitly from user code. As a consequence, from the scheme level
2750 * point of view garbage collection appears to be performed with a much
2751 * lower frequency than it actually is. Obviously,
2752 * this will not work for code that depends on a fixed one to one
2753 * relationship between the execution counts of the C level garbage
2754 * collection hooks and the execution count of the scheme level
2755 * after-gc-hook.
2756 */
2757 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2758 if (debug_cells_gc_interval == 0)
2759 scm_system_async_mark (gc_async);
2760 #else
2761 scm_system_async_mark (gc_async);
2762 #endif
2763
2764 return NULL;
2765 }
2766
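/* Editorial sketch: client code can observe collections by adding its
 * own function to one of the C hooks set up in scm_init_storage, using
 * the same signature as mark_gc_async above.  `count_gc' and `gc_count'
 * are invented names; the block is kept inside #if 0 so it does not
 * affect compilation.
 */
#if 0
static unsigned long gc_count = 0;

static void *
count_gc (void *hook_data, void *func_data, void *data)
{
  ++gc_count;   /* incremented once per completed collection */
  return NULL;
}

/* During initialization:
     scm_c_hook_add (&scm_after_gc_c_hook, count_gc, NULL, 0);  */
#endif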
2767 #if SCM_ENABLE_DEPRECATED == 1
2768
2769 /* If an allocated cell is detected during garbage collection, this
2770 * means that some code has just obtained the object but was preempted
2771 * before the initialization of the object was completed. This means
2772 * that some entries of the allocated cell may already contain SCM
2773 * objects. Therefore, allocated cells are scanned conservatively.
2774 */
2775
2776 scm_t_bits scm_tc16_allocated;
2777
2778 static SCM
2779 allocated_mark (SCM cell)
2780 {
2781 unsigned long int cell_segment = heap_segment (cell);
2782 unsigned int span = scm_heap_table[cell_segment].span;
2783 unsigned int i;
2784
2785 for (i = 1; i != span * 2; ++i)
2786 {
2787 SCM obj = SCM_CELL_OBJECT (cell, i);
2788 long int obj_segment = heap_segment (obj);
2789 if (obj_segment >= 0)
2790 scm_gc_mark (obj);
2791 }
2792 return SCM_BOOL_F;
2793 }
2794
2795 SCM
2796 scm_deprecated_newcell (void)
2797 {
2798 scm_c_issue_deprecation_warning
2799 ("SCM_NEWCELL is deprecated. Use `scm_alloc_cell' instead.\n");
2800
2801 return scm_alloc_cell (scm_tc16_allocated, 0);
2802 }
2803
2804 SCM
2805 scm_deprecated_newcell2 (void)
2806 {
2807 scm_c_issue_deprecation_warning
2808 ("SCM_NEWCELL2 is deprecated. Use `scm_alloc_double_cell' instead.\n");
2809
2810 return scm_alloc_double_cell (scm_tc16_allocated, 0, 0, 0);
2811 }
2812
2813 #endif /* SCM_ENABLE_DEPRECATED == 1 */
2814
2815 void
2816 scm_init_gc ()
2817 {
2818 SCM after_gc_thunk;
2819
2820 #if SCM_ENABLE_DEPRECATED == 1
2821 scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
2822 scm_set_smob_mark (scm_tc16_allocated, allocated_mark);
2823 #endif
2824
2825 scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
2826 scm_c_define ("after-gc-hook", scm_after_gc_hook);
2827
2828 after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
2829 gc_async_thunk);
2830 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
2831
2832 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2833
2834 #ifndef SCM_MAGIC_SNARFER
2835 #include "libguile/gc.x"
2836 #endif
2837 }
2838
2839 #endif /*MARK_DEPENDENCIES*/
2840
2841 /*
2842 Local Variables:
2843 c-file-style: "gnu"
2844 End:
2845 */