/* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */


/* #define DEBUGINFO */

/* SECTION: This code is compiled once.
 */
#ifndef MARK_DEPENDENCIES

\f
#include <stdio.h>
#include <errno.h>
#include <string.h>

#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"
#include "libguile/tags.h"

#include "libguile/validate.h"
#include "libguile/deprecation.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f

unsigned int scm_gc_running_p = 0;

\f

#if (SCM_DEBUG_CELL_ACCESSES == 1)

scm_t_bits scm_tc16_allocated;

/* Set this to != 0 if every cell that is accessed shall be checked:
 */
unsigned int scm_debug_cell_accesses_p = 1;

/* Set this to 0 if no additional gc's shall be performed, otherwise set it to
 * the number of cell accesses after which a gc shall be called.
 */
static unsigned int debug_cells_gc_interval = 0;


/* If an allocated cell is detected during garbage collection, this means that
 * some code has just obtained the object but was preempted before the
 * initialization of the object was completed.  This means that some entries
 * of the allocated cell may already contain SCM objects.  Therefore,
 * allocated cells are scanned conservatively.  */
static SCM
allocated_mark (SCM allocated)
{
  scm_gc_mark_cell_conservatively (allocated);
  return SCM_BOOL_F;
}

/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves determining whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment and whether the cell
 * pointed to is not a free cell.  Further, additional garbage collections may
 * get executed after a user defined number of cell accesses.  This helps to
 * find places in the C code where references are dropped for extremely short
 * periods.
 */
void
scm_assert_cell_valid (SCM cell)
{
  static unsigned int already_running = 0;

  if (scm_debug_cell_accesses_p && !already_running)
    {
      already_running = 1;  /* set to avoid recursion */

      if (!scm_cellp (cell))
        {
          fprintf (stderr, "scm_assert_cell_valid: Not a cell object: 0x%lx\n",
                   (unsigned long) SCM_UNPACK (cell));
          abort ();
        }
      else if (!scm_gc_running_p)
        {
          /* Dirk::FIXME:: During garbage collection there occur references to
             free cells.  This is all right during conservative marking, but
             should not happen otherwise (I think).  The case of free cells
             accessed during conservative marking is handled in function
             scm_mark_locations.  However, there still occur accesses to free
             cells during gc.  I don't understand why this happens.  If it is
             a bug and gets fixed, the following test should also work while
             gc is running.
           */
          if (SCM_FREE_CELL_P (cell))
            {
              fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: 0x%lx\n",
                       (unsigned long) SCM_UNPACK (cell));
              abort ();
            }

          /* If desired, perform additional garbage collections after a user
           * defined number of cell accesses.
           */
          if (debug_cells_gc_interval)
            {
              static unsigned int counter = 0;

              if (counter != 0)
                {
                  --counter;
                }
              else
                {
                  counter = debug_cells_gc_interval;
                  scm_igc ("scm_assert_cell_valid");
                }
            }
        }
      already_running = 0;  /* re-enable */
    }
}


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
            "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
            "but no additional calls to garbage collection are issued.\n"
            "If @var{flag} is a number, cell access checking is enabled,\n"
            "with an additional garbage collection after the given\n"
            "number of cell accesses.\n"
            "This procedure only exists when the compile-time flag\n"
            "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (SCM_FALSEP (flag)) {
    scm_debug_cell_accesses_p = 0;
  } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
    debug_cells_gc_interval = 0;
    scm_debug_cell_accesses_p = 1;
  } else if (SCM_INUMP (flag)) {
    long int f = SCM_INUM (flag);
    if (f <= 0) SCM_OUT_OF_RANGE (1, flag);
    debug_cells_gc_interval = f;
    scm_debug_cell_accesses_p = 1;
  } else {
    SCM_WRONG_TYPE_ARG (1, flag);
  }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
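
/* Usage sketch (illustrative, not part of the original file): from Scheme,
 * e.g. (set-debug-cell-accesses! 1000) enables checking with an extra GC
 * after every 1000th cell access, per the docstring above.  A direct C
 * call would look like the hedged example below.
 */
#if 0
scm_set_debug_cell_accesses_x (SCM_MAKINUM (1000));
#endif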

#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f

/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which scm_cons and object headers are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of heap.  If this much heap is
 * allocated initially the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type size_t.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more heap
 * is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * Heap size 45000 and a 40% min yield give quick startup and no extra
 * heap allocation.  Higher min yield values may lead to large heaps,
 * especially if the code varies its maximum consumption between
 * different freelists.
 */

#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
                                        / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
int scm_default_min_yield_1 = 40;

#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
                                        / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application
 */
int scm_default_min_yield_2 = 40;

size_t scm_default_max_segment_size = 2097000L;  /* a little less than 2 MB */

#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT / 10)
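
/* A worked example of the growth policy above (illustrative numbers):
 * starting from a heap of 1000000 bytes, the expansion increments are
 * SCM_EXPHEAP (1000000) = 500000, then SCM_EXPHEAP (1500000) = 750000,
 * so the heap size follows 1000000 -> 1500000 -> 2250000 -> ..., i.e.
 * a factor of 1.5 per expansion, matching the comment above.
 */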

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
   aligned inner bounds for allocated storage */

#ifdef PROT386
/* in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif  /* UNICOS */
#endif  /* PROT386 */

#define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
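
/* Worked example for the alignment macros above, assuming an 8-byte
 * scm_cell and span 1: CELL_UP (0x1003, 1) rounds up to 0x1008 and
 * CELL_DN (0x1003, 1) rounds down to 0x1000.  With span 2 the mask
 * covers 16 bytes, so CELL_UP (0x1003, 2) yields 0x1010, and
 * DOUBLECELL_ALIGNED_P holds exactly for multiples of
 * 2 * sizeof (scm_cell).
 */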

#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)

\f
/* scm_freelists
 */

typedef struct scm_t_freelist {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  unsigned int cluster_size;
  /* indicates that we should grow the heap instead of collecting
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  unsigned long collected;
  /* number of collected cells during penultimate GC */
  unsigned long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  unsigned long heap_size;
} scm_t_freelist;

SCM scm_freelist = SCM_EOL;
scm_t_freelist scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_t_freelist scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
};
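
/* A minimal sketch (illustrative, kept out of the build) of how such a
 * freelist chain is walked: free cells are linked through their CDR via
 * SCM_FREE_CELL_CDR and terminated by SCM_EOL, exactly as the debugging
 * helpers further below do.
 */
#if 0
static long
count_free_cells (SCM freelist)
{
  long n = 0;
  SCM f;
  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    ++n;  /* each link is one free cell */
  return n;
}
#endif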

/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
unsigned long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0;  /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
static unsigned long t_before_sweep;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_sweep_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");

typedef struct scm_t_heap_seg_data
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_t_freelist *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_t_heap_seg_data;



static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);

typedef enum { return_on_error, abort_on_error } policy_on_error;
static void alloc_some_heap (scm_t_freelist *, policy_on_error);


#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))

/* mark space allocation */

typedef struct scm_t_mark_space
{
  scm_t_c_bvec_limb *bvec_space;
  struct scm_t_mark_space *next;
} scm_t_mark_space;

static scm_t_mark_space *current_mark_space;
static scm_t_mark_space **mark_space_ptr;
static ptrdiff_t current_mark_space_offset;
static scm_t_mark_space *mark_space_head;

static scm_t_c_bvec_limb *
get_bvec ()
#define FUNC_NAME "get_bvec"
{
  scm_t_c_bvec_limb *res;

  if (!current_mark_space)
    {
      SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
      if (!current_mark_space)
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space->bvec_space = NULL;
      current_mark_space->next = NULL;

      *mark_space_ptr = current_mark_space;
      mark_space_ptr = &(current_mark_space->next);

      return get_bvec ();
    }

  if (!(current_mark_space->bvec_space))
    {
      SCM_SYSCALL (current_mark_space->bvec_space =
                   (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
      if (!(current_mark_space->bvec_space))
        SCM_MISC_ERROR ("could not grow heap", SCM_EOL);

      current_mark_space_offset = 0;

      return get_bvec ();
    }

  if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
    {
      current_mark_space = NULL;

      return get_bvec ();
    }

  res = current_mark_space->bvec_space + current_mark_space_offset;
  current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;

  return res;
}
#undef FUNC_NAME
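
/* Sizing arithmetic for the mark space above (a worked example): one block
 * holds BVEC_GROW_SIZE (256) card bit vectors, i.e. 256 *
 * SCM_GC_CARD_BVEC_SIZE_IN_LIMBS limbs.  get_bvec hands out one card's worth
 * of limbs per call and chains a fresh block once current_mark_space_offset
 * reaches BVEC_GROW_SIZE_IN_LIMBS.
 */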


static void
clear_mark_space ()
{
  scm_t_mark_space *ms;

  for (ms = mark_space_head; ms; ms = ms->next)
    memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
}


\f
/* Debugging functions.  */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

static void
map_free_list (scm_t_freelist *master, SCM freelist)
{
  long last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    {
      long int this_seg = heap_segment (f);

      if (this_seg == -1)
        {
          fprintf (stderr,
                   "map_free_list: can't find segment containing cell 0x%lx\n",
                   (unsigned long int) SCM_UNPACK (f));
          abort ();
        }
      else if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
                     (long) count, master->span, (long) last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, "  %5ld %d-cells in segment %ld\n",
             (long) count, master->span, (long) last_seg);
}

SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{map-free-list} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  size_t i;

  fprintf (stderr, "%ld segments total (%d:%ld",
           (long) scm_n_heap_segs,
           scm_heap_table[0].span,
           (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));

  for (i = 1; i != scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%ld",
             scm_heap_table[i].span,
             (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
  fprintf (stderr, ")\n");
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

static long last_cluster;
static long last_size;

static long
free_list_length (char *title, long i, SCM freelist)
{
  SCM ls;
  long n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%ld\n", (long) last_size);
          else
            fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %ld", title, (long) i);
      else
        fprintf (stderr, "%s\t%ld\n", title, (long) n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}

static void
free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
{
  SCM clusters;
  long i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%ld\n", (long) last_size);
  else
    fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
  fprintf (stderr, "\ntotal %ld objects\n\n", (long) n);
}

SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "@code{free-list-length} is only included in\n"
            "@code{--enable-guile-debug} builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif

#ifdef GUILE_DEBUG_FREELIST

/* Non-zero if freelist debugging is in effect.  Set this via
   `gc-set-debug-check-freelist!'.  */
static int scm_debug_check_freelist = 0;

/* Number of calls to SCM_NEWCELL since startup.  */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  */
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  long i = 0;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
    if (!SCM_FREE_CELL_P (f))
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %ld'th elt\n",
                 (unsigned long) scm_newcell_count, (long) i);
        abort ();
      }
}

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If @var{flag} is @code{#t}, check the freelist for consistency\n"
            "on each cell allocation.  This procedure only exists when the\n"
            "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* [cmm] I did a double-take when I read this code the first time.
     well, FWIW. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist))
    {
      new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
      SCM_GC_SET_ALLOCATED (new);
    }
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
      SCM_GC_SET_ALLOCATED (new);
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist2))
    {
      new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
      SCM_GC_SET_ALLOCATED (new);
    }
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
      SCM_GC_SET_ALLOCATED (new);
    }

  return new;
}

#endif /* GUILE_DEBUG_FREELIST */

\f

static unsigned long
master_cells_allocated (scm_t_freelist *master)
{
  /* the '- 1' below is to ignore the cluster spine cells. */
  long objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}

static unsigned long
freelist_length (SCM freelist)
{
  long n;
  for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
    ++n;
  return n;
}

static unsigned long
compute_cells_allocated ()
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}
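
/* Worked example for the accounting above (illustrative numbers): a master
 * freelist with span 1, cluster_size 101 and clusters_allocated 3 has handed
 * out 3 * (101 - 1) = 300 cells; if 40 of them currently sit unused on the
 * local freelist, compute_cells_allocated counts scm_cells_allocated
 * + 300 - 1 * 40 for that list, so only cells actually in use are reported.
 */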

/* {Scheme Interface to GC}
 */

SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Return an association list of statistics about Guile's current\n"
            "use of storage.")
#define FUNC_NAME s_scm_gc_stats
{
  long i;
  long n;
  SCM heap_segs;
  unsigned long int local_scm_mtrigger;
  unsigned long int local_scm_mallocated;
  unsigned long int local_scm_heap_size;
  unsigned long int local_scm_cells_allocated;
  unsigned long int local_scm_gc_time_taken;
  unsigned long int local_scm_gc_times;
  unsigned long int local_scm_gc_mark_time_taken;
  unsigned long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long) scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_list_n (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                       scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                       scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                       scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                       scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                       scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                       scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                       scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                       scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
                       scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
                       scm_cons (sym_heap_segments, heap_segs),
                       SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
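
/* Usage sketch (illustrative): from Scheme, (gc-stats) returns an alist
 * keyed by the symbols defined above, so for example
 * (assq-ref (gc-stats) 'cell-heap-size) extracts the current cell-heap
 * size, and (assq-ref (gc-stats) 'gc-times) the number of collections.
 */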


static void
gc_start_stats (const char *what SCM_UNUSED)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}


static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
            "Return an integer that, for the lifetime of @var{obj}, is\n"
            "uniquely associated with @var{obj} by this function.")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
            "Scan all SCM objects and reclaim for further use those that are\n"
            "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f
/* {C Interface For When GC is Triggered}
 */

static void
adjust_min_yield (scm_t_freelist *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GCs.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                    - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %lu, delta = %ld\n",
               (unsigned long) scm_cells_allocated,
               (long) delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}
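
/* Worked example (illustrative numbers): with a total heap of 100000 cells
 * and min_yield_fraction 40, the target yield is 40000 cells.  If the larger
 * of the last two total yields was only 30000, delta is 10000 and min_yield
 * grows by that amount, so the sweeper will schedule heap growth sooner.
 */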


/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              /* In order to reduce gc frequency, try to allocate a new heap
               * segment first, even if gc might find some free cells.  If we
               * can't obtain a new heap segment, we will try gc later.
               */
              master->grow_heap_p = 0;
              alloc_some_heap (master, return_on_error);
            }
          if (SCM_NULLP (master->clusters))
            {
              /* The heap was not grown, either because it wasn't scheduled to
               * grow, or because there was not enough memory available.  In
               * both cases we have to try gc to get some free cells.
               */
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %lu, ",
                       (unsigned long) (scm_cells_allocated
                                        + master_cells_allocated (&scm_master_freelist)
                                        + master_cells_allocated (&scm_master_freelist2)));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
              if (SCM_NULLP (master->clusters))
                {
                  /* gc could not free any cells.  Now, we _must_ allocate a
                   * new heap segment, because there is no other possibility
                   * to provide a new cell for the caller.
                   */
                  alloc_some_heap (master, abort_on_error);
                }
            }
        }
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  *freelist = SCM_FREE_CELL_CDR (cell);
  return cell;
}


#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

SCM
scm_alloc_cluster (scm_t_freelist *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif


scm_t_c_hook scm_before_gc_c_hook;
scm_t_c_hook scm_before_mark_c_hook;
scm_t_c_hook scm_before_sweep_c_hook;
scm_t_c_hook scm_after_sweep_c_hook;
scm_t_c_hook scm_after_gc_c_hook;


void
scm_igc (const char *what)
{
  long j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
  /* During the critical section, only the current thread may run. */
  SCM_CRITICAL_SECTION_START;

  /* fprintf (stderr, "gc: %s\n", what); */

  if (!scm_stack_base || scm_block_gc)
    {
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    long x;
    long bound;
    SCM *elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Mark objects on the C stack. */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      ((size_t) (sizeof (SCM_STACKITEM) - 1 +
                                 sizeof scm_save_regs_gc_mark)
                       / sizeof (SCM_STACKITEM)));

  {
    unsigned long stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* mark the registered roots */
  {
    size_t i;
    for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) {
      SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
      for (; !SCM_NULLP (l); l = SCM_CDR (l)) {
        SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
        scm_gc_mark (*p);
      }
    }
  }

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

  SCM_CRITICAL_SECTION_END;
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}

\f

/* {Mark/Sweep}
 */

#define MARK scm_gc_mark
#define FNAME "scm_gc_mark"

#endif /*!MARK_DEPENDENCIES*/

/* Mark an object precisely.
 */
void
MARK (SCM p)
#define FUNC_NAME FNAME
{
  register long i;
  register SCM ptr;
  scm_t_bits cell_type;

#ifndef MARK_DEPENDENCIES
# define RECURSE scm_gc_mark
#else
  /* go through the usual marking, but not for self-cycles. */
# define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
#endif
  ptr = p;

#ifdef MARK_DEPENDENCIES
  goto gc_mark_loop_first_time;
#endif

  /* A simple hack for debugging.  Choose the second branch to get a
     meaningful backtrace for crashes inside the GC.
   */
#if 1
#define goto_gc_mark_loop goto gc_mark_loop
#define goto_gc_mark_nimp goto gc_mark_nimp
#else
#define goto_gc_mark_loop RECURSE(ptr); return
#define goto_gc_mark_nimp RECURSE(ptr); return
#endif

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:

#ifdef MARK_DEPENDENCIES
  if (SCM_EQ_P (ptr, p))
    return;

  scm_gc_mark (ptr);
  return;

gc_mark_loop_first_time:
#endif

#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
  /* We are in debug mode.  Check the ptr exhaustively. */
  if (!scm_cellp (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#else
  /* In non-debug mode, do at least some cheap testing. */
  if (!SCM_CELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
#endif

#ifndef MARK_DEPENDENCIES

  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

#endif

  cell_type = SCM_GC_CELL_TYPE (ptr);
  switch (SCM_ITAG7 (cell_type))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto_gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;
    case scm_tc7_pws:
      RECURSE (SCM_SETTER (ptr));
      ptr = SCM_PROCEDURE (ptr);
      goto_gc_mark_loop;
    case scm_tcs_struct:
      {
        /* XXX - use less explicit code. */
        scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_struct;
        scm_t_bits *vtable_data = (scm_t_bits *) word0;
        SCM layout = SCM_PACK (vtable_data[scm_vtable_index_layout]);
        long len = SCM_SYMBOL_LENGTH (layout);
        char *fields_desc = SCM_SYMBOL_CHARS (layout);
        scm_t_bits *struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);

        if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
          {
            RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
            RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
          }
        if (len)
          {
            long x;

            for (x = 0; x < len - 2; x += 2, ++struct_data)
              if (fields_desc[x] == 'p')
                RECURSE (SCM_PACK (*struct_data));
            if (fields_desc[x] == 'p')
              {
                if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                  for (x = *struct_data++; x; --x, ++struct_data)
                    RECURSE (SCM_PACK (*struct_data));
                else
                  RECURSE (SCM_PACK (*struct_data));
              }
          }
        /* mark vtable */
        ptr = SCM_PACK (vtable_data[scm_vtable_index_vtable]);
        goto_gc_mark_loop;
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_ENV (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto_gc_mark_nimp;
        }
      RECURSE (SCM_CLOSCAR (ptr));
      ptr = SCM_ENV (ptr);
      goto_gc_mark_nimp;
    case scm_tc7_vector:
      i = SCM_VECTOR_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          RECURSE (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto_gc_mark_loop;
#ifdef CCLO
    case scm_tc7_cclo:
      {
        size_t i = SCM_CCLO_LENGTH (ptr);
        size_t j;
        for (j = 1; j != i; ++j)
          {
            SCM obj = SCM_CCLO_REF (ptr, j);
            if (!SCM_IMP (obj))
              RECURSE (obj);
          }
        ptr = SCM_CCLO_REF (ptr, 0);
        goto_gc_mark_loop;
      }
#endif
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_substring:
      ptr = SCM_CDR (ptr);
      goto_gc_mark_loop;

    case scm_tc7_wvect:
      SCM_SET_WVECT_GC_CHAIN (ptr, scm_weak_vectors);
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          long x;
          long len;
          int weak_keys;
          int weak_values;

          len = SCM_VECTOR_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    RECURSE (SCM_CAR (kvpair));
                  if (!weak_values)
                    RECURSE (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                RECURSE (alist);
            }
        }
      break;

    case scm_tc7_symbol:
      ptr = SCM_PROP_SLOTS (ptr);
      goto_gc_mark_loop;
    case scm_tc7_variable:
      ptr = SCM_CELL_OBJECT_1 (ptr);
      goto_gc_mark_loop;
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
      if (!(i < scm_numptob))
        SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
      if (SCM_PTAB_ENTRY (ptr))
        RECURSE (SCM_FILENAME (ptr));
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto_gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* We have detected a free cell.  This can happen if non-object data
           * on the C stack points into guile's heap and is scanned during
           * conservative marking.  */
#if (SCM_DEBUG_CELL_ACCESSES == 0)
          /* If cell debugging is disabled, there is a second situation in
           * which a free cell can be encountered, namely if with preemptive
           * threading one thread has just obtained a fresh cell and was
           * preempted before the cell initialization was completed.  In this
           * case, some entries of the cell may already contain objects.
           * Thus, if cell debugging is disabled, free cells are scanned
           * conservatively.  */
          scm_gc_mark_cell_conservatively (ptr);
#else /* SCM_DEBUG_CELL_ACCESSES == 1 */
          /* With cell debugging enabled, a freshly obtained but not fully
           * initialized cell is guaranteed to be of type scm_tc16_allocated.
           * Thus, no conservative scanning for free cells is necessary, but
           * instead cells of type scm_tc16_allocated have to be scanned
           * conservatively.  This is done in the mark function of the
           * scm_tc16_allocated smob type. */
#endif
          break;
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
          if (!(i < scm_numsmob))
            SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto_gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
#undef RECURSE
}
#undef FUNC_NAME

#ifndef MARK_DEPENDENCIES

#undef MARK
#undef FNAME

/* And here we define `scm_gc_mark_dependencies', by including this
 * same file in itself.
 */
#define MARK scm_gc_mark_dependencies
#define FNAME "scm_gc_mark_dependencies"
#define MARK_DEPENDENCIES
#include "gc.c"
#undef MARK_DEPENDENCIES
#undef MARK
#undef FNAME


/* Determine whether the given value does actually represent a cell in some
 * heap segment.  If this is the case, the number of the heap segment is
 * returned.  Otherwise, -1 is returned.  Binary search is used in order to
 * determine the heap segment that contains the cell. */
/* FIXME: To be used within scm_gc_mark_cell_conservatively,
 * scm_mark_locations and scm_cellp this function should be an inline
 * function.  */
static long int
heap_segment (SCM obj)
{
  if (!SCM_CELLP (obj))
    return -1;
  else
    {
      SCM_CELLPTR ptr = SCM2PTR (obj);
      unsigned long int i = 0;
      unsigned long int j = scm_n_heap_segs - 1;

      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
        return -1;
      else if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
        return -1;
      else
        {
          while (i < j)
            {
              if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[1]))
                {
                  break;
                }
              else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                {
                  i = j;
                  break;
                }
              else
                {
                  unsigned long int k = (i + j) / 2;

                  if (k == i)
                    return -1;
                  else if (SCM_PTR_LT (ptr, scm_heap_table[k].bounds[1]))
                    {
                      j = k;
                      ++i;
                      if (SCM_PTR_LT (ptr, scm_heap_table[i].bounds[0]))
                        return -1;
                    }
                  else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                    {
                      i = k;
                      --j;
                      if (SCM_PTR_LE (scm_heap_table[j].bounds[1], ptr))
                        return -1;
                    }
                }
            }

          if (!DOUBLECELL_ALIGNED_P (obj) && scm_heap_table[i].span == 2)
            return -1;
          else if (SCM_GC_IN_CARD_HEADERP (ptr))
            return -1;
          else
            return i;
        }
    }
}
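
/* A small worked example of the bisection above (illustrative bounds): with
 * segments [0x1000,0x2000), [0x3000,0x4000) and [0x5000,0x6000), a pointer
 * 0x3800 passes the global bounds tests and the loop narrows i and j until
 * segment 1 is returned; 0x2800, which falls between segments, yields -1.
 */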


/* Mark the entries of a cell conservatively.  The given cell is known to be
 * on the heap.  Still we have to determine its heap segment in order to
 * figure out whether it is a single or a double cell.  Then, each of the cell
 * elements itself is checked and potentially marked. */
void
scm_gc_mark_cell_conservatively (SCM cell)
{
  unsigned long int cell_segment = heap_segment (cell);
  unsigned int span = scm_heap_table[cell_segment].span;
  unsigned int i;

  for (i = 1; i != span * 2; ++i)
    {
      SCM obj = SCM_CELL_OBJECT (cell, i);
      long int obj_segment = heap_segment (obj);
      if (obj_segment >= 0)
        scm_gc_mark (obj);
    }
}


/* Mark a region conservatively */
void
scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = * (SCM *) &x[m];
      long int segment = heap_segment (obj);
      if (segment >= 0)
        scm_gc_mark (obj);
    }
}


/* The function scm_cellp determines whether an SCM value can be regarded as a
 * pointer to a cell on the heap.
 */
int
scm_cellp (SCM value)
{
  long int segment = heap_segment (value);
  return (segment >= 0);
}


static void
gc_sweep_freelist_start (scm_t_freelist *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}

static void
gc_sweep_freelist_finish (scm_t_freelist *freelist)
{
  long collected;
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      SCM c = freelist->cells;
      SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
      SCM_SET_CELL_WORD_1 (c, SCM_EOL);
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}

#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
             CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
             : nxt__); \
  } while (0)
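
/* Illustration of NEXT_DATA_CELL: a GC card begins with
 * SCM_GC_CARD_N_HEADER_CELLS header cells followed by data cells.  Stepping
 * a pointer past the last data cell of a card lands in the next card's
 * header; the macro detects this via SCM_GC_IN_CARD_HEADERP and skips to the
 * first properly aligned data cell of that next card.
 */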

void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_t_freelist *freelist;
  register unsigned long m;
  register int span;
  size_t i;
  size_t seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register long left_to_collect;
      register size_t j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist.  */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr;

          if (SCM_GC_IN_CARD_HEADERP (ptr))
            {
              SCM_CELLPTR nxt;

              /* cheat here */
              nxt = ptr;
              NEXT_DATA_CELL (nxt, span);
              j += span;

              ptr = nxt - span;
              continue;
            }

          scmptr = PTR2SCM (ptr);

          if (SCM_GCMARKP (scmptr))
            continue;

          switch (SCM_TYP7 (scmptr))
            {
            case scm_tcs_struct:
              {
                /* Structs need to be freed in a special order.
                 * This is handled by GC C hooks in struct.c.
                 */
                SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
                scm_structs_to_free = scmptr;
              }
              continue;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              break;
            case scm_tc7_wvect:
            case scm_tc7_vector:
              {
                unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += length * sizeof (scm_t_bits);
                    scm_must_free (SCM_VECTOR_BASE (scmptr));
                  }
                break;
              }
#ifdef CCLO
            case scm_tc7_cclo:
              m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
              scm_must_free (SCM_CCLO_BASE (scmptr));
              break;
#endif
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              {
                unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
                if (length > 0)
                  {
                    m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
                    scm_must_free (SCM_BITVECTOR_BASE (scmptr));
                  }
              }
              break;
            case scm_tc7_byvect:
            case scm_tc7_ivect:
            case scm_tc7_uvect:
            case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
#endif
            case scm_tc7_fvect:
            case scm_tc7_dvect:
            case scm_tc7_cvect:
              m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
              scm_must_free (SCM_UVECTOR_BASE (scmptr));
              break;
#endif
            case scm_tc7_substring:
              break;
            case scm_tc7_string:
              m += SCM_STRING_LENGTH (scmptr) + 1;
              scm_must_free (SCM_STRING_CHARS (scmptr));
              break;
            case scm_tc7_symbol:
              m += SCM_SYMBOL_LENGTH (scmptr) + 1;
              scm_must_free (SCM_SYMBOL_CHARS (scmptr));
              break;
            case scm_tcs_subrs:
              /* the various "subrs" (primitives) are never freed */
              continue;
            case scm_tc7_port:
              if (SCM_OPENP (scmptr))
                {
                  int k = SCM_PTOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
                  if (!(k < scm_numptob))
                    SCM_MISC_ERROR ("undefined port type", SCM_EOL);
#endif
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free,   */
                  /* rather than scm_ptobs[k].close.  .close    */
                  /* is for explicit CLOSE-PORT by user.        */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_CLR_PORT_OPEN_FLAG (scmptr);
                }
              break;
            case scm_tc7_smob:
              switch (SCM_TYP16 (scmptr))
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  scm_must_free (SCM_BDIGITS (scmptr));
                  break;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  m += sizeof (scm_t_complex);
                  scm_must_free (SCM_COMPLEX_MEM (scmptr));
                  break;
                default:
                  {
                    int k;
                    k = SCM_SMOBNUM (scmptr);
#if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
                    if (!(k < scm_numsmob))
                      SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
#endif
                    if (scm_smobs[k].free)
                      m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
            default:
              SCM_MISC_ERROR ("unknown type", SCM_EOL);
            }

          if (!--left_to_collect)
            {
              SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object.  */
              SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
              SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }
        }

#ifdef GC_FREE_SEGMENTS
      /* NOTE: this conditional block appears to be stale -- no counter `n'
         of free cells per segment exists in this version of the sweeper,
         so it will not compile with GC_FREE_SEGMENTS defined.  */
      if (n == seg_size)
        {
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--;  /* We need to scan the segment just moved.  */
        }
      else
#endif /* ifdef GC_FREE_SEGMENTS */
1889 {
1890 /* Update the real freelist pointer to point to the head of
1891 the list of free cells we've built for this segment. */
1892 freelist->cells = nfreelist;
1893 freelist->left_to_collect = left_to_collect;
1894 }
1895
1896 #ifdef GUILE_DEBUG_FREELIST
1897 scm_map_free_list ();
1898 #endif
1899 }
1900
1901 gc_sweep_freelist_finish (&scm_master_freelist);
1902 gc_sweep_freelist_finish (&scm_master_freelist2);
1903
1904 /* When we move to POSIX threads, private freelists should probably
1905 be GC-protected instead. */
1906 scm_freelist = SCM_EOL;
1907 scm_freelist2 = SCM_EOL;
1908
1909 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1910 scm_gc_yield -= scm_cells_allocated;
1911
1912 if (scm_mallocated < m)
1913 /* The byte count of allocated objects has underflowed. This is
1914 probably because you forgot to report the sizes of objects you
1915 have allocated, by calling scm_done_malloc or some such. When
1916 the GC freed them, it subtracted their size from
1917 scm_mallocated, which underflowed. */
1918 abort ();
1919
1920 scm_mallocated -= m;
1921 scm_gc_malloc_collected = m;
1922 }
1923 #undef FUNC_NAME
1924
1925
1926 \f
1927 /* {Front end to malloc}
1928 *
1929 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1930 * scm_done_free
1931 *
1932 * These functions provide services comparable to malloc, realloc, and
1933 * free. They should be used when allocating memory that will be under
1934 * control of the garbage collector, i.e., if the memory may be freed
1935 * during garbage collection.
1936 */
1937
1938 /* scm_must_malloc
1939 * Return newly malloced storage or throw an error.
1940 *
1941 * The parameter WHAT is a string for error reporting.
1942 * If the threshold scm_mtrigger will be passed by this
1943 * allocation, or if the first call to malloc fails,
1944 * garbage collect -- on the presumption that some objects
1945 * using malloced storage may be collected.
1946 *
1947 * The limit scm_mtrigger may be raised by this allocation.
1948 */
1949 void *
1950 scm_must_malloc (size_t size, const char *what)
1951 {
1952 void *ptr;
1953 unsigned long nm = scm_mallocated + size;
1954
1955 if (nm < size)
1956 /* The byte count of allocated objects has overflowed. This is
1957 probably because you forgot to report the correct size of freed
1958 memory in some of your smob free methods. */
1959 abort ();
1960
1961 if (nm <= scm_mtrigger)
1962 {
1963 SCM_SYSCALL (ptr = malloc (size));
1964 if (NULL != ptr)
1965 {
1966 scm_mallocated = nm;
1967 #ifdef GUILE_DEBUG_MALLOC
1968 scm_malloc_register (ptr, what);
1969 #endif
1970 return ptr;
1971 }
1972 }
1973
1974 scm_igc (what);
1975
1976 nm = scm_mallocated + size;
1977
1978 if (nm < size)
1979 /* The byte count of allocated objects has overflowed. This is
1980 probably because you forgot to report the correct size of freed
1981 memory in some of your smob free methods. */
1982 abort ();
1983
1984 SCM_SYSCALL (ptr = malloc (size));
1985 if (NULL != ptr)
1986 {
1987 scm_mallocated = nm;
1988 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1989 if (nm > scm_mtrigger)
1990 scm_mtrigger = nm + nm / 2;
1991 else
1992 scm_mtrigger += scm_mtrigger / 2;
1993 }
1994 #ifdef GUILE_DEBUG_MALLOC
1995 scm_malloc_register (ptr, what);
1996 #endif
1997
1998 return ptr;
1999 }
2000
2001 scm_memory_error (what);
2002 }
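
/* A minimal usage sketch (illustration only; the struct `image' and its
 * fields are hypothetical, not part of this file).  Note that neither
 * call needs a NULL check: scm_must_malloc either returns valid storage
 * or signals an error via scm_memory_error.
 *
 *   typedef struct { int width, height; char *pixels; } image;
 *
 *   image *im = scm_must_malloc (sizeof (image), "image");
 *   im->width = 64;
 *   im->height = 64;
 *   im->pixels = scm_must_malloc ((size_t) im->width * im->height,
 *                                 "image pixels");
 */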
2003
2004
2005 /* scm_must_realloc
2006 * is similar to scm_must_malloc.
2007 */
2008 void *
2009 scm_must_realloc (void *where,
2010 size_t old_size,
2011 size_t size,
2012 const char *what)
2013 {
2014 void *ptr;
2015 unsigned long nm;
2016
2017 if (size <= old_size)
2018 return where;
2019
2020 nm = scm_mallocated + size - old_size;
2021
2022 if (nm < (size - old_size))
2023 /* The byte count of allocated objects has overflowed. This is
2024 probably because you forgot to report the correct size of freed
2025 memory in some of your smob free methods. */
2026 abort ();
2027
2028 if (nm <= scm_mtrigger)
2029 {
2030 SCM_SYSCALL (ptr = realloc (where, size));
2031 if (NULL != ptr)
2032 {
2033 scm_mallocated = nm;
2034 #ifdef GUILE_DEBUG_MALLOC
2035 scm_malloc_reregister (where, ptr, what);
2036 #endif
2037 return ptr;
2038 }
2039 }
2040
2041 scm_igc (what);
2042
2043 nm = scm_mallocated + size - old_size;
2044
2045 if (nm < (size - old_size))
2046 /* The byte count of allocated objects has overflowed. This is
2047 probably because you forgot to report the correct size of freed
2048 memory in some of your smob free methods. */
2049 abort ();
2050
2051 SCM_SYSCALL (ptr = realloc (where, size));
2052 if (NULL != ptr)
2053 {
2054 scm_mallocated = nm;
2055 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2056 if (nm > scm_mtrigger)
2057 scm_mtrigger = nm + nm / 2;
2058 else
2059 scm_mtrigger += scm_mtrigger / 2;
2060 }
2061 #ifdef GUILE_DEBUG_MALLOC
2062 scm_malloc_reregister (where, ptr, what);
2063 #endif
2064 return ptr;
2065 }
2066
2067 scm_memory_error (what);
2068 }
2069
2070 char *
2071 scm_must_strndup (const char *str, size_t length)
2072 {
2073 char * dst = scm_must_malloc (length + 1, "scm_must_strndup");
2074 memcpy (dst, str, length);
2075 dst[length] = 0;
2076 return dst;
2077 }
2078
2079 char *
2080 scm_must_strdup (const char *str)
2081 {
2082 return scm_must_strndup (str, strlen (str));
2083 }
2084
2085 void
2086 scm_must_free (void *obj)
2087 #define FUNC_NAME "scm_must_free"
2088 {
2089 #ifdef GUILE_DEBUG_MALLOC
2090 scm_malloc_unregister (obj);
2091 #endif
2092 if (obj)
2093 free (obj);
2094 else
2095 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
2096 }
2097 #undef FUNC_NAME
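
/* Continuing the sketch above: storage obtained from scm_must_malloc or
 * scm_must_realloc should be released with scm_must_free rather than
 * plain free, so that the GUILE_DEBUG_MALLOC registry stays consistent:
 *
 *   static void
 *   release_image (image *im)      (`image' as sketched above)
 *   {
 *     scm_must_free (im->pixels);
 *     scm_must_free (im);
 *   }
 */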
2098
2099
2100 /* Announce that there has been some malloc done that will be freed
2101 * during gc. A typical use is for a smob that uses some malloced
2102 * memory but cannot get it from scm_must_malloc (for whatever
2103 * reason). When a new object of this smob is created you call
2104 * scm_done_malloc with the size of the object. When your smob free
2105 * function is called, be sure to include this size in the return
2106 * value.
2107 *
2108 * If you can't actually free the memory in the smob free function,
2109 * for whatever reason (like reference counting), you still can (and
2110 * should) report the amount of memory freed when you actually free it.
2111 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2112 * eh? Or even better, call scm_done_free. */
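
/* A sketch of the pattern just described (all names hypothetical;
 * external_lib_* stands for any library that mallocs behind Guile's
 * back):
 *
 *   SCM
 *   make_widget (void)
 *   {
 *     struct widget *w = external_lib_new_widget ();
 *     scm_done_malloc (sizeof (struct widget));   (report the bytes)
 *     SCM_RETURN_NEWSMOB (widget_tag, w);
 *   }
 *
 *   static size_t
 *   free_widget (SCM smob)
 *   {
 *     external_lib_free_widget ((struct widget *) SCM_SMOB_DATA (smob));
 *     return sizeof (struct widget);              (same count again)
 *   }
 */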
2113
2114 void
2115 scm_done_malloc (long size)
2116 {
2117 if (size < 0) {
2118 if (scm_mallocated < (unsigned long) (- size))
2119 /* The byte count of allocated objects has underflowed. This is
2120 probably because you forgot to report the sizes of objects you
2121 have allocated, by calling scm_done_malloc or some such. When
2122 the GC freed them, it subtracted their size from
2123 scm_mallocated, which underflowed. */
2124 abort ();
2125 } else {
2126 unsigned long nm = scm_mallocated + size;
2127 if (nm < size)
2128 /* The byte count of allocated objects has overflowed. This is
2129 probably because you forgot to report the correct size of freed
2130 memory in some of your smob free methods. */
2131 abort ();
2132 }
2133
2134 scm_mallocated += size;
2135
2136 if (scm_mallocated > scm_mtrigger)
2137 {
2138 scm_igc ("foreign mallocs");
2139 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2140 {
2141 if (scm_mallocated > scm_mtrigger)
2142 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2143 else
2144 scm_mtrigger += scm_mtrigger / 2;
2145 }
2146 }
2147 }
2148
2149 void
2150 scm_done_free (long size)
2151 {
2152 if (size >= 0) {
2153 if (scm_mallocated < size)
2154 /* The byte count of allocated objects has underflowed. This is
2155 probably because you forgot to report the sizes of objects you
2156 have allocated, by calling scm_done_malloc or some such. When
2157 the GC freed them, it subtracted their size from
2158 scm_mallocated, which underflowed. */
2159 abort ();
2160 } else {
2161 unsigned long nm = scm_mallocated - size;
2162 if (nm < scm_mallocated)
2163 /* The byte count of allocated objects has overflowed. This is
2164 probably because you forgot to report the correct size of freed
2165 memory in some of your smob free methods. */
2166 abort ();
2167 }
2168
2169 scm_mallocated -= size;
2170 }
2171
2172
2173 \f
2174 /* {Heap Segments}
2175 *
2176 * Each heap segment is an array of objects of a particular size.
2177 * Every segment has an associated (possibly shared) freelist.
2178 * A table of segment records is kept that records the upper and
2179 * lower extents of the segment; this is used during the conservative
2180 * phase of gc to identify probable gc roots (because they point
2181 * into valid segments at reasonable offsets). */
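
/* For illustration, the kind of membership test this table makes
 * possible (a simplified sketch, not the marker's actual predicate):
 *
 *   static int
 *   points_into_heap (SCM_CELLPTR ptr)
 *   {
 *     size_t i;
 *     for (i = 0; i < scm_n_heap_segs; i++)
 *       if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
 *           && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
 *         return 1;
 *     return 0;
 *   }
 */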
2182
2183 /* scm_expmem
2184 * is true if the first segment was smaller than INIT_HEAP_SEG.
2185 * If scm_expmem is set to one, subsequent segment allocations will
2186 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2187 */
2188 int scm_expmem = 0;
2189
2190 size_t scm_max_segment_size;
2191
2192 /* scm_heap_org
2193 * is the lowest base address of any heap segment.
2194 */
2195 SCM_CELLPTR scm_heap_org;
2196
2197 scm_t_heap_seg_data * scm_heap_table = 0;
2198 static size_t heap_segment_table_size = 0;
2199 size_t scm_n_heap_segs = 0;
2200
2201 /* init_heap_seg
2202 * initializes a new heap segment and returns its size in bytes, or 0 on failure.
2203 *
2204 * The segment origin and segment size in bytes are input parameters.
2205 * The freelist is both input and output.
2206 *
2207 * This function presumes that the scm_heap_table has already been expanded
2208 * to accommodate a new segment record and that the markbit space was reserved
2209 * for all the cards in this segment.
2210 */
2211
2212 #define INIT_CARD(card, span) \
2213 do { \
2214 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2215 if ((span) == 2) \
2216 SCM_GC_SET_CARD_DOUBLECELL (card); \
2217 } while (0)
2218
2219 static size_t
2220 init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
2221 {
2222 register SCM_CELLPTR ptr;
2223 SCM_CELLPTR seg_end;
2224 size_t new_seg_index;
2225 ptrdiff_t n_new_cells;
2226 int span = freelist->span;
2227
2228 if (seg_org == NULL)
2229 return 0;
2230
2231 /* Align the begin ptr up.
2232 */
2233 ptr = SCM_GC_CARD_UP (seg_org);
2234
2235 /* Compute the ceiling on valid object pointers w/in this segment.
2236 */
2237 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
2238
2239 /* Find the right place and insert the segment record.
2240 */
2241 new_seg_index = 0;
2242 while (new_seg_index < scm_n_heap_segs
2243 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org))
2244 new_seg_index++;
2245
2246 {
2247 int i;
2248 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2249 scm_heap_table[i] = scm_heap_table[i - 1];
2250 }
2251
2252 ++scm_n_heap_segs;
2253
2254 scm_heap_table[new_seg_index].span = span;
2255 scm_heap_table[new_seg_index].freelist = freelist;
2256 scm_heap_table[new_seg_index].bounds[0] = ptr;
2257 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2258
2259 /* Count the data cells between the aligned bounds. */
2260 n_new_cells = seg_end - ptr;
2261
2262 freelist->heap_size += n_new_cells;
2263
2264 /* Partition objects in this segment into clusters */
2265 {
2266 SCM clusters;
2267 SCM *clusterp = &clusters;
2268
2269 NEXT_DATA_CELL (ptr, span);
2270 while (ptr < seg_end)
2271 {
2272 scm_cell *nxt = ptr;
2273 scm_cell *prv = NULL;
2274 scm_cell *last_card = NULL;
2275 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2276 NEXT_DATA_CELL(nxt, span);
2277
2278 /* Allocate cluster spine
2279 */
2280 *clusterp = PTR2SCM (ptr);
2281 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
2282 clusterp = SCM_CDRLOC (*clusterp);
2283 ptr = nxt;
2284
2285 while (n_data_cells--)
2286 {
2287 scm_cell *card = SCM_GC_CELL_CARD (ptr);
2288 SCM scmptr = PTR2SCM (ptr);
2289 nxt = ptr;
2290 NEXT_DATA_CELL (nxt, span);
2291 prv = ptr;
2292
2293 if (card != last_card)
2294 {
2295 INIT_CARD (card, span);
2296 last_card = card;
2297 }
2298
2299 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2300 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
2301
2302 ptr = nxt;
2303 }
2304
2305 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
2306 }
2307
2308 /* sanity check */
2309 {
2310 scm_cell *ref = seg_end;
2311 NEXT_DATA_CELL (ref, span);
2312 if (ref != ptr)
2313 /* [cmm] looks like the segment size doesn't divide cleanly by
2314 cluster size. bad cmm! */
2315 abort();
2316 }
2317
2318 /* Patch up the last cluster pointer in the segment
2319 * to join it to the input freelist.
2320 */
2321 *clusterp = freelist->clusters;
2322 freelist->clusters = clusters;
2323 }
2324
2325 #ifdef DEBUGINFO
2326 fprintf (stderr, "H");
2327 #endif
2328 return size;
2329 }
2330
2331 static size_t
2332 round_to_cluster_size (scm_t_freelist *freelist, size_t len)
2333 {
2334 size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2335
2336 return
2337 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2338 + ALIGNMENT_SLACK (freelist);
2339 }
2340
2341 static void
2342 alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
2343 #define FUNC_NAME "alloc_some_heap"
2344 {
2345 SCM_CELLPTR ptr;
2346 size_t len;
2347
2348 if (scm_gc_heap_lock)
2349 {
2350 /* Critical code sections (such as the garbage collector) aren't
2351 * supposed to add heap segments.
2352 */
2353 fprintf (stderr, "alloc_some_heap: Cannot extend locked heap.\n");
2354 abort ();
2355 }
2356
2357 if (scm_n_heap_segs == heap_segment_table_size)
2358 {
2359 /* We have to expand the heap segment table to have room for the new
2360 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2361 * init_heap_seg only if the allocation of the segment itself succeeds.
2362 */
2363 size_t new_table_size = scm_n_heap_segs + 1;
2364 size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
2365 scm_t_heap_seg_data *new_heap_table;
2366
2367 SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
2368 realloc ((char *)scm_heap_table, size)));
2369 if (!new_heap_table)
2370 {
2371 if (error_policy == abort_on_error)
2372 {
2373 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2374 abort ();
2375 }
2376 else
2377 {
2378 return;
2379 }
2380 }
2381 else
2382 {
2383 scm_heap_table = new_heap_table;
2384 heap_segment_table_size = new_table_size;
2385 }
2386 }
2387
2388 /* Pick a size for the new heap segment.
2389 * The rule for picking the size of a segment is explained in
2390 * gc.h
2391 */
2392 {
2393 /* Ensure that the new segment is predicted to be large enough.
2394 *
2395 * New yield should at least equal GC fraction of new heap size, i.e.
2396 *
2397 * y + dh > f * (h + dh)
2398 *
2399 * y : yield
2400 * f : min yield fraction
2401 * h : heap size
2402 * dh : size of new heap segment
2403 *
2404 * This gives dh > (f * h - y) / (1 - f)
2405 */
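/* Since f below is expressed as a percentage, the bound used in the
 * code is dh > (f * h - 100 * y) / (100 - f); the actual divisor
 * (99 - f) is presumably chosen to bias toward a slightly larger
 * segment. */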
2406 int f = freelist->min_yield_fraction;
2407 unsigned long h = SCM_HEAP_SIZE;
2408 size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2409 len = SCM_EXPHEAP (freelist->heap_size);
2410 #ifdef DEBUGINFO
2411 fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
2412 #endif
2413 if (len < min_cells)
2414 len = min_cells + freelist->cluster_size;
2415 len *= sizeof (scm_cell);
2416 /* force new sampling */
2417 freelist->collected = LONG_MAX;
2418 }
2419
2420 if (len > scm_max_segment_size)
2421 len = scm_max_segment_size;
2422
2423 {
2424 size_t smallest;
2425
2426 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2427
2428 if (len < smallest)
2429 len = smallest;
2430
2431 /* Allocate with decaying ambition. */
2432 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2433 && (len >= smallest))
2434 {
2435 size_t rounded_len = round_to_cluster_size (freelist, len);
2436 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2437 if (ptr)
2438 {
2439 init_heap_seg (ptr, rounded_len, freelist);
2440 return;
2441 }
2442 len /= 2;
2443 }
2444 }
2445
2446 if (error_policy == abort_on_error)
2447 {
2448 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2449 abort ();
2450 }
2451 }
2452 #undef FUNC_NAME
2453
2454 \f
2455 /* {GC Protection Helper Functions}
2456 */
2457
2458
2459 /*
2460 * If within a function you need to protect one or more scheme objects from
2461 * garbage collection, pass them as parameters to one of the
2462 * scm_remember_upto_here* functions below. These functions don't do
2463 * anything, but since the compiler does not know that they are actually
2464 * no-ops, it will generate code that calls these functions with the given
2465 * parameters. Therefore, you can be sure that the compiler will keep those
2466 * scheme values alive (on the stack or in a register) up to the point where
2467 * scm_remember_upto_here* is called. In other words, place the call to
2468 * scm_remember_upto_here* _after_ the last code in your function that
2469 * depends on the scheme object's existence.
2470 *
2471 * Example: We want to make sure that the string object str does not get
2472 * garbage collected during the execution of 'some_function', because
2473 * otherwise the characters belonging to str would be freed and
2474 * 'some_function' might access freed memory. To make sure that the compiler
2475 * keeps str alive on the stack or in a register such that it is visible to
2476 * the conservative gc, we add the call to scm_remember_upto_here_1 _after_ the
2477 * call to 'some_function'. Note that this would not be necessary if str was
2478 * used anyway after the call to 'some_function'.
2479 * char *chars = SCM_STRING_CHARS (str);
2480 * some_function (chars);
2481 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2482 */
2483
2484 void
2485 scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
2486 {
2487 /* Empty. Protects a single object from garbage collection. */
2488 }
2489
2490 void
2491 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
2492 {
2493 /* Empty. Protects two objects from garbage collection. */
2494 }
2495
2496 void
2497 scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
2498 {
2499 /* Empty. Protects any number of objects from garbage collection. */
2500 }
2501
2502
2503 #if (SCM_DEBUG_DEPRECATED == 0)
2504
2505 void
2506 scm_remember (SCM *ptr)
2507 {
2508 scm_c_issue_deprecation_warning ("`scm_remember' is deprecated. "
2509 "Use the `scm_remember_upto_here*' family of functions instead.");
2510 }
2511
2512 SCM
2513 scm_protect_object (SCM obj)
2514 {
2515 scm_c_issue_deprecation_warning ("`scm_protect_object' is deprecated. "
2516 "Use `scm_gc_protect_object' instead.");
2517 return scm_gc_protect_object (obj);
2518 }
2519
2520 SCM
2521 scm_unprotect_object (SCM obj)
2522 {
2523 scm_c_issue_deprecation_warning ("`scm_unprotect_object' is deprecated. "
2524 "Use `scm_gc_unprotect_object' instead.");
2525 return scm_gc_unprotect_object (obj);
2526 }
2527
2528 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2529
2530 /*
2531 These crazy functions prevent garbage collection
2532 of the arguments after the first one by ensuring
2533 that they remain live to the end of the calling
2534 function, because they appear in its final
2535 expression.
2536 It'd be better to have a nice compiler hint to
2537 aid the conservative stack-scanning GC. --03/09/00 gjb */
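
/* Typical use (a sketch; some_function and str as in the example
 * further above):
 *
 *   SCM result = some_function (SCM_STRING_CHARS (str));
 *   return scm_return_first (result, str);
 *
 * str stays visible to the conservative gc until scm_return_first is
 * actually called, i.e. until after some_function has finished. */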
2538 SCM
2539 scm_return_first (SCM elt, ...)
2540 {
2541 return elt;
2542 }
2543
2544 int
2545 scm_return_first_int (int i, ...)
2546 {
2547 return i;
2548 }
2549
2550
2551 SCM
2552 scm_permanent_object (SCM obj)
2553 {
2554 SCM_REDEFER_INTS;
2555 scm_permobjs = scm_cons (obj, scm_permobjs);
2556 SCM_REALLOW_INTS;
2557 return obj;
2558 }
2559
2560
2561 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2562 other references are dropped, until the object is unprotected by calling
2563 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
2564 i.e. it is possible to protect the same object several times, but it is
2565 necessary to unprotect the object the same number of times to actually get
2566 the object unprotected. It is an error to unprotect an object more often
2567 than it has been protected before. The function scm_gc_protect_object returns
2568 OBJ.
2569 */
2570
2571 /* Implementation note: For every object X, there is a counter which
2572 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
2573 */
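
/* Example of the nesting behavior (sketch):
 *
 *   scm_gc_protect_object (obj);     (count becomes 1)
 *   scm_gc_protect_object (obj);     (count becomes 2)
 *   scm_gc_unprotect_object (obj);   (count drops to 1; still protected)
 *   scm_gc_unprotect_object (obj);   (count reaches 0; obj may be GC'd)
 */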
2574
2575 SCM
2576 scm_gc_protect_object (SCM obj)
2577 {
2578 SCM handle;
2579
2580 /* This critical section barrier will be replaced by a mutex. */
2581 SCM_REDEFER_INTS;
2582
2583 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2584 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2585
2586 SCM_REALLOW_INTS;
2587
2588 return obj;
2589 }
2590
2591
2592 /* Remove one level of protection for OBJ established by a prior call to
2593 scm_gc_protect_object. This function returns OBJ.
2594
2595 See scm_gc_protect_object for more information. */
2596 SCM
2597 scm_gc_unprotect_object (SCM obj)
2598 {
2599 SCM handle;
2600
2601 /* This critical section barrier will be replaced by a mutex. */
2602 SCM_REDEFER_INTS;
2603
2604 handle = scm_hashq_get_handle (scm_protects, obj);
2605
2606 if (SCM_FALSEP (handle))
2607 {
2608 fprintf (stderr, "scm_gc_unprotect_object called on unprotected object\n");
2609 abort ();
2610 }
2611 else
2612 {
2613 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2614 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2615 scm_hashq_remove_x (scm_protects, obj);
2616 else
2617 SCM_SETCDR (handle, count);
2618 }
2619
2620 SCM_REALLOW_INTS;
2621
2622 return obj;
2623 }
2624
2625 void
2626 scm_gc_register_root (SCM *p)
2627 {
2628 SCM handle;
2629 SCM key = scm_long2num ((long) p);
2630
2631 /* This critical section barrier will be replaced by a mutex. */
2632 SCM_REDEFER_INTS;
2633
2634 handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
2635 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2636
2637 SCM_REALLOW_INTS;
2638 }
2639
2640 void
2641 scm_gc_unregister_root (SCM *p)
2642 {
2643 SCM handle;
2644 SCM key = scm_long2num ((long) p);
2645
2646 /* This critical section barrier will be replaced by a mutex. */
2647 SCM_REDEFER_INTS;
2648
2649 handle = scm_hashv_get_handle (scm_gc_registered_roots, key);
2650
2651 if (SCM_FALSEP (handle))
2652 {
2653 fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
2654 abort ();
2655 }
2656 else
2657 {
2658 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2659 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2660 scm_hashv_remove_x (scm_gc_registered_roots, key);
2661 else
2662 SCM_SETCDR (handle, count);
2663 }
2664
2665 SCM_REALLOW_INTS;
2666 }
2667
2668 void
2669 scm_gc_register_roots (SCM *b, unsigned long n)
2670 {
2671 SCM *p = b;
2672 for (; p < b + n; ++p)
2673 scm_gc_register_root (p);
2674 }
2675
2676 void
2677 scm_gc_unregister_roots (SCM *b, unsigned long n)
2678 {
2679 SCM *p = b;
2680 for (; p < b + n; ++p)
2681 scm_gc_unregister_root (p);
2682 }
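
/* Sketch: making a global C variable (hypothetical) visible to the
 * collector as a root:
 *
 *   static SCM my_cache = SCM_BOOL_F;
 *   ...
 *   scm_gc_register_root (&my_cache);     (contents now kept alive)
 *   ...
 *   scm_gc_unregister_root (&my_cache);   (when the root goes away)
 */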
2683
2684 int terminating;
2685
2686 /* called on process termination. */
2687 #ifdef HAVE_ATEXIT
2688 static void
2689 cleanup (void)
2690 #else
2691 #ifdef HAVE_ON_EXIT
2692 extern int on_exit (void (*procp) (), int arg);
2693
2694 static void
2695 cleanup (int status, void *arg)
2696 #else
2697 #error Do not know how to set up a cleanup handler on your system.
2698 #endif
2699 #endif
2700 {
2701 terminating = 1;
2702 scm_flush_all_ports ();
2703 }
2704
2705 \f
2706 static int
2707 make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
2708 {
2709 size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);
2710
2711 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2712 rounded_size,
2713 freelist))
2714 {
2715 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2716 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2717 rounded_size,
2718 freelist))
2719 return 1;
2720 }
2721 else
2722 scm_expmem = 1;
2723
2724 if (freelist->min_yield_fraction)
2725 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2726 / 100);
2727 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2728
2729 return 0;
2730 }
2731
2732 \f
2733 static void
2734 init_freelist (scm_t_freelist *freelist,
2735 int span,
2736 long cluster_size,
2737 int min_yield)
2738 {
2739 freelist->clusters = SCM_EOL;
2740 freelist->cluster_size = cluster_size + 1;
2741 freelist->left_to_collect = 0;
2742 freelist->clusters_allocated = 0;
2743 freelist->min_yield = 0;
2744 freelist->min_yield_fraction = min_yield;
2745 freelist->span = span;
2746 freelist->collected = 0;
2747 freelist->collected_1 = 0;
2748 freelist->heap_size = 0;
2749 }
2750
2751
2752 /* Get an integer from an environment variable. */
2753 static int
2754 scm_i_getenv_int (const char *var, int def)
2755 {
2756 char *end, *val = getenv (var);
2757 long res;
2758 if (!val)
2759 return def;
2760 res = strtol (val, &end, 10);
2761 if (end == val)
2762 return def;
2763 return res;
2764 }
2765
2766
2767 int
2768 scm_init_storage ()
2769 {
2770 unsigned long gc_trigger_1;
2771 unsigned long gc_trigger_2;
2772 size_t init_heap_size_1;
2773 size_t init_heap_size_2;
2774 size_t j;
2775
2776 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2777 scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
2778 scm_set_smob_mark (scm_tc16_allocated, allocated_mark);
2779 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
2780
2781 j = SCM_NUM_PROTECTS;
2782 while (j)
2783 scm_sys_protects[--j] = SCM_BOOL_F;
2784 scm_block_gc = 1;
2785
2786 scm_freelist = SCM_EOL;
2787 scm_freelist2 = SCM_EOL;
2788 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2789 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2790 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2791 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2792 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
2793
2794 scm_expmem = 0;
2795
2796 j = SCM_HEAP_SEG_SIZE;
2797 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2798 scm_heap_table = ((scm_t_heap_seg_data *)
2799 scm_must_malloc (sizeof (scm_t_heap_seg_data) * 2, "hplims"));
2800 heap_segment_table_size = 2;
2801
2802 mark_space_ptr = &mark_space_head;
2803
2804 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2805 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
2806 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2807 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2808 return 1;
2809
2810 /* scm_heap_table[0].bounds[0] can change; do not remove scm_heap_org. */
2811 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2812
2813 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2814 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2815 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2816 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2817 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2818
2819 /* Initialise the list of ports. */
2820 scm_t_portable = (scm_t_port **)
2821 malloc (sizeof (scm_t_port *) * scm_t_portable_room);
2822 if (!scm_t_portable)
2823 return 1;
2824
2825 #ifdef HAVE_ATEXIT
2826 atexit (cleanup);
2827 #else
2828 #ifdef HAVE_ON_EXIT
2829 on_exit (cleanup, 0);
2830 #endif
2831 #endif
2832
2833 scm_stand_in_procs = SCM_EOL;
2834 scm_permobjs = SCM_EOL;
2835 scm_protects = scm_c_make_hash_table (31);
2836 scm_gc_registered_roots = scm_c_make_hash_table (31);
2837
2838 return 0;
2839 }
2840
2841 \f
2842
2843 SCM scm_after_gc_hook;
2844
2845 static SCM gc_async;
2846
2847 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2848 * is run after the gc, as soon as the asynchronous events are handled by the
2849 * evaluator.
2850 */
2851 static SCM
2852 gc_async_thunk (void)
2853 {
2854 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2855 return SCM_UNSPECIFIED;
2856 }
2857
2858
2859 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2860 * the garbage collection. The only purpose of this function is to mark the
2861 * gc_async (which will eventually lead to the execution of the
2862 * gc_async_thunk).
2863 */
2864 static void *
2865 mark_gc_async (void * hook_data SCM_UNUSED,
2866 void *func_data SCM_UNUSED,
2867 void *data SCM_UNUSED)
2868 {
2869 /* If cell access debugging is enabled, the user may choose to perform
2870 * additional garbage collections after an arbitrary number of cell
2871 * accesses. We don't want the scheme level after-gc-hook to be performed
2872 * for each of these garbage collections for the following reason: The
2873 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
2874 * after-gc-hook was performed with every gc, and if the gc was performed
2875 * after a very small number of cell accesses, then the number of cell
2876 * accesses during the execution of the after-gc-hook will suffice to cause
2877 * the execution of the next gc. Then, guile would keep executing the
2878 * after-gc-hook over and over again, and would never come to do other
2879 * things.
2880 *
2881 * To overcome this problem, if cell access debugging with additional
2882 * garbage collections is enabled, the after-gc-hook is never run by the
2883 * garbage collector. When running guile with cell access debugging and the
2884 * execution of the after-gc-hook is desired, it is necessary to run
2885 * the hook explicitly from user code. As a consequence, from
2886 * the scheme level point of view garbage collection appears to be
2887 * performed much less frequently than it actually is. Obviously,
2888 * this will not work for code that depends on a fixed one to one
2889 * relationship between the execution counts of the C level garbage
2890 * collection hooks and the execution count of the scheme level
2891 * after-gc-hook.
2892 */
2893 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2894 if (debug_cells_gc_interval == 0)
2895 scm_system_async_mark (gc_async);
2896 #else
2897 scm_system_async_mark (gc_async);
2898 #endif
2899
2900 return NULL;
2901 }
2902
2903
2904 void
2905 scm_init_gc ()
2906 {
2907 SCM after_gc_thunk;
2908
2909 scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
2910 scm_c_define ("after-gc-hook", scm_after_gc_hook);
2911
2912 after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
2913 gc_async_thunk);
2914 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
2915
2916 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2917
2918 #ifndef SCM_MAGIC_SNARFER
2919 #include "libguile/gc.x"
2920 #endif
2921 }
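
/* Sketch: other C code can observe collections by adding to the same
 * C-level hook (my_gc_logger is hypothetical; the signature follows
 * mark_gc_async above):
 *
 *   static void *
 *   my_gc_logger (void *hook_data, void *func_data, void *data)
 *   {
 *     fprintf (stderr, "gc finished\n");
 *     return NULL;
 *   }
 *
 *   scm_c_hook_add (&scm_after_gc_c_hook, my_gc_logger, NULL, 0);
 */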
2922
2923 #endif /*MARK_DEPENDENCIES*/
2924
2925 /*
2926 Local Variables:
2927 c-file-style: "gnu"
2928 End:
2929 */