replace "scm_*_t" with "scm_t_*".
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995,1996,1997,1998,1999,2000,2001 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 /* SECTION: This code is compiled once.
48 */
49
50 #ifndef MARK_DEPENDENCIES
51
52 \f
53 #include <stdio.h>
54 #include <errno.h>
55 #include <string.h>
56
57 #include "libguile/_scm.h"
58 #include "libguile/eval.h"
59 #include "libguile/stime.h"
60 #include "libguile/stackchk.h"
61 #include "libguile/struct.h"
62 #include "libguile/smob.h"
63 #include "libguile/unif.h"
64 #include "libguile/async.h"
65 #include "libguile/ports.h"
66 #include "libguile/root.h"
67 #include "libguile/strings.h"
68 #include "libguile/vectors.h"
69 #include "libguile/weaks.h"
70 #include "libguile/hashtab.h"
71 #include "libguile/tags.h"
72
73 #include "libguile/validate.h"
74 #include "libguile/deprecation.h"
75 #include "libguile/gc.h"
76
77 #ifdef GUILE_DEBUG_MALLOC
78 #include "libguile/debug-malloc.h"
79 #endif
80
81 #ifdef HAVE_MALLOC_H
82 #include <malloc.h>
83 #endif
84
85 #ifdef HAVE_UNISTD_H
86 #include <unistd.h>
87 #endif
88
89 #ifdef __STDC__
90 #include <stdarg.h>
91 #define var_start(x, y) va_start(x, y)
92 #else
93 #include <varargs.h>
94 #define var_start(x, y) va_start(x)
95 #endif
96
97 \f
98
99 unsigned int scm_gc_running_p = 0;
100
101 \f
102
103 #if (SCM_DEBUG_CELL_ACCESSES == 1)
104
105 scm_t_bits scm_tc16_allocated;
106
107 /* Set this to non-zero if every cell access shall be checked:
108 */
109 unsigned int scm_debug_cell_accesses_p = 1;
110
111 /* Set this to 0 if no additional gc's shall be performed, otherwise set it to
112 * the number of cell accesses after which a gc shall be called.
113 */
114 static unsigned int debug_cells_gc_interval = 0;
115
116
117 /* Assert that the given object is a valid reference to a valid cell. This
118 * test involves determining whether the object is a cell pointer, whether
119 * this pointer actually points into a heap segment and whether the cell
120 * pointed to is not a free cell. Further, additional garbage collections may
121 * get executed after a user-defined number of cell accesses. This helps to
122 * find places in the C code where references are dropped for extremely short
123 * periods.
124 */
125 void
126 scm_assert_cell_valid (SCM cell)
127 {
128 static unsigned int already_running = 0;
129
130 if (scm_debug_cell_accesses_p && !already_running)
131 {
132 already_running = 1; /* set to avoid recursion */
133
134 if (!scm_cellp (cell))
135 {
136 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lux\n",
137 (unsigned long) SCM_UNPACK (cell));
138 abort ();
139 }
140 else if (!scm_gc_running_p)
141 {
142 /* Dirk::FIXME:: During garbage collection, references to free cells
143 can occur. This is all right during conservative marking, but
144 should not happen otherwise (I think). The case of free cells
145 accessed during conservative marking is handled in function
146 scm_mark_locations. However, accesses to free cells still occur
147 during gc. I don't understand why this happens. If it is a bug
148 and gets fixed, the following test should also work while gc is
149 running.
150 */
151 if (SCM_FREE_CELL_P (cell))
152 {
153 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lux\n",
154 (unsigned long) SCM_UNPACK (cell));
155 abort ();
156 }
157
158 /* If desired, perform additional garbage collections after a user
159 * defined number of cell accesses.
160 */
161 if (debug_cells_gc_interval)
162 {
163 static unsigned int counter = 0;
164
165 if (counter != 0)
166 {
167 --counter;
168 }
169 else
170 {
171 counter = debug_cells_gc_interval;
172 scm_igc ("scm_assert_cell_valid");
173 }
174 }
175 }
176 already_running = 0; /* re-enable */
177 }
178 }
179
180
181 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
182 (SCM flag),
183 "If @var{flag} is @code{#f}, cell access checking is disabled.\n"
184 "If @var{flag} is @code{#t}, cell access checking is enabled,\n"
185 "but no additional calls to garbage collection are issued.\n"
186 "If @var{flag} is a number, cell access checking is enabled,\n"
187 "with an additional garbage collection after the given\n"
188 "number of cell accesses.\n"
189 "This procedure only exists when the compile-time flag\n"
190 "@code{SCM_DEBUG_CELL_ACCESSES} was set to 1.")
191 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
192 {
193 if (SCM_FALSEP (flag)) {
194 scm_debug_cell_accesses_p = 0;
195 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
196 debug_cells_gc_interval = 0;
197 scm_debug_cell_accesses_p = 1;
198 } else if (SCM_INUMP (flag)) {
199 long int f = SCM_INUM (flag);
200 if (f <= 0) SCM_OUT_OF_RANGE (1, flag);
201 debug_cells_gc_interval = f;
202 scm_debug_cell_accesses_p = 1;
203 } else {
204 SCM_WRONG_TYPE_ARG (1, flag);
205 }
206 return SCM_UNSPECIFIED;
207 }
208 #undef FUNC_NAME
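/* An illustrative REPL session for the procedure above (only available
 * when Guile was compiled with SCM_DEBUG_CELL_ACCESSES set to 1, as the
 * docstring notes):
 *
 *   (set-debug-cell-accesses! #t)     ; check every cell access
 *   (set-debug-cell-accesses! 1000)   ; additionally GC every 1000 accesses
 *   (set-debug-cell-accesses! #f)     ; disable checking again
 */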
209
210 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
211
212 \f
213
214 /* {heap tuning parameters}
215 *
216 * These are parameters for controlling memory allocation. The heap
217 * is the area out of which scm_cons, and object headers are allocated.
218 *
219 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
220 * 64 bit machine. The units of the _SIZE parameters are bytes.
221 * Cons pairs and object headers occupy one heap cell.
222 *
223 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
224 * allocated initially the heap will grow by half its current size
225 * each subsequent time more heap is needed.
226 *
227 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
228 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
229 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type size_t. This code
230 * is in scm_init_storage() and alloc_some_heap() in sys.c
231 *
232 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
233 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
234 *
235 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
236 * is needed.
237 *
238 * SCM_INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
239 * trigger a GC.
240 *
241 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
242 * reclaimed by a GC triggered by must_malloc. If less than this is
243 * reclaimed, the trigger threshold is raised. [I don't know what a
244 * good value is. I arbitrarily chose 1/10 of SCM_INIT_MALLOC_LIMIT to
245 * work around an oscillation that caused almost constant GC.]
246 */
247
248 /*
249 * Heap size 45000 and 40% min yield gives quick startup and no extra
250 * heap allocation. Higher min yield values may lead to large
251 * heaps, especially if the code varies its maximum cell
252 * consumption between different freelists.
253 */
254
255 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
256 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
257 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
258 size_t scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
259 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
260 int scm_default_min_yield_1 = 40;
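/* A worked example of the cluster arithmetic above:
 * SCM_DATA_CELLS2CARDS (2000L) rounds 2000 data cells up to a whole
 * number of cards, so SCM_CLUSTER_SIZE_1 is the smallest multiple of
 * SCM_GC_CARD_N_DATA_CELLS that holds at least 2000 single-span cells,
 * and scm_default_init_heap_size_1 likewise rounds the 45000-cell
 * request up to a whole number of clusters of cards, expressed in bytes.
 */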
261
262 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
263 size_t scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
264 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
265 /* The following value may seem large, but note that if we get to GC at
266 * all, this means that we have a numerically intensive application.
267 */
268 int scm_default_min_yield_2 = 40;
269
270 size_t scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
271
272 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
273 #ifdef _QC
274 # define SCM_HEAP_SEG_SIZE 32768L
275 #else
276 # ifdef sequent
277 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
278 # else
279 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
280 # endif
281 #endif
282 /* Make heap grow with factor 1.5 */
283 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
284 #define SCM_INIT_MALLOC_LIMIT 100000
285 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
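/* A worked example of the growth rule above: when the heap must grow,
 * it grows by SCM_EXPHEAP (scm_heap_size) = scm_heap_size / 2, i.e. by
 * half its current size, so e.g. 90000 becomes 135000 -- growth by a
 * factor of 1.5. Similarly, SCM_MTRIGGER_HYSTERESIS works out to
 * 100000 / 10 = 10000 bytes of must_malloc storage.
 */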
286
287 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
288 aligned inner bounds for allocated storage */
289
290 #ifdef PROT386
291 /* in 386 protected mode we must only adjust the offset */
292 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
293 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
294 #else
295 # ifdef _UNICOS
296 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
297 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
298 # else
299 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
300 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
301 # endif /* UNICOS */
302 #endif /* PROT386 */
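/* A small example of the alignment macros above (assuming the common
 * case of sizeof (scm_cell) == 8 and span == 1): CELL_UP rounds a
 * pointer up to the next cell boundary and CELL_DN rounds it down,
 * so CELL_UP (0x1005, 1) == 0x1008 and CELL_DN (0x1005, 1) == 0x1000.
 */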
303
304 #define DOUBLECELL_ALIGNED_P(x) (((2 * sizeof (scm_cell) - 1) & SCM_UNPACK (x)) == 0)
305
306 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
307 #define CLUSTER_SIZE_IN_BYTES(freelist) \
308 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
309
310 \f
311 /* scm_freelists
312 */
313
314 typedef struct scm_t_freelist {
315 /* collected cells */
316 SCM cells;
317 /* number of cells left to collect before cluster is full */
318 unsigned int left_to_collect;
319 /* number of clusters which have been allocated */
320 unsigned int clusters_allocated;
321 /* a list of freelists, each of size cluster_size,
322 * except the last one which may be shorter
323 */
324 SCM clusters;
325 SCM *clustertail;
326 /* this is the number of objects in each cluster, including the spine cell */
327 unsigned int cluster_size;
328 /* indicates that we should grow the heap instead of collecting
329 */
330 int grow_heap_p;
331 /* minimum yield on this list in order not to grow the heap
332 */
333 long min_yield;
334 /* defines min_yield as percent of total heap size
335 */
336 int min_yield_fraction;
337 /* number of cells per object on this list */
338 int span;
339 /* number of collected cells during last GC */
340 unsigned long collected;
341 /* number of collected cells during penultimate GC */
342 unsigned long collected_1;
343 /* total number of cells in heap segments
344 * belonging to this list.
345 */
346 unsigned long heap_size;
347 } scm_t_freelist;
348
349 SCM scm_freelist = SCM_EOL;
350 scm_t_freelist scm_master_freelist = {
351 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0, 0
352 };
353 SCM scm_freelist2 = SCM_EOL;
354 scm_t_freelist scm_master_freelist2 = {
355 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0, 0
356 };
357
358 /* scm_mtrigger
359 * is the number of bytes of must_malloc allocation needed to trigger gc.
360 */
361 unsigned long scm_mtrigger;
362
363 /* scm_gc_heap_lock
364 * If set, don't expand the heap. Set only during gc, during which no allocation
365 * is supposed to take place anyway.
366 */
367 int scm_gc_heap_lock = 0;
368
369 /* GC Blocking
370 * Don't pause for collection if this is set -- just
371 * expand the heap.
372 */
373 int scm_block_gc = 1;
374
375 /* During collection, this accumulates objects holding
376 * weak references.
377 */
378 SCM scm_weak_vectors;
379
380 /* During collection, this accumulates structures which are to be freed.
381 */
382 SCM scm_structs_to_free;
383
384 /* GC Statistics Keeping
385 */
386 unsigned long scm_cells_allocated = 0;
387 unsigned long scm_mallocated = 0;
388 unsigned long scm_gc_cells_collected;
389 unsigned long scm_gc_yield;
390 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
391 unsigned long scm_gc_malloc_collected;
392 unsigned long scm_gc_ports_collected;
393 unsigned long scm_gc_time_taken = 0;
394 static unsigned long t_before_gc;
395 static unsigned long t_before_sweep;
396 unsigned long scm_gc_mark_time_taken = 0;
397 unsigned long scm_gc_sweep_time_taken = 0;
398 unsigned long scm_gc_times = 0;
399 unsigned long scm_gc_cells_swept = 0;
400 double scm_gc_cells_marked_acc = 0.;
401 double scm_gc_cells_swept_acc = 0.;
402
403 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
404 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
405 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
406 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
407 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
408 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
409 SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
410 SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
411 SCM_SYMBOL (sym_times, "gc-times");
412 SCM_SYMBOL (sym_cells_marked, "cells-marked");
413 SCM_SYMBOL (sym_cells_swept, "cells-swept");
414
415 typedef struct scm_t_heap_seg_data
416 {
417 /* lower and upper bounds of the segment */
418 SCM_CELLPTR bounds[2];
419
420 /* address of the master freelist that this segment's cells belong to;
421 segments point to either scm_master_freelist or scm_master_freelist2. */
422 scm_t_freelist *freelist;
423
424 /* number of cells per object in this segment */
425 int span;
426 } scm_t_heap_seg_data;
427
428
429
430 static size_t init_heap_seg (SCM_CELLPTR, size_t, scm_t_freelist *);
431
432 typedef enum { return_on_error, abort_on_error } policy_on_error;
433 static void alloc_some_heap (scm_t_freelist *, policy_on_error);
434
435
436 #define SCM_HEAP_SIZE \
437 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
438 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
439
440 #define BVEC_GROW_SIZE 256
441 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
442 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_t_c_bvec_limb))
443
444 /* mark space allocation */
445
446 typedef struct scm_t_mark_space
447 {
448 scm_t_c_bvec_limb *bvec_space;
449 struct scm_t_mark_space *next;
450 } scm_t_mark_space;
451
452 static scm_t_mark_space *current_mark_space;
453 static scm_t_mark_space **mark_space_ptr;
454 static ptrdiff_t current_mark_space_offset;
455 static scm_t_mark_space *mark_space_head;
456
457 static scm_t_c_bvec_limb *
458 get_bvec ()
459 #define FUNC_NAME "get_bvec"
460 {
461 scm_t_c_bvec_limb *res;
462
463 if (!current_mark_space)
464 {
465 SCM_SYSCALL (current_mark_space = (scm_t_mark_space *) malloc (sizeof (scm_t_mark_space)));
466 if (!current_mark_space)
467 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
468
469 current_mark_space->bvec_space = NULL;
470 current_mark_space->next = NULL;
471
472 *mark_space_ptr = current_mark_space;
473 mark_space_ptr = &(current_mark_space->next);
474
475 return get_bvec ();
476 }
477
478 if (!(current_mark_space->bvec_space))
479 {
480 SCM_SYSCALL (current_mark_space->bvec_space =
481 (scm_t_c_bvec_limb *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
482 if (!(current_mark_space->bvec_space))
483 SCM_MISC_ERROR ("could not grow heap", SCM_EOL);
484
485 current_mark_space_offset = 0;
486
487 return get_bvec ();
488 }
489
490 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
491 {
492 current_mark_space = NULL;
493
494 return get_bvec ();
495 }
496
497 res = current_mark_space->bvec_space + current_mark_space_offset;
498 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
499
500 return res;
501 }
502 #undef FUNC_NAME
503
504
505 static void
506 clear_mark_space ()
507 {
508 scm_t_mark_space *ms;
509
510 for (ms = mark_space_head; ms; ms = ms->next)
511 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
512 }
513
514
515 \f
516 /* Debugging functions. */
517
518 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
519
520 /* Return the number of the heap segment containing CELL. */
521 static long
522 which_seg (SCM cell)
523 {
524 long i;
525
526 for (i = 0; i < scm_n_heap_segs; i++)
527 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
528 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
529 return i;
530 fprintf (stderr, "which_seg: can't find segment containing cell %lux\n",
531 (unsigned long) SCM_UNPACK (cell));
532 abort ();
533 }
534
535
536 static void
537 map_free_list (scm_t_freelist *master, SCM freelist)
538 {
539 long last_seg = -1, count = 0;
540 SCM f;
541
542 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
543 {
544 long this_seg = which_seg (f);
545
546 if (this_seg != last_seg)
547 {
548 if (last_seg != -1)
549 fprintf (stderr, " %5ld %d-cells in segment %ld\n",
550 (long) count, master->span, (long) last_seg);
551 last_seg = this_seg;
552 count = 0;
553 }
554 count++;
555 }
556 if (last_seg != -1)
557 fprintf (stderr, " %5ld %d-cells in segment %ld\n",
558 (long) count, master->span, (long) last_seg);
559 }
560
561 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
562 (),
563 "Print debugging information about the free-list.\n"
564 "@code{map-free-list} is only included in\n"
565 "@code{--enable-guile-debug} builds of Guile.")
566 #define FUNC_NAME s_scm_map_free_list
567 {
568 long i;
569 fprintf (stderr, "%ld segments total (%d:%ld",
570 (long) scm_n_heap_segs,
571 scm_heap_table[0].span,
572 (long) (scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]));
573 for (i = 1; i < scm_n_heap_segs; i++)
574 fprintf (stderr, ", %d:%ld",
575 scm_heap_table[i].span,
576 (long) (scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]));
577 fprintf (stderr, ")\n");
578 map_free_list (&scm_master_freelist, scm_freelist);
579 map_free_list (&scm_master_freelist2, scm_freelist2);
580 fflush (stderr);
581
582 return SCM_UNSPECIFIED;
583 }
584 #undef FUNC_NAME
585
586 static long last_cluster;
587 static long last_size;
588
589 static long
590 free_list_length (char *title, long i, SCM freelist)
591 {
592 SCM ls;
593 long n = 0;
594 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
595 if (SCM_FREE_CELL_P (ls))
596 ++n;
597 else
598 {
599 fprintf (stderr, "bad cell in %s at position %ld\n", title, (long) n);
600 abort ();
601 }
602 if (n != last_size)
603 {
604 if (i > 0)
605 {
606 if (last_cluster == i - 1)
607 fprintf (stderr, "\t%ld\n", (long) last_size);
608 else
609 fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
610 }
611 if (i >= 0)
612 fprintf (stderr, "%s %ld", title, (long) i);
613 else
614 fprintf (stderr, "%s\t%ld\n", title, (long) n);
615 last_cluster = i;
616 last_size = n;
617 }
618 return n;
619 }
620
621 static void
622 free_list_lengths (char *title, scm_t_freelist *master, SCM freelist)
623 {
624 SCM clusters;
625 long i = 0, len, n = 0;
626 fprintf (stderr, "%s\n\n", title);
627 n += free_list_length ("free list", -1, freelist);
628 for (clusters = master->clusters;
629 SCM_NNULLP (clusters);
630 clusters = SCM_CDR (clusters))
631 {
632 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
633 n += len;
634 }
635 if (last_cluster == i - 1)
636 fprintf (stderr, "\t%ld\n", (long) last_size);
637 else
638 fprintf (stderr, "-%ld\t%ld\n", (long) (i - 1), (long) last_size);
639 fprintf (stderr, "\ntotal %ld objects\n\n", (long) n);
640 }
641
642 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
643 (),
644 "Print debugging information about the free-list.\n"
645 "@code{free-list-length} is only included in\n"
646 "@code{--enable-guile-debug} builds of Guile.")
647 #define FUNC_NAME s_scm_free_list_length
648 {
649 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
650 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
651 return SCM_UNSPECIFIED;
652 }
653 #undef FUNC_NAME
654
655 #endif
656
657 #ifdef GUILE_DEBUG_FREELIST
658
659 /* Non-zero if freelist debugging is in effect. Set this via
660 `gc-set-debug-check-freelist!'. */
661 static int scm_debug_check_freelist = 0;
662
663 /* Number of calls to SCM_NEWCELL since startup. */
664 static unsigned long scm_newcell_count;
665 static unsigned long scm_newcell2_count;
666
667 /* Search freelist for anything that isn't marked as a free cell.
668 Abort if we find something. */
669 static void
670 scm_check_freelist (SCM freelist)
671 {
672 SCM f;
673 long i = 0;
674
675 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
676 if (!SCM_FREE_CELL_P (f))
677 {
678 fprintf (stderr, "Bad cell in freelist on newcell %lu: %lu'th elt\n",
679 (long) scm_newcell_count, (long) i);
680 abort ();
681 }
682 }
683
684 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
685 (SCM flag),
686 "If @var{flag} is @code{#t}, check the freelist for consistency\n"
687 "on each cell allocation. This procedure only exists when the\n"
688 "@code{GUILE_DEBUG_FREELIST} compile-time flag was selected.")
689 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
690 {
691 /* [cmm] I did a double-take when I read this code the first time.
692 well, FWIW. */
693 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
694 return SCM_UNSPECIFIED;
695 }
696 #undef FUNC_NAME
697
698
699 SCM
700 scm_debug_newcell (void)
701 {
702 SCM new;
703
704 scm_newcell_count++;
705 if (scm_debug_check_freelist)
706 {
707 scm_check_freelist (scm_freelist);
708 scm_gc();
709 }
710
711 /* The rest of this is supposed to be identical to the SCM_NEWCELL
712 macro. */
713 if (SCM_NULLP (scm_freelist))
714 {
715 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
716 SCM_GC_SET_ALLOCATED (new);
717 }
718 else
719 {
720 new = scm_freelist;
721 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
722 SCM_GC_SET_ALLOCATED (new);
723 }
724
725 return new;
726 }
727
728 SCM
729 scm_debug_newcell2 (void)
730 {
731 SCM new;
732
733 scm_newcell2_count++;
734 if (scm_debug_check_freelist)
735 {
736 scm_check_freelist (scm_freelist2);
737 scm_gc ();
738 }
739
740 /* The rest of this is supposed to be identical to the SCM_NEWCELL
741 macro. */
742 if (SCM_NULLP (scm_freelist2))
743 {
744 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
745 SCM_GC_SET_ALLOCATED (new);
746 }
747 else
748 {
749 new = scm_freelist2;
750 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
751 SCM_GC_SET_ALLOCATED (new);
752 }
753
754 return new;
755 }
756
757 #endif /* GUILE_DEBUG_FREELIST */
758
759 \f
760
761 static unsigned long
762 master_cells_allocated (scm_t_freelist *master)
763 {
764 /* the '- 1' below is to ignore the cluster spine cells. */
765 long objects = master->clusters_allocated * (master->cluster_size - 1);
766 if (SCM_NULLP (master->clusters))
767 objects -= master->left_to_collect;
768 return master->span * objects;
769 }
770
771 static unsigned long
772 freelist_length (SCM freelist)
773 {
774 long n;
775 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
776 ++n;
777 return n;
778 }
779
780 static unsigned long
781 compute_cells_allocated ()
782 {
783 return (scm_cells_allocated
784 + master_cells_allocated (&scm_master_freelist)
785 + master_cells_allocated (&scm_master_freelist2)
786 - scm_master_freelist.span * freelist_length (scm_freelist)
787 - scm_master_freelist2.span * freelist_length (scm_freelist2));
788 }
789
790 /* {Scheme Interface to GC}
791 */
792
793 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
794 (),
795 "Return an association list of statistics about Guile's current\n"
796 "use of storage.")
797 #define FUNC_NAME s_scm_gc_stats
798 {
799 long i;
800 long n;
801 SCM heap_segs;
802 unsigned long int local_scm_mtrigger;
803 unsigned long int local_scm_mallocated;
804 unsigned long int local_scm_heap_size;
805 unsigned long int local_scm_cells_allocated;
806 unsigned long int local_scm_gc_time_taken;
807 unsigned long int local_scm_gc_times;
808 unsigned long int local_scm_gc_mark_time_taken;
809 unsigned long int local_scm_gc_sweep_time_taken;
810 double local_scm_gc_cells_swept;
811 double local_scm_gc_cells_marked;
812 SCM answer;
813
814 SCM_DEFER_INTS;
815
816 ++scm_block_gc;
817
818 retry:
819 heap_segs = SCM_EOL;
820 n = scm_n_heap_segs;
821 for (i = scm_n_heap_segs; i--; )
822 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
823 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
824 heap_segs);
825 if (scm_n_heap_segs != n)
826 goto retry;
827
828 --scm_block_gc;
829
830 /* Below, we cons to produce the resulting list. We want a snapshot of
831 * the heap situation before consing.
832 */
833 local_scm_mtrigger = scm_mtrigger;
834 local_scm_mallocated = scm_mallocated;
835 local_scm_heap_size = SCM_HEAP_SIZE;
836 local_scm_cells_allocated = compute_cells_allocated ();
837 local_scm_gc_time_taken = scm_gc_time_taken;
838 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
839 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
840 local_scm_gc_times = scm_gc_times;
841 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
842 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
843
844 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
845 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
846 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
847 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
848 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
849 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
850 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
851 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
852 scm_cons (sym_cells_marked, scm_i_dbl2big (local_scm_gc_cells_marked)),
853 scm_cons (sym_cells_swept, scm_i_dbl2big (local_scm_gc_cells_swept)),
854 scm_cons (sym_heap_segments, heap_segs),
855 SCM_UNDEFINED);
856 SCM_ALLOW_INTS;
857 return answer;
858 }
859 #undef FUNC_NAME
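/* A usage sketch from Scheme: the result is an association list, so
 * individual figures can be extracted with assq, e.g.
 *
 *   (cdr (assq 'cells-allocated (gc-stats)))
 *   (cdr (assq 'gc-time-taken (gc-stats)))
 */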
860
861
862 static void
863 gc_start_stats (const char *what SCM_UNUSED)
864 {
865 t_before_gc = scm_c_get_internal_run_time ();
866 scm_gc_cells_swept = 0;
867 scm_gc_cells_collected = 0;
868 scm_gc_yield_1 = scm_gc_yield;
869 scm_gc_yield = (scm_cells_allocated
870 + master_cells_allocated (&scm_master_freelist)
871 + master_cells_allocated (&scm_master_freelist2));
872 scm_gc_malloc_collected = 0;
873 scm_gc_ports_collected = 0;
874 }
875
876
877 static void
878 gc_end_stats ()
879 {
880 unsigned long t = scm_c_get_internal_run_time ();
881 scm_gc_time_taken += (t - t_before_gc);
882 scm_gc_sweep_time_taken += (t - t_before_sweep);
883 ++scm_gc_times;
884
885 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
886 scm_gc_cells_swept_acc += scm_gc_cells_swept;
887 }
888
889
890 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
891 (SCM obj),
892 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
893 "returned by this function for @var{obj}")
894 #define FUNC_NAME s_scm_object_address
895 {
896 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
897 }
898 #undef FUNC_NAME
899
900
901 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
902 (),
903 "Scans all of SCM objects and reclaims for further use those that are\n"
904 "no longer accessible.")
905 #define FUNC_NAME s_scm_gc
906 {
907 SCM_DEFER_INTS;
908 scm_igc ("call");
909 SCM_ALLOW_INTS;
910 return SCM_UNSPECIFIED;
911 }
912 #undef FUNC_NAME
913
914
915 \f
916 /* {C Interface For When GC is Triggered}
917 */
918
919 static void
920 adjust_min_yield (scm_t_freelist *freelist)
921 {
922 /* min yield is adjusted upwards so that next predicted total yield
923 * (allocated cells actually freed by GC) becomes
924 * `min_yield_fraction' of total heap size. Note, however, that
925 * the absolute value of min_yield will correspond to `collected'
926 * on one master (the one which currently is triggering GC).
927 *
928 * The reason why we look at total yield instead of cells collected
929 * on one list is that we want to take other freelists into account.
930 * On this freelist, we know that (local) yield = collected cells,
931 * but that's probably not the case on the other lists.
932 *
933 * (We might consider computing a better prediction, for example
934 * by computing an average over multiple GCs.)
935 */
936 if (freelist->min_yield_fraction)
937 {
938 /* Pick largest of last two yields. */
939 long delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
940 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
941 #ifdef DEBUGINFO
942 fprintf (stderr, " after GC = %lu, delta = %ld\n",
943 (long) scm_cells_allocated,
944 (long) delta);
945 #endif
946 if (delta > 0)
947 freelist->min_yield += delta;
948 }
949 }
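/* A worked example of the adjustment above, assuming a total heap of
 * 100000 cells and min_yield_fraction == 40: the target yield is
 * 100000 * 40 / 100 = 40000 cells. If the better of the last two GCs
 * yielded only 30000 cells, delta is 10000 and min_yield rises by that
 * amount, making it more likely that the next cell shortage grows the
 * heap instead of triggering another low-yield collection.
 */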
950
951
952 /* When we get POSIX threads support, the master will be global and
953 * common while the freelist will be individual for each thread.
954 */
955
956 SCM
957 scm_gc_for_newcell (scm_t_freelist *master, SCM *freelist)
958 {
959 SCM cell;
960 ++scm_ints_disabled;
961 do
962 {
963 if (SCM_NULLP (master->clusters))
964 {
965 if (master->grow_heap_p || scm_block_gc)
966 {
967 /* In order to reduce gc frequency, try to allocate a new heap
968 * segment first, even if gc might find some free cells. If we
969 * can't obtain a new heap segment, we will try gc later.
970 */
971 master->grow_heap_p = 0;
972 alloc_some_heap (master, return_on_error);
973 }
974 if (SCM_NULLP (master->clusters))
975 {
976 /* The heap was not grown, either because it wasn't scheduled to
977 * grow, or because there was not enough memory available. In
978 * both cases we have to try gc to get some free cells.
979 */
980 #ifdef DEBUGINFO
981 fprintf (stderr, "allocated = %lu, ",
982 (long) (scm_cells_allocated
983 + master_cells_allocated (&scm_master_freelist)
984 + master_cells_allocated (&scm_master_freelist2)));
985 #endif
986 scm_igc ("cells");
987 adjust_min_yield (master);
988 if (SCM_NULLP (master->clusters))
989 {
990 /* gc could not free any cells. Now, we _must_ allocate a
991 * new heap segment, because there is no other possibility
992 * to provide a new cell for the caller.
993 */
994 alloc_some_heap (master, abort_on_error);
995 }
996 }
997 }
998 cell = SCM_CAR (master->clusters);
999 master->clusters = SCM_CDR (master->clusters);
1000 ++master->clusters_allocated;
1001 }
1002 while (SCM_NULLP (cell));
1003
1004 #ifdef GUILE_DEBUG_FREELIST
1005 scm_check_freelist (cell);
1006 #endif
1007
1008 --scm_ints_disabled;
1009 *freelist = SCM_FREE_CELL_CDR (cell);
1010 return cell;
1011 }
1012
1013
1014 #if 0
1015 /* This is a support routine which can be used to reserve a cluster
1016 * for some special use, such as debugging. It won't be useful until
1017 * free cells are preserved between garbage collections.
1018 */
1019
1020 void
1021 scm_alloc_cluster (scm_t_freelist *master)
1022 {
1023 SCM freelist, cell;
1024 cell = scm_gc_for_newcell (master, &freelist);
1025 SCM_SETCDR (cell, freelist);
1026 return cell;
1027 }
1028 #endif
1029
1030
1031 scm_t_c_hook scm_before_gc_c_hook;
1032 scm_t_c_hook scm_before_mark_c_hook;
1033 scm_t_c_hook scm_before_sweep_c_hook;
1034 scm_t_c_hook scm_after_sweep_c_hook;
1035 scm_t_c_hook scm_after_gc_c_hook;
1036
1037
1038 void
1039 scm_igc (const char *what)
1040 {
1041 long j;
1042
1043 ++scm_gc_running_p;
1044 scm_c_hook_run (&scm_before_gc_c_hook, 0);
1045 #ifdef DEBUGINFO
1046 fprintf (stderr,
1047 SCM_NULLP (scm_freelist)
1048 ? "*"
1049 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
1050 #endif
1051 /* During the critical section, only the current thread may run. */
1052 SCM_CRITICAL_SECTION_START;
1053
1054 /* fprintf (stderr, "gc: %s\n", what); */
1055
1056 if (!scm_stack_base || scm_block_gc)
1057 {
1058 --scm_gc_running_p;
1059 return;
1060 }
1061
1062 gc_start_stats (what);
1063
1064 if (scm_gc_heap_lock)
1065 /* We've invoked the collector while a GC is already in progress.
1066 That should never happen. */
1067 abort ();
1068
1069 ++scm_gc_heap_lock;
1070
1071 /* flush dead entries from the continuation stack */
1072 {
1073 long x;
1074 long bound;
1075 SCM * elts;
1076 elts = SCM_VELTS (scm_continuation_stack);
1077 bound = SCM_VECTOR_LENGTH (scm_continuation_stack);
1078 x = SCM_INUM (scm_continuation_stack_ptr);
1079 while (x < bound)
1080 {
1081 elts[x] = SCM_BOOL_F;
1082 ++x;
1083 }
1084 }
1085
1086 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1087
1088 clear_mark_space ();
1089
1090 #ifndef USE_THREADS
1091
1092 /* Mark objects on the C stack. */
1093 SCM_FLUSH_REGISTER_WINDOWS;
1094 /* This assumes that all registers are saved into the jmp_buf */
1095 setjmp (scm_save_regs_gc_mark);
1096 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
1097 ( (size_t) (sizeof (SCM_STACKITEM) - 1 +
1098 sizeof scm_save_regs_gc_mark)
1099 / sizeof (SCM_STACKITEM)));
1100
1101 {
1102 unsigned long stack_len = scm_stack_size (scm_stack_base);
1103 #ifdef SCM_STACK_GROWS_UP
1104 scm_mark_locations (scm_stack_base, stack_len);
1105 #else
1106 scm_mark_locations (scm_stack_base - stack_len, stack_len);
1107 #endif
1108 }
1109
1110 #else /* USE_THREADS */
1111
1112 /* Mark every thread's stack and registers */
1113 scm_threads_mark_stacks ();
1114
1115 #endif /* USE_THREADS */
1116
1117 j = SCM_NUM_PROTECTS;
1118 while (j--)
1119 scm_gc_mark (scm_sys_protects[j]);
1120
1121 /* mark the registered roots */
1122 {
1123 long i;
1124 for (i = 0; i < SCM_VECTOR_LENGTH (scm_gc_registered_roots); ++i) {
1125 SCM l = SCM_VELTS (scm_gc_registered_roots)[i];
1126 for (; ! SCM_NULLP (l); l = SCM_CDR (l)) {
1127 SCM *p = (SCM *) (scm_num2long (SCM_CAAR (l), 0, NULL));
1128 scm_gc_mark (*p);
1129 }
1130 }
1131 }
1132
1133 /* FIXME: we should have a means to register C functions to be run
1134 * in different phases of GC
1135 */
1136 scm_mark_subr_table ();
1137
1138 #ifndef USE_THREADS
1139 scm_gc_mark (scm_root->handle);
1140 #endif
1141
1142 t_before_sweep = scm_c_get_internal_run_time ();
1143 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1144
1145 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
1146
1147 scm_gc_sweep ();
1148
1149 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1150
1151 --scm_gc_heap_lock;
1152 gc_end_stats ();
1153
1154 SCM_CRITICAL_SECTION_END;
1155 scm_c_hook_run (&scm_after_gc_c_hook, 0);
1156 --scm_gc_running_p;
1157 }
1158
1159 \f
1160
1161 /* {Mark/Sweep}
1162 */
1163
1164 #define MARK scm_gc_mark
1165 #define FNAME "scm_gc_mark"
1166
1167 #endif /*!MARK_DEPENDENCIES*/
1168
1169 /* Mark an object precisely.
1170 */
1171 void
1172 MARK (SCM p)
1173 #define FUNC_NAME FNAME
1174 {
1175 register long i;
1176 register SCM ptr;
1177 scm_t_bits cell_type;
1178
1179 #ifndef MARK_DEPENDENCIES
1180 # define RECURSE scm_gc_mark
1181 #else
1182 /* go through the usual marking, but not for self-cycles. */
1183 # define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
1184 #endif
1185 ptr = p;
1186
1187 #ifdef MARK_DEPENDENCIES
1188 goto gc_mark_loop_first_time;
1189 #endif
1190
1191 /* A simple hack for debugging. Choose the second branch to get a
1192 meaningful backtrace for crashes inside the GC.
1193 */
1194 #if 1
1195 #define goto_gc_mark_loop goto gc_mark_loop
1196 #define goto_gc_mark_nimp goto gc_mark_nimp
1197 #else
1198 #define goto_gc_mark_loop RECURSE(ptr); return
1199 #define goto_gc_mark_nimp RECURSE(ptr); return
1200 #endif
1201
1202 gc_mark_loop:
1203 if (SCM_IMP (ptr))
1204 return;
1205
1206 gc_mark_nimp:
1207
1208 #ifdef MARK_DEPENDENCIES
1209 if (SCM_EQ_P (ptr, p))
1210 return;
1211
1212 scm_gc_mark (ptr);
1213 return;
1214
1215 gc_mark_loop_first_time:
1216 #endif
1217
1218 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1219 /* We are in debug mode. Check the ptr exhaustively. */
1220 if (!scm_cellp (ptr))
1221 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1222 #else
1223 /* In non-debug mode, do at least some cheap testing. */
1224 if (!SCM_CELLP (ptr))
1225 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1226 #endif
1227
1228 #ifndef MARK_DEPENDENCIES
1229
1230 if (SCM_GCMARKP (ptr))
1231 return;
1232
1233 SCM_SETGCMARK (ptr);
1234
1235 #endif
1236
1237 cell_type = SCM_GC_CELL_TYPE (ptr);
1238 switch (SCM_ITAG7 (cell_type))
1239 {
1240 case scm_tcs_cons_nimcar:
1241 if (SCM_IMP (SCM_CDR (ptr)))
1242 {
1243 ptr = SCM_CAR (ptr);
1244 goto_gc_mark_nimp;
1245 }
1246 RECURSE (SCM_CAR (ptr));
1247 ptr = SCM_CDR (ptr);
1248 goto_gc_mark_nimp;
1249 case scm_tcs_cons_imcar:
1250 ptr = SCM_CDR (ptr);
1251 goto_gc_mark_loop;
1252 case scm_tc7_pws:
1253 RECURSE (SCM_SETTER (ptr));
1254 ptr = SCM_PROCEDURE (ptr);
1255 goto_gc_mark_loop;
1256 case scm_tcs_cons_gloc:
1257 {
1258 /* Dirk:FIXME:: The following code is super ugly: ptr may be a
1259 * struct or a gloc. If it is a gloc, the cell word #0 of ptr
1260 * is the address of a scm_tc16_variable smob. If it is a
1261 * struct, the cell word #0 of ptr is a pointer to a struct
1262 * vtable data region. (The fact that these are accessed in
1263 * the same way restricts the possibilities to change the data
1264 * layout of structs or heap cells.) To discriminate between
1265 * the two, it is guaranteed that the scm_vtable_index_vcell
1266 * element of the prospective vtable is always zero. For a
1267 * gloc, this location has the CDR of the variable smob, which
1268 * is guaranteed to be non-zero.
1269 */
1270 scm_t_bits word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1271 scm_t_bits * vtable_data = (scm_t_bits *) word0; /* access as struct */
1272 if (vtable_data [scm_vtable_index_vcell] != 0)
1273 {
1274 /* ptr is a gloc */
1275 SCM gloc_car = SCM_PACK (word0);
1276 RECURSE (gloc_car);
1277 ptr = SCM_CDR (ptr);
1278 goto gc_mark_loop;
1279 }
1280 else
1281 {
1282 /* ptr is a struct */
1283 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
1284 long len = SCM_SYMBOL_LENGTH (layout);
1285 char * fields_desc = SCM_SYMBOL_CHARS (layout);
1286 scm_t_bits * struct_data = (scm_t_bits *) SCM_STRUCT_DATA (ptr);
1287
1288 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1289 {
1290 RECURSE (SCM_PACK (struct_data[scm_struct_i_procedure]));
1291 RECURSE (SCM_PACK (struct_data[scm_struct_i_setter]));
1292 }
1293 if (len)
1294 {
1295 long x;
1296
1297 for (x = 0; x < len - 2; x += 2, ++struct_data)
1298 if (fields_desc[x] == 'p')
1299 RECURSE (SCM_PACK (*struct_data));
1300 if (fields_desc[x] == 'p')
1301 {
1302 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
1303 for (x = *struct_data++; x; --x, ++struct_data)
1304 RECURSE (SCM_PACK (*struct_data));
1305 else
1306 RECURSE (SCM_PACK (*struct_data));
1307 }
1308 }
1309 /* mark vtable */
1310 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1311 goto_gc_mark_loop;
1312 }
1313 }
1314 break;
1315 case scm_tcs_closures:
1316 if (SCM_IMP (SCM_ENV (ptr)))
1317 {
1318 ptr = SCM_CLOSCAR (ptr);
1319 goto_gc_mark_nimp;
1320 }
1321 RECURSE (SCM_CLOSCAR (ptr));
1322 ptr = SCM_ENV (ptr);
1323 goto_gc_mark_nimp;
1324 case scm_tc7_vector:
1325 i = SCM_VECTOR_LENGTH (ptr);
1326 if (i == 0)
1327 break;
1328 while (--i > 0)
1329 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1330 RECURSE (SCM_VELTS (ptr)[i]);
1331 ptr = SCM_VELTS (ptr)[0];
1332 goto_gc_mark_loop;
1333 #ifdef CCLO
1334 case scm_tc7_cclo:
1335 {
1336 size_t i = SCM_CCLO_LENGTH (ptr);
1337 size_t j;
1338 for (j = 1; j != i; ++j)
1339 {
1340 SCM obj = SCM_CCLO_REF (ptr, j);
1341 if (!SCM_IMP (obj))
1342 RECURSE (obj);
1343 }
1344 ptr = SCM_CCLO_REF (ptr, 0);
1345 goto_gc_mark_loop;
1346 }
1347 #endif
1348 #ifdef HAVE_ARRAYS
1349 case scm_tc7_bvect:
1350 case scm_tc7_byvect:
1351 case scm_tc7_ivect:
1352 case scm_tc7_uvect:
1353 case scm_tc7_fvect:
1354 case scm_tc7_dvect:
1355 case scm_tc7_cvect:
1356 case scm_tc7_svect:
1357 #ifdef HAVE_LONG_LONGS
1358 case scm_tc7_llvect:
1359 #endif
1360 #endif
1361 case scm_tc7_string:
1362 break;
1363
1364 case scm_tc7_substring:
1365 ptr = SCM_CDR (ptr);
1366 goto_gc_mark_loop;
1367
1368 case scm_tc7_wvect:
1369 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1370 scm_weak_vectors = ptr;
1371 if (SCM_IS_WHVEC_ANY (ptr))
1372 {
1373 long x;
1374 long len;
1375 int weak_keys;
1376 int weak_values;
1377
1378 len = SCM_VECTOR_LENGTH (ptr);
1379 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1380 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1381
1382 for (x = 0; x < len; ++x)
1383 {
1384 SCM alist;
1385 alist = SCM_VELTS (ptr)[x];
1386
1387 /* mark everything on the alist except the keys or
1388 * values, according to weak_values and weak_keys. */
1389 while ( SCM_CONSP (alist)
1390 && !SCM_GCMARKP (alist)
1391 && SCM_CONSP (SCM_CAR (alist)))
1392 {
1393 SCM kvpair;
1394 SCM next_alist;
1395
1396 kvpair = SCM_CAR (alist);
1397 next_alist = SCM_CDR (alist);
1398 /*
1399 * Do not do this:
1400 * SCM_SETGCMARK (alist);
1401 * SCM_SETGCMARK (kvpair);
1402 *
1403 * It may be that either the key or value is protected by
1404 * an escaped reference to part of the spine of this alist.
1405 * If we mark the spine here, and only mark one or neither of the
1406 * key and value, they may never be properly marked.
1407 * This leads to a horrible situation in which an alist containing
1408 * freelist cells is exported.
1409 *
1410 * So the spines of these alists are only marked at the very end of marking.
1411 * If somebody confuses us by constructing a weak vector
1412 * with a circular alist then we are hosed, but at least we
1413 * won't prematurely drop table entries.
1414 */
1415 if (!weak_keys)
1416 RECURSE (SCM_CAR (kvpair));
1417 if (!weak_values)
1418 RECURSE (SCM_CDR (kvpair));
1419 alist = next_alist;
1420 }
1421 if (SCM_NIMP (alist))
1422 RECURSE (alist);
1423 }
1424 }
1425 break;
1426
1427 case scm_tc7_symbol:
1428 ptr = SCM_PROP_SLOTS (ptr);
1429 goto_gc_mark_loop;
1430 case scm_tcs_subrs:
1431 break;
1432 case scm_tc7_port:
1433 i = SCM_PTOBNUM (ptr);
1434 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1435 if (!(i < scm_numptob))
1436 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1437 #endif
1438 if (SCM_PTAB_ENTRY(ptr))
1439 RECURSE (SCM_FILENAME (ptr));
1440 if (scm_ptobs[i].mark)
1441 {
1442 ptr = (scm_ptobs[i].mark) (ptr);
1443 goto_gc_mark_loop;
1444 }
1445 else
1446 return;
1447 break;
1448 case scm_tc7_smob:
1449 switch (SCM_TYP16 (ptr))
1450 { /* should be faster than going through scm_smobs */
1451 case scm_tc_free_cell:
1452 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1453 case scm_tc16_big:
1454 case scm_tc16_real:
1455 case scm_tc16_complex:
1456 break;
1457 default:
1458 i = SCM_SMOBNUM (ptr);
1459 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1460 if (!(i < scm_numsmob))
1461 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1462 #endif
1463 if (scm_smobs[i].mark)
1464 {
1465 ptr = (scm_smobs[i].mark) (ptr);
1466 goto_gc_mark_loop;
1467 }
1468 else
1469 return;
1470 }
1471 break;
1472 default:
1473 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1474 }
1475 #undef RECURSE
1476 }
1477 #undef FUNC_NAME
1478
1479 #ifndef MARK_DEPENDENCIES
1480
1481 #undef MARK
1482 #undef FNAME
1483
1484 /* And here we define `scm_gc_mark_dependencies', by including this
1485 * same file in itself.
1486 */
1487 #define MARK scm_gc_mark_dependencies
1488 #define FNAME "scm_gc_mark_dependencies"
1489 #define MARK_DEPENDENCIES
1490 #include "gc.c"
1491 #undef MARK_DEPENDENCIES
1492 #undef MARK
1493 #undef FNAME
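/* The net effect of the double compilation, sketched: the first pass
 * (above) defines scm_gc_mark, whose RECURSE expands to a plain call
 * to scm_gc_mark; the second pass, entered through the #include just
 * above with MARK_DEPENDENCIES defined, defines
 * scm_gc_mark_dependencies, whose RECURSE skips self-cycles:
 *
 *   #define RECURSE(x) do { if ((x) != p) scm_gc_mark (x); } while (0)
 */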
1494
1495
1496 /* Mark a Region Conservatively
1497 */
1498
1499 void
1500 scm_mark_locations (SCM_STACKITEM x[], unsigned long n)
1501 {
1502 unsigned long m;
1503
1504 for (m = 0; m < n; ++m)
1505 {
1506 SCM obj = * (SCM *) &x[m];
1507 if (SCM_CELLP (obj))
1508 {
1509 SCM_CELLPTR ptr = SCM2PTR (obj);
1510 long i = 0;
1511 long j = scm_n_heap_segs - 1;
1512 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1513 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1514 {
1515 while (i <= j)
1516 {
1517 long seg_id;
1518 seg_id = -1;
1519 if ((i == j)
1520 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1521 seg_id = i;
1522 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1523 seg_id = j;
1524 else
1525 {
1526 long k;
1527 k = (i + j) / 2;
1528 if (k == i)
1529 break;
1530 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1531 {
1532 j = k;
1533 ++i;
1534 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1535 continue;
1536 else
1537 break;
1538 }
1539 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1540 {
1541 i = k;
1542 --j;
1543 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1544 continue;
1545 else
1546 break;
1547 }
1548 }
1549
1550 if (SCM_GC_IN_CARD_HEADERP (ptr))
1551 break;
1552
1553 if (scm_heap_table[seg_id].span == 1
1554 || DOUBLECELL_ALIGNED_P (obj))
1555 scm_gc_mark (obj);
1556
1557 break;
1558 }
1559 }
1560 }
1561 }
1562 }
1563
1564
1565 /* The function scm_cellp determines whether an SCM value can be regarded as a
1566 * pointer to a cell on the heap. Binary search is used in order to determine
1567 * the heap segment that contains the cell.
1568 */
1569 int
1570 scm_cellp (SCM value)
1571 {
1572 if (SCM_CELLP (value)) {
1573 scm_cell * ptr = SCM2PTR (value);
1574 unsigned long i = 0;
1575 unsigned long j = scm_n_heap_segs - 1;
1576
1577 if (SCM_GC_IN_CARD_HEADERP (ptr))
1578 return 0;
1579
1580 while (i < j) {
1581 long k = (i + j) / 2;
1582 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1583 j = k;
1584 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1585 i = k + 1;
1586 }
1587 }
1588
1589 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1590 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1591 && (scm_heap_table[i].span == 1 || DOUBLECELL_ALIGNED_P (value))
1592 && !SCM_GC_IN_CARD_HEADERP (ptr)
1593 )
1594 return 1;
1595 else
1596 return 0;
1597 } else
1598 return 0;
1599 }
1600
1601
1602 static void
1603 gc_sweep_freelist_start (scm_t_freelist *freelist)
1604 {
1605 freelist->cells = SCM_EOL;
1606 freelist->left_to_collect = freelist->cluster_size;
1607 freelist->clusters_allocated = 0;
1608 freelist->clusters = SCM_EOL;
1609 freelist->clustertail = &freelist->clusters;
1610 freelist->collected_1 = freelist->collected;
1611 freelist->collected = 0;
1612 }
1613
1614 static void
1615 gc_sweep_freelist_finish (scm_t_freelist *freelist)
1616 {
1617 long collected;
1618 *freelist->clustertail = freelist->cells;
1619 if (!SCM_NULLP (freelist->cells))
1620 {
1621 SCM c = freelist->cells;
1622 SCM_SET_CELL_WORD_0 (c, SCM_FREE_CELL_CDR (c));
1623 SCM_SET_CELL_WORD_1 (c, SCM_EOL);
1624 freelist->collected +=
1625 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1626 }
1627 scm_gc_cells_collected += freelist->collected;
1628
1629 /* Although freelist->min_yield is used to test freelist->collected
1630 * (which is the local GC yield for freelist), it is adjusted so
1631 * that *total* yield is freelist->min_yield_fraction of total heap
1632 * size. This means that a too low yield is compensated by more
1633 * heap on the list which is currently doing most work, which is
1634 * just what we want.
1635 */
1636 collected = SCM_MAX (freelist->collected_1, freelist->collected);
1637 freelist->grow_heap_p = (collected < freelist->min_yield);
1638 }
1639
1640 #define NEXT_DATA_CELL(ptr, span) \
1641 do { \
1642 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1643 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1644 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1645 : nxt__); \
1646 } while (0)
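/* In other words: with span == 1, NEXT_DATA_CELL normally steps to the
 * following cell, but when that cell would fall into the header of the
 * next card it skips past the SCM_GC_CARD_N_HEADER_CELLS header cells
 * to that card's first data cell.
 */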
1647
1648 void
1649 scm_gc_sweep ()
1650 #define FUNC_NAME "scm_gc_sweep"
1651 {
1652 register SCM_CELLPTR ptr;
1653 register SCM nfreelist;
1654 register scm_t_freelist *freelist;
1655 register unsigned long m;
1656 register int span;
1657 long i;
1658 size_t seg_size;
1659
1660 m = 0;
1661
1662 gc_sweep_freelist_start (&scm_master_freelist);
1663 gc_sweep_freelist_start (&scm_master_freelist2);
1664
1665 for (i = 0; i < scm_n_heap_segs; i++)
1666 {
1667 register long left_to_collect;
1668 register size_t j;
1669
1670 /* Unmarked cells go onto the front of the freelist this heap
1671 segment points to. Rather than updating the real freelist
1672 pointer as we go along, we accumulate the new head in
1673 nfreelist. Then, if it turns out that the entire segment is
1674 free, we free (i.e., malloc's free) the whole segment, and
1675 simply don't assign nfreelist back into the real freelist. */
1676 freelist = scm_heap_table[i].freelist;
1677 nfreelist = freelist->cells;
1678 left_to_collect = freelist->left_to_collect;
1679 span = scm_heap_table[i].span;
1680
1681 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1682 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1683
1684 /* use only data cells in seg_size */
1685 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1686
1687 scm_gc_cells_swept += seg_size;
1688
1689 for (j = seg_size + span; j -= span; ptr += span)
1690 {
1691 SCM scmptr;
1692
1693 if (SCM_GC_IN_CARD_HEADERP (ptr))
1694 {
1695 SCM_CELLPTR nxt;
1696
1697 /* cheat: ptr is inside a card header, so skip to the card's first data cell */
1698 nxt = ptr;
1699 NEXT_DATA_CELL (nxt, span);
1700 j += span;
1701
1702 ptr = nxt - span;
1703 continue;
1704 }
1705
1706 scmptr = PTR2SCM (ptr);
1707
1708 if (SCM_GCMARKP (scmptr))
1709 continue;
1710
1711 switch SCM_TYP7 (scmptr)
1712 {
1713 case scm_tcs_cons_gloc:
1714 {
1715 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1716 * struct or a gloc. See the corresponding comment in
1717 * scm_gc_mark.
1718 */
1719 scm_t_bits word0 = (SCM_CELL_WORD_0 (scmptr)
1720 - scm_tc3_cons_gloc);
1721 /* access as struct */
1722 scm_t_bits * vtable_data = (scm_t_bits *) word0;
1723 if (vtable_data[scm_vtable_index_vcell] == 0)
1724 {
1725 /* Structs need to be freed in a special order.
1726 * This is handled by GC C hooks in struct.c.
1727 */
1728 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1729 scm_structs_to_free = scmptr;
1730 continue;
1731 }
1732 /* fall through so that scmptr gets collected */
1733 }
1734 break;
1735 case scm_tcs_cons_imcar:
1736 case scm_tcs_cons_nimcar:
1737 case scm_tcs_closures:
1738 case scm_tc7_pws:
1739 break;
1740 case scm_tc7_wvect:
1741 m += (2 + SCM_VECTOR_LENGTH (scmptr)) * sizeof (SCM);
1742 scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
1743 break;
1744 case scm_tc7_vector:
1745 {
1746 unsigned long int length = SCM_VECTOR_LENGTH (scmptr);
1747 if (length > 0)
1748 {
1749 m += length * sizeof (scm_t_bits);
1750 scm_must_free (SCM_VECTOR_BASE (scmptr));
1751 }
1752 break;
1753 }
1754 #ifdef CCLO
1755 case scm_tc7_cclo:
1756 m += (SCM_CCLO_LENGTH (scmptr) * sizeof (SCM));
1757 scm_must_free (SCM_CCLO_BASE (scmptr));
1758 break;
1759 #endif
1760 #ifdef HAVE_ARRAYS
1761 case scm_tc7_bvect:
1762 {
1763 unsigned long int length = SCM_BITVECTOR_LENGTH (scmptr);
1764 if (length > 0)
1765 {
1766 m += sizeof (long) * ((length + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1767 scm_must_free (SCM_BITVECTOR_BASE (scmptr));
1768 }
1769 }
1770 break;
1771 case scm_tc7_byvect:
1772 case scm_tc7_ivect:
1773 case scm_tc7_uvect:
1774 case scm_tc7_svect:
1775 #ifdef HAVE_LONG_LONGS
1776 case scm_tc7_llvect:
1777 #endif
1778 case scm_tc7_fvect:
1779 case scm_tc7_dvect:
1780 case scm_tc7_cvect:
1781 m += SCM_UVECTOR_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
1782 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1783 break;
1784 #endif
1785 case scm_tc7_substring:
1786 break;
1787 case scm_tc7_string:
1788 m += SCM_STRING_LENGTH (scmptr) + 1;
1789 scm_must_free (SCM_STRING_CHARS (scmptr));
1790 break;
1791 case scm_tc7_symbol:
1792 m += SCM_SYMBOL_LENGTH (scmptr) + 1;
1793 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
1794 break;
1795 case scm_tcs_subrs:
1796 /* the various "subrs" (primitives) are never freed */
1797 continue;
1798 case scm_tc7_port:
1799 if SCM_OPENP (scmptr)
1800 {
1801 int k = SCM_PTOBNUM (scmptr);
1802 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1803 if (!(k < scm_numptob))
1804 SCM_MISC_ERROR ("undefined port type", SCM_EOL);
1805 #endif
1806 /* Keep "revealed" ports alive. */
1807 if (scm_revealed_count (scmptr) > 0)
1808 continue;
1809 /* Yes, I really do mean scm_ptobs[k].free */
1810 /* rather than scm_ptobs[k].close. .close */
1811 /* is for an explicit CLOSE-PORT by the user. */
1812 m += (scm_ptobs[k].free) (scmptr);
1813 SCM_SETSTREAM (scmptr, 0);
1814 scm_remove_from_port_table (scmptr);
1815 scm_gc_ports_collected++;
1816 SCM_CLR_PORT_OPEN_FLAG (scmptr);
1817 }
1818 break;
1819 case scm_tc7_smob:
1820 switch SCM_TYP16 (scmptr)
1821 {
1822 case scm_tc_free_cell:
1823 case scm_tc16_real:
1824 break;
1825 #ifdef SCM_BIGDIG
1826 case scm_tc16_big:
1827 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1828 scm_must_free (SCM_BDIGITS (scmptr));
1829 break;
1830 #endif /* def SCM_BIGDIG */
1831 case scm_tc16_complex:
1832 m += sizeof (scm_t_complex);
1833 scm_must_free (SCM_COMPLEX_MEM (scmptr));
1834 break;
1835 default:
1836 {
1837 int k;
1838 k = SCM_SMOBNUM (scmptr);
1839 #if (SCM_DEBUG_CELL_ACCESSES == 1) || (defined (GUILE_DEBUG_FREELIST))
1840 if (!(k < scm_numsmob))
1841 SCM_MISC_ERROR ("undefined smob type", SCM_EOL);
1842 #endif
1843 if (scm_smobs[k].free)
1844 m += (scm_smobs[k].free) (scmptr);
1845 break;
1846 }
1847 }
1848 break;
1849 default:
1850 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1851 }
1852
1853 if (!--left_to_collect)
1854 {
1855 SCM_SET_CELL_WORD_0 (scmptr, nfreelist);
1856 *freelist->clustertail = scmptr;
1857 freelist->clustertail = SCM_CDRLOC (scmptr);
1858
1859 nfreelist = SCM_EOL;
1860 freelist->collected += span * freelist->cluster_size;
1861 left_to_collect = freelist->cluster_size;
1862 }
1863 else
1864 {
1865 /* Stick the new cell on the front of nfreelist. It's
1866 critical that we mark this cell as freed; otherwise, the
1867 conservative collector might trace it as some other type
1868 of object. */
1869 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1870 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
1871 nfreelist = scmptr;
1872 }
1873 }
1874
1875 #ifdef GC_FREE_SEGMENTS
1876 if (n == seg_size)
1877 {
1878 register long j;
1879
1880 freelist->heap_size -= seg_size;
1881 free ((char *) scm_heap_table[i].bounds[0]);
1882 scm_heap_table[i].bounds[0] = 0;
1883 for (j = i + 1; j < scm_n_heap_segs; j++)
1884 scm_heap_table[j - 1] = scm_heap_table[j];
1885 scm_n_heap_segs -= 1;
1886 i--; /* We need to scan the segment just moved. */
1887 }
1888 else
1889 #endif /* ifdef GC_FREE_SEGMENTS */
1890 {
1891 /* Update the real freelist pointer to point to the head of
1892 the list of free cells we've built for this segment. */
1893 freelist->cells = nfreelist;
1894 freelist->left_to_collect = left_to_collect;
1895 }
1896
1897 #ifdef GUILE_DEBUG_FREELIST
1898 scm_map_free_list ();
1899 #endif
1900 }
1901
1902 gc_sweep_freelist_finish (&scm_master_freelist);
1903 gc_sweep_freelist_finish (&scm_master_freelist2);
1904
1905 /* When we move to POSIX threads, private freelists should probably
1906 be GC-protected instead. */
1907 scm_freelist = SCM_EOL;
1908 scm_freelist2 = SCM_EOL;
1909
1910 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1911 scm_gc_yield -= scm_cells_allocated;
1912
1913 if (scm_mallocated < m)
1914 /* The byte count of allocated objects has underflowed. This is
1915 probably because you forgot to report the sizes of objects you
1916 have allocated, by calling scm_done_malloc or some such. When
1917 the GC freed them, it subtracted their size from
1918 scm_mallocated, which underflowed. */
1919 abort ();
1920
1921 scm_mallocated -= m;
1922 scm_gc_malloc_collected = m;
1923 }
1924 #undef FUNC_NAME
1925
1926
1927 \f
1928 /* {Front end to malloc}
1929 *
1930 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1931 * scm_done_free
1932 *
1933 * These functions provide services comparable to malloc, realloc, and
1934 * free. They should be used when allocating memory that will be under
1935 * control of the garbage collector, i.e., if the memory may be freed
1936 * during garbage collection.
1937 */
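/* A minimal usage sketch (illustrative only; foo, FOO_DATA and len are
 * hypothetical names, not part of this file): storage for a
 * collectable object is obtained with scm_must_malloc, which adds the
 * size to scm_mallocated; when the object dies, its free function
 * releases the block with scm_must_free and returns the same size so
 * that the sweep phase can subtract it again.
 *
 *   foo->data = scm_must_malloc (len, "foo-data");
 *   ...
 *   static size_t
 *   foo_free (SCM obj)                // smob free function
 *   {
 *     scm_must_free (FOO_DATA (obj));
 *     return len;                     // report the bytes released
 *   }
 */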
1938
1939 /* scm_must_malloc
1940 * Return newly malloced storage or throw an error.
1941 *
1942 * The parameter WHAT is a string for error reporting.
1943 * If this allocation would push scm_mallocated past the
1944 * threshold scm_mtrigger, or if the first call to malloc fails,
1945 * a garbage collection is run -- on the presumption that some
1946 * objects using malloced storage may be collected.
1947 *
1948 * The limit scm_mtrigger may be raised by this allocation.
1949 */
1950 void *
1951 scm_must_malloc (size_t size, const char *what)
1952 {
1953 void *ptr;
1954 unsigned long nm = scm_mallocated + size;
1955
1956 if (nm < size)
1957 /* The byte count of allocated objects has overflowed. This is
1958 probably because you forgot to report the correct size of freed
1959 memory in some of your smob free methods. */
1960 abort ();
1961
1962 if (nm <= scm_mtrigger)
1963 {
1964 SCM_SYSCALL (ptr = malloc (size));
1965 if (NULL != ptr)
1966 {
1967 scm_mallocated = nm;
1968 #ifdef GUILE_DEBUG_MALLOC
1969 scm_malloc_register (ptr, what);
1970 #endif
1971 return ptr;
1972 }
1973 }
1974
1975 scm_igc (what);
1976
1977 nm = scm_mallocated + size;
1978
1979 if (nm < size)
1980 /* The byte count of allocated objects has overflowed. This is
1981 probably because you forgot to report the correct size of freed
1982 memory in some of your smob free methods. */
1983 abort ();
1984
1985 SCM_SYSCALL (ptr = malloc (size));
1986 if (NULL != ptr)
1987 {
1988 scm_mallocated = nm;
1989 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1990 if (nm > scm_mtrigger)
1991 scm_mtrigger = nm + nm / 2;
1992 else
1993 scm_mtrigger += scm_mtrigger / 2;
1994 }
1995 #ifdef GUILE_DEBUG_MALLOC
1996 scm_malloc_register (ptr, what);
1997 #endif
1998
1999 return ptr;
2000 }
2001
2002 scm_memory_error (what);
2003 }
2004
2005
2006 /* scm_must_realloc
2007 * is similar to scm_must_malloc.
2008 */
2009 void *
2010 scm_must_realloc (void *where,
2011 size_t old_size,
2012 size_t size,
2013 const char *what)
2014 {
2015 void *ptr;
2016 unsigned long nm;
2017
2018 if (size <= old_size)
2019 return where;
2020
2021 nm = scm_mallocated + size - old_size;
2022
2023 if (nm < (size - old_size))
2024 /* The byte count of allocated objects has overflowed. This is
2025 probably because you forgot to report the correct size of freed
2026 memory in some of your smob free methods. */
2027 abort ();
2028
2029 if (nm <= scm_mtrigger)
2030 {
2031 SCM_SYSCALL (ptr = realloc (where, size));
2032 if (NULL != ptr)
2033 {
2034 scm_mallocated = nm;
2035 #ifdef GUILE_DEBUG_MALLOC
2036 scm_malloc_reregister (where, ptr, what);
2037 #endif
2038 return ptr;
2039 }
2040 }
2041
2042 scm_igc (what);
2043
2044 nm = scm_mallocated + size - old_size;
2045
2046 if (nm < (size - old_size))
2047 /* The byte count of allocated objects has overflowed. This is
2048 probably because you forgot to report the correct size of freed
2049 memory in some of your smob free methods. */
2050 abort ();
2051
2052 SCM_SYSCALL (ptr = realloc (where, size));
2053 if (NULL != ptr)
2054 {
2055 scm_mallocated = nm;
2056 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
2057 if (nm > scm_mtrigger)
2058 scm_mtrigger = nm + nm / 2;
2059 else
2060 scm_mtrigger += scm_mtrigger / 2;
2061 }
2062 #ifdef GUILE_DEBUG_MALLOC
2063 scm_malloc_reregister (where, ptr, what);
2064 #endif
2065 return ptr;
2066 }
2067
2068 scm_memory_error (what);
2069 }
2070
2071 char *
2072 scm_must_strndup (const char *str, size_t length)
2073 {
2074 char * dst = scm_must_malloc (length + 1, "scm_must_strndup");
2075 memcpy (dst, str, length);
2076 dst[length] = 0;
2077 return dst;
2078 }
2079
2080 char *
2081 scm_must_strdup (const char *str)
2082 {
2083 return scm_must_strndup (str, strlen (str));
2084 }
2085
2086 void
2087 scm_must_free (void *obj)
2088 #define FUNC_NAME "scm_must_free"
2089 {
2090 #ifdef GUILE_DEBUG_MALLOC
2091 scm_malloc_unregister (obj);
2092 #endif
2093 if (obj)
2094 free (obj);
2095 else
2096 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
2097 }
2098 #undef FUNC_NAME
2099
2100
2101 /* Announce that there has been some malloc done that will be freed
2102 * during gc. A typical use is for a smob that uses some malloced
2103 * memory but cannot get it from scm_must_malloc (for whatever
2104 * reason). When a new object of this smob is created you call
2105 * scm_done_malloc with the size of the object. When your smob free
2106 * function is called, be sure to include this size in the return
2107 * value.
2108 *
2109 * If you can't actually free the memory in the smob free function,
2110 * for whatever reason (like reference counting), you still can (and
2111 * should) report the amount of memory freed when you actually free it.
2112 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
2113 * eh? Or even better, call scm_done_free. */
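/* An illustrative sketch (hypothetical smob; img, IMAGE_PIXELS, w and
 * h are not names from this file): the pixel buffer is allocated
 * behind the GC's back, so its size is announced with scm_done_malloc
 * at creation and reported again from the smob free function.
 *
 *   img->pixels = malloc (w * h);     // not scm_must_malloc
 *   scm_done_malloc (w * h);          // tell the GC about those bytes
 *   ...
 *   static size_t
 *   image_free (SCM obj)
 *   {
 *     free (IMAGE_PIXELS (obj));
 *     return w * h;                   // include the size in the return value
 *   }
 */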
2114
2115 void
2116 scm_done_malloc (long size)
2117 {
2118 if (size < 0) {
2119 if (scm_mallocated < size)
2120 /* The byte count of allocated objects has underflowed. This is
2121 probably because you forgot to report the sizes of objects you
2122 have allocated, by calling scm_done_malloc or some such. When
2123 the GC freed them, it subtracted their size from
2124 scm_mallocated, which underflowed. */
2125 abort ();
2126 } else {
2127 unsigned long nm = scm_mallocated + size;
2128 if (nm < size)
2129 /* The byte count of allocated objects has overflowed. This is
2130 probably because you forgot to report the correct size of freed
2131 memory in some of your smob free methods. */
2132 abort ();
2133 }
2134
2135 scm_mallocated += size;
2136
2137 if (scm_mallocated > scm_mtrigger)
2138 {
2139 scm_igc ("foreign mallocs");
2140 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
2141 {
2142 if (scm_mallocated > scm_mtrigger)
2143 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
2144 else
2145 scm_mtrigger += scm_mtrigger / 2;
2146 }
2147 }
2148 }
2149
2150 void
2151 scm_done_free (long size)
2152 {
2153 if (size >= 0) {
2154 if (scm_mallocated < size)
2155 /* The byte count of allocated objects has underflowed. This is
2156 probably because you forgot to report the sizes of objects you
2157 have allocated, by calling scm_done_malloc or some such. When
2158 the GC freed them, it subtracted their size from
2159 scm_mallocated, which underflowed. */
2160 abort ();
2161 } else {
2162 unsigned long nm = scm_mallocated + size;
2163 if (nm < size)
2164 /* The byte count of allocated objects has overflowed. This is
2165 probably because you forgot to report the correct size of freed
2166 memory in some of your smob free methods. */
2167 abort ();
2168 }
2169
2170 scm_mallocated -= size;
2171 }
2172
2173
2174 \f
2175 /* {Heap Segments}
2176 *
2177 * Each heap segment is an array of objects of a particular size.
2178 * Every segment has an associated (possibly shared) freelist.
2179 * A table of segment records is kept that records the upper and
2180 * lower extents of the segment; this is used during the conservative
2181 * phase of gc to identify probable gc roots (because they point
2182 * into valid segments at reasonable offsets). */
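/* As used below (see gc.h for the authoritative definition), each
 * segment record carries: span (1 for single cells, 2 for double
 * cells), freelist (the freelist this segment feeds), and bounds[0] /
 * bounds[1] (the segment's lower and upper address limits). */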
2183
2184 /* scm_expmem
2185 * is set to one if the initial heap segment was allocated at its
2186 * full requested size. In that case, subsequent segment allocations
2187 * will allocate segments of size SCM_EXPHEAP (scm_heap_size).
2188 */
2189 int scm_expmem = 0;
2190
2191 size_t scm_max_segment_size;
2192
2193 /* scm_heap_org
2194 * is the lowest base address of any heap segment.
2195 */
2196 SCM_CELLPTR scm_heap_org;
2197
2198 scm_t_heap_seg_data * scm_heap_table = 0;
2199 static size_t heap_segment_table_size = 0;
2200 size_t scm_n_heap_segs = 0;
2201
2202 /* init_heap_seg
2203 * initializes a new heap segment and returns its size in bytes, or 0 on failure.
2204 *
2205 * The segment origin and segment size in bytes are input parameters.
2206 * The freelist is both input and output.
2207 *
2208 * This function presumes that the scm_heap_table has already been expanded
2209 * to accommodate a new segment record and that the markbit space was reserved
2210 * for all the cards in this segment.
2211 */
2212
2213 #define INIT_CARD(card, span) \
2214 do { \
2215 SCM_GC_SET_CARD_BVEC (card, get_bvec ()); \
2216 if ((span) == 2) \
2217 SCM_GC_SET_CARD_DOUBLECELL (card); \
2218 } while (0)
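/* Descriptive note: INIT_CARD equips a fresh card with its mark
 * bitvector (from get_bvec) and, when the owning freelist holds
 * double cells, tags the card accordingly so that later phases
 * interpret its cells with the right span. */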
2219
2220 static size_t
2221 init_heap_seg (SCM_CELLPTR seg_org, size_t size, scm_t_freelist *freelist)
2222 {
2223 register SCM_CELLPTR ptr;
2224 SCM_CELLPTR seg_end;
2225 long new_seg_index;
2226 ptrdiff_t n_new_cells;
2227 int span = freelist->span;
2228
2229 if (seg_org == NULL)
2230 return 0;
2231
2232 /* Align the begin ptr up.
2233 */
2234 ptr = SCM_GC_CARD_UP (seg_org);
2235
2236 /* Compute the ceiling on valid object pointers within this segment.
2237 */
2238 seg_end = SCM_GC_CARD_DOWN ((char *)seg_org + size);
2239
2240 /* Find the right place in the segment table and insert the new
2241 * segment record, keeping the table sorted by base address.
2242 */
2243 for (new_seg_index = 0;
2244 ( (new_seg_index < scm_n_heap_segs)
2245 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
2246 new_seg_index++)
2247 ;
2248
2249 {
2250 int i;
2251 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2252 scm_heap_table[i] = scm_heap_table[i - 1];
2253 }
2254
2255 ++scm_n_heap_segs;
2256
2257 scm_heap_table[new_seg_index].span = span;
2258 scm_heap_table[new_seg_index].freelist = freelist;
2259 scm_heap_table[new_seg_index].bounds[0] = ptr;
2260 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2261
2262 /* Compute the number of cells in the new segment. */
2263 n_new_cells = seg_end - ptr;
2264
2265 freelist->heap_size += n_new_cells;
2266
2267 /* Partition objects in this segment into clusters */
2268 {
2269 SCM clusters;
2270 SCM *clusterp = &clusters;
2271
2272 NEXT_DATA_CELL (ptr, span);
2273 while (ptr < seg_end)
2274 {
2275 scm_cell *nxt = ptr;
2276 scm_cell *prv = NULL;
2277 scm_cell *last_card = NULL;
2278 int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
2279 NEXT_DATA_CELL(nxt, span);
2280
2281 /* Allocate cluster spine
2282 */
2283 *clusterp = PTR2SCM (ptr);
2284 SCM_SETCAR (*clusterp, PTR2SCM (nxt));
2285 clusterp = SCM_CDRLOC (*clusterp);
2286 ptr = nxt;
2287
2288 while (n_data_cells--)
2289 {
2290 scm_cell *card = SCM_GC_CELL_CARD (ptr);
2291 SCM scmptr = PTR2SCM (ptr);
2292 nxt = ptr;
2293 NEXT_DATA_CELL (nxt, span);
2294 prv = ptr;
2295
2296 if (card != last_card)
2297 {
2298 INIT_CARD (card, span);
2299 last_card = card;
2300 }
2301
2302 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2303 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (nxt));
2304
2305 ptr = nxt;
2306 }
2307
2308 SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
2309 }
2310
2311 /* sanity check */
2312 {
2313 scm_cell *ref = seg_end;
2314 NEXT_DATA_CELL (ref, span);
2315 if (ref != ptr)
2316 /* [cmm] looks like the segment size doesn't divide cleanly by
2317 cluster size. bad cmm! */
2318 abort();
2319 }
2320
2321 /* Patch up the last cluster pointer in the segment
2322 * to join it to the input freelist.
2323 */
2324 *clusterp = freelist->clusters;
2325 freelist->clusters = clusters;
2326 }
2327
2328 #ifdef DEBUGINFO
2329 fprintf (stderr, "H");
2330 #endif
2331 return size;
2332 }
2333
2334 static size_t
2335 round_to_cluster_size (scm_t_freelist *freelist, size_t len)
2336 {
2337 size_t cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2338
2339 return
2340 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2341 + ALIGNMENT_SLACK (freelist);
2342 }
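/* For example (illustrative numbers only): with CLUSTER_SIZE_IN_BYTES
 * (freelist) == 4096, a request of len == 10000 is rounded up to
 * 3 * 4096 == 12288 bytes, plus the ALIGNMENT_SLACK needed for card
 * alignment. */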
2343
2344 static void
2345 alloc_some_heap (scm_t_freelist *freelist, policy_on_error error_policy)
2346 #define FUNC_NAME "alloc_some_heap"
2347 {
2348 SCM_CELLPTR ptr;
2349 size_t len;
2350
2351 if (scm_gc_heap_lock)
2352 {
2353 /* Critical code sections (such as the garbage collector) aren't
2354 * supposed to add heap segments.
2355 */
2356 fprintf (stderr, "alloc_some_heap: Cannot extend locked heap.\n");
2357 abort ();
2358 }
2359
2360 if (scm_n_heap_segs == heap_segment_table_size)
2361 {
2362 /* We have to expand the heap segment table to have room for the new
2363 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2364 * init_heap_seg only if the allocation of the segment itself succeeds.
2365 */
2366 size_t new_table_size = scm_n_heap_segs + 1;
2367 size_t size = new_table_size * sizeof (scm_t_heap_seg_data);
2368 scm_t_heap_seg_data *new_heap_table;
2369
2370 SCM_SYSCALL (new_heap_table = ((scm_t_heap_seg_data *)
2371 realloc ((char *)scm_heap_table, size)));
2372 if (!new_heap_table)
2373 {
2374 if (error_policy == abort_on_error)
2375 {
2376 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2377 abort ();
2378 }
2379 else
2380 {
2381 return;
2382 }
2383 }
2384 else
2385 {
2386 scm_heap_table = new_heap_table;
2387 heap_segment_table_size = new_table_size;
2388 }
2389 }
2390
2391 /* Pick a size for the new heap segment.
2392 * The rule for picking the size of a segment is explained in
2393 * gc.h
2394 */
2395 {
2396 /* Ensure that the new segment is predicted to be large enough.
2397 *
2398 * New yield should at least equal GC fraction of new heap size, i.e.
2399 *
2400 * y + dh > f * (h + dh)
2401 *
2402 * y : yield
2403 * f : min yield fraction
2404 * h : heap size
2405 * dh : size of new heap segment
2406 *
2407 * This gives dh > (f * h - y) / (1 - f)
2408 */
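/* Since f is expressed as a percentage, substituting f/100 above
 * gives dh > (f * h - 100 * y) / (100 - f); the code below divides
 * by (99 - f) instead, presumably to bias the estimate slightly
 * upward. */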
2409 int f = freelist->min_yield_fraction;
2410 unsigned long h = SCM_HEAP_SIZE;
2411 size_t min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2412 len = SCM_EXPHEAP (freelist->heap_size);
2413 #ifdef DEBUGINFO
2414 fprintf (stderr, "(%ld < %ld)", (long) len, (long) min_cells);
2415 #endif
2416 if (len < min_cells)
2417 len = min_cells + freelist->cluster_size;
2418 len *= sizeof (scm_cell);
2419 /* force new sampling */
2420 freelist->collected = LONG_MAX;
2421 }
2422
2423 if (len > scm_max_segment_size)
2424 len = scm_max_segment_size;
2425
2426 {
2427 size_t smallest;
2428
2429 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2430
2431 if (len < smallest)
2432 len = smallest;
2433
2434 /* Allocate with decaying ambition. */
2435 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2436 && (len >= smallest))
2437 {
2438 size_t rounded_len = round_to_cluster_size (freelist, len);
2439 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2440 if (ptr)
2441 {
2442 init_heap_seg (ptr, rounded_len, freelist);
2443 return;
2444 }
2445 len /= 2;
2446 }
2447 }
2448
2449 if (error_policy == abort_on_error)
2450 {
2451 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2452 abort ();
2453 }
2454 }
2455 #undef FUNC_NAME
2456
2457 \f
2458 /* {GC Protection Helper Functions}
2459 */
2460
2461
2462 /*
2463 * If, within a function, you need to protect one or more scheme objects
2464 * from garbage collection, pass them as parameters to one of the
2465 * scm_remember_upto_here* functions below. These functions don't do
2466 * anything, but since the compiler does not know that they are no-ops,
2467 * it will generate code that calls them with the given parameters.
2468 * Therefore, you can be sure that the compiler will keep those scheme
2469 * values alive (on the stack or in a register) up to the point where
2470 * scm_remember_upto_here* is called. In other words, place the call to
2471 * scm_remember_upto_here* _after_ the last code in your function that
2472 * depends on the scheme object being alive.
2473 *
2474 * Example: We want to make sure that the string object str does not
2475 * get garbage collected during the execution of 'some_function',
2476 * because otherwise the characters belonging to str would be freed and
2477 * 'some_function' might access freed memory. To make sure that the
2478 * compiler keeps str alive (on the stack or in a register, visible to
2479 * the conservative gc), we add the call to scm_remember_upto_here_1
2480 * _after_ the call to 'some_function'. Note that this would not be
2481 * necessary if str were used anyway after the call to 'some_function'.
2482 * char *chars = SCM_STRING_CHARS (str);
2483 * some_function (chars);
2484 * scm_remember_upto_here_1 (str); // str will be alive up to this point.
2485 */
2486
2487 void
2488 scm_remember_upto_here_1 (SCM obj SCM_UNUSED)
2489 {
2490 /* Empty. Protects a single object from garbage collection. */
2491 }
2492
2493 void
2494 scm_remember_upto_here_2 (SCM obj1 SCM_UNUSED, SCM obj2 SCM_UNUSED)
2495 {
2496 /* Empty. Protects two objects from garbage collection. */
2497 }
2498
2499 void
2500 scm_remember_upto_here (SCM obj SCM_UNUSED, ...)
2501 {
2502 /* Empty. Protects any number of objects from garbage collection. */
2503 }
2504
2505
2506 #if (SCM_DEBUG_DEPRECATED == 0)
2507
2508 void
2509 scm_remember (SCM *ptr)
2510 {
2511 scm_c_issue_deprecation_warning ("`scm_remember' is deprecated. "
2512 "Use the `scm_remember_upto_here*' family of functions instead.");
2513 }
2514
2515 SCM
2516 scm_protect_object (SCM obj)
2517 {
2518 scm_c_issue_deprecation_warning ("`scm_protect_object' is deprecated. "
2519 "Use `scm_gc_protect_object' instead.");
2520 return scm_gc_protect_object (obj);
2521 }
2522
2523 SCM
2524 scm_unprotect_object (SCM obj)
2525 {
2526 scm_c_issue_deprecation_warning ("`scm_unprotect_object' is deprecated. "
2527 "Use `scm_gc_unprotect_object' instead.");
2528 return scm_gc_unprotect_object (obj);
2529 }
2530
2531 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2532
2533 /*
2534 These crazy functions prevent garbage collection
2535 of the arguments after the first one by keeping
2536 them live for the duration of the call: passing
2537 them as arguments keeps them visible to the
2538 conservative stack scan.
2539 It'd be better to have a nice compiler hint to
2540 aid the conservative stack-scanning GC. --03/09/00 gjb */
2541 SCM
2542 scm_return_first (SCM elt, ...)
2543 {
2544 return elt;
2545 }
2546
2547 int
2548 scm_return_first_int (int i, ...)
2549 {
2550 return i;
2551 }
2552
2553
2554 SCM
2555 scm_permanent_object (SCM obj)
2556 {
2557 SCM_REDEFER_INTS;
2558 scm_permobjs = scm_cons (obj, scm_permobjs);
2559 SCM_REALLOW_INTS;
2560 return obj;
2561 }
2562
2563
2564 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2565 other references are dropped, until the object is unprotected by calling
2566 scm_gc_unprotect_object (OBJ). Calls to scm_gc_protect/unprotect_object nest,
2567 i.e. it is possible to protect the same object several times, but it is
2568 necessary to unprotect the object the same number of times to actually get
2569 the object unprotected. It is an error to unprotect an object more often
2570 than it has been protected before. The function scm_gc_protect_object
2571 returns OBJ.
2572 */
2573
2574 /* Implementation note: For every object X, there is a counter which
2575 scm_gc_protect_object(X) increments and scm_gc_unprotect_object(X) decrements.
2576 */
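/* A sketch of the nesting behaviour (obj stands for any protected
 * object):
 *
 *   scm_gc_protect_object (obj);     // counter: 1
 *   scm_gc_protect_object (obj);     // counter: 2
 *   scm_gc_unprotect_object (obj);   // counter: 1, still protected
 *   scm_gc_unprotect_object (obj);   // counter: 0, entry removed
 */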
2577
2578 SCM
2579 scm_gc_protect_object (SCM obj)
2580 {
2581 SCM handle;
2582
2583 /* This critical section barrier will be replaced by a mutex. */
2584 SCM_REDEFER_INTS;
2585
2586 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2587 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2588
2589 SCM_REALLOW_INTS;
2590
2591 return obj;
2592 }
2593
2594
2595 /* Remove any protection for OBJ established by a prior call to
2596 scm_gc_protect_object. This function returns OBJ.
2597
2598 See scm_gc_protect_object for more information. */
2599 SCM
2600 scm_gc_unprotect_object (SCM obj)
2601 {
2602 SCM handle;
2603
2604 /* This critical section barrier will be replaced by a mutex. */
2605 SCM_REDEFER_INTS;
2606
2607 handle = scm_hashq_get_handle (scm_protects, obj);
2608
2609 if (SCM_FALSEP (handle))
2610 {
2611 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2612 abort ();
2613 }
2614 else
2615 {
2616 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2617 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2618 scm_hashq_remove_x (scm_protects, obj);
2619 else
2620 SCM_SETCDR (handle, count);
2621 }
2622
2623 SCM_REALLOW_INTS;
2624
2625 return obj;
2626 }
2627
2628 void
2629 scm_gc_register_root (SCM *p)
2630 {
2631 SCM handle;
2632 SCM key = scm_long2num ((long) p);
2633
2634 /* This critical section barrier will be replaced by a mutex. */
2635 SCM_REDEFER_INTS;
2636
2637 handle = scm_hashv_create_handle_x (scm_gc_registered_roots, key, SCM_MAKINUM (0));
2638 SCM_SETCDR (handle, scm_sum (SCM_CDR (handle), SCM_MAKINUM (1)));
2639
2640 SCM_REALLOW_INTS;
2641 }
2642
2643 void
2644 scm_gc_unregister_root (SCM *p)
2645 {
2646 SCM handle;
2647 SCM key = scm_long2num ((long) p);
2648
2649 /* This critical section barrier will be replaced by a mutex. */
2650 SCM_REDEFER_INTS;
2651
2652 handle = scm_hashv_get_handle (scm_gc_registered_roots, key);
2653
2654 if (SCM_FALSEP (handle))
2655 {
2656 fprintf (stderr, "scm_gc_unregister_root called on unregistered root\n");
2657 abort ();
2658 }
2659 else
2660 {
2661 SCM count = scm_difference (SCM_CDR (handle), SCM_MAKINUM (1));
2662 if (SCM_EQ_P (count, SCM_MAKINUM (0)))
2663 scm_hashv_remove_x (scm_gc_registered_roots, key);
2664 else
2665 SCM_SETCDR (handle, count);
2666 }
2667
2668 SCM_REALLOW_INTS;
2669 }
2670
2671 void
2672 scm_gc_register_roots (SCM *b, unsigned long n)
2673 {
2674 SCM *p = b;
2675 for (; p < b + n; ++p)
2676 scm_gc_register_root (p);
2677 }
2678
2679 void
2680 scm_gc_unregister_roots (SCM *b, unsigned long n)
2681 {
2682 SCM *p = b;
2683 for (; p < b + n; ++p)
2684 scm_gc_unregister_root (p);
2685 }
2686
2687 int terminating;
2688
2689 /* called on process termination. */
2690 #ifdef HAVE_ATEXIT
2691 static void
2692 cleanup (void)
2693 #else
2694 #ifdef HAVE_ON_EXIT
2695 extern int on_exit (void (*procp) (), int arg);
2696
2697 static void
2698 cleanup (int status, void *arg)
2699 #else
2700 #error Dont know how to set up a cleanup handler on your system.
2701 #endif
2702 #endif
2703 {
2704 terminating = 1;
2705 scm_flush_all_ports ();
2706 }
2707
2708 \f
2709 static int
2710 make_initial_segment (size_t init_heap_size, scm_t_freelist *freelist)
2711 {
2712 size_t rounded_size = round_to_cluster_size (freelist, init_heap_size);
2713
2714 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2715 rounded_size,
2716 freelist))
2717 {
2718 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2719 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2720 rounded_size,
2721 freelist))
2722 return 1;
2723 }
2724 else
2725 scm_expmem = 1;
2726
2727 if (freelist->min_yield_fraction)
2728 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2729 / 100);
2730 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2731
2732 return 0;
2733 }
2734
2735 \f
2736 static void
2737 init_freelist (scm_t_freelist *freelist,
2738 int span,
2739 long cluster_size,
2740 int min_yield)
2741 {
2742 freelist->clusters = SCM_EOL;
2743 freelist->cluster_size = cluster_size + 1;
2744 freelist->left_to_collect = 0;
2745 freelist->clusters_allocated = 0;
2746 freelist->min_yield = 0;
2747 freelist->min_yield_fraction = min_yield;
2748 freelist->span = span;
2749 freelist->collected = 0;
2750 freelist->collected_1 = 0;
2751 freelist->heap_size = 0;
2752 }
2753
2754
2755 /* Get an integer from an environment variable. */
2756 static int
2757 scm_i_getenv_int (const char *var, int def)
2758 {
2759 char *end, *val = getenv (var);
2760 long res;
2761 if (!val)
2762 return def;
2763 res = strtol (val, &end, 10);
2764 if (end == val)
2765 return def;
2766 return res;
2767 }
2768
2769
2770 int
2771 scm_init_storage ()
2772 {
2773 unsigned long gc_trigger_1;
2774 unsigned long gc_trigger_2;
2775 size_t init_heap_size_1;
2776 size_t init_heap_size_2;
2777 size_t j;
2778
2779 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2780 scm_tc16_allocated = scm_make_smob_type ("allocated cell", 0);
2781 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
2782
2783 j = SCM_NUM_PROTECTS;
2784 while (j)
2785 scm_sys_protects[--j] = SCM_BOOL_F;
2786 scm_block_gc = 1;
2787
2788 scm_freelist = SCM_EOL;
2789 scm_freelist2 = SCM_EOL;
2790 gc_trigger_1 = scm_i_getenv_int ("GUILE_MIN_YIELD_1", scm_default_min_yield_1);
2791 init_freelist (&scm_master_freelist, 1, SCM_CLUSTER_SIZE_1, gc_trigger_1);
2792 gc_trigger_2 = scm_i_getenv_int ("GUILE_MIN_YIELD_2", scm_default_min_yield_2);
2793 init_freelist (&scm_master_freelist2, 2, SCM_CLUSTER_SIZE_2, gc_trigger_2);
2794 scm_max_segment_size = scm_i_getenv_int ("GUILE_MAX_SEGMENT_SIZE", scm_default_max_segment_size);
2795
2796 scm_expmem = 0;
2797
2798 j = SCM_HEAP_SEG_SIZE;
2799 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2800 scm_heap_table = ((scm_t_heap_seg_data *)
2801 scm_must_malloc (sizeof (scm_t_heap_seg_data) * 2, "hplims"));
2802 heap_segment_table_size = 2;
2803
2804 mark_space_ptr = &mark_space_head;
2805
2806 init_heap_size_1 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_1", scm_default_init_heap_size_1);
2807 init_heap_size_2 = scm_i_getenv_int ("GUILE_INIT_SEGMENT_SIZE_2", scm_default_init_heap_size_2);
2808 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2809 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2810 return 1;
2811
2812 /* scm_heap_table[0].bounds[0] can change, so do not remove scm_heap_org. */
2813 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2814
2815 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2816 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2817 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2818 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2819 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2820
2821 /* Initialise the table of ports. */
2822 scm_t_portable = (scm_t_port **)
2823 malloc (sizeof (scm_t_port *) * scm_t_portable_room);
2824 if (!scm_t_portable)
2825 return 1;
2826
2827 #ifdef HAVE_ATEXIT
2828 atexit (cleanup);
2829 #else
2830 #ifdef HAVE_ON_EXIT
2831 on_exit (cleanup, 0);
2832 #endif
2833 #endif
2834
2835 scm_stand_in_procs = SCM_EOL;
2836 scm_permobjs = SCM_EOL;
2837 scm_protects = scm_c_make_hash_table (31);
2838 scm_gc_registered_roots = scm_c_make_hash_table (31);
2839
2840 return 0;
2841 }
2842
2843 \f
2844
2845 SCM scm_after_gc_hook;
2846
2847 static SCM gc_async;
2848
2849 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2850 * is run after the gc, as soon as the asynchronous events are handled by the
2851 * evaluator.
2852 */
2853 static SCM
2854 gc_async_thunk (void)
2855 {
2856 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2857 return SCM_UNSPECIFIED;
2858 }
2859
2860
2861 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2862 * the garbage collection. The only purpose of this function is to mark the
2863 * gc_async (which will eventually lead to the execution of the
2864 * gc_async_thunk).
2865 */
2866 static void *
2867 mark_gc_async (void * hook_data SCM_UNUSED,
2868 void *func_data SCM_UNUSED,
2869 void *data SCM_UNUSED)
2870 {
2871 /* If cell access debugging is enabled, the user may choose to perform
2872 * additional garbage collections after an arbitrary number of cell
2873 * accesses. We don't want the scheme level after-gc-hook to be performed
2874 * for each of these garbage collections for the following reason: The
2875 * execution of the after-gc-hook causes cell accesses itself. Thus, if the
2876 * after-gc-hook was performed with every gc, and if the gc was performed
2877 * after a very small number of cell accesses, then the number of cell
2878 * accesses during the execution of the after-gc-hook will suffice to cause
2879 * the execution of the next gc. Then, guile would keep executing the
2880 * after-gc-hook over and over again, and would never come to do other
2881 * things.
2882 *
2883 * To overcome this problem, if cell access debugging with additional
2884 * garbage collections is enabled, the after-gc-hook is never run by the
2885 * garbage collector. When guile runs with cell access debugging and the
2886 * execution of the after-gc-hook is desired, it is necessary to run the
2887 * hook explicitly from user code. The effect is that, from the scheme
2888 * level point of view, garbage collection seems to be performed with a
2889 * much lower frequency than it actually is.
2890 * Obviously, this will not work for code that depends on a fixed
2891 * one-to-one relationship between the execution counts of the C level
2892 * garbage collection hooks and the execution count of the scheme level
2893 * after-gc-hook.
2894 */
2895 #if (SCM_DEBUG_CELL_ACCESSES == 1)
2896 if (debug_cells_gc_interval == 0)
2897 scm_system_async_mark (gc_async);
2898 #else
2899 scm_system_async_mark (gc_async);
2900 #endif
2901
2902 return NULL;
2903 }
2904
2905
2906 void
2907 scm_init_gc ()
2908 {
2909 SCM after_gc_thunk;
2910
2911 scm_after_gc_hook = scm_permanent_object (scm_make_hook (SCM_INUM0));
2912 scm_c_define ("after-gc-hook", scm_after_gc_hook);
2913
2914 after_gc_thunk = scm_c_make_subr ("%gc-thunk", scm_tc7_subr_0,
2915 gc_async_thunk);
2916 gc_async = scm_system_async (after_gc_thunk); /* protected via scm_asyncs */
2917
2918 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2919
2920 #ifndef SCM_MAGIC_SNARFER
2921 #include "libguile/gc.x"
2922 #endif
2923 }
2924
2925 #endif /*MARK_DEPENDENCIES*/
2926
2927 /*
2928 Local Variables:
2929 c-file-style: "gnu"
2930 End:
2931 */