/* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this software; see the file COPYING.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
 * Boston, MA 02111-1307 USA
 *
 * As a special exception, the Free Software Foundation gives permission
 * for additional uses of the text contained in its release of GUILE.
 *
 * The exception is that, if you link the GUILE library with other files
 * to produce an executable, this does not by itself cause the
 * resulting executable to be covered by the GNU General Public License.
 * Your use of that executable is in no way restricted on account of
 * linking the GUILE library code into it.
 *
 * This exception does not however invalidate any other reasons why
 * the executable file might be covered by the GNU General Public License.
 *
 * This exception applies only to the code released by the
 * Free Software Foundation under the name GUILE.  If you copy
 * code from other Free Software Foundation releases into a copy of
 * GUILE, as the General Public License permits, the exception does
 * not apply to the code that you add in this way.  To avoid misleading
 * anyone as to the status of such modified files, you must delete
 * this exception notice from them.
 *
 * If you write modifications of your own for GUILE, it is your choice
 * whether to permit this exception to apply to your modifications.
 * If you do not wish that, delete this exception notice.  */

/* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
   gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */

/* #define DEBUGINFO */

\f
#include <stdio.h>
#include "libguile/_scm.h"
#include "libguile/eval.h"
#include "libguile/stime.h"
#include "libguile/stackchk.h"
#include "libguile/struct.h"
#include "libguile/smob.h"
#include "libguile/unif.h"
#include "libguile/async.h"
#include "libguile/ports.h"
#include "libguile/root.h"
#include "libguile/strings.h"
#include "libguile/vectors.h"
#include "libguile/weaks.h"
#include "libguile/hashtab.h"

#include "libguile/validate.h"
#include "libguile/gc.h"

#ifdef GUILE_DEBUG_MALLOC
#include "libguile/debug-malloc.h"
#endif

#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __STDC__
#include <stdarg.h>
#define var_start(x, y) va_start(x, y)
#else
#include <varargs.h>
#define var_start(x, y) va_start(x)
#endif

\f

unsigned int scm_gc_running_p = 0;

\f

#if (SCM_DEBUG_CELL_ACCESSES == 1)

unsigned int scm_debug_cell_accesses_p = 0;

/* Assert that the given object is a valid reference to a valid cell.  This
 * test involves determining whether the object is a cell pointer, whether
 * this pointer actually points into a heap segment, and whether the cell
 * pointed to is not a free cell.
 */
void
scm_assert_cell_valid (SCM cell)
{
  if (scm_debug_cell_accesses_p)
    {
      scm_debug_cell_accesses_p = 0;  /* disable to avoid recursion */

      if (!scm_cellp (cell))
        {
          fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
          abort ();
        }
      else if (!scm_gc_running_p)
        {
          /* Dirk::FIXME:: During garbage collection, references to free
             cells do occur.  This is all right during conservative marking,
             but should not happen otherwise (I think).  The case of free
             cells accessed during conservative marking is handled in
             function scm_mark_locations.  However, accesses to free cells
             still occur during gc.  I don't understand why this happens.
             If it is a bug and gets fixed, the following test should also
             work while gc is running.
           */
          if (SCM_FREE_CELL_P (cell))
            {
              fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
              abort ();
            }
        }
      scm_debug_cell_accesses_p = 1;  /* re-enable */
    }
}
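
/* Illustrative use (a sketch, not part of this file): in a build with
 * SCM_DEBUG_CELL_ACCESSES enabled, code that handles raw cells can guard
 * its accesses like this; the hypothetical accessor below aborts via
 * scm_assert_cell_valid if OBJ is not a valid, live cell.
 *
 *   SCM my_cell_ref (SCM obj)
 *   {
 *     scm_assert_cell_valid (obj);
 *     return SCM_CELL_OBJECT_1 (obj);
 *   }
 */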


SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #f, cell access checking is disabled.\n"
            "If FLAG is #t, cell access checking is enabled.\n"
            "This procedure only exists because the compile-time flag\n"
            "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
#define FUNC_NAME s_scm_set_debug_cell_accesses_x
{
  if (SCM_FALSEP (flag)) {
    scm_debug_cell_accesses_p = 0;
  } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
    scm_debug_cell_accesses_p = 1;
  } else {
    SCM_WRONG_TYPE_ARG (1, flag);
  }
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif  /* SCM_DEBUG_CELL_ACCESSES == 1 */

\f

/* {heap tuning parameters}
 *
 * These are parameters for controlling memory allocation.  The heap
 * is the area out of which cells for cons pairs and object headers
 * are allocated.
 *
 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
 * 64 bit machine.  The units of the _SIZE parameters are bytes.
 * Cons pairs and object headers occupy one heap cell.
 *
 * SCM_INIT_HEAP_SIZE is the initial size of heap.  If this much heap is
 * allocated initially the heap will grow by half its current size
 * each subsequent time more heap is needed.
 *
 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
 * heap is needed.  SCM_HEAP_SEG_SIZE must fit into type scm_sizet.  This code
 * is in scm_init_storage() and alloc_some_heap() in sys.c
 *
 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
 *
 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more
 * heap is needed.
 *
 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
 * trigger a GC.
 *
 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
 * reclaimed by a GC triggered by must_malloc.  If less than this is
 * reclaimed, the trigger threshold is raised.  [I don't know what a
 * good value is.  I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
 * work around an oscillation that caused almost constant GC.]
 */

/*
 * A heap size of 45000 cells and a 40% min yield give quick startup and
 * no extra heap allocation.  Higher min yield values may lead to large
 * heaps, especially if code behaviour varies its maximum consumption
 * between different freelists.
 */

#define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
#define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
#define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
                                     / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
int scm_default_min_yield_1 = 40;

#define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
                                     / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
/* The following value may seem large, but note that if we get to GC at
 * all, this means that we have a numerically intensive application.
 */
int scm_default_min_yield_2 = 40;

int scm_default_max_segment_size = 2097000L; /* a little less (adm) than 2 Mb */

#define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
#ifdef _QC
# define SCM_HEAP_SEG_SIZE 32768L
#else
# ifdef sequent
#  define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
# else
#  define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
# endif
#endif
/* Make heap grow with factor 1.5 */
#define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
#define SCM_INIT_MALLOC_LIMIT 100000
#define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
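
/* Worked example (illustrative only): with SCM_EXPHEAP defined as above,
 * each expansion adds half of the current heap size, i.e. the heap grows
 * by a factor of 1.5 overall:
 *
 *   size += SCM_EXPHEAP (size);    100000 -> 150000 -> 225000 -> ...
 *
 * The malloc trigger behaves similarly: scm_mtrigger starts at
 * SCM_INIT_MALLOC_LIMIT (100000 bytes) and, as scm_must_malloc below
 * shows, is raised by about half of itself whenever an allocation made
 * right after a GC still lands within SCM_MTRIGGER_HYSTERESIS (10000
 * bytes) of the trigger.
 */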

/* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
   aligned inner bounds for allocated storage */

#ifdef PROT386
/* in 386 protected mode we must only adjust the offset */
# define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
# define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
#else
# ifdef _UNICOS
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
# else
#  define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
#  define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
# endif  /* UNICOS */
#endif  /* PROT386 */
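
/* Worked example (illustrative only): on a 32 bit machine with 8-byte
 * cells and span 1, the mask is ~7L, so the generic CELL_UP/CELL_DN pair
 * rounds to 8-byte boundaries:
 *
 *   CELL_UP ((char *) 0x1005, 1)  =>  0x1008
 *   CELL_DN ((char *) 0x1005, 1)  =>  0x1000
 *
 * With span 2 (double cells) the mask widens to ~15L and the same input
 * rounds to 0x1010 and 0x1000 respectively.
 */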

#define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
#define CLUSTER_SIZE_IN_BYTES(freelist) \
  (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)

\f
/* scm_freelists
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow the heap instead of GCing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;

SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
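
/* An illustration (not normative) of how these fields hang together: a
 * master's `clusters' field is a list whose elements are clusters, and
 * each cluster is itself a chain of free cells ending in a spine cell,
 * so with cluster_size = 3 the layout is roughly
 *
 *   clusters --> (c1 c2 ...)
 *   c1 --> free-cell -> free-cell -> spine-cell
 *
 * scm_gc_for_newcell below hands out one such chain at a time to the
 * local freelist variables scm_freelist and scm_freelist2.
 */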

/* scm_mtrigger
 * is the number of bytes of must_malloc allocation needed to trigger gc.
 */
unsigned long scm_mtrigger;

/* scm_gc_heap_lock
 * If set, don't expand the heap.  Set only during gc, during which no allocation
 * is supposed to take place anyway.
 */
int scm_gc_heap_lock = 0;

/* GC Blocking
 * Don't pause for collection if this is set -- just
 * expand the heap.
 */
int scm_block_gc = 1;

/* During collection, this accumulates objects holding
 * weak references.
 */
SCM scm_weak_vectors;

/* During collection, this accumulates structures which are to be freed.
 */
SCM scm_structs_to_free;

/* GC Statistics Keeping
 */
unsigned long scm_cells_allocated = 0;
long scm_mallocated = 0;
unsigned long scm_gc_cells_collected;
unsigned long scm_gc_yield;
static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
unsigned long scm_gc_malloc_collected;
unsigned long scm_gc_ports_collected;
unsigned long scm_gc_time_taken = 0;
static unsigned long t_before_gc;
static unsigned long t_before_sweep;
unsigned long scm_gc_mark_time_taken = 0;
unsigned long scm_gc_sweep_time_taken = 0;
unsigned long scm_gc_times = 0;
unsigned long scm_gc_cells_swept = 0;
double scm_gc_cells_marked_acc = 0.;
double scm_gc_cells_swept_acc = 0.;

SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
SCM_SYMBOL (sym_heap_size, "cell-heap-size");
SCM_SYMBOL (sym_mallocated, "bytes-malloced");
SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
SCM_SYMBOL (sym_times, "gc-times");
SCM_SYMBOL (sym_cells_marked, "cells-marked");
SCM_SYMBOL (sym_cells_swept, "cells-swept");

typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;



static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);

typedef enum { return_on_error, abort_on_error } policy_on_error;
static void alloc_some_heap (scm_freelist_t *, policy_on_error);


#define SCM_HEAP_SIZE \
  (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
#define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))

#define BVEC_GROW_SIZE 256
#define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
#define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))

/* mark space allocation */

typedef struct scm_mark_space_t
{
  scm_c_bvec_limb_t *bvec_space;
  struct scm_mark_space_t *next;
} scm_mark_space_t;

static scm_mark_space_t *current_mark_space;
static scm_mark_space_t **mark_space_ptr;
static int current_mark_space_offset;
static scm_mark_space_t *mark_space_head;

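/* Return a fresh mark bit vector (bvec) for one card.  Three cases are
 * handled, each by allocating whatever is missing and then recursing:
 * no current mark space exists yet, the current mark space has no bvec
 * storage yet, or the storage is exhausted and a fresh mark space must
 * be started.
 */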
static scm_c_bvec_limb_t *
get_bvec ()
{
  scm_c_bvec_limb_t *res;

  if (!current_mark_space)
    {
      SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
      if (!current_mark_space)
        scm_wta (SCM_UNDEFINED, "could not grow", "heap");

      current_mark_space->bvec_space = NULL;
      current_mark_space->next = NULL;

      *mark_space_ptr = current_mark_space;
      mark_space_ptr = &(current_mark_space->next);

      return get_bvec ();
    }

  if (!(current_mark_space->bvec_space))
    {
      SCM_SYSCALL (current_mark_space->bvec_space =
                   (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
      if (!(current_mark_space->bvec_space))
        scm_wta (SCM_UNDEFINED, "could not grow", "heap");

      current_mark_space_offset = 0;

      return get_bvec ();
    }

  if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
    {
      current_mark_space = NULL;

      return get_bvec ();
    }

  res = current_mark_space->bvec_space + current_mark_space_offset;
  current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;

  return res;
}

static void
clear_mark_space ()
{
  scm_mark_space_t *ms;

  for (ms = mark_space_head; ms; ms = ms->next)
    memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
}


\f
/* Debugging functions.  */

#if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)

/* Return the number of the heap segment containing CELL.  */
static int
which_seg (SCM cell)
{
  int i;

  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
      return i;
  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
           SCM_UNPACK (cell));
  abort ();
}


static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
        {
          if (last_seg != -1)
            fprintf (stderr, "  %5d %d-cells in segment %d\n",
                     count, master->span, last_seg);
          last_seg = this_seg;
          count = 0;
        }
      count++;
    }
  if (last_seg != -1)
    fprintf (stderr, "  %5d %d-cells in segment %d\n",
             count, master->span, last_seg);
}

SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;
  fprintf (stderr, "%d segments total (%d:%d",
           scm_n_heap_segs,
           scm_heap_table[0].span,
           scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
             scm_heap_table[i].span,
             scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

static int last_cluster;
static int last_size;

static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
        fprintf (stderr, "bad cell in %s at position %d\n", title, n);
        abort ();
      }
  if (n != last_size)
    {
      if (i > 0)
        {
          if (last_cluster == i - 1)
            fprintf (stderr, "\t%d\n", last_size);
          else
            fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
        }
      if (i >= 0)
        fprintf (stderr, "%s %d", title, i);
      else
        fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}

static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}

SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
            "Print debugging information about the free-list.\n"
            "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME

#endif

#ifdef GUILE_DEBUG_FREELIST

/* Number of calls to SCM_NEWCELL since startup.  */
static unsigned long scm_newcell_count;
static unsigned long scm_newcell2_count;

/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  */
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
    if (!SCM_FREE_CELL_P (f))
      {
        fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
                 scm_newcell_count, i);
        abort ();
      }
}

SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
            "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
            "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
            "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* [cmm] I did a double-take when I read this code the first time.
     well, FWIW. */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
      SCM_SET_FREE_CELL_TYPE (new, scm_tc16_allocated);
    }

  return new;
}

SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
      SCM_SET_FREE_CELL_TYPE (new, scm_tc16_allocated);
    }

  return new;
}

#endif /* GUILE_DEBUG_FREELIST */

\f

static unsigned long
master_cells_allocated (scm_freelist_t *master)
{
  /* the '- 1' below is to ignore the cluster spine cells. */
  int objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}

static unsigned long
freelist_length (SCM freelist)
{
  int n;
  for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
    ++n;
  return n;
}

static unsigned long
compute_cells_allocated ()
{
  return (scm_cells_allocated
          + master_cells_allocated (&scm_master_freelist)
          + master_cells_allocated (&scm_master_freelist2)
          - scm_master_freelist.span * freelist_length (scm_freelist)
          - scm_master_freelist2.span * freelist_length (scm_freelist2));
}
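
/* Worked example (illustrative numbers): suppose the 1-cell master has
 * handed out 3 clusters of cluster_size 100, i.e. 3 * 99 = 297 cells
 * once the spine cells are excluded, and 50 of those cells still sit
 * unused on the local scm_freelist.  This function then reports
 * scm_cells_allocated + 297 - 50, counting only cells actually in use.
 */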

/* {Scheme Interface to GC}
 */

SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
            "Returns an association list of statistics about Guile's current use of storage. ")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  long int local_scm_gc_times;
  long int local_scm_gc_mark_time_taken;
  long int local_scm_gc_sweep_time_taken;
  double local_scm_gc_cells_swept;
  double local_scm_gc_cells_marked;
  SCM answer;

  SCM_DEFER_INTS;

  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
                                    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
                          heap_segs);
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;
  local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
  local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
  local_scm_gc_times = scm_gc_times;
  local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
  local_scm_gc_cells_marked = scm_gc_cells_marked_acc;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
                        scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
                        scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
                        scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
                        scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
                        scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
                        scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
                        scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
                        scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
                        scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
                        scm_cons (sym_heap_segments, heap_segs),
                        SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
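
/* Example use (a sketch, not code from this file): from C, a single
 * statistic can be pulled out of the returned alist with scm_assq; the
 * Scheme-level equivalent would be (assq 'cells-allocated (gc-stats)).
 *
 *   SCM stats = scm_gc_stats ();
 *   SCM cells = scm_assq (sym_cells_allocated, stats);
 */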


static void
gc_start_stats (const char *what)
{
  t_before_gc = scm_c_get_internal_run_time ();
  scm_gc_cells_swept = 0;
  scm_gc_cells_collected = 0;
  scm_gc_yield_1 = scm_gc_yield;
  scm_gc_yield = (scm_cells_allocated
                  + master_cells_allocated (&scm_master_freelist)
                  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}


static void
gc_end_stats ()
{
  unsigned long t = scm_c_get_internal_run_time ();
  scm_gc_time_taken += (t - t_before_gc);
  scm_gc_sweep_time_taken += (t - t_before_sweep);
  ++scm_gc_times;

  scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
  scm_gc_cells_swept_acc += scm_gc_cells_swept;
}


SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
820 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
821 "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME


SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
            (),
831 "Scans all of SCM objects and reclaims for further use those that are\n"
832 "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME


\f
/* {C Interface For When GC is Triggered}
 */

static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that the next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   * by computing an average over multiple GCs.)
   */
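  /* Worked example (illustrative numbers): with SCM_HEAP_SIZE = 100000
   * cells and min_yield_fraction = 40, the target yield is 40000 cells.
   * If the better of the last two yields was only 30000 cells, delta is
   * 10000 and min_yield is raised by that amount, making it more likely
   * that the next shortage grows the heap instead of triggering a GC.
   */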
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields. */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
                   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
               scm_cells_allocated,
               delta);
#endif
      if (delta > 0)
        freelist->min_yield += delta;
    }
}


/* When we get POSIX threads support, the master will be global and
 * common while the freelist will be individual for each thread.
 */

SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
        {
          if (master->grow_heap_p || scm_block_gc)
            {
              /* In order to reduce gc frequency, try to allocate a new heap
               * segment first, even if gc might find some free cells.  If we
               * can't obtain a new heap segment, we will try gc later.
               */
              master->grow_heap_p = 0;
              alloc_some_heap (master, return_on_error);
            }
          if (SCM_NULLP (master->clusters))
            {
              /* The heap was not grown, either because it wasn't scheduled to
               * grow, or because there was not enough memory available.  In
               * both cases we have to try gc to get some free cells.
               */
#ifdef DEBUGINFO
              fprintf (stderr, "allocated = %d, ",
                       scm_cells_allocated
                       + master_cells_allocated (&scm_master_freelist)
                       + master_cells_allocated (&scm_master_freelist2));
#endif
              scm_igc ("cells");
              adjust_min_yield (master);
              if (SCM_NULLP (master->clusters))
                {
                  /* gc could not free any cells.  Now, we _must_ allocate a
                   * new heap segment, because there is no other possibility
                   * to provide a new cell for the caller.
                   */
                  alloc_some_heap (master, abort_on_error);
                }
            }
        }
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  while (SCM_NULLP (cell));

#ifdef GUILE_DEBUG_FREELIST
  scm_check_freelist (cell);
#endif

  --scm_ints_disabled;
  *freelist = SCM_FREE_CELL_CDR (cell);
  SCM_SET_FREE_CELL_TYPE (cell, scm_tc16_allocated);
  return cell;
}
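
/* Illustrative expansion (a sketch, simplified from scm_debug_newcell
 * above): the SCM_NEWCELL macro takes cells from the local freelist and
 * only calls scm_gc_for_newcell when that list runs dry.
 *
 *   if (SCM_NULLP (scm_freelist))
 *     cell = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
 *   else
 *     {
 *       cell = scm_freelist;
 *       scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
 *       SCM_SET_FREE_CELL_TYPE (cell, scm_tc16_allocated);
 *     }
 */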


#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif


scm_c_hook_t scm_before_gc_c_hook;
scm_c_hook_t scm_before_mark_c_hook;
scm_c_hook_t scm_before_sweep_c_hook;
scm_c_hook_t scm_after_sweep_c_hook;
scm_c_hook_t scm_after_gc_c_hook;


void
scm_igc (const char *what)
{
  int j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  fprintf (stderr,
           SCM_NULLP (scm_freelist)
           ? "*"
           : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
#ifdef USE_THREADS
  /* During the critical section, only the current thread may run. */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  if (!scm_stack_base || scm_block_gc)
    {
      --scm_gc_running_p;
      return;
    }

  gc_start_stats (what);

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
        elts[x] = SCM_BOOL_F;
        ++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

  clear_mark_space ();

#ifndef USE_THREADS

  /* Protect from the C stack.  This must be the first marking
   * done because it provides information about what objects
   * are "in-use" by the C code.  "in-use" objects are those
   * for which the values from SCM_LENGTH and SCM_CHARS must remain
   * usable.  This requirement is stricter than a liveness
   * requirement -- in particular, it constrains the implementation
   * of scm_vector_set_length_x.
   */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
                      ((scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
                                    sizeof scm_save_regs_gc_mark)
                       / sizeof (SCM_STACKITEM)));

  {
    scm_sizet stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* FIXME: insert a phase to un-protect string-data preserved
   * in scm_vector_set_length_x.
   */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  t_before_sweep = scm_c_get_internal_run_time ();
  scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  gc_end_stats ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}

\f

/* {Mark/Sweep}
 */



/* Mark an object precisely.
 */
void
scm_gc_mark (SCM p)
#define FUNC_NAME "scm_gc_mark"
{
  register long i;
  register SCM ptr;

  ptr = p;

gc_mark_loop:
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:
  if (!SCM_CELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);

#if (defined (GUILE_DEBUG_FREELIST))

  if (SCM_GC_IN_CARD_HEADERP (SCM2PTR (ptr)))
    scm_wta (ptr, "rogue pointer in heap", NULL);

#endif

  if (SCM_GCMARKP (ptr))
    return;

  SCM_SETGCMARK (ptr);

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CAR (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      {
        /* Dirk:FIXME:: The following code is super ugly:  ptr may be a struct
         * or a gloc.  If it is a gloc, the cell word #0 of ptr is a pointer
         * to a heap cell.  If it is a struct, the cell word #0 of ptr is a
         * pointer to a struct vtable data region.  The fact that these are
         * accessed in the same way restricts the possibilities to change the
         * data layout of structs or heap cells.
         */
        scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
        if (vtable_data [scm_vtable_index_vcell] != 0)
          {
            /* ptr is a gloc */
            SCM gloc_car = SCM_PACK (word0);
            scm_gc_mark (gloc_car);
            ptr = SCM_CDR (ptr);
            goto gc_mark_loop;
          }
        else
          {
            /* ptr is a struct */
            SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
            int len = SCM_LENGTH (layout);
            char * fields_desc = SCM_CHARS (layout);
            scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);

            if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
              {
                scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
                scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
              }
            if (len)
              {
                int x;

                for (x = 0; x < len - 2; x += 2, ++struct_data)
                  if (fields_desc[x] == 'p')
                    scm_gc_mark (SCM_PACK (*struct_data));
                if (fields_desc[x] == 'p')
                  {
                    if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                      for (x = *struct_data; x; --x)
                        scm_gc_mark (SCM_PACK (*++struct_data));
                    else
                      scm_gc_mark (SCM_PACK (*struct_data));
                  }
              }
            /* mark vtable */
            ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
            goto gc_mark_loop;
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CLOSCAR (ptr));
      ptr = SCM_CDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
    case scm_tc7_lvector:
#ifdef CCLO
    case scm_tc7_cclo:
#endif
      i = SCM_LENGTH (ptr);
      if (i == 0)
        break;
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          scm_gc_mark (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
    case scm_tc7_contin:
      if (SCM_VELTS (ptr))
        scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
                            (scm_sizet)
                            (SCM_LENGTH (ptr) +
                             (sizeof (SCM_STACKITEM) + -1 +
                              sizeof (scm_contregs)) /
                             sizeof (SCM_STACKITEM)));
      break;
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      break;

    case scm_tc7_substring:
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    scm_gc_mark (SCM_CAR (kvpair));
                  if (!weak_values)
                    scm_gc_mark (SCM_CDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                scm_gc_mark (alist);
            }
        }
      break;

    case scm_tc7_msymbol:
      scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
      ptr = SCM_SYMBOL_PROPS (ptr);
      goto gc_mark_loop;
    case scm_tc7_ssymbol:
    case scm_tcs_subrs:
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
        goto def;
      if (SCM_PTAB_ENTRY (ptr))
        scm_gc_mark (SCM_PTAB_ENTRY (ptr)->file_name);
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      switch (SCM_TYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_allocated:
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          break;
        default:
          i = SCM_SMOBNUM (ptr);
          if (!(i < scm_numsmob))
            goto def;
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
    def:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
}
#undef FUNC_NAME


/* Mark a Region Conservatively
 */

void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = * (SCM *) &x[m];
      if (SCM_CELLP (obj))
        {
          SCM_CELLPTR ptr = SCM2PTR (obj);
          int i = 0;
          int j = scm_n_heap_segs - 1;
          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
              && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
            {
              while (i <= j)
                {
                  int seg_id;
                  seg_id = -1;
                  if ((i == j)
                      || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                    seg_id = i;
                  else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                    seg_id = j;
                  else
                    {
                      int k;
                      k = (i + j) / 2;
                      if (k == i)
                        break;
                      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                        {
                          j = k;
                          ++i;
                          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                            continue;
                          else
                            break;
                        }
                      else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                        {
                          i = k;
                          --j;
                          if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                            continue;
                          else
                            break;
                        }
                    }

                  if (SCM_GC_IN_CARD_HEADERP (ptr))
                    break;

                  if (scm_heap_table[seg_id].span == 1
                      || SCM_DOUBLE_CELLP (obj))
                    {
                      if (!SCM_FREE_CELL_P (obj))
                        scm_gc_mark (obj);
                    }
                  break;
                }
            }
        }
    }
}


/* The function scm_cellp determines whether an SCM value can be regarded as a
 * pointer to a cell on the heap.  Binary search is used in order to determine
 * the heap segment that contains the cell.
 */
int
scm_cellp (SCM value)
{
  if (SCM_CELLP (value)) {
    scm_cell * ptr = SCM2PTR (value);
    unsigned int i = 0;
    unsigned int j = scm_n_heap_segs - 1;

    while (i < j) {
      int k = (i + j) / 2;
      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
        j = k;
      } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
        i = k + 1;
      }
    }

    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
        && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
        && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))
        && !SCM_GC_IN_CARD_HEADERP (ptr)
        )
      return 1;
    else
      return 0;
  } else
    return 0;
}


static void
gc_sweep_freelist_start (scm_freelist_t *freelist)
{
  freelist->cells = SCM_EOL;
  freelist->left_to_collect = freelist->cluster_size;
  freelist->clusters_allocated = 0;
  freelist->clusters = SCM_EOL;
  freelist->clustertail = &freelist->clusters;
  freelist->collected_1 = freelist->collected;
  freelist->collected = 0;
}

static void
gc_sweep_freelist_finish (scm_freelist_t *freelist)
{
  int collected;
  *freelist->clustertail = freelist->cells;
  if (!SCM_NULLP (freelist->cells))
    {
      SCM c = freelist->cells;
      SCM_SETCAR (c, SCM_CDR (c));
      SCM_SETCDR (c, SCM_EOL);
      freelist->collected +=
        freelist->span * (freelist->cluster_size - freelist->left_to_collect);
    }
  scm_gc_cells_collected += freelist->collected;

  /* Although freelist->min_yield is used to test freelist->collected
   * (which is the local GC yield for freelist), it is adjusted so
   * that *total* yield is freelist->min_yield_fraction of total heap
   * size.  This means that a too low yield is compensated by more
   * heap on the list which is currently doing most work, which is
   * just what we want.
   */
  collected = SCM_MAX (freelist->collected_1, freelist->collected);
  freelist->grow_heap_p = (collected < freelist->min_yield);
}

#define NEXT_DATA_CELL(ptr, span) \
  do { \
    scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
    (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
             CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
             : nxt__); \
  } while (0)
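
/* Example (illustrative): NEXT_DATA_CELL advances PTR to the next data
 * cell, stepping over card boundaries.  If PTR is the last data cell of
 * a card, CELL_UP lands on the next card's header; the macro detects
 * this with SCM_GC_IN_CARD_HEADERP and skips past the card's
 * SCM_GC_CARD_N_HEADER_CELLS header cells to its first data cell.
 */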

void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist.  */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;

      /* use only data cells in seg_size */
      seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;

      scm_gc_cells_swept += seg_size;

      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr;

          if (SCM_GC_IN_CARD_HEADERP (ptr))
            {
              SCM_CELLPTR nxt;

              /* cheat here */
              nxt = ptr;
              NEXT_DATA_CELL (nxt, span);
              j += span;

              ptr = nxt - span;
              continue;
            }

          scmptr = PTR2SCM (ptr);

          if (SCM_GCMARKP (scmptr))
            continue;

          switch SCM_TYP7 (scmptr)
            {
            case scm_tcs_cons_gloc:
              {
                /* Dirk:FIXME:: Again, super ugly code:  scmptr may be a
                 * struct or a gloc.  See the corresponding comment in
                 * scm_gc_mark.
                 */
                scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
                                    - scm_tc3_cons_gloc);
                /* access as struct */
                scm_bits_t * vtable_data = (scm_bits_t *) word0;
                if (vtable_data[scm_vtable_index_vcell] == 0)
                  {
                    /* Structs need to be freed in a special order.
                     * This is handled by GC C hooks in struct.c.
                     */
                    SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
                    scm_structs_to_free = scmptr;
                    continue;
                  }
                /* fall through so that scmptr gets collected */
              }
              break;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              break;
            case scm_tc7_wvect:
              m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
              scm_must_free ((char *) (SCM_VELTS (scmptr) - 2));
              break;
            case scm_tc7_vector:
            case scm_tc7_lvector:
#ifdef CCLO
            case scm_tc7_cclo:
#endif
              m += (SCM_LENGTH (scmptr) * sizeof (SCM));
            freechars:
              scm_must_free (SCM_CHARS (scmptr));
              /* SCM_SETCHARS (scmptr, 0); */
              break;
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
              goto freechars;
            case scm_tc7_byvect:
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
              goto freechars;
            case scm_tc7_ivect:
            case scm_tc7_uvect:
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
              goto freechars;
            case scm_tc7_svect:
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
              goto freechars;
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
              goto freechars;
#endif
            case scm_tc7_fvect:
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
              goto freechars;
            case scm_tc7_dvect:
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
              goto freechars;
            case scm_tc7_cvect:
              m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
              goto freechars;
#endif
            case scm_tc7_substring:
              break;
            case scm_tc7_string:
              m += SCM_HUGE_LENGTH (scmptr) + 1;
              goto freechars;
            case scm_tc7_msymbol:
              m += (SCM_LENGTH (scmptr) + 1
                    + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
              scm_must_free ((char *) SCM_SLOTS (scmptr));
              break;
            case scm_tc7_contin:
              m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
              if (SCM_VELTS (scmptr))
                goto freechars;
            case scm_tc7_ssymbol:
              break;
            case scm_tcs_subrs:
              /* the various "subrs" (primitives) are never freed */
              continue;
            case scm_tc7_port:
              if SCM_OPENP (scmptr)
                {
                  int k = SCM_PTOBNUM (scmptr);
                  if (!(k < scm_numptob))
                    goto sweeperr;
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free */
                  /* rather than ftobs[k].close.  .close */
                  /* is for explicit CLOSE-PORT by user */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_SETAND_CAR (scmptr, ~SCM_OPN);
                }
              break;
            case scm_tc7_smob:
              switch SCM_TYP16 (scmptr)
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  goto freechars;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  m += 2 * sizeof (double);
                  goto freechars;
                default:
                  {
                    int k;
                    k = SCM_SMOBNUM (scmptr);
                    if (!(k < scm_numsmob))
                      goto sweeperr;
                    m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
            default:
            sweeperr:
              SCM_MISC_ERROR ("unknown type", SCM_EOL);
            }

          if (!--left_to_collect)
            {
              SCM_SETCAR (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object.  */
              SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
              SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }
        }

#ifdef GC_FREE_SEGMENTS
      if (n == seg_size)
        {
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--;  /* We need to scan the segment just moved.  */
        }
      else
#endif /* ifdef GC_FREE_SEGMENTS */
        {
          /* Update the real freelist pointer to point to the head of
             the list of free cells we've built for this segment.  */
          freelist->cells = nfreelist;
          freelist->left_to_collect = left_to_collect;
        }

#ifdef GUILE_DEBUG_FREELIST
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead.  */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME


\f
/* {Front end to malloc}
 *
 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
 * scm_done_free
 *
 * These functions provide services comparable to malloc, realloc, and
 * free.  They are for allocating malloced parts of scheme objects.
 * The primary purpose of the front end is to impose calls to gc.  */


/* scm_must_malloc
 * Return newly malloced storage or throw an error.
 *
 * The parameter WHAT is a string for error reporting.
 * If this allocation would push total malloc usage past the threshold
 * scm_mtrigger, or if the first call to malloc fails, garbage collect --
 * on the presumption that some objects using malloced storage may be
 * collected.
 *
 * The limit scm_mtrigger may be raised by this allocation.
 */
void *
scm_must_malloc (scm_sizet size, const char *what)
{
  void *ptr;
  unsigned long nm = scm_mallocated + size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = malloc (size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_register (ptr, what);
#endif
          return ptr;
        }
    }

  scm_igc (what);

  nm = scm_mallocated + size;
  SCM_SYSCALL (ptr = malloc (size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_register (ptr, what);
#endif

      return ptr;
    }

  scm_memory_error (what);
}


/* scm_must_realloc
 * is similar to scm_must_malloc.
 */
void *
scm_must_realloc (void *where,
                  scm_sizet old_size,
                  scm_sizet size,
                  const char *what)
{
  void *ptr;
  scm_sizet nm = scm_mallocated + size - old_size;

  if (nm <= scm_mtrigger)
    {
      SCM_SYSCALL (ptr = realloc (where, size));
      if (NULL != ptr)
        {
          scm_mallocated = nm;
#ifdef GUILE_DEBUG_MALLOC
          scm_malloc_reregister (where, ptr, what);
#endif
          return ptr;
        }
    }

  scm_igc (what);

  nm = scm_mallocated + size - old_size;
  SCM_SYSCALL (ptr = realloc (where, size));
  if (NULL != ptr)
    {
      scm_mallocated = nm;
      if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
        if (nm > scm_mtrigger)
          scm_mtrigger = nm + nm / 2;
        else
          scm_mtrigger += scm_mtrigger / 2;
      }
#ifdef GUILE_DEBUG_MALLOC
      scm_malloc_reregister (where, ptr, what);
#endif
      return ptr;
    }

  scm_memory_error (what);
}


void
scm_must_free (void *obj)
#define FUNC_NAME "scm_must_free"
{
#ifdef GUILE_DEBUG_MALLOC
  scm_malloc_unregister (obj);
#endif
  if (obj)
    free (obj);
  else
    SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
}
#undef FUNC_NAME


/* Announce that there has been some malloc done that will be freed
 * during gc.  A typical use is for a smob that uses some malloced
 * memory but cannot get it from scm_must_malloc (for whatever
 * reason).  When a new object of this smob is created you call
 * scm_done_malloc with the size of the object.  When your smob free
 * function is called, be sure to include this size in the return
 * value.
 *
 * If you can't actually free the memory in the smob free function,
 * for whatever reason (like reference counting), you still can (and
 * should) report the amount of memory freed when you actually free it.
 * Do it by calling scm_done_malloc with the _negated_ size.  Clever,
 * eh?  Or even better, call scm_done_free. */

void
scm_done_malloc (long size)
{
  scm_mallocated += size;

  if (scm_mallocated > scm_mtrigger)
    {
      scm_igc ("foreign mallocs");
      if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
        {
          if (scm_mallocated > scm_mtrigger)
            scm_mtrigger = scm_mallocated + scm_mallocated / 2;
          else
            scm_mtrigger += scm_mtrigger / 2;
        }
    }
}

void
scm_done_free (long size)
{
  scm_mallocated -= size;
}
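
/* Sketch of the protocol described above (hypothetical smob; the
 * `blob' type and its BLOB_* accessors are illustrative, not real
 * Guile API):
 *
 *   SCM
 *   make_blob (scm_sizet len)
 *   {
 *     char *mem = (char *) malloc (len);   -- storage the GC can't see
 *     ...
 *     scm_done_malloc ((long) len);        -- report it to the GC
 *     ...
 *   }
 *
 *   static scm_sizet
 *   free_blob (SCM obj)
 *   {
 *     free (BLOB_MEM (obj));
 *     return BLOB_LEN (obj);               -- include the size freed
 *   }
 */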


\f
/* {Heap Segments}
 *
 * Each heap segment is an array of objects of a particular size.
 * Every segment has an associated (possibly shared) freelist.
 * A table of segment records is kept that records the upper and
 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
 * into valid segments at reasonable offsets). */
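
/* Illustrative restatement (a sketch, not the exact test; the real
 * code lives in the marking phase, see scm_mark_locations): a word W
 * found on the stack is only a candidate root if some segment I
 * brackets it, roughly
 *
 *   SCM_PTR_LE (scm_heap_table[I].bounds[0], (SCM_CELLPTR) W)
 *   && SCM_PTR_GT (scm_heap_table[I].bounds[1], (SCM_CELLPTR) W)
 *
 * and W additionally sits at a valid cell offset for that segment's
 * span.  */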

/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

scm_heap_seg_data_t *scm_heap_table = 0;
static unsigned int heap_segment_table_size = 0;
int scm_n_heap_segs = 0;

/* init_heap_seg
 * initializes a new heap segment and returns the number of bytes the
 * segment spans, or 0 if SEG_ORG is NULL.
 *
 * The segment origin and segment size in bytes are input parameters.
 * The freelist is both input and output.
 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record and that the markbit space was reserved
 * for all the cards in this segment.
 */
#define INIT_CARD(card, span) \
  do { \
    SCM_GC_CARD_BVEC (card) = get_bvec (); \
    if ((span) == 2) \
      SCM_GC_SET_CARD_DOUBLECELL (card); \
  } while (0)

static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Align the begin ptr up.
   */
  ptr = SCM_GC_CARD_UP (seg_org);

  /* Compute the ceiling on valid object pointers within this segment.
   */
  seg_end = SCM_GC_CARD_DOWN ((char *) seg_org + size);

  /* Find the right place and insert the segment record.
   */
  for (new_seg_index = 0;
       ((new_seg_index < scm_n_heap_segs)
        && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;

    NEXT_DATA_CELL (ptr, span);
    while (ptr < seg_end)
      {
        scm_cell *nxt = ptr;
        scm_cell *prv = NULL;
        scm_cell *last_card = NULL;
        int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
        NEXT_DATA_CELL (nxt, span);

        /* Allocate cluster spine
         */
        *clusterp = PTR2SCM (ptr);
        SCM_SETCAR (*clusterp, PTR2SCM (nxt));
        clusterp = SCM_CDRLOC (*clusterp);
        ptr = nxt;

        while (n_data_cells--)
          {
            scm_cell *card = SCM_GC_CELL_CARD (ptr);
            SCM scmptr = PTR2SCM (ptr);
            nxt = ptr;
            NEXT_DATA_CELL (nxt, span);
            prv = ptr;

            if (card != last_card)
              {
                INIT_CARD (card, span);
                last_card = card;
              }

            SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
            SCM_SETCDR (scmptr, PTR2SCM (nxt));

            ptr = nxt;
          }

        SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
      }

    /* sanity check */
    {
      scm_cell *ref = seg_end;
      NEXT_DATA_CELL (ref, span);
      if (ref != ptr)
        /* [cmm] looks like the segment size doesn't divide cleanly by
           cluster size.  bad cmm! */
        abort ();
    }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}

static scm_sizet
round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
{
  scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);

  return
    (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
    + ALIGNMENT_SLACK (freelist);
}
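
/* Worked example (numbers purely illustrative): if
 * CLUSTER_SIZE_IN_BYTES (freelist) were 4096 and LEN were 10000, then
 * (10000 + 4095) / 4096 == 3, so LEN rounds up to 3 * 4096 == 12288
 * bytes, to which ALIGNMENT_SLACK adds room for card-aligning the
 * segment origin.  */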

static void
alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
#define FUNC_NAME "alloc_some_heap"
{
  SCM_CELLPTR ptr;
  long len;

  if (scm_gc_heap_lock)
    {
      /* Critical code sections (such as the garbage collector) aren't
       * supposed to add heap segments.
       */
      fprintf (stderr, "alloc_some_heap: Cannot extend locked heap.\n");
      abort ();
    }

  if (scm_n_heap_segs == heap_segment_table_size)
    {
      /* We have to expand the heap segment table to have room for the new
       * segment.  Do not yet increment scm_n_heap_segs -- that is done by
       * init_heap_seg only if the allocation of the segment itself succeeds.
       */
      unsigned int new_table_size = scm_n_heap_segs + 1;
      size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
      scm_heap_seg_data_t *new_heap_table;

      SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
                                     realloc ((char *) scm_heap_table, size)));
      if (!new_heap_table)
        {
          if (error_policy == abort_on_error)
            {
              fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
              abort ();
            }
          else
            {
              return;
            }
        }
      else
        {
          scm_heap_table = new_heap_table;
          heap_segment_table_size = new_table_size;
        }
    }

  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  {
    /* Ensure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f)
     */
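    /* Since f is expressed as a percentage, multiplying the inequality
     * through by 100 gives  dh > (f * h - 100 * y) / (100 - f);  the
     * divisor 99 - f used below errs on the side of a slightly larger
     * segment.  */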
    int f = freelist->min_yield_fraction;
    long h = SCM_HEAP_SIZE;
    long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%ld < %ld)", len, min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    scm_sizet smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition. */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
           && (len >= smallest))
      {
        scm_sizet rounded_len = round_to_cluster_size (freelist, len);
        SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
        if (ptr)
          {
            init_heap_seg (ptr, rounded_len, freelist);
            return;
          }
        len /= 2;
      }
  }

  if (error_policy == abort_on_error)
    {
      fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
      abort ();
    }
}
#undef FUNC_NAME


SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
            "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1, name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
        {
          SCM cell = PTR2SCM (p);
          if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
            {
              /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
               * struct cell.  See the corresponding comment in scm_gc_mark.
               */
              scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
              SCM gloc_car = SCM_PACK (word0); /* access as gloc */
              SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
              if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
                  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
                {
                  SCM_SET_CELL_OBJECT_0 (cell, name);
                }
            }
          ++p;
        }
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME


\f
/* {GC Protection Helper Functions}
 */


void
scm_remember (SCM *ptr)
{ /* empty */ }


/* These crazy functions prevent garbage collection of arguments after
   the first argument: the caller must keep those arguments live on its
   stack for the duration of the call, which is why the call to
   scm_return_first is typically placed on the last line of a code
   block.
   It'd be better to have a nice compiler hint to
   aid the conservative stack-scanning GC. --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}
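
/* Typical use (a sketch; RESULT, STR and the string accessor are
 * hypothetical): keep STR alive while C code still reads its unpacked
 * character data:
 *
 *   char *mem = SCM_STRING_CHARS (str);
 *   ... work on mem ...
 *   return scm_return_first (result, str);
 */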


SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}


/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_unprotect_object (OBJ).  Calls to scm_protect/unprotect_object nest,
   i.e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more often
   than it has been protected before.  The function scm_protect_object returns
   OBJ.
*/

/* Implementation note:  For every object X, there is a counter which
   scm_protect_object (X) increments and scm_unprotect_object (X) decrements.
*/

SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}


/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}
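
/* Nesting sketch (illustrative): two independent subsystems may pin
 * the same object; it stays protected until both have released it:
 *
 *   scm_protect_object (obj);    -- count goes 0 -> 1
 *   scm_protect_object (obj);    -- count goes 1 -> 2
 *   scm_unprotect_object (obj);  -- count goes 2 -> 1, still protected
 *   scm_unprotect_object (obj);  -- count goes 1 -> 0, entry removed
 */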

int terminating;

/* Called on process termination.  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Do not know how to set up a cleanup handler on your system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}

\f
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);

  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                      rounded_size,
                      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                          rounded_size,
                          freelist))
        return 1;
    }
  else
    scm_expmem = 1;

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
                           / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}
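
/* For instance (numbers purely illustrative): with a heap of 20000
 * cells and a min_yield_fraction of 40, min_yield becomes
 * 20000 * 40 / 100 == 8000 cells, and grow_heap_p starts out false
 * because the heap size already exceeds the minimum yield.  */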

\f
static void
init_freelist (scm_freelist_t *freelist,
               int span,
               int cluster_size,
               int min_yield)
{
  freelist->clusters = SCM_EOL;
  freelist->cluster_size = cluster_size + 1;
  freelist->left_to_collect = 0;
  freelist->clusters_allocated = 0;
  freelist->min_yield = 0;
  freelist->min_yield_fraction = min_yield;
  freelist->span = span;
  freelist->collected = 0;
  freelist->collected_1 = 0;
  freelist->heap_size = 0;
}

int
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
                  scm_sizet init_heap_size_2, int gc_trigger_2,
                  scm_sizet max_segment_size)
{
  scm_sizet j;

  if (!init_heap_size_1)
    init_heap_size_1 = scm_default_init_heap_size_1;
  if (!init_heap_size_2)
    init_heap_size_2 = scm_default_init_heap_size_2;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  init_freelist (&scm_master_freelist,
                 1, SCM_CLUSTER_SIZE_1,
                 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
  init_freelist (&scm_master_freelist2,
                 2, SCM_CLUSTER_SIZE_2,
                 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
  scm_max_segment_size
    = max_segment_size ? max_segment_size : scm_default_max_segment_size;

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  scm_heap_table = ((scm_heap_seg_data_t *)
                    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change.  do not remove scm_heap_org */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the port table.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
  scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif

  return 0;
}
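
/* Callers normally pass 0 for any parameter whose compiled-in default
 * should be used, e.g. (sketch of a hypothetical embedding):
 *
 *   if (scm_init_storage (0, 0, 0, 0, 0))
 *     {
 *       fprintf (stderr, "out of memory during initialization\n");
 *       exit (1);
 *     }
 */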

\f

SCM scm_after_gc_hook;

#if (SCM_DEBUG_DEPRECATED == 0)
static SCM scm_gc_vcell;  /* the vcell for gc-thunk.  */
#endif /* SCM_DEBUG_DEPRECATED == 0 */
static SCM gc_async;


/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);

#if (SCM_DEBUG_DEPRECATED == 0)

  /* The following code will be removed in Guile 1.5.  */
  if (SCM_NFALSEP (scm_gc_vcell))
    {
      SCM proc = SCM_CDR (scm_gc_vcell);

      if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
        scm_apply (proc, SCM_EOL, SCM_EOL);
    }

#endif /* SCM_DEBUG_DEPRECATED == 0 */

  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void *hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}


void
scm_init_gc ()
{
  SCM after_gc_thunk;

  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

#if (SCM_DEBUG_DEPRECATED == 0)
  scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
#endif /* SCM_DEBUG_DEPRECATED == 0 */
  /* Dirk:FIXME:: We don't really want a binding here. */
  after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}
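
/* C-level clients can observe collections the same way mark_gc_async
 * does (a sketch; my_after_gc is hypothetical):
 *
 *   static void *
 *   my_after_gc (void *hook_data, void *func_data, void *data)
 *   {
 *     fprintf (stderr, "gc finished\n");
 *     return NULL;
 *   }
 *
 *   scm_c_hook_add (&scm_after_gc_c_hook, my_after_gc, NULL, 0);
 */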

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/