1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 \f
48 #include <stdio.h>
49 #include "libguile/_scm.h"
50 #include "libguile/eval.h"
51 #include "libguile/stime.h"
52 #include "libguile/stackchk.h"
53 #include "libguile/struct.h"
54 #include "libguile/smob.h"
55 #include "libguile/unif.h"
56 #include "libguile/async.h"
57 #include "libguile/ports.h"
58 #include "libguile/root.h"
59 #include "libguile/strings.h"
60 #include "libguile/vectors.h"
61 #include "libguile/weaks.h"
62 #include "libguile/hashtab.h"
63
64 #include "libguile/validate.h"
65 #include "libguile/gc.h"
66
67 #ifdef GUILE_DEBUG_MALLOC
68 #include "libguile/debug-malloc.h"
69 #endif
70
71 #ifdef HAVE_MALLOC_H
72 #include <malloc.h>
73 #endif
74
75 #ifdef HAVE_UNISTD_H
76 #include <unistd.h>
77 #endif
78
79 #ifdef __STDC__
80 #include <stdarg.h>
81 #define var_start(x, y) va_start(x, y)
82 #else
83 #include <varargs.h>
84 #define var_start(x, y) va_start(x)
85 #endif
86
87 \f
88
89 unsigned int scm_gc_running_p = 0;
90
91 \f
92
93 #if (SCM_DEBUG_CELL_ACCESSES == 1)
94
95 unsigned int scm_debug_cell_accesses_p = 0;
96
97
 98 /* Assert that the given object is a valid reference to a valid cell. This
 99  * test involves determining whether the object is a cell pointer, whether
100  * this pointer actually points into a heap segment, and whether the cell
101  * pointed to is not a free cell.
102  */
103 void
104 scm_assert_cell_valid (SCM cell)
105 {
106 if (scm_debug_cell_accesses_p)
107 {
108 scm_debug_cell_accesses_p = 0; /* disable to avoid recursion */
109
110 if (!scm_cellp (cell))
111 {
112 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
113 abort ();
114 }
115 else if (!scm_gc_running_p)
116 {
117 	  /* Dirk::FIXME:: During garbage collection, references to free cells
118 	     do occur. This is all right during conservative marking, but
119 	     should not happen otherwise (I think). The case of free cells
120 	     accessed during conservative marking is handled in function
121 	     scm_mark_locations. However, accesses to free cells still occur
122 	     during gc. I don't understand why this happens. If it is
123 	     a bug and gets fixed, the following test should also work while
124 	     gc is running.
125 	   */
126 if (SCM_FREE_CELL_P (cell))
127 {
128 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
129 abort ();
130 }
131 }
132 scm_debug_cell_accesses_p = 1; /* re-enable */
133 }
134 }
135
136
137 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
138 (SCM flag),
139 "If FLAG is #f, cell access checking is disabled.\n"
140 "If FLAG is #t, cell access checking is enabled.\n"
141 "This procedure only exists because the compile-time flag\n"
142 "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
143 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
144 {
145 if (SCM_FALSEP (flag)) {
146 scm_debug_cell_accesses_p = 0;
147 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
148 scm_debug_cell_accesses_p = 1;
149 } else {
150 SCM_WRONG_TYPE_ARG (1, flag);
151 }
152 return SCM_UNSPECIFIED;
153 }
154 #undef FUNC_NAME
155
156 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
157
158 \f
159
160 /* {heap tuning parameters}
161 *
162 * These are parameters for controlling memory allocation. The heap
163  * is the area out of which cons pairs and object headers are allocated.
164 *
165 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
166 * 64 bit machine. The units of the _SIZE parameters are bytes.
167 * Cons pairs and object headers occupy one heap cell.
168 *
169 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
170 * allocated initially the heap will grow by half its current size
171 * each subsequent time more heap is needed.
172 *
173 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
174 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
175 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
176  * is in scm_init_storage() and alloc_some_heap() in this file.
177 *
178 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
179 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
180 *
181 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
182 * is needed.
183 *
184 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
185 * trigger a GC.
186 *
187 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
188 * reclaimed by a GC triggered by must_malloc. If less than this is
189 * reclaimed, the trigger threshold is raised. [I don't know what a
190 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
191  * work around an oscillation that caused almost constant GC.]
192 */
193
194 /*
195  * A heap size of 45000 and a 40% min yield give quick startup and no
196  * extra heap allocation. Higher min yield values may lead to large
197  * heaps, especially if the code's maximum consumption varies between
198  * the different freelists.
199 */
200
201 #define SCM_DATA_CELLS2CARDS(n) (((n) + SCM_GC_CARD_N_DATA_CELLS - 1) / SCM_GC_CARD_N_DATA_CELLS)
202 #define SCM_CARDS_PER_CLUSTER SCM_DATA_CELLS2CARDS (2000L)
203 #define SCM_CLUSTER_SIZE_1 (SCM_CARDS_PER_CLUSTER * SCM_GC_CARD_N_DATA_CELLS)
204 int scm_default_init_heap_size_1 = (((SCM_DATA_CELLS2CARDS (45000L) + SCM_CARDS_PER_CLUSTER - 1)
205 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
206 int scm_default_min_yield_1 = 40;
207
208 #define SCM_CLUSTER_SIZE_2 (SCM_CARDS_PER_CLUSTER * (SCM_GC_CARD_N_DATA_CELLS / 2))
209 int scm_default_init_heap_size_2 = (((SCM_DATA_CELLS2CARDS (2500L * 2) + SCM_CARDS_PER_CLUSTER - 1)
210 / SCM_CARDS_PER_CLUSTER) * SCM_GC_CARD_SIZE);
211 /* The following value may seem large, but note that if we get to GC at
212  * all, this means that we have a numerically intensive application.
213 */
214 int scm_default_min_yield_2 = 40;
215
216 int scm_default_max_segment_size = 2097000L;  /* a little less than 2 MB */
217
218 #define SCM_MIN_HEAP_SEG_SIZE (8 * SCM_GC_CARD_SIZE)
219 #ifdef _QC
220 # define SCM_HEAP_SEG_SIZE 32768L
221 #else
222 # ifdef sequent
223 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
224 # else
225 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
226 # endif
227 #endif
228 /* Make the heap grow by a factor of 1.5 */
229 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
230 #define SCM_INIT_MALLOC_LIMIT 100000
231 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
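/* A minimal sketch (not compiled in) of how the tuning macros above
 * combine: a heap of SIZE cells grows to SIZE + SCM_EXPHEAP (SIZE)
 * cells, which is the factor of 1.5 mentioned above.  The malloc
 * trigger logic itself lives in scm_must_malloc further below.
 */
#if 0
static scm_sizet
example_grown_heap_size (scm_sizet size)
{
  /* e.g. 100000 cells -> 150000 cells */
  return size + SCM_EXPHEAP (size);
}
#endif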
232
233 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find (scm_cell * span)
234 aligned inner bounds for allocated storage */
235
236 #ifdef PROT386
237 /* in 386 protected mode we must only adjust the offset */
238 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
239 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
240 #else
241 # ifdef _UNICOS
242 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
243 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
244 # else
245 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
246 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
247 # endif /* UNICOS */
248 #endif /* PROT386 */
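/* A minimal sketch (not compiled in) of the alignment macros above:
 * CELL_UP rounds a raw pointer up to the next boundary of
 * sizeof (scm_cell) * span bytes, CELL_DN rounds down.  With 8-byte
 * single cells, CELL_UP (0x1001, 1) yields 0x1008 and
 * CELL_DN (0x1001, 1) yields 0x1000.
 */
#if 0
static void
example_segment_bounds (char *raw, scm_sizet len)
{
  SCM_CELLPTR first = CELL_UP (raw, 1);        /* first aligned cell */
  SCM_CELLPTR end = CELL_DN (raw + len, 1);    /* aligned upper bound */
  /* [first, end) is the usable, cell-aligned part of the block.  */
}
#endif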
249
250 #define ALIGNMENT_SLACK(freelist) (SCM_GC_CARD_SIZE - 1)
251 #define CLUSTER_SIZE_IN_BYTES(freelist) \
252 (((freelist)->cluster_size / (SCM_GC_CARD_N_DATA_CELLS / (freelist)->span)) * SCM_GC_CARD_SIZE)
253
254 \f
255 /* scm_freelists
256 */
257
258 typedef struct scm_freelist_t {
259 /* collected cells */
260 SCM cells;
261 /* number of cells left to collect before cluster is full */
262 unsigned int left_to_collect;
263 /* number of clusters which have been allocated */
264 unsigned int clusters_allocated;
265 /* a list of freelists, each of size cluster_size,
266 * except the last one which may be shorter
267 */
268 SCM clusters;
269 SCM *clustertail;
270 /* this is the number of objects in each cluster, including the spine cell */
271 int cluster_size;
272   /* indicates that we should grow the heap instead of collecting
273 */
274 int grow_heap_p;
275 /* minimum yield on this list in order not to grow the heap
276 */
277 long min_yield;
278 /* defines min_yield as percent of total heap size
279 */
280 int min_yield_fraction;
281 /* number of cells per object on this list */
282 int span;
283 /* number of collected cells during last GC */
284 long collected;
285 /* number of collected cells during penultimate GC */
286 long collected_1;
287 /* total number of cells in heap segments
288 * belonging to this list.
289 */
290 long heap_size;
291 } scm_freelist_t;
292
293 SCM scm_freelist = SCM_EOL;
294 scm_freelist_t scm_master_freelist = {
295 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
296 };
297 SCM scm_freelist2 = SCM_EOL;
298 scm_freelist_t scm_master_freelist2 = {
299 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
300 };
301
302 /* scm_mtrigger
303 * is the number of bytes of must_malloc allocation needed to trigger gc.
304 */
305 unsigned long scm_mtrigger;
306
307 /* scm_gc_heap_lock
308 * If set, don't expand the heap. Set only during gc, during which no allocation
309 * is supposed to take place anyway.
310 */
311 int scm_gc_heap_lock = 0;
312
313 /* GC Blocking
314 * Don't pause for collection if this is set -- just
315 * expand the heap.
316 */
317 int scm_block_gc = 1;
318
319 /* During collection, this accumulates objects holding
320 * weak references.
321 */
322 SCM scm_weak_vectors;
323
324 /* During collection, this accumulates structures which are to be freed.
325 */
326 SCM scm_structs_to_free;
327
328 /* GC Statistics Keeping
329 */
330 unsigned long scm_cells_allocated = 0;
331 long scm_mallocated = 0;
332 unsigned long scm_gc_cells_collected;
333 unsigned long scm_gc_yield;
334 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
335 unsigned long scm_gc_malloc_collected;
336 unsigned long scm_gc_ports_collected;
337 unsigned long scm_gc_time_taken = 0;
338 static unsigned long t_before_gc;
339 static unsigned long t_before_sweep;
340 unsigned long scm_gc_mark_time_taken = 0;
341 unsigned long scm_gc_sweep_time_taken = 0;
342 unsigned long scm_gc_times = 0;
343 unsigned long scm_gc_cells_swept = 0;
344 double scm_gc_cells_marked_acc = 0.;
345 double scm_gc_cells_swept_acc = 0.;
346
347 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
348 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
349 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
350 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
351 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
352 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
353 SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
354 SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
355 SCM_SYMBOL (sym_times, "gc-times");
356 SCM_SYMBOL (sym_cells_marked, "cells-marked");
357 SCM_SYMBOL (sym_cells_swept, "cells-swept");
358
359 typedef struct scm_heap_seg_data_t
360 {
361 /* lower and upper bounds of the segment */
362 SCM_CELLPTR bounds[2];
363
364 /* address of the head-of-freelist pointer for this segment's cells.
365      All segments usually point to one of the master freelists. */
366 scm_freelist_t *freelist;
367
368 /* number of cells per object in this segment */
369 int span;
370 } scm_heap_seg_data_t;
371
372
373
374 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
375
376 typedef enum { return_on_error, abort_on_error } policy_on_error;
377 static void alloc_some_heap (scm_freelist_t *, policy_on_error);
378
379
380 #define SCM_HEAP_SIZE \
381 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
382 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
383
384 #define BVEC_GROW_SIZE 256
385 #define BVEC_GROW_SIZE_IN_LIMBS (SCM_GC_CARD_BVEC_SIZE_IN_LIMBS * BVEC_GROW_SIZE)
386 #define BVEC_GROW_SIZE_IN_BYTES (BVEC_GROW_SIZE_IN_LIMBS * sizeof (scm_c_bvec_limb_t))
387
388 /* mark space allocation */
389
390 typedef struct scm_mark_space_t
391 {
392 scm_c_bvec_limb_t *bvec_space;
393 struct scm_mark_space_t *next;
394 } scm_mark_space_t;
395
396 static scm_mark_space_t *current_mark_space;
397 static scm_mark_space_t **mark_space_ptr;
398 static int current_mark_space_offset;
399 static scm_mark_space_t *mark_space_head;
400
401 static scm_c_bvec_limb_t *
402 get_bvec ()
403 {
404 scm_c_bvec_limb_t *res;
405
406 if (!current_mark_space)
407 {
408 SCM_SYSCALL (current_mark_space = (scm_mark_space_t *) malloc (sizeof (scm_mark_space_t)));
409 if (!current_mark_space)
410 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
411
412 current_mark_space->bvec_space = NULL;
413 current_mark_space->next = NULL;
414
415 *mark_space_ptr = current_mark_space;
416 mark_space_ptr = &(current_mark_space->next);
417
418 return get_bvec ();
419 }
420
421 if (!(current_mark_space->bvec_space))
422 {
423 SCM_SYSCALL (current_mark_space->bvec_space =
424 (scm_c_bvec_limb_t *) calloc (BVEC_GROW_SIZE_IN_BYTES, 1));
425 if (!(current_mark_space->bvec_space))
426 scm_wta (SCM_UNDEFINED, "could not grow", "heap");
427
428 current_mark_space_offset = 0;
429
430 return get_bvec ();
431 }
432
433 if (current_mark_space_offset == BVEC_GROW_SIZE_IN_LIMBS)
434 {
435 current_mark_space = NULL;
436
437 return get_bvec ();
438 }
439
440 res = current_mark_space->bvec_space + current_mark_space_offset;
441 current_mark_space_offset += SCM_GC_CARD_BVEC_SIZE_IN_LIMBS;
442
443 return res;
444 }
445
446 static void
447 clear_mark_space ()
448 {
449 scm_mark_space_t *ms;
450
451 for (ms = mark_space_head; ms; ms = ms->next)
452 memset (ms->bvec_space, 0, BVEC_GROW_SIZE_IN_BYTES);
453 }
454
455
456 \f
457 /* Debugging functions. */
458
459 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
460
461 /* Return the number of the heap segment containing CELL. */
462 static int
463 which_seg (SCM cell)
464 {
465 int i;
466
467 for (i = 0; i < scm_n_heap_segs; i++)
468 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
469 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
470 return i;
471 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
472 SCM_UNPACK (cell));
473 abort ();
474 }
475
476
477 static void
478 map_free_list (scm_freelist_t *master, SCM freelist)
479 {
480 int last_seg = -1, count = 0;
481 SCM f;
482
483 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
484 {
485 int this_seg = which_seg (f);
486
487 if (this_seg != last_seg)
488 {
489 if (last_seg != -1)
490 fprintf (stderr, " %5d %d-cells in segment %d\n",
491 count, master->span, last_seg);
492 last_seg = this_seg;
493 count = 0;
494 }
495 count++;
496 }
497 if (last_seg != -1)
498 fprintf (stderr, " %5d %d-cells in segment %d\n",
499 count, master->span, last_seg);
500 }
501
502 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
503 (),
504 "Print debugging information about the free-list.\n"
505 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
506 #define FUNC_NAME s_scm_map_free_list
507 {
508 int i;
509 fprintf (stderr, "%d segments total (%d:%d",
510 scm_n_heap_segs,
511 scm_heap_table[0].span,
512 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
513 for (i = 1; i < scm_n_heap_segs; i++)
514 fprintf (stderr, ", %d:%d",
515 scm_heap_table[i].span,
516 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
517 fprintf (stderr, ")\n");
518 map_free_list (&scm_master_freelist, scm_freelist);
519 map_free_list (&scm_master_freelist2, scm_freelist2);
520 fflush (stderr);
521
522 return SCM_UNSPECIFIED;
523 }
524 #undef FUNC_NAME
525
526 static int last_cluster;
527 static int last_size;
528
529 static int
530 free_list_length (char *title, int i, SCM freelist)
531 {
532 SCM ls;
533 int n = 0;
534 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
535 if (SCM_FREE_CELL_P (ls))
536 ++n;
537 else
538 {
539 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
540 abort ();
541 }
542 if (n != last_size)
543 {
544 if (i > 0)
545 {
546 if (last_cluster == i - 1)
547 fprintf (stderr, "\t%d\n", last_size);
548 else
549 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
550 }
551 if (i >= 0)
552 fprintf (stderr, "%s %d", title, i);
553 else
554 fprintf (stderr, "%s\t%d\n", title, n);
555 last_cluster = i;
556 last_size = n;
557 }
558 return n;
559 }
560
561 static void
562 free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
563 {
564 SCM clusters;
565 int i = 0, len, n = 0;
566 fprintf (stderr, "%s\n\n", title);
567 n += free_list_length ("free list", -1, freelist);
568 for (clusters = master->clusters;
569 SCM_NNULLP (clusters);
570 clusters = SCM_CDR (clusters))
571 {
572 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
573 n += len;
574 }
575 if (last_cluster == i - 1)
576 fprintf (stderr, "\t%d\n", last_size);
577 else
578 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
579 fprintf (stderr, "\ntotal %d objects\n\n", n);
580 }
581
582 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
583 (),
584 "Print debugging information about the free-list.\n"
585 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
586 #define FUNC_NAME s_scm_free_list_length
587 {
588 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
589 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
590 return SCM_UNSPECIFIED;
591 }
592 #undef FUNC_NAME
593
594 #endif
595
596 #ifdef GUILE_DEBUG_FREELIST
597
598 /* Number of calls to SCM_NEWCELL and SCM_NEWCELL2 since startup.  */
599 static unsigned long scm_newcell_count;
600 static unsigned long scm_newcell2_count;
601
602 /* Search freelist for anything that isn't marked as a free cell.
603 Abort if we find something. */
604 static void
605 scm_check_freelist (SCM freelist)
606 {
607 SCM f;
608 int i = 0;
609
610 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
611 if (!SCM_FREE_CELL_P (f))
612 {
613 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
614 scm_newcell_count, i);
615 abort ();
616 }
617 }
618
619 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
620 (SCM flag),
621 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
622 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
623 "compile-time flag was selected.\n")
624 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
625 {
626 /* [cmm] I did a double-take when I read this code the first time.
627 well, FWIW. */
628 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
629 return SCM_UNSPECIFIED;
630 }
631 #undef FUNC_NAME
632
633
634 SCM
635 scm_debug_newcell (void)
636 {
637 SCM new;
638
639 scm_newcell_count++;
640 if (scm_debug_check_freelist)
641 {
642 scm_check_freelist (scm_freelist);
643       scm_gc ();
644 }
645
646 /* The rest of this is supposed to be identical to the SCM_NEWCELL
647 macro. */
648 if (SCM_NULLP (scm_freelist))
649 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
650 else
651 {
652 new = scm_freelist;
653 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
654 }
655
656 return new;
657 }
658
659 SCM
660 scm_debug_newcell2 (void)
661 {
662 SCM new;
663
664 scm_newcell2_count++;
665 if (scm_debug_check_freelist)
666 {
667 scm_check_freelist (scm_freelist2);
668 scm_gc ();
669 }
670
671 /* The rest of this is supposed to be identical to the SCM_NEWCELL
672 macro. */
673 if (SCM_NULLP (scm_freelist2))
674 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
675 else
676 {
677 new = scm_freelist2;
678 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
679 }
680
681 return new;
682 }
683
684 #endif /* GUILE_DEBUG_FREELIST */
685
686 \f
687
688 static unsigned long
689 master_cells_allocated (scm_freelist_t *master)
690 {
691 /* the '- 1' below is to ignore the cluster spine cells. */
692 int objects = master->clusters_allocated * (master->cluster_size - 1);
693 if (SCM_NULLP (master->clusters))
694 objects -= master->left_to_collect;
695 return master->span * objects;
696 }
697
698 static unsigned long
699 freelist_length (SCM freelist)
700 {
701 int n;
702 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
703 ++n;
704 return n;
705 }
706
707 static unsigned long
708 compute_cells_allocated ()
709 {
710 return (scm_cells_allocated
711 + master_cells_allocated (&scm_master_freelist)
712 + master_cells_allocated (&scm_master_freelist2)
713 - scm_master_freelist.span * freelist_length (scm_freelist)
714 - scm_master_freelist2.span * freelist_length (scm_freelist2));
715 }
716
717 /* {Scheme Interface to GC}
718 */
719
720 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
721 (),
722 "Returns an association list of statistics about Guile's current use of storage. ")
723 #define FUNC_NAME s_scm_gc_stats
724 {
725 int i;
726 int n;
727 SCM heap_segs;
728 long int local_scm_mtrigger;
729 long int local_scm_mallocated;
730 long int local_scm_heap_size;
731 long int local_scm_cells_allocated;
732 long int local_scm_gc_time_taken;
733 long int local_scm_gc_times;
734 long int local_scm_gc_mark_time_taken;
735 long int local_scm_gc_sweep_time_taken;
736 double local_scm_gc_cells_swept;
737 double local_scm_gc_cells_marked;
738 SCM answer;
739
740 SCM_DEFER_INTS;
741
742 ++scm_block_gc;
743
744 retry:
745 heap_segs = SCM_EOL;
746 n = scm_n_heap_segs;
747 for (i = scm_n_heap_segs; i--; )
748 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
749 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
750 heap_segs);
751 if (scm_n_heap_segs != n)
752 goto retry;
753
754 --scm_block_gc;
755
756 /* Below, we cons to produce the resulting list. We want a snapshot of
757 * the heap situation before consing.
758 */
759 local_scm_mtrigger = scm_mtrigger;
760 local_scm_mallocated = scm_mallocated;
761 local_scm_heap_size = SCM_HEAP_SIZE;
762 local_scm_cells_allocated = compute_cells_allocated ();
763 local_scm_gc_time_taken = scm_gc_time_taken;
764 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
765 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
766 local_scm_gc_times = scm_gc_times;
767 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
768 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
769
770 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
771 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
772 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
773 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
774 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
775 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
776 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
777 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
778 scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
779 scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
780 scm_cons (sym_heap_segments, heap_segs),
781 SCM_UNDEFINED);
782 SCM_ALLOW_INTS;
783 return answer;
784 }
785 #undef FUNC_NAME
786
787
788 static void
789 gc_start_stats (const char *what)
790 {
791 t_before_gc = scm_c_get_internal_run_time ();
792 scm_gc_cells_swept = 0;
793 scm_gc_cells_collected = 0;
794 scm_gc_yield_1 = scm_gc_yield;
795 scm_gc_yield = (scm_cells_allocated
796 + master_cells_allocated (&scm_master_freelist)
797 + master_cells_allocated (&scm_master_freelist2));
798 scm_gc_malloc_collected = 0;
799 scm_gc_ports_collected = 0;
800 }
801
802
803 static void
804 gc_end_stats ()
805 {
806 unsigned long t = scm_c_get_internal_run_time ();
807 scm_gc_time_taken += (t - t_before_gc);
808 scm_gc_sweep_time_taken += (t - t_before_sweep);
809 ++scm_gc_times;
810
811 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
812 scm_gc_cells_swept_acc += scm_gc_cells_swept;
813 }
814
815
816 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
817 (SCM obj),
818 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
819 "returned by this function for @var{obj}")
820 #define FUNC_NAME s_scm_object_address
821 {
822 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
823 }
824 #undef FUNC_NAME
825
826
827 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
828 (),
829 "Scans all of SCM objects and reclaims for further use those that are\n"
830 "no longer accessible.")
831 #define FUNC_NAME s_scm_gc
832 {
833 SCM_DEFER_INTS;
834 scm_igc ("call");
835 SCM_ALLOW_INTS;
836 return SCM_UNSPECIFIED;
837 }
838 #undef FUNC_NAME
839
840
841 \f
842 /* {C Interface For When GC is Triggered}
843 */
844
845 static void
846 adjust_min_yield (scm_freelist_t *freelist)
847 {
848   /* min yield is adjusted upwards so that the next predicted total yield
849 * (allocated cells actually freed by GC) becomes
850 * `min_yield_fraction' of total heap size. Note, however, that
851 * the absolute value of min_yield will correspond to `collected'
852 * on one master (the one which currently is triggering GC).
853 *
854 * The reason why we look at total yield instead of cells collected
855 * on one list is that we want to take other freelists into account.
856 * On this freelist, we know that (local) yield = collected cells,
857 * but that's probably not the case on the other lists.
858 *
859 * (We might consider computing a better prediction, for example
860    * by computing an average over multiple GCs.)
861 */
862 if (freelist->min_yield_fraction)
863 {
864 /* Pick largest of last two yields. */
865 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
866 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
867 #ifdef DEBUGINFO
868 fprintf (stderr, " after GC = %d, delta = %d\n",
869 scm_cells_allocated,
870 delta);
871 #endif
872 if (delta > 0)
873 freelist->min_yield += delta;
874 }
875 }
876
877
878 /* When we get POSIX threads support, the master will be global and
879 * common while the freelist will be individual for each thread.
880 */
881
882 SCM
883 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
884 {
885 SCM cell;
886 ++scm_ints_disabled;
887 do
888 {
889 if (SCM_NULLP (master->clusters))
890 {
891 if (master->grow_heap_p || scm_block_gc)
892 {
893 /* In order to reduce gc frequency, try to allocate a new heap
894 * segment first, even if gc might find some free cells. If we
895 * can't obtain a new heap segment, we will try gc later.
896 */
897 master->grow_heap_p = 0;
898 alloc_some_heap (master, return_on_error);
899 }
900 if (SCM_NULLP (master->clusters))
901 {
902 /* The heap was not grown, either because it wasn't scheduled to
903 * grow, or because there was not enough memory available. In
904 * both cases we have to try gc to get some free cells.
905 */
906 #ifdef DEBUGINFO
907 fprintf (stderr, "allocated = %d, ",
908 scm_cells_allocated
909 + master_cells_allocated (&scm_master_freelist)
910 + master_cells_allocated (&scm_master_freelist2));
911 #endif
912 scm_igc ("cells");
913 adjust_min_yield (master);
914 if (SCM_NULLP (master->clusters))
915 {
916 /* gc could not free any cells. Now, we _must_ allocate a
917 * new heap segment, because there is no other possibility
918 * to provide a new cell for the caller.
919 */
920 alloc_some_heap (master, abort_on_error);
921 }
922 }
923 }
924 cell = SCM_CAR (master->clusters);
925 master->clusters = SCM_CDR (master->clusters);
926 ++master->clusters_allocated;
927 }
928 while (SCM_NULLP (cell));
929
930 #ifdef GUILE_DEBUG_FREELIST
931 scm_check_freelist (cell);
932 #endif
933
934 --scm_ints_disabled;
935 *freelist = SCM_FREE_CELL_CDR (cell);
936 return cell;
937 }
938
939
940 #if 0
941 /* This is a support routine which can be used to reserve a cluster
942 * for some special use, such as debugging. It won't be useful until
943 * free cells are preserved between garbage collections.
944 */
945
946 SCM
947 scm_alloc_cluster (scm_freelist_t *master)
948 {
949 SCM freelist, cell;
950 cell = scm_gc_for_newcell (master, &freelist);
951 SCM_SETCDR (cell, freelist);
952 return cell;
953 }
954 #endif
955
956
957 scm_c_hook_t scm_before_gc_c_hook;
958 scm_c_hook_t scm_before_mark_c_hook;
959 scm_c_hook_t scm_before_sweep_c_hook;
960 scm_c_hook_t scm_after_sweep_c_hook;
961 scm_c_hook_t scm_after_gc_c_hook;
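/* A minimal sketch (not compiled in) of how client code can observe
 * the collector through the hooks above.  This assumes the
 * scm_c_hook_add interface from hooks.h; note_gc and its installer
 * are hypothetical names made up for the example.
 */
#if 0
static void *
note_gc (void *hook_data, void *func_data, void *data)
{
  fprintf (stderr, "gc completed\n");
  return NULL;
}

static void
example_install_gc_hook (void)
{
  scm_c_hook_add (&scm_after_gc_c_hook, note_gc, NULL, 0);
}
#endif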
962
963
964 void
965 scm_igc (const char *what)
966 {
967 int j;
968
969 ++scm_gc_running_p;
970 scm_c_hook_run (&scm_before_gc_c_hook, 0);
971 #ifdef DEBUGINFO
972 fprintf (stderr,
973 SCM_NULLP (scm_freelist)
974 ? "*"
975 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
976 #endif
977 #ifdef USE_THREADS
978 /* During the critical section, only the current thread may run. */
979 SCM_THREAD_CRITICAL_SECTION_START;
980 #endif
981
982 /* fprintf (stderr, "gc: %s\n", what); */
983
984 if (!scm_stack_base || scm_block_gc)
985 {
986 --scm_gc_running_p;
987 return;
988 }
989
990 gc_start_stats (what);
991
992 if (scm_mallocated < 0)
993 /* The byte count of allocated objects has underflowed. This is
994 probably because you forgot to report the sizes of objects you
995 have allocated, by calling scm_done_malloc or some such. When
996 the GC freed them, it subtracted their size from
997 scm_mallocated, which underflowed. */
998 abort ();
999
1000 if (scm_gc_heap_lock)
1001 /* We've invoked the collector while a GC is already in progress.
1002 That should never happen. */
1003 abort ();
1004
1005 ++scm_gc_heap_lock;
1006
1007 /* flush dead entries from the continuation stack */
1008 {
1009 int x;
1010 int bound;
1011 SCM * elts;
1012 elts = SCM_VELTS (scm_continuation_stack);
1013 bound = SCM_LENGTH (scm_continuation_stack);
1014 x = SCM_INUM (scm_continuation_stack_ptr);
1015 while (x < bound)
1016 {
1017 elts[x] = SCM_BOOL_F;
1018 ++x;
1019 }
1020 }
1021
1022 scm_c_hook_run (&scm_before_mark_c_hook, 0);
1023
1024 clear_mark_space ();
1025
1026 #ifndef USE_THREADS
1027
1028 /* Protect from the C stack. This must be the first marking
1029 * done because it provides information about what objects
1030 * are "in-use" by the C code. "in-use" objects are those
1031 * for which the information about length and base address must
1032 * remain usable. This requirement is stricter than a liveness
1033 * requirement -- in particular, it constrains the implementation
1034 * of scm_vector_set_length_x.
1035 */
1036 SCM_FLUSH_REGISTER_WINDOWS;
1037 /* This assumes that all registers are saved into the jmp_buf */
1038 setjmp (scm_save_regs_gc_mark);
1039 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
1040 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
1041 sizeof scm_save_regs_gc_mark)
1042 / sizeof (SCM_STACKITEM)));
1043
1044 {
1045 scm_sizet stack_len = scm_stack_size (scm_stack_base);
1046 #ifdef SCM_STACK_GROWS_UP
1047 scm_mark_locations (scm_stack_base, stack_len);
1048 #else
1049 scm_mark_locations (scm_stack_base - stack_len, stack_len);
1050 #endif
1051 }
1052
1053 #else /* USE_THREADS */
1054
1055 /* Mark every thread's stack and registers */
1056 scm_threads_mark_stacks ();
1057
1058 #endif /* USE_THREADS */
1059
1060 /* FIXME: insert a phase to un-protect string-data preserved
1061 * in scm_vector_set_length_x.
1062 */
1063
1064 j = SCM_NUM_PROTECTS;
1065 while (j--)
1066 scm_gc_mark (scm_sys_protects[j]);
1067
1068 /* FIXME: we should have a means to register C functions to be run
1069 * in different phases of GC
1070 */
1071 scm_mark_subr_table ();
1072
1073 #ifndef USE_THREADS
1074 scm_gc_mark (scm_root->handle);
1075 #endif
1076
1077 t_before_sweep = scm_c_get_internal_run_time ();
1078 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
1079
1080 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
1081
1082 scm_gc_sweep ();
1083
1084 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1085
1086 --scm_gc_heap_lock;
1087 gc_end_stats ();
1088
1089 #ifdef USE_THREADS
1090 SCM_THREAD_CRITICAL_SECTION_END;
1091 #endif
1092 scm_c_hook_run (&scm_after_gc_c_hook, 0);
1093 --scm_gc_running_p;
1094 }
1095
1096 \f
1097
1098 /* {Mark/Sweep}
1099 */
1100
1101
1102
1103 /* Mark an object precisely.
1104 */
1105 void
1106 scm_gc_mark (SCM p)
1107 #define FUNC_NAME "scm_gc_mark"
1108 {
1109 register long i;
1110 register SCM ptr;
1111
1112 ptr = p;
1113
1114 gc_mark_loop:
1115 if (SCM_IMP (ptr))
1116 return;
1117
1118 gc_mark_nimp:
1119 if (!SCM_CELLP (ptr))
1120 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1121
1122 #if (defined (GUILE_DEBUG_FREELIST))
1123
1124 if (SCM_GC_IN_CARD_HEADERP (SCM2PTR (ptr)))
1125 scm_wta (ptr, "rogue pointer in heap", NULL);
1126
1127 #endif
1128
1129 if (SCM_GCMARKP (ptr))
1130 return;
1131
1132 SCM_SETGCMARK (ptr);
1133
1134 switch (SCM_TYP7 (ptr))
1135 {
1136 case scm_tcs_cons_nimcar:
1137 if (SCM_IMP (SCM_CDR (ptr)))
1138 {
1139 ptr = SCM_CAR (ptr);
1140 goto gc_mark_nimp;
1141 }
1142 scm_gc_mark (SCM_CAR (ptr));
1143 ptr = SCM_CDR (ptr);
1144 goto gc_mark_nimp;
1145 case scm_tcs_cons_imcar:
1146 ptr = SCM_CDR (ptr);
1147 goto gc_mark_loop;
1148 case scm_tc7_pws:
1149 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
1150 ptr = SCM_CDR (ptr);
1151 goto gc_mark_loop;
1152 case scm_tcs_cons_gloc:
1153 {
1154 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1155 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1156 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1157 * pointer to a struct vtable data region. The fact that these are
1158        * accessed in the same way restricts the possibilities of changing the
1159 * data layout of structs or heap cells.
1160 */
1161 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1162 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1163 if (vtable_data [scm_vtable_index_vcell] != 0)
1164 {
1165 /* ptr is a gloc */
1166 SCM gloc_car = SCM_PACK (word0);
1167 scm_gc_mark (gloc_car);
1168 ptr = SCM_CDR (ptr);
1169 goto gc_mark_loop;
1170 }
1171 else
1172 {
1173 /* ptr is a struct */
1174 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
1175 int len = SCM_LENGTH (layout);
1176 char * fields_desc = SCM_SYMBOL_CHARS (layout);
1177 scm_bits_t * struct_data = (scm_bits_t *) SCM_STRUCT_DATA (ptr);
1178
1179 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1180 {
1181 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
1182 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
1183 }
1184 if (len)
1185 {
1186 int x;
1187
1188 for (x = 0; x < len - 2; x += 2, ++struct_data)
1189 if (fields_desc[x] == 'p')
1190 scm_gc_mark (SCM_PACK (*struct_data));
1191 if (fields_desc[x] == 'p')
1192 {
1193 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
1194 for (x = *struct_data; x; --x)
1195 scm_gc_mark (SCM_PACK (*++struct_data));
1196 else
1197 scm_gc_mark (SCM_PACK (*struct_data));
1198 }
1199 }
1200 /* mark vtable */
1201 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1202 goto gc_mark_loop;
1203 }
1204 }
1205 break;
1206 case scm_tcs_closures:
1207 if (SCM_IMP (SCM_CDR (ptr)))
1208 {
1209 ptr = SCM_CLOSCAR (ptr);
1210 goto gc_mark_nimp;
1211 }
1212 scm_gc_mark (SCM_CLOSCAR (ptr));
1213 ptr = SCM_CDR (ptr);
1214 goto gc_mark_nimp;
1215 case scm_tc7_vector:
1216 #ifdef CCLO
1217 case scm_tc7_cclo:
1218 #endif
1219 i = SCM_LENGTH (ptr);
1220 if (i == 0)
1221 break;
1222 while (--i > 0)
1223 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1224 scm_gc_mark (SCM_VELTS (ptr)[i]);
1225 ptr = SCM_VELTS (ptr)[0];
1226 goto gc_mark_loop;
1227 case scm_tc7_contin:
1228 if (SCM_VELTS (ptr))
1229 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
1230 (scm_sizet)
1231 (SCM_LENGTH (ptr) +
1232 (sizeof (SCM_STACKITEM) + -1 +
1233 sizeof (scm_contregs)) /
1234 sizeof (SCM_STACKITEM)));
1235 break;
1236 #ifdef HAVE_ARRAYS
1237 case scm_tc7_bvect:
1238 case scm_tc7_byvect:
1239 case scm_tc7_ivect:
1240 case scm_tc7_uvect:
1241 case scm_tc7_fvect:
1242 case scm_tc7_dvect:
1243 case scm_tc7_cvect:
1244 case scm_tc7_svect:
1245 #ifdef HAVE_LONG_LONGS
1246 case scm_tc7_llvect:
1247 #endif
1248 #endif
1249 case scm_tc7_string:
1250 break;
1251
1252 case scm_tc7_substring:
1253 ptr = SCM_CDR (ptr);
1254 goto gc_mark_loop;
1255
1256 case scm_tc7_wvect:
1257 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1258 scm_weak_vectors = ptr;
1259 if (SCM_IS_WHVEC_ANY (ptr))
1260 {
1261 int x;
1262 int len;
1263 int weak_keys;
1264 int weak_values;
1265
1266 len = SCM_LENGTH (ptr);
1267 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1268 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1269
1270 for (x = 0; x < len; ++x)
1271 {
1272 SCM alist;
1273 alist = SCM_VELTS (ptr)[x];
1274
1275 /* mark everything on the alist except the keys or
1276 * values, according to weak_values and weak_keys. */
1277 while ( SCM_CONSP (alist)
1278 && !SCM_GCMARKP (alist)
1279 && SCM_CONSP (SCM_CAR (alist)))
1280 {
1281 SCM kvpair;
1282 SCM next_alist;
1283
1284 kvpair = SCM_CAR (alist);
1285 next_alist = SCM_CDR (alist);
1286 /*
1287 * Do not do this:
1288 * SCM_SETGCMARK (alist);
1289 * SCM_SETGCMARK (kvpair);
1290 *
1291 * It may be that either the key or value is protected by
1292 * an escaped reference to part of the spine of this alist.
1293 * If we mark the spine here, and only mark one or neither of the
1294 * key and value, they may never be properly marked.
1295 * This leads to a horrible situation in which an alist containing
1296 * freelist cells is exported.
1297 *
1298 * So only mark the spines of these arrays last of all marking.
1299 * If somebody confuses us by constructing a weak vector
1300 * with a circular alist then we are hosed, but at least we
1301 * won't prematurely drop table entries.
1302 */
1303 if (!weak_keys)
1304 scm_gc_mark (SCM_CAR (kvpair));
1305 if (!weak_values)
1306 scm_gc_mark (SCM_CDR (kvpair));
1307 alist = next_alist;
1308 }
1309 if (SCM_NIMP (alist))
1310 scm_gc_mark (alist);
1311 }
1312 }
1313 break;
1314
1315 case scm_tc7_symbol:
1316 ptr = SCM_PROP_SLOTS (ptr);
1317 goto gc_mark_loop;
1318 case scm_tcs_subrs:
1319 break;
1320 case scm_tc7_port:
1321 i = SCM_PTOBNUM (ptr);
1322 if (!(i < scm_numptob))
1323 goto def;
1324 if (SCM_PTAB_ENTRY(ptr))
1325 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
1326 if (scm_ptobs[i].mark)
1327 {
1328 ptr = (scm_ptobs[i].mark) (ptr);
1329 goto gc_mark_loop;
1330 }
1331 else
1332 return;
1333 break;
1334 case scm_tc7_smob:
1335 switch (SCM_TYP16 (ptr))
1336 { /* should be faster than going through scm_smobs */
1337 case scm_tc_free_cell:
1338 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1339 case scm_tc16_big:
1340 case scm_tc16_real:
1341 case scm_tc16_complex:
1342 break;
1343 default:
1344 i = SCM_SMOBNUM (ptr);
1345 if (!(i < scm_numsmob))
1346 goto def;
1347 if (scm_smobs[i].mark)
1348 {
1349 ptr = (scm_smobs[i].mark) (ptr);
1350 goto gc_mark_loop;
1351 }
1352 else
1353 return;
1354 }
1355 break;
1356 default:
1357 def:
1358 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1359 }
1360 }
1361 #undef FUNC_NAME
1362
1363
1364 /* Mark a Region Conservatively
1365 */
1366
1367 void
1368 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1369 {
1370 unsigned long m;
1371
1372 for (m = 0; m < n; ++m)
1373 {
1374 SCM obj = * (SCM *) &x[m];
1375 if (SCM_CELLP (obj))
1376 {
1377 SCM_CELLPTR ptr = SCM2PTR (obj);
1378 int i = 0;
1379 int j = scm_n_heap_segs - 1;
1380 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1381 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1382 {
1383 while (i <= j)
1384 {
1385 int seg_id;
1386 seg_id = -1;
1387 if ((i == j)
1388 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1389 seg_id = i;
1390 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1391 seg_id = j;
1392 else
1393 {
1394 int k;
1395 k = (i + j) / 2;
1396 if (k == i)
1397 break;
1398 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1399 {
1400 j = k;
1401 ++i;
1402 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1403 continue;
1404 else
1405 break;
1406 }
1407 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1408 {
1409 i = k;
1410 --j;
1411 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1412 continue;
1413 else
1414 break;
1415 }
1416 }
1417
1418 if (SCM_GC_IN_CARD_HEADERP (ptr))
1419 break;
1420
1421 if (scm_heap_table[seg_id].span == 1
1422 || SCM_DOUBLE_CELLP (obj))
1423 scm_gc_mark (obj);
1424
1425 break;
1426 }
1427 }
1428 }
1429 }
1430 }
1431
1432
1433 /* The function scm_cellp determines whether an SCM value can be regarded as a
1434 * pointer to a cell on the heap. Binary search is used in order to determine
1435 * the heap segment that contains the cell.
1436 */
1437 int
1438 scm_cellp (SCM value)
1439 {
1440 if (SCM_CELLP (value)) {
1441 scm_cell * ptr = SCM2PTR (value);
1442 unsigned int i = 0;
1443 unsigned int j = scm_n_heap_segs - 1;
1444
1445 while (i < j) {
1446 int k = (i + j) / 2;
1447 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1448 j = k;
1449 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1450 i = k + 1;
1451 }
1452 }
1453
1454 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1455 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1456 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))
1457 && !SCM_GC_IN_CARD_HEADERP (ptr)
1458 )
1459 return 1;
1460 else
1461 return 0;
1462 } else
1463 return 0;
1464 }
1465
1466
1467 static void
1468 gc_sweep_freelist_start (scm_freelist_t *freelist)
1469 {
1470 freelist->cells = SCM_EOL;
1471 freelist->left_to_collect = freelist->cluster_size;
1472 freelist->clusters_allocated = 0;
1473 freelist->clusters = SCM_EOL;
1474 freelist->clustertail = &freelist->clusters;
1475 freelist->collected_1 = freelist->collected;
1476 freelist->collected = 0;
1477 }
1478
1479 static void
1480 gc_sweep_freelist_finish (scm_freelist_t *freelist)
1481 {
1482 int collected;
1483 *freelist->clustertail = freelist->cells;
1484 if (!SCM_NULLP (freelist->cells))
1485 {
1486 SCM c = freelist->cells;
1487 SCM_SETCAR (c, SCM_CDR (c));
1488 SCM_SETCDR (c, SCM_EOL);
1489 freelist->collected +=
1490 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1491 }
1492 scm_gc_cells_collected += freelist->collected;
1493
1494 /* Although freelist->min_yield is used to test freelist->collected
1495 * (which is the local GC yield for freelist), it is adjusted so
1496 * that *total* yield is freelist->min_yield_fraction of total heap
1497    * size.  This means that too low a yield is compensated for by more
1498 * heap on the list which is currently doing most work, which is
1499 * just what we want.
1500 */
1501 collected = SCM_MAX (freelist->collected_1, freelist->collected);
1502 freelist->grow_heap_p = (collected < freelist->min_yield);
1503 }
1504
1505 #define NEXT_DATA_CELL(ptr, span) \
1506 do { \
1507 scm_cell *nxt__ = CELL_UP ((char *) (ptr) + 1, (span)); \
1508 (ptr) = (SCM_GC_IN_CARD_HEADERP (nxt__) ? \
1509 CELL_UP (SCM_GC_CELL_CARD (nxt__) + SCM_GC_CARD_N_HEADER_CELLS, span) \
1510 : nxt__); \
1511 } while (0)
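/* A minimal sketch (not compiled in) of a traversal using
 * NEXT_DATA_CELL: the macro steps to the next data cell, hopping
 * over the header cells at the start of each card.  scm_gc_sweep
 * below and init_heap_seg further down use the same pattern.
 */
#if 0
static void
example_walk_data_cells (SCM_CELLPTR ptr, SCM_CELLPTR seg_end, int span)
{
  NEXT_DATA_CELL (ptr, span);
  while (ptr < seg_end)
    {
      /* ... inspect the data cell at ptr ... */
      NEXT_DATA_CELL (ptr, span);
    }
}
#endif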
1512
1513 void
1514 scm_gc_sweep ()
1515 #define FUNC_NAME "scm_gc_sweep"
1516 {
1517 register SCM_CELLPTR ptr;
1518 register SCM nfreelist;
1519 register scm_freelist_t *freelist;
1520 register long m;
1521 register int span;
1522 long i;
1523 scm_sizet seg_size;
1524
1525 m = 0;
1526
1527 gc_sweep_freelist_start (&scm_master_freelist);
1528 gc_sweep_freelist_start (&scm_master_freelist2);
1529
1530 for (i = 0; i < scm_n_heap_segs; i++)
1531 {
1532 register unsigned int left_to_collect;
1533 register scm_sizet j;
1534
1535 /* Unmarked cells go onto the front of the freelist this heap
1536 segment points to. Rather than updating the real freelist
1537 pointer as we go along, we accumulate the new head in
1538 nfreelist. Then, if it turns out that the entire segment is
1539 free, we free (i.e., malloc's free) the whole segment, and
1540 simply don't assign nfreelist back into the real freelist. */
1541 freelist = scm_heap_table[i].freelist;
1542 nfreelist = freelist->cells;
1543 left_to_collect = freelist->left_to_collect;
1544 span = scm_heap_table[i].span;
1545
1546 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1547 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1548
1549 /* use only data cells in seg_size */
1550 seg_size = (seg_size / SCM_GC_CARD_N_CELLS) * (SCM_GC_CARD_N_DATA_CELLS / span) * span;
1551
1552 scm_gc_cells_swept += seg_size;
1553
1554 for (j = seg_size + span; j -= span; ptr += span)
1555 {
1556 SCM scmptr;
1557
1558 if (SCM_GC_IN_CARD_HEADERP (ptr))
1559 {
1560 SCM_CELLPTR nxt;
1561
1562 /* cheat here */
1563 nxt = ptr;
1564 NEXT_DATA_CELL (nxt, span);
1565 j += span;
1566
1567 ptr = nxt - span;
1568 continue;
1569 }
1570
1571 scmptr = PTR2SCM (ptr);
1572
1573 if (SCM_GCMARKP (scmptr))
1574 continue;
1575
1576 switch SCM_TYP7 (scmptr)
1577 {
1578 case scm_tcs_cons_gloc:
1579 {
1580 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1581 * struct or a gloc. See the corresponding comment in
1582 * scm_gc_mark.
1583 */
1584 scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
1585 - scm_tc3_cons_gloc);
1586 /* access as struct */
1587 scm_bits_t * vtable_data = (scm_bits_t *) word0;
1588 if (vtable_data[scm_vtable_index_vcell] == 0)
1589 {
1590 /* Structs need to be freed in a special order.
1591 * This is handled by GC C hooks in struct.c.
1592 */
1593 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1594 scm_structs_to_free = scmptr;
1595 continue;
1596 }
1597 /* fall through so that scmptr gets collected */
1598 }
1599 break;
1600 case scm_tcs_cons_imcar:
1601 case scm_tcs_cons_nimcar:
1602 case scm_tcs_closures:
1603 case scm_tc7_pws:
1604 break;
1605 case scm_tc7_wvect:
1606 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1607 scm_must_free (SCM_VECTOR_BASE (scmptr) - 2);
1608 break;
1609 case scm_tc7_vector:
1610 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1611 scm_must_free (SCM_VECTOR_BASE (scmptr));
1612 break;
1613 #ifdef CCLO
1614 case scm_tc7_cclo:
1615 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1616 scm_must_free (SCM_CCLO_BASE (scmptr));
1617 break;
1618 #endif
1619 #ifdef HAVE_ARRAYS
1620 case scm_tc7_bvect:
1621 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1622 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1623 break;
1624 case scm_tc7_byvect:
1625 case scm_tc7_ivect:
1626 case scm_tc7_uvect:
1627 case scm_tc7_svect:
1628 #ifdef HAVE_LONG_LONGS
1629 case scm_tc7_llvect:
1630 #endif
1631 case scm_tc7_fvect:
1632 case scm_tc7_dvect:
1633 case scm_tc7_cvect:
1634 m += SCM_HUGE_LENGTH (scmptr) * scm_uniform_element_size (scmptr);
1635 scm_must_free (SCM_UVECTOR_BASE (scmptr));
1636 break;
1637 #endif
1638 case scm_tc7_substring:
1639 break;
1640 case scm_tc7_string:
1641 m += SCM_HUGE_LENGTH (scmptr) + 1;
1642 scm_must_free (SCM_STRING_CHARS (scmptr));
1643 break;
1644 case scm_tc7_symbol:
1645 m += SCM_LENGTH (scmptr) + 1;
1646 scm_must_free (SCM_SYMBOL_CHARS (scmptr));
1647 break;
1648 case scm_tc7_contin:
1649 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
1650 if (SCM_CONTREGS (scmptr))
1651 {
1652 scm_must_free (SCM_CONTREGS (scmptr));
1653 break;
1654 }
1655 else
1656 {
1657 continue;
1658 }
1659 case scm_tcs_subrs:
1660 /* the various "subrs" (primitives) are never freed */
1661 continue;
1662 case scm_tc7_port:
1663 if SCM_OPENP (scmptr)
1664 {
1665 int k = SCM_PTOBNUM (scmptr);
1666 if (!(k < scm_numptob))
1667 goto sweeperr;
1668 /* Keep "revealed" ports alive. */
1669 if (scm_revealed_count (scmptr) > 0)
1670 continue;
1671 /* Yes, I really do mean scm_ptobs[k].free */
1672 	      /* rather than scm_ptobs[k].close.  .close */
1673 /* is for explicit CLOSE-PORT by user */
1674 m += (scm_ptobs[k].free) (scmptr);
1675 SCM_SETSTREAM (scmptr, 0);
1676 scm_remove_from_port_table (scmptr);
1677 scm_gc_ports_collected++;
1678 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1679 }
1680 break;
1681 case scm_tc7_smob:
1682 switch SCM_TYP16 (scmptr)
1683 {
1684 case scm_tc_free_cell:
1685 case scm_tc16_real:
1686 break;
1687 #ifdef SCM_BIGDIG
1688 case scm_tc16_big:
1689 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1690 scm_must_free (SCM_BDIGITS (scmptr));
1691 break;
1692 #endif /* def SCM_BIGDIG */
1693 case scm_tc16_complex:
1694 m += sizeof (scm_complex_t);
1695 scm_must_free (SCM_CHARS (scmptr));
1696 break;
1697 default:
1698 {
1699 int k;
1700 k = SCM_SMOBNUM (scmptr);
1701 if (!(k < scm_numsmob))
1702 goto sweeperr;
1703 m += (scm_smobs[k].free) (scmptr);
1704 break;
1705 }
1706 }
1707 break;
1708 default:
1709 sweeperr:
1710 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1711 }
1712
1713 if (!--left_to_collect)
1714 {
1715 SCM_SETCAR (scmptr, nfreelist);
1716 *freelist->clustertail = scmptr;
1717 freelist->clustertail = SCM_CDRLOC (scmptr);
1718
1719 nfreelist = SCM_EOL;
1720 freelist->collected += span * freelist->cluster_size;
1721 left_to_collect = freelist->cluster_size;
1722 }
1723 else
1724 {
1725 /* Stick the new cell on the front of nfreelist. It's
1726 critical that we mark this cell as freed; otherwise, the
1727 conservative collector might trace it as some other type
1728 of object. */
1729 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1730 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
1731 nfreelist = scmptr;
1732 }
1733 }
1734
1735 #ifdef GC_FREE_SEGMENTS
1736 if (n == seg_size)
1737 {
1738 register long j;
1739
1740 freelist->heap_size -= seg_size;
1741 free ((char *) scm_heap_table[i].bounds[0]);
1742 scm_heap_table[i].bounds[0] = 0;
1743 for (j = i + 1; j < scm_n_heap_segs; j++)
1744 scm_heap_table[j - 1] = scm_heap_table[j];
1745 scm_n_heap_segs -= 1;
1746 i--; /* We need to scan the segment just moved. */
1747 }
1748 else
1749 #endif /* ifdef GC_FREE_SEGMENTS */
1750 {
1751 /* Update the real freelist pointer to point to the head of
1752 the list of free cells we've built for this segment. */
1753 freelist->cells = nfreelist;
1754 freelist->left_to_collect = left_to_collect;
1755 }
1756
1757 #ifdef GUILE_DEBUG_FREELIST
1758 scm_map_free_list ();
1759 #endif
1760 }
1761
1762 gc_sweep_freelist_finish (&scm_master_freelist);
1763 gc_sweep_freelist_finish (&scm_master_freelist2);
1764
1765   /* When we move to POSIX threads, private freelists should probably
1766 be GC-protected instead. */
1767 scm_freelist = SCM_EOL;
1768 scm_freelist2 = SCM_EOL;
1769
1770 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1771 scm_gc_yield -= scm_cells_allocated;
1772 scm_mallocated -= m;
1773 scm_gc_malloc_collected = m;
1774 }
1775 #undef FUNC_NAME
1776
1777
1778 \f
1779 /* {Front end to malloc}
1780 *
1781 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1782 * scm_done_free
1783 *
1784  * These functions provide services comparable to malloc, realloc, and
1785  * free. They are for allocating malloced parts of Scheme objects.
1786 * The primary purpose of the front end is to impose calls to gc. */
1787
1788
1789 /* scm_must_malloc
1790 * Return newly malloced storage or throw an error.
1791 *
1792 * The parameter WHAT is a string for error reporting.
1793  * If the threshold scm_mtrigger would be exceeded by this
1794 * allocation, or if the first call to malloc fails,
1795 * garbage collect -- on the presumption that some objects
1796 * using malloced storage may be collected.
1797 *
1798 * The limit scm_mtrigger may be raised by this allocation.
1799 */
1800 void *
1801 scm_must_malloc (scm_sizet size, const char *what)
1802 {
1803 void *ptr;
1804 unsigned long nm = scm_mallocated + size;
1805
1806 if (nm <= scm_mtrigger)
1807 {
1808 SCM_SYSCALL (ptr = malloc (size));
1809 if (NULL != ptr)
1810 {
1811 scm_mallocated = nm;
1812 #ifdef GUILE_DEBUG_MALLOC
1813 scm_malloc_register (ptr, what);
1814 #endif
1815 return ptr;
1816 }
1817 }
1818
1819 scm_igc (what);
1820
1821 nm = scm_mallocated + size;
1822 SCM_SYSCALL (ptr = malloc (size));
1823 if (NULL != ptr)
1824 {
1825 scm_mallocated = nm;
1826 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1827 if (nm > scm_mtrigger)
1828 scm_mtrigger = nm + nm / 2;
1829 else
1830 scm_mtrigger += scm_mtrigger / 2;
1831 }
1832 #ifdef GUILE_DEBUG_MALLOC
1833 scm_malloc_register (ptr, what);
1834 #endif
1835
1836 return ptr;
1837 }
1838
1839 scm_memory_error (what);
1840 }
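/* A minimal sketch (not compiled in) of typical scm_must_malloc use.
 * The size is charged against scm_mtrigger here; the matching credit
 * happens when the storage is released, either as the return value
 * of a smob free function or via scm_done_free.  The helper name is
 * made up for the example.
 */
#if 0
static char *
example_copy_chars (const char *src, scm_sizet len)
{
  char *dst = (char *) scm_must_malloc (len + 1, "example-copy-chars");
  memcpy (dst, src, len);
  dst[len] = '\0';
  return dst;   /* eventually released with scm_must_free (dst) */
}
#endif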
1841
1842
1843 /* scm_must_realloc
1844 * is similar to scm_must_malloc.
1845 */
1846 void *
1847 scm_must_realloc (void *where,
1848 scm_sizet old_size,
1849 scm_sizet size,
1850 const char *what)
1851 {
1852 void *ptr;
1853 scm_sizet nm = scm_mallocated + size - old_size;
1854
1855 if (nm <= scm_mtrigger)
1856 {
1857 SCM_SYSCALL (ptr = realloc (where, size));
1858 if (NULL != ptr)
1859 {
1860 scm_mallocated = nm;
1861 #ifdef GUILE_DEBUG_MALLOC
1862 scm_malloc_reregister (where, ptr, what);
1863 #endif
1864 return ptr;
1865 }
1866 }
1867
1868 scm_igc (what);
1869
1870 nm = scm_mallocated + size - old_size;
1871 SCM_SYSCALL (ptr = realloc (where, size));
1872 if (NULL != ptr)
1873 {
1874 scm_mallocated = nm;
1875 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1876 if (nm > scm_mtrigger)
1877 scm_mtrigger = nm + nm / 2;
1878 else
1879 scm_mtrigger += scm_mtrigger / 2;
1880 }
1881 #ifdef GUILE_DEBUG_MALLOC
1882 scm_malloc_reregister (where, ptr, what);
1883 #endif
1884 return ptr;
1885 }
1886
1887 scm_memory_error (what);
1888 }
1889
1890
1891 void
1892 scm_must_free (void *obj)
1893 #define FUNC_NAME "scm_must_free"
1894 {
1895 #ifdef GUILE_DEBUG_MALLOC
1896 scm_malloc_unregister (obj);
1897 #endif
1898 if (obj)
1899 free (obj);
1900 else
1901 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
1902 }
1903 #undef FUNC_NAME
1904
1905
1906 /* Announce that there has been some malloc done that will be freed
1907 * during gc. A typical use is for a smob that uses some malloced
1908  * memory but cannot get it from scm_must_malloc (for whatever
1909 * reason). When a new object of this smob is created you call
1910 * scm_done_malloc with the size of the object. When your smob free
1911 * function is called, be sure to include this size in the return
1912 * value.
1913 *
1914 * If you can't actually free the memory in the smob free function,
1915 * for whatever reason (like reference counting), you still can (and
1916 * should) report the amount of memory freed when you actually free it.
1917 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1918 * eh? Or even better, call scm_done_free. */
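/* A minimal sketch (not compiled in) of the reporting protocol
 * described above, for a hypothetical smob wrapping a malloced
 * buffer.  `struct image' and its fields are made up for the
 * example; only scm_done_malloc and the free-function return value
 * are the real interface.
 */
#if 0
struct image { scm_sizet n_bytes; char *pixels; };

static void
example_image_created (struct image *p)
{
  /* Tell the collector about storage it cannot see, so that heavy
     image allocation can still trigger a gc.  */
  scm_done_malloc (sizeof (struct image) + p->n_bytes);
}

static scm_sizet
example_image_free (SCM obj)
{
  struct image *p = (struct image *) SCM_CELL_WORD_1 (obj);
  scm_sizet size = sizeof (struct image) + p->n_bytes;
  free (p->pixels);
  free (p);
  /* Include the malloced size in the return value, as required
     by the comment above.  */
  return size;
}
#endif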
1919
void
scm_done_malloc (long size)
{
  scm_mallocated += size;

  if (scm_mallocated > scm_mtrigger)
    {
      scm_igc ("foreign mallocs");
      if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
        {
          if (scm_mallocated > scm_mtrigger)
            scm_mtrigger = scm_mallocated + scm_mallocated / 2;
          else
            scm_mtrigger += scm_mtrigger / 2;
        }
    }
}

void
scm_done_free (long size)
{
  scm_mallocated -= size;
}

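/* Illustrative only (compiled out): a minimal sketch of the protocol
 * described above, for a smob whose storage is obtained with plain malloc.
 * The frob type, its tag and its layout are hypothetical.  The size is
 * announced with scm_done_malloc at creation time, and the smob's free
 * function returns the same size so the collector can credit it back. */
#if 0
struct frob
{
  scm_sizet size;               /* remembered so free_frob can report it */
  /* ... payload ... */
};

static scm_bits_t frob_tag;     /* assumed to be set up elsewhere */

static SCM
make_frob (scm_sizet size)
{
  struct frob *f = (struct frob *) malloc (size);
  if (f == NULL)
    scm_memory_error ("make_frob");
  f->size = size;
  scm_done_malloc ((long) size);        /* announce the foreign malloc */
  SCM_RETURN_NEWSMOB (frob_tag, f);
}

static scm_sizet
free_frob (SCM obj)
{
  struct frob *f = (struct frob *) SCM_CELL_WORD_1 (obj);
  scm_sizet size = f->size;
  free (f);
  return size;                  /* include the malloced size in the return */
}
#endif
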
\f
/* {Heap Segments}
 *
 * Each heap segment is an array of objects of a particular size.
 * Every segment has an associated (possibly shared) freelist.
 * A table of segment records is kept that records the upper and
 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
 * into valid segments at reasonable offsets).  */
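
/* Illustrative only (compiled out): a hedged sketch of the bounds test
 * alluded to above.  A word found during the conservative scan is a
 * candidate root only if it points between bounds[0] and bounds[1] of
 * some recorded segment; the real collector additionally checks cell
 * alignment and the card metadata, which this sketch omits. */
#if 0
static int
points_into_some_segment (SCM_CELLPTR ptr)
{
  int i;

  for (i = 0; i < scm_n_heap_segs; i++)
    if (scm_heap_table[i].bounds[0] <= ptr
        && ptr < scm_heap_table[i].bounds[1])
      return 1;
  return 0;
}
#endif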

/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

scm_heap_seg_data_t * scm_heap_table = 0;
static unsigned int heap_segment_table_size = 0;
int scm_n_heap_segs = 0;

/* init_heap_seg
 * initializes a new heap segment and returns its size in bytes, or 0
 * if the segment origin is NULL.
 *
 * The segment origin and segment size in bytes are input parameters.
 * The freelist is both input and output.
 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record and that the markbit space was reserved
 * for all the cards in this segment.
 */

#define INIT_CARD(card, span) \
  do { \
    SCM_GC_CARD_BVEC (card) = get_bvec (); \
    if ((span) == 2) \
      SCM_GC_SET_CARD_DOUBLECELL (card); \
  } while (0)

static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  /* Align the begin ptr up.
   */
  ptr = SCM_GC_CARD_UP (seg_org);

  /* Compute the ceiling on valid object pointers within this segment.
   */
  seg_end = SCM_GC_CARD_DOWN ((char *) seg_org + size);

  /* Find the right place and insert the segment record.
   */
  for (new_seg_index = 0;
       ((new_seg_index < scm_n_heap_segs)
        && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    int i;
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;

  /* Compute the number of new cells in this segment.  */
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;

    NEXT_DATA_CELL (ptr, span);
    while (ptr < seg_end)
      {
        scm_cell *nxt = ptr;
        scm_cell *prv = NULL;
        scm_cell *last_card = NULL;
        int n_data_cells = (SCM_GC_CARD_N_DATA_CELLS / span) * SCM_CARDS_PER_CLUSTER - 1;
        NEXT_DATA_CELL (nxt, span);

        /* Allocate cluster spine
         */
        *clusterp = PTR2SCM (ptr);
        SCM_SETCAR (*clusterp, PTR2SCM (nxt));
        clusterp = SCM_CDRLOC (*clusterp);
        ptr = nxt;

        while (n_data_cells--)
          {
            scm_cell *card = SCM_GC_CELL_CARD (ptr);
            SCM scmptr = PTR2SCM (ptr);
            nxt = ptr;
            NEXT_DATA_CELL (nxt, span);
            prv = ptr;

            if (card != last_card)
              {
                INIT_CARD (card, span);
                last_card = card;
              }

            SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
            SCM_SETCDR (scmptr, PTR2SCM (nxt));

            ptr = nxt;
          }

        SCM_SET_FREE_CELL_CDR (PTR2SCM (prv), SCM_EOL);
      }

    /* sanity check */
    {
      scm_cell *ref = seg_end;
      NEXT_DATA_CELL (ref, span);
      if (ref != ptr)
        /* [cmm] looks like the segment size doesn't divide cleanly by
           cluster size.  bad cmm! */
        abort ();
    }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}

/* round_to_cluster_size
 * rounds LEN up to a whole number of clusters for FREELIST, plus
 * alignment slack.
 */
static scm_sizet
round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
{
  scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);

  return
    (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
    + ALIGNMENT_SLACK (freelist);
}
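
/* For example (hypothetical numbers): if CLUSTER_SIZE_IN_BYTES (freelist)
 * is 4096, a request of len = 10000 becomes (10000 + 4095) / 4096 * 4096
 * = 12288 bytes -- three whole clusters -- before the alignment slack is
 * added. */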

static void
alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
#define FUNC_NAME "alloc_some_heap"
{
  SCM_CELLPTR ptr;
  long len;

  if (scm_gc_heap_lock)
    {
      /* Critical code sections (such as the garbage collector) aren't
       * supposed to add heap segments.
       */
      fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
      abort ();
    }

  if (scm_n_heap_segs == heap_segment_table_size)
    {
      /* We have to expand the heap segment table to have room for the new
       * segment.  Do not yet increment scm_n_heap_segs -- that is done by
       * init_heap_seg only if the allocation of the segment itself succeeds.
       */
      unsigned int new_table_size = scm_n_heap_segs + 1;
      size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
      scm_heap_seg_data_t * new_heap_table;

      SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
                                     realloc ((char *) scm_heap_table, size)));
      if (!new_heap_table)
        {
          if (error_policy == abort_on_error)
            {
              fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
              abort ();
            }
          else
            {
              return;
            }
        }
      else
        {
          scm_heap_table = new_heap_table;
          heap_segment_table_size = new_table_size;
        }
    }

  /* Pick a size for the new heap segment.
   * The rule for picking the size of a segment is explained in
   * gc.h
   */
  {
    /* Ensure that the new segment is predicted to be large enough.
     *
     * New yield should at least equal GC fraction of new heap size, i.e.
     *
     *   y + dh > f * (h + dh)
     *
     *    y : yield
     *    f : min yield fraction
     *    h : heap size
     *   dh : size of new heap segment
     *
     * This gives dh > (f * h - y) / (1 - f)
     *
     * In the code below, f is expressed as a percentage
     * (min_yield_fraction), which accounts for the factors of 100;
     * dividing by 99 - f rather than 100 - f errs on the side of a
     * slightly larger segment.
     */
    int f = freelist->min_yield_fraction;
    long h = SCM_HEAP_SIZE;
    long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
    len = SCM_EXPHEAP (freelist->heap_size);
#ifdef DEBUGINFO
    fprintf (stderr, "(%ld < %ld)", len, min_cells);
#endif
    if (len < min_cells)
      len = min_cells + freelist->cluster_size;
    len *= sizeof (scm_cell);
    /* force new sampling */
    freelist->collected = LONG_MAX;
  }
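
  /* Worked instance of the formula above (hypothetical numbers): with
     f = 40 (a 40% minimum yield), h = 20000 cells and scm_gc_yield = 50
     cells, min_cells = (40 * 20000 - 100 * 50) / (99 - 40) = 13474, so at
     least that many cells (plus one cluster of headroom) are requested. */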

  if (len > scm_max_segment_size)
    len = scm_max_segment_size;

  {
    scm_sizet smallest;

    smallest = CLUSTER_SIZE_IN_BYTES (freelist);

    if (len < smallest)
      len = smallest;

    /* Allocate with decaying ambition. */
    while ((len >= SCM_MIN_HEAP_SEG_SIZE)
           && (len >= smallest))
      {
        scm_sizet rounded_len = round_to_cluster_size (freelist, len);
        SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
        if (ptr)
          {
            init_heap_seg (ptr, rounded_len, freelist);
            return;
          }
        len /= 2;
      }
  }

  if (error_policy == abort_on_error)
    {
      fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
      abort ();
    }
}
#undef FUNC_NAME


SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
            "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1, name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
        {
          SCM cell = PTR2SCM (p);
          if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
            {
              /* Dirk:FIXME:: Again, super ugly code:  cell may be a gloc or a
               * struct cell.  See the corresponding comment in scm_gc_mark.
               */
              scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
              SCM gloc_car = SCM_PACK (word0); /* access as gloc */
              SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
              if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
                  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
                {
                  SCM_SET_CELL_OBJECT_0 (cell, name);
                }
            }
          ++p;
        }
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME


\f
/* {GC Protection Helper Functions}
 */


void
scm_remember (SCM *ptr)
{ /* empty */ }

/*
  These crazy functions prevent garbage collection of arguments after
  the first argument: a caller passes the values it wants kept alive as
  the trailing arguments of a call placed on the last line of its code
  block, so the conservative stack-scanning GC keeps seeing live
  references to them for the whole function.
  It'd be better to have a nice compiler hint to
  aid the conservative stack-scanning GC. --03/09/00 gjb */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}

int
scm_return_first_int (int i, ...)
{
  return i;
}
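
/* Illustrative only (compiled out): a hedged sketch of the intended use.
 * The helper hash_the_bytes is hypothetical.  Mentioning STR as a trailing
 * argument of the final call keeps a visible reference to it on the
 * caller's stack, so the conservative scanner will not collect the string
 * while its bytes are still being read. */
#if 0
static SCM
string_hash_fixnum (SCM str)
{
  unsigned long h = hash_the_bytes (str);       /* hypothetical helper */
  return scm_return_first (SCM_MAKINUM (h & SCM_MOST_POSITIVE_FIXNUM), str);
}
#endif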


SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}

/* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
   other references are dropped, until the object is unprotected by calling
   scm_unprotect_object (OBJ).  Calls to scm_protect/unprotect_object nest,
   i.e. it is possible to protect the same object several times, but it is
   necessary to unprotect the object the same number of times to actually get
   the object unprotected.  It is an error to unprotect an object more times
   than it has been protected.  The function scm_protect_object returns OBJ.
*/

/* Implementation note:  For every object X, there is a counter which
   scm_protect_object (X) increments and scm_unprotect_object (X) decrements.
*/

SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}


/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
        scm_hashq_remove_x (scm_protects, obj);
      else
        SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}
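
/* Illustrative only (compiled out): a minimal sketch of the nesting
 * protection documented above.  An SCM value stashed in a C structure is
 * invisible to the conservative stack scan, so it is protected for as
 * long as the structure holds it.  The callback_slot type is hypothetical. */
#if 0
struct callback_slot { SCM proc; };

static void
set_callback (struct callback_slot *slot, SCM proc)
{
  slot->proc = scm_protect_object (proc);
}

static void
clear_callback (struct callback_slot *slot)
{
  scm_unprotect_object (slot->proc);    /* must balance the protect above */
  slot->proc = SCM_BOOL_F;
}
#endif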

int terminating;

/* called on process termination.  */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Do not know how to set up a cleanup handler on this system.
#endif
#endif
{
  terminating = 1;
  scm_flush_all_ports ();
}

\f
static int
make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
{
  scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);

  if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                      rounded_size,
                      freelist))
    {
      rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
      if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
                          rounded_size,
                          freelist))
        return 1;
    }
  else
    scm_expmem = 1;

  if (freelist->min_yield_fraction)
    freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
                           / 100);
  freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);

  return 0;
}

\f
static void
init_freelist (scm_freelist_t *freelist,
               int span,
               int cluster_size,
               int min_yield)
{
  freelist->clusters = SCM_EOL;
  freelist->cluster_size = cluster_size + 1;
  freelist->left_to_collect = 0;
  freelist->clusters_allocated = 0;
  freelist->min_yield = 0;
  freelist->min_yield_fraction = min_yield;
  freelist->span = span;
  freelist->collected = 0;
  freelist->collected_1 = 0;
  freelist->heap_size = 0;
}

int
scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
                  scm_sizet init_heap_size_2, int gc_trigger_2,
                  scm_sizet max_segment_size)
{
  scm_sizet j;

  if (!init_heap_size_1)
    init_heap_size_1 = scm_default_init_heap_size_1;
  if (!init_heap_size_2)
    init_heap_size_2 = scm_default_init_heap_size_2;

  j = SCM_NUM_PROTECTS;
  while (j)
    scm_sys_protects[--j] = SCM_BOOL_F;
  scm_block_gc = 1;

  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;
  init_freelist (&scm_master_freelist,
                 1, SCM_CLUSTER_SIZE_1,
                 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
  init_freelist (&scm_master_freelist2,
                 2, SCM_CLUSTER_SIZE_2,
                 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
  scm_max_segment_size
    = max_segment_size ? max_segment_size : scm_default_max_segment_size;

  scm_expmem = 0;

  j = SCM_HEAP_SEG_SIZE;
  scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
  scm_heap_table = ((scm_heap_seg_data_t *)
                    scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
  heap_segment_table_size = 2;

  mark_space_ptr = &mark_space_head;

  if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
      make_initial_segment (init_heap_size_2, &scm_master_freelist2))
    return 1;

  /* scm_hplims[0] can change.  Do not remove scm_heap_org.  */
  scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);

  scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
  scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);

  /* Initialise the list of ports.  */
  scm_port_table = (scm_port **)
    malloc (sizeof (scm_port *) * scm_port_table_room);
  if (!scm_port_table)
    return 1;

#ifdef HAVE_ATEXIT
  atexit (cleanup);
#else
#ifdef HAVE_ON_EXIT
  on_exit (cleanup, 0);
#endif
#endif

  scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
  SCM_SETCDR (scm_undefineds, scm_undefineds);

  scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
  scm_nullstr = scm_makstr (0L, 0);
  scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
  scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
  scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
  scm_stand_in_procs = SCM_EOL;
  scm_permobjs = SCM_EOL;
  scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
  scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
  scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
#ifdef SCM_BIGDIG
  scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
#endif

  return 0;
}

\f

SCM scm_after_gc_hook;

#if (SCM_DEBUG_DEPRECATED == 0)
static SCM scm_gc_vcell;        /* the vcell for gc-thunk.  */
#endif /* SCM_DEBUG_DEPRECATED == 0 */
static SCM gc_async;


/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);

#if (SCM_DEBUG_DEPRECATED == 0)

  /* The following code will be removed in Guile 1.5.  */
  if (SCM_NFALSEP (scm_gc_vcell))
    {
      SCM proc = SCM_CDR (scm_gc_vcell);

      if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
        scm_apply (proc, SCM_EOL, SCM_EOL);
    }

#endif /* SCM_DEBUG_DEPRECATED == 0 */

  return SCM_UNSPECIFIED;
}


/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).
 */
static void *
mark_gc_async (void *hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}
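
/* Illustrative only (compiled out): a hedged sketch of hanging another
 * C-level callback on scm_after_gc_c_hook, in the same way mark_gc_async
 * is registered in scm_init_gc below.  The statistics function is
 * hypothetical; the three-pointer signature matches what the hook calls. */
#if 0
static void *
note_gc (void *hook_data, void *func_data, void *data)
{
  update_gc_statistics ();              /* hypothetical */
  return NULL;
}

/* ... then, from an init function:
   scm_c_hook_add (&scm_after_gc_c_hook, note_gc, NULL, 0);  */
#endif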


void
scm_init_gc ()
{
  SCM after_gc_thunk;

  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

#if (SCM_DEBUG_DEPRECATED == 0)
  scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
#endif /* SCM_DEBUG_DEPRECATED == 0 */
  /* Dirk:FIXME:: We don't really want a binding here.  */
  after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk);

  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}

/*
  Local Variables:
  c-file-style: "gnu"
  End:
*/