1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 \f
48 #include <stdio.h>
49 #include "libguile/_scm.h"
50 #include "libguile/eval.h"
51 #include "libguile/stime.h"
52 #include "libguile/stackchk.h"
53 #include "libguile/struct.h"
54 #include "libguile/smob.h"
55 #include "libguile/unif.h"
56 #include "libguile/async.h"
57 #include "libguile/ports.h"
58 #include "libguile/root.h"
59 #include "libguile/strings.h"
60 #include "libguile/vectors.h"
61 #include "libguile/weaks.h"
62 #include "libguile/hashtab.h"
63
64 #include "libguile/validate.h"
65 #include "libguile/gc.h"
66
67 #ifdef GUILE_DEBUG_MALLOC
68 #include "libguile/debug-malloc.h"
69 #endif
70
71 #ifdef HAVE_MALLOC_H
72 #include <malloc.h>
73 #endif
74
75 #ifdef HAVE_UNISTD_H
76 #include <unistd.h>
77 #endif
78
79 #ifdef __STDC__
80 #include <stdarg.h>
81 #define var_start(x, y) va_start(x, y)
82 #else
83 #include <varargs.h>
84 #define var_start(x, y) va_start(x)
85 #endif
86
87 \f
88
89 unsigned int scm_gc_running_p = 0;
90
91 \f
92
93 #if (SCM_DEBUG_CELL_ACCESSES == 1)
94
95 unsigned int scm_debug_cell_accesses_p = 0;
96
97
98 /* Assert that the given object is a valid reference to a valid cell. This
99 * test involves determining whether the object is a cell pointer, whether
100 * this pointer actually points into a heap segment and whether the cell
101 * pointed to is not a free cell.
102 */
103 void
104 scm_assert_cell_valid (SCM cell)
105 {
106 if (scm_debug_cell_accesses_p)
107 {
108 scm_debug_cell_accesses_p = 0; /* disable to avoid recursion */
109
110 if (!scm_cellp (cell))
111 {
112 fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
113 abort ();
114 }
115 else if (!scm_gc_running_p)
116 {
117 /* Dirk::FIXME:: During garbage collection, references to free cells
118 can occur. This is all right during conservative marking, but
119 should not happen otherwise (I think). The case of free cells
120 accessed during conservative marking is handled in function
121 scm_mark_locations. However, accesses to free cells still occur
122 during gc. I don't understand why this happens. If it is
123 a bug and gets fixed, the following test should also work while
124 gc is running.
125 */
126 if (SCM_FREE_CELL_P (cell))
127 {
128 fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
129 abort ();
130 }
131 }
132 scm_debug_cell_accesses_p = 1; /* re-enable */
133 }
134 }
135
136
137 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
138 (SCM flag),
139 "If FLAG is #f, cell access checking is disabled.\n"
140 "If FLAG is #t, cell access checking is enabled.\n"
141 "This procedure only exists because the compile-time flag\n"
142 "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
143 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
144 {
145 if (SCM_FALSEP (flag)) {
146 scm_debug_cell_accesses_p = 0;
147 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
148 scm_debug_cell_accesses_p = 1;
149 } else {
150 SCM_WRONG_TYPE_ARG (1, flag);
151 }
152 return SCM_UNSPECIFIED;
153 }
154 #undef FUNC_NAME
155
156 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
157
158 \f
159
160 /* {heap tuning parameters}
161 *
162 * These are parameters for controlling memory allocation. The heap
163 * is the area out of which cons pairs (via scm_cons) and object headers are allocated.
164 *
165 * Each heap cell is 8 bytes on a 32-bit machine and 16 bytes on a
166 * 64-bit machine. The units of the _SIZE parameters are bytes.
167 * Cons pairs and object headers occupy one heap cell.
168 *
169 * SCM_INIT_HEAP_SIZE is the initial size of the heap. If this much heap is
170 * allocated initially, the heap will grow by half its current size
171 * each subsequent time more heap is needed.
172 *
173 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
174 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
175 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
176 * is in scm_init_storage() and alloc_some_heap() in sys.c
177 *
178 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
179 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
180 *
181 * SCM_MIN_HEAP_SEG_SIZE is the minimum size of heap to accept when more heap
182 * is needed.
183 *
184 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
185 * trigger a GC.
186 *
187 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
188 * reclaimed by a GC triggered by must_malloc. If less than this is
189 * reclaimed, the trigger threshold is raised. [I don't know what a
190 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
191 * work around an oscillation that caused almost constant GC.]
192 */
193
194 /*
195 * Heap size 45000 and 40% min yield gives quick startup and no extra
196 * heap allocation. A higher min yield may lead to
197 * large heaps, especially if the code's maximum consumption
198 * varies between the different freelists.
199 */
200 int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell));
201 int scm_default_min_yield_1 = 40;
202 #define SCM_CLUSTER_SIZE_1 2000L
203
204 int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell));
205 /* The following value may seem large, but note that if we get to GC at
206 * all, this means that we have a numerically intensive application.
207 */
208 int scm_default_min_yield_2 = 40;
209 #define SCM_CLUSTER_SIZE_2 1000L
210
211 int scm_default_max_segment_size = 2097000L; /* a little less (adm) than 2 Mb */
212
213 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
214 #ifdef _QC
215 # define SCM_HEAP_SEG_SIZE 32768L
216 #else
217 # ifdef sequent
218 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
219 # else
220 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
221 # endif
222 #endif
223 /* Make the heap grow by a factor of 1.5 */
224 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
225 #define SCM_INIT_MALLOC_LIMIT 100000
226 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
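
/* An illustrative sketch (not compiled) of how the parameters above
 * combine; the helper names are hypothetical.  SCM_EXPHEAP gives the
 * increment by which the heap grows, i.e. half the current size, and
 * the trigger-raising rule mirrors the one in scm_must_malloc below.
 */
#if 0
static scm_sizet
heap_growth_increment (scm_sizet current_heap_size)
{
  return SCM_EXPHEAP (current_heap_size);  /* grow by 50%, factor 1.5 */
}

static unsigned long
raised_mtrigger (unsigned long nm, unsigned long mtrigger)
{
  /* Only raise the trigger if the last GC failed to bring the malloc
   * count at least SCM_MTRIGGER_HYSTERESIS bytes below it. */
  if (nm > mtrigger - SCM_MTRIGGER_HYSTERESIS)
    return (nm > mtrigger) ? nm + nm / 2 : mtrigger + mtrigger / 2;
  return mtrigger;
}
#endif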
227
228 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell-aligned inner
229 bounds for allocated storage */
230
231 #ifdef PROT386
232 /* in 386 protected mode we must only adjust the offset */
233 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
234 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
235 #else
236 # ifdef _UNICOS
237 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
238 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
239 # else
240 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
241 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
242 # endif /* UNICOS */
243 #endif /* PROT386 */
244 #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
245 #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
246 #define SCM_HEAP_SIZE \
247 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
248 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
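
/* Illustrative only (not compiled): CELL_UP rounds a pointer up and
 * CELL_DN rounds it down to a span-aligned cell boundary.  The numbers
 * in the comment assume 8-byte single cells; actual sizes are platform
 * dependent.
 */
#if 0
static void
alignment_example (void)
{
  char *p = (char *) 0x1001;
  /* With sizeof (scm_cell) == 8 and span == 1:
   *   CELL_UP (p, 1) == (SCM_CELLPTR) 0x1008
   *   CELL_DN (p, 1) == (SCM_CELLPTR) 0x1000
   * A span of 2 (double cells) aligns to 16-byte boundaries instead. */
  SCM_CELLPTR up = CELL_UP (p, 1);
  SCM_CELLPTR dn = CELL_DN (p, 1);
}
#endif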
249
250
251 \f
252 /* scm_freelists
253 */
254
255 typedef struct scm_freelist_t {
256 /* collected cells */
257 SCM cells;
258 /* number of cells left to collect before cluster is full */
259 unsigned int left_to_collect;
260 /* number of clusters which have been allocated */
261 unsigned int clusters_allocated;
262 /* a list of freelists, each of size cluster_size,
263 * except the last one which may be shorter
264 */
265 SCM clusters;
266 SCM *clustertail;
267 /* this is the number of objects in each cluster, including the spine cell */
268 int cluster_size;
269 /* indicates that we should grow the heap instead of GC'ing
270 */
271 int grow_heap_p;
272 /* minimum yield on this list in order not to grow the heap
273 */
274 long min_yield;
275 /* defines min_yield as percent of total heap size
276 */
277 int min_yield_fraction;
278 /* number of cells per object on this list */
279 int span;
280 /* number of collected cells during last GC */
281 long collected;
282 /* number of collected cells during penultimate GC */
283 long collected_1;
284 /* total number of cells in heap segments
285 * belonging to this list.
286 */
287 long heap_size;
288 } scm_freelist_t;
289
290 SCM scm_freelist = SCM_EOL;
291 scm_freelist_t scm_master_freelist = {
292 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
293 };
294 SCM scm_freelist2 = SCM_EOL;
295 scm_freelist_t scm_master_freelist2 = {
296 SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
297 };
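
/* An illustrative picture of the cluster organization described above:
 *
 *   master->clusters --> [spine] --cdr--> [spine] --cdr--> ... --> SCM_EOL
 *                           |
 *                          car
 *                           |
 *                           v
 *                        free cell --> free cell --> ... --> SCM_EOL
 *
 * Each spine cell's car points to a chain of cluster_size - 1 collectable
 * cells; its cdr links to the next cluster (see init_heap_seg below).
 */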
298
299 /* scm_mtrigger
300 * is the number of bytes of must_malloc allocation needed to trigger gc.
301 */
302 unsigned long scm_mtrigger;
303
304
305 /* scm_gc_heap_lock
306 * If set, don't expand the heap. Set only during gc, during which no allocation
307 * is supposed to take place anyway.
308 */
309 int scm_gc_heap_lock = 0;
310
311 /* GC Blocking
312 * Don't pause for collection if this is set -- just
313 * expand the heap.
314 */
315 int scm_block_gc = 1;
316
317 /* During collection, this accumulates objects holding
318 * weak references.
319 */
320 SCM scm_weak_vectors;
321
322 /* During collection, this accumulates structures which are to be freed.
323 */
324 SCM scm_structs_to_free;
325
326 /* GC Statistics Keeping
327 */
328 unsigned long scm_cells_allocated = 0;
329 long scm_mallocated = 0;
330 unsigned long scm_gc_cells_collected;
331 unsigned long scm_gc_yield;
332 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
333 unsigned long scm_gc_malloc_collected;
334 unsigned long scm_gc_ports_collected;
335 unsigned long scm_gc_time_taken = 0;
336 static unsigned long t_before_gc;
337 static unsigned long t_before_sweep;
338 unsigned long scm_gc_mark_time_taken = 0;
339 unsigned long scm_gc_sweep_time_taken = 0;
340 unsigned long scm_gc_times = 0;
341 unsigned long scm_gc_cells_swept = 0;
342 double scm_gc_cells_marked_acc = 0.;
343 double scm_gc_cells_swept_acc = 0.;
344
345 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
346 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
347 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
348 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
349 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
350 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
351 SCM_SYMBOL (sym_gc_mark_time_taken, "gc-mark-time-taken");
352 SCM_SYMBOL (sym_gc_sweep_time_taken, "gc-sweep-time-taken");
353 SCM_SYMBOL (sym_times, "gc-times");
354 SCM_SYMBOL (sym_cells_marked, "cells-marked");
355 SCM_SYMBOL (sym_cells_swept, "cells-swept");
356
357 typedef struct scm_heap_seg_data_t
358 {
359 /* lower and upper bounds of the segment */
360 SCM_CELLPTR bounds[2];
361
362 /* address of the head-of-freelist pointer for this segment's cells.
363 All segments usually point to the same one, scm_freelist. */
364 scm_freelist_t *freelist;
365
366 /* number of cells per object in this segment */
367 int span;
368 } scm_heap_seg_data_t;
369
370
371
372 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
373
374 typedef enum { return_on_error, abort_on_error } policy_on_error;
375 static void alloc_some_heap (scm_freelist_t *, policy_on_error);
376
377
378 \f
379 /* Debugging functions. */
380
381 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
382
383 /* Return the number of the heap segment containing CELL. */
384 static int
385 which_seg (SCM cell)
386 {
387 int i;
388
389 for (i = 0; i < scm_n_heap_segs; i++)
390 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
391 && SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
392 return i;
393 fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
394 SCM_UNPACK (cell));
395 abort ();
396 }
397
398
399 static void
400 map_free_list (scm_freelist_t *master, SCM freelist)
401 {
402 int last_seg = -1, count = 0;
403 SCM f;
404
405 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
406 {
407 int this_seg = which_seg (f);
408
409 if (this_seg != last_seg)
410 {
411 if (last_seg != -1)
412 fprintf (stderr, " %5d %d-cells in segment %d\n",
413 count, master->span, last_seg);
414 last_seg = this_seg;
415 count = 0;
416 }
417 count++;
418 }
419 if (last_seg != -1)
420 fprintf (stderr, " %5d %d-cells in segment %d\n",
421 count, master->span, last_seg);
422 }
423
424 SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
425 (),
426 "Print debugging information about the free-list.\n"
427 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
428 #define FUNC_NAME s_scm_map_free_list
429 {
430 int i;
431 fprintf (stderr, "%d segments total (%d:%d",
432 scm_n_heap_segs,
433 scm_heap_table[0].span,
434 scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
435 for (i = 1; i < scm_n_heap_segs; i++)
436 fprintf (stderr, ", %d:%d",
437 scm_heap_table[i].span,
438 scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
439 fprintf (stderr, ")\n");
440 map_free_list (&scm_master_freelist, scm_freelist);
441 map_free_list (&scm_master_freelist2, scm_freelist2);
442 fflush (stderr);
443
444 return SCM_UNSPECIFIED;
445 }
446 #undef FUNC_NAME
447
448 static int last_cluster;
449 static int last_size;
450
451 static int
452 free_list_length (char *title, int i, SCM freelist)
453 {
454 SCM ls;
455 int n = 0;
456 for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
457 if (SCM_FREE_CELL_P (ls))
458 ++n;
459 else
460 {
461 fprintf (stderr, "bad cell in %s at position %d\n", title, n);
462 abort ();
463 }
464 if (n != last_size)
465 {
466 if (i > 0)
467 {
468 if (last_cluster == i - 1)
469 fprintf (stderr, "\t%d\n", last_size);
470 else
471 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
472 }
473 if (i >= 0)
474 fprintf (stderr, "%s %d", title, i);
475 else
476 fprintf (stderr, "%s\t%d\n", title, n);
477 last_cluster = i;
478 last_size = n;
479 }
480 return n;
481 }
482
483 static void
484 free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
485 {
486 SCM clusters;
487 int i = 0, len, n = 0;
488 fprintf (stderr, "%s\n\n", title);
489 n += free_list_length ("free list", -1, freelist);
490 for (clusters = master->clusters;
491 SCM_NNULLP (clusters);
492 clusters = SCM_CDR (clusters))
493 {
494 len = free_list_length ("cluster", i++, SCM_CAR (clusters));
495 n += len;
496 }
497 if (last_cluster == i - 1)
498 fprintf (stderr, "\t%d\n", last_size);
499 else
500 fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
501 fprintf (stderr, "\ntotal %d objects\n\n", n);
502 }
503
504 SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
505 (),
506 "Print debugging information about the free-list.\n"
507 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
508 #define FUNC_NAME s_scm_free_list_length
509 {
510 free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
511 free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
512 return SCM_UNSPECIFIED;
513 }
514 #undef FUNC_NAME
515
516 #endif
517
518 #ifdef GUILE_DEBUG_FREELIST
519
520 /* Number of calls to SCM_NEWCELL since startup. */
521 static unsigned long scm_newcell_count;
522 static unsigned long scm_newcell2_count;
523
524 /* Search freelist for anything that isn't marked as a free cell.
525 Abort if we find something. */
526 static void
527 scm_check_freelist (SCM freelist)
528 {
529 SCM f;
530 int i = 0;
531
532 for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
533 if (!SCM_FREE_CELL_P (f))
534 {
535 fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
536 scm_newcell_count, i);
537 abort ();
538 }
539 }
540
541 static int scm_debug_check_freelist = 0;
542
543 SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
544 (SCM flag),
545 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
546 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
547 "compile-time flag was selected.\n")
548 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
549 {
550 SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
551 return SCM_UNSPECIFIED;
552 }
553 #undef FUNC_NAME
554
555
556 SCM
557 scm_debug_newcell (void)
558 {
559 SCM new;
560
561 scm_newcell_count++;
562 if (scm_debug_check_freelist)
563 {
564 scm_check_freelist (scm_freelist);
565 scm_gc();
566 }
567
568 /* The rest of this is supposed to be identical to the SCM_NEWCELL
569 macro. */
570 if (SCM_NULLP (scm_freelist))
571 new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
572 else
573 {
574 new = scm_freelist;
575 scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
576 SCM_SET_FREE_CELL_TYPE (new, scm_tc16_allocated);
577 }
578
579 return new;
580 }
581
582 SCM
583 scm_debug_newcell2 (void)
584 {
585 SCM new;
586
587 scm_newcell2_count++;
588 if (scm_debug_check_freelist)
589 {
590 scm_check_freelist (scm_freelist2);
591 scm_gc ();
592 }
593
594 /* The rest of this is supposed to be identical to the SCM_NEWCELL
595 macro. */
596 if (SCM_NULLP (scm_freelist2))
597 new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
598 else
599 {
600 new = scm_freelist2;
601 scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
602 SCM_SET_FREE_CELL_TYPE (new, scm_tc16_allocated);
603 }
604
605 return new;
606 }
607
608 #endif /* GUILE_DEBUG_FREELIST */
609
610 \f
611
612 static unsigned long
613 master_cells_allocated (scm_freelist_t *master)
614 {
615 int objects = master->clusters_allocated * (master->cluster_size - 1);
616 if (SCM_NULLP (master->clusters))
617 objects -= master->left_to_collect;
618 return master->span * objects;
619 }
620
621 static unsigned long
622 freelist_length (SCM freelist)
623 {
624 int n;
625 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
626 ++n;
627 return n;
628 }
629
630 static unsigned long
631 compute_cells_allocated ()
632 {
633 return (scm_cells_allocated
634 + master_cells_allocated (&scm_master_freelist)
635 + master_cells_allocated (&scm_master_freelist2)
636 - scm_master_freelist.span * freelist_length (scm_freelist)
637 - scm_master_freelist2.span * freelist_length (scm_freelist2));
638 }
639
640 /* {Scheme Interface to GC}
641 */
642
643 SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
644 (),
645 "Returns an association list of statistics about Guile's current use of storage. ")
646 #define FUNC_NAME s_scm_gc_stats
647 {
648 int i;
649 int n;
650 SCM heap_segs;
651 long int local_scm_mtrigger;
652 long int local_scm_mallocated;
653 long int local_scm_heap_size;
654 long int local_scm_cells_allocated;
655 long int local_scm_gc_time_taken;
656 long int local_scm_gc_times;
657 long int local_scm_gc_mark_time_taken;
658 long int local_scm_gc_sweep_time_taken;
659 double local_scm_gc_cells_swept;
660 double local_scm_gc_cells_marked;
661 SCM answer;
662
663 SCM_DEFER_INTS;
664
665 ++scm_block_gc;
666
667 retry:
668 heap_segs = SCM_EOL;
669 n = scm_n_heap_segs;
670 for (i = scm_n_heap_segs; i--; )
671 heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
672 scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
673 heap_segs);
674 if (scm_n_heap_segs != n)
675 goto retry;
676
677 --scm_block_gc;
678
679 /* Below, we cons to produce the resulting list. We want a snapshot of
680 * the heap situation before consing.
681 */
682 local_scm_mtrigger = scm_mtrigger;
683 local_scm_mallocated = scm_mallocated;
684 local_scm_heap_size = SCM_HEAP_SIZE;
685 local_scm_cells_allocated = compute_cells_allocated ();
686 local_scm_gc_time_taken = scm_gc_time_taken;
687 local_scm_gc_mark_time_taken = scm_gc_mark_time_taken;
688 local_scm_gc_sweep_time_taken = scm_gc_sweep_time_taken;
689 local_scm_gc_times = scm_gc_times;
690 local_scm_gc_cells_swept = scm_gc_cells_swept_acc;
691 local_scm_gc_cells_marked = scm_gc_cells_marked_acc;
692
693 answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
694 scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
695 scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
696 scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
697 scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
698 scm_cons (sym_times, scm_ulong2num (local_scm_gc_times)),
699 scm_cons (sym_gc_mark_time_taken, scm_ulong2num (local_scm_gc_mark_time_taken)),
700 scm_cons (sym_gc_sweep_time_taken, scm_ulong2num (local_scm_gc_sweep_time_taken)),
701 scm_cons (sym_cells_marked, scm_dbl2big (local_scm_gc_cells_marked)),
702 scm_cons (sym_cells_swept, scm_dbl2big (local_scm_gc_cells_swept)),
703 scm_cons (sym_heap_segments, heap_segs),
704 SCM_UNDEFINED);
705 SCM_ALLOW_INTS;
706 return answer;
707 }
708 #undef FUNC_NAME
709
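/* An illustrative sketch (not compiled) of consuming the alist returned
 * by gc-stats, here from C; the Scheme equivalent is
 * (assq 'gc-time-taken (gc-stats)).
 */
#if 0
static SCM
gc_time_taken_stat (void)
{
  SCM stats = scm_gc_stats ();
  SCM entry = scm_assq (sym_gc_time_taken, stats);
  return SCM_NFALSEP (entry) ? SCM_CDR (entry) : SCM_BOOL_F;
}
#endif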
710
711 static void
712 gc_start_stats (const char *what)
713 {
714 t_before_gc = scm_c_get_internal_run_time ();
715 scm_gc_cells_swept = 0;
716 scm_gc_cells_collected = 0;
717 scm_gc_yield_1 = scm_gc_yield;
718 scm_gc_yield = (scm_cells_allocated
719 + master_cells_allocated (&scm_master_freelist)
720 + master_cells_allocated (&scm_master_freelist2));
721 scm_gc_malloc_collected = 0;
722 scm_gc_ports_collected = 0;
723 }
724
725
726 static void
727 gc_end_stats ()
728 {
729 unsigned long t = scm_c_get_internal_run_time ();
730 scm_gc_time_taken += (t - t_before_gc);
731 scm_gc_sweep_time_taken += (t - t_before_sweep);
732 ++scm_gc_times;
733
734 scm_gc_cells_marked_acc += scm_gc_cells_swept - scm_gc_cells_collected;
735 scm_gc_cells_swept_acc += scm_gc_cells_swept;
736 }
737
738
739 SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
740 (SCM obj),
741 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
742 "returned by this function for @var{obj}")
743 #define FUNC_NAME s_scm_object_address
744 {
745 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
746 }
747 #undef FUNC_NAME
748
749
750 SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
751 (),
752 "Scans all of SCM objects and reclaims for further use those that are\n"
753 "no longer accessible.")
754 #define FUNC_NAME s_scm_gc
755 {
756 SCM_DEFER_INTS;
757 scm_igc ("call");
758 SCM_ALLOW_INTS;
759 return SCM_UNSPECIFIED;
760 }
761 #undef FUNC_NAME
762
763
764 \f
765 /* {C Interface For When GC is Triggered}
766 */
767
768 static void
769 adjust_min_yield (scm_freelist_t *freelist)
770 {
771 /* min yield is adjusted upwards so that the next predicted total yield
772 * (allocated cells actually freed by GC) becomes
773 * `min_yield_fraction' percent of the total heap size. Note, however, that
774 * the absolute value of min_yield will correspond to `collected'
775 * on one master (the one which currently is triggering GC).
776 *
777 * The reason why we look at total yield instead of cells collected
778 * on one list is that we want to take other freelists into account.
779 * On this freelist, we know that (local) yield = collected cells,
780 * but that's probably not the case on the other lists.
781 *
782 * (We might consider computing a better prediction, for example
783 * by computing an average over multiple GCs.)
784 */
785 if (freelist->min_yield_fraction)
786 {
787 /* Pick largest of last two yields. */
788 int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
789 - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
790 #ifdef DEBUGINFO
791 fprintf (stderr, " after GC = %d, delta = %d\n",
792 scm_cells_allocated,
793 delta);
794 #endif
795 if (delta > 0)
796 freelist->min_yield += delta;
797 }
798 }
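
/* A worked example of the adjustment above (numbers are illustrative):
 * with a total heap of 100000 cells and min_yield_fraction == 40, the
 * target yield is 40000 cells.  If the larger of the last two yields
 * was only 30000 cells, delta == 10000 and min_yield is raised by that
 * amount, making it more likely that the next shortage grows the heap
 * instead of triggering another low-yield collection. */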
799
800
801 /* When we get POSIX threads support, the master will be global and
802 * common while the freelist will be individual for each thread.
803 */
804
805 SCM
806 scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
807 {
808 SCM cell;
809 ++scm_ints_disabled;
810 do
811 {
812 if (SCM_NULLP (master->clusters))
813 {
814 if (master->grow_heap_p || scm_block_gc)
815 {
816 /* In order to reduce gc frequency, try to allocate a new heap
817 * segment first, even if gc might find some free cells. If we
818 * can't obtain a new heap segment, we will try gc later.
819 */
820 master->grow_heap_p = 0;
821 alloc_some_heap (master, return_on_error);
822 }
823 if (SCM_NULLP (master->clusters))
824 {
825 /* The heap was not grown, either because it wasn't scheduled to
826 * grow, or because there was not enough memory available. In
827 * both cases we have to try gc to get some free cells.
828 */
829 #ifdef DEBUGINFO
830 fprintf (stderr, "allocated = %d, ",
831 scm_cells_allocated
832 + master_cells_allocated (&scm_master_freelist)
833 + master_cells_allocated (&scm_master_freelist2));
834 #endif
835 scm_igc ("cells");
836 adjust_min_yield (master);
837 if (SCM_NULLP (master->clusters))
838 {
839 /* gc could not free any cells. Now, we _must_ allocate a
840 * new heap segment, because there is no other way
841 * to provide a new cell for the caller.
842 */
843 alloc_some_heap (master, abort_on_error);
844 }
845 }
846 }
847 cell = SCM_CAR (master->clusters);
848 master->clusters = SCM_CDR (master->clusters);
849 ++master->clusters_allocated;
850 }
851 while (SCM_NULLP (cell));
852 --scm_ints_disabled;
853 *freelist = SCM_FREE_CELL_CDR (cell);
854 SCM_SET_FREE_CELL_TYPE (cell, scm_tc16_allocated);
855 return cell;
856 }
857
858
859 #if 0
860 /* This is a support routine which can be used to reserve a cluster
861 * for some special use, such as debugging. It won't be useful until
862 * free cells are preserved between garbage collections.
863 */
864
865 SCM
866 scm_alloc_cluster (scm_freelist_t *master)
867 {
868 SCM freelist, cell;
869 cell = scm_gc_for_newcell (master, &freelist);
870 SCM_SETCDR (cell, freelist);
871 return cell;
872 }
873 #endif
874
875
876 scm_c_hook_t scm_before_gc_c_hook;
877 scm_c_hook_t scm_before_mark_c_hook;
878 scm_c_hook_t scm_before_sweep_c_hook;
879 scm_c_hook_t scm_after_sweep_c_hook;
880 scm_c_hook_t scm_after_gc_c_hook;
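
/* An illustrative sketch (not compiled) of attaching a C function to one
 * of these hooks.  It assumes the scm_c_hook_add interface declared in
 * libguile/hooks.h; see that header for the authoritative signature.
 */
#if 0
static void *
my_before_gc_callback (void *hook_data, void *func_data, void *data)
{
  /* Runs at the start of every collection. */
  return NULL;
}

static void
install_gc_callback (void)
{
  scm_c_hook_add (&scm_before_gc_c_hook, my_before_gc_callback, 0, 0);
}
#endif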
881
882
883 void
884 scm_igc (const char *what)
885 {
886 int j;
887
888 ++scm_gc_running_p;
889 scm_c_hook_run (&scm_before_gc_c_hook, 0);
890 #ifdef DEBUGINFO
891 fprintf (stderr,
892 SCM_NULLP (scm_freelist)
893 ? "*"
894 : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
895 #endif
896 #ifdef USE_THREADS
897 /* During the critical section, only the current thread may run. */
898 SCM_THREAD_CRITICAL_SECTION_START;
899 #endif
900
901 /* fprintf (stderr, "gc: %s\n", what); */
902
903 if (!scm_stack_base || scm_block_gc)
904 {
905 --scm_gc_running_p;
906 return;
907 }
908
909 gc_start_stats (what);
910
911 if (scm_mallocated < 0)
912 /* The byte count of allocated objects has underflowed. This is
913 probably because you forgot to report the sizes of objects you
914 have allocated, by calling scm_done_malloc or some such. When
915 the GC freed them, it subtracted their size from
916 scm_mallocated, which underflowed. */
917 abort ();
918
919 if (scm_gc_heap_lock)
920 /* We've invoked the collector while a GC is already in progress.
921 That should never happen. */
922 abort ();
923
924 ++scm_gc_heap_lock;
925
926 /* flush dead entries from the continuation stack */
927 {
928 int x;
929 int bound;
930 SCM * elts;
931 elts = SCM_VELTS (scm_continuation_stack);
932 bound = SCM_LENGTH (scm_continuation_stack);
933 x = SCM_INUM (scm_continuation_stack_ptr);
934 while (x < bound)
935 {
936 elts[x] = SCM_BOOL_F;
937 ++x;
938 }
939 }
940
941 scm_c_hook_run (&scm_before_mark_c_hook, 0);
942
943 #ifndef USE_THREADS
944
945 /* Protect from the C stack. This must be the first marking
946 * done because it provides information about what objects
947 * are "in-use" by the C code. "in-use" objects are those
948 * for which the values from SCM_LENGTH and SCM_CHARS must remain
949 * usable. This requirement is stricter than a liveness
950 * requirement -- in particular, it constrains the implementation
951 * of scm_vector_set_length_x.
952 */
953 SCM_FLUSH_REGISTER_WINDOWS;
954 /* This assumes that all registers are saved into the jmp_buf */
955 setjmp (scm_save_regs_gc_mark);
956 scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
957 ( (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
958 sizeof scm_save_regs_gc_mark)
959 / sizeof (SCM_STACKITEM)));
960
961 {
962 scm_sizet stack_len = scm_stack_size (scm_stack_base);
963 #ifdef SCM_STACK_GROWS_UP
964 scm_mark_locations (scm_stack_base, stack_len);
965 #else
966 scm_mark_locations (scm_stack_base - stack_len, stack_len);
967 #endif
968 }
969
970 #else /* USE_THREADS */
971
972 /* Mark every thread's stack and registers */
973 scm_threads_mark_stacks ();
974
975 #endif /* USE_THREADS */
976
977 /* FIXME: insert a phase to un-protect string-data preserved
978 * in scm_vector_set_length_x.
979 */
980
981 j = SCM_NUM_PROTECTS;
982 while (j--)
983 scm_gc_mark (scm_sys_protects[j]);
984
985 /* FIXME: we should have a means to register C functions to be run
986 * in different phases of GC
987 */
988 scm_mark_subr_table ();
989
990 #ifndef USE_THREADS
991 scm_gc_mark (scm_root->handle);
992 #endif
993
994 t_before_sweep = scm_c_get_internal_run_time ();
995 scm_gc_mark_time_taken += (t_before_sweep - t_before_gc);
996
997 scm_c_hook_run (&scm_before_sweep_c_hook, 0);
998
999 scm_gc_sweep ();
1000
1001 scm_c_hook_run (&scm_after_sweep_c_hook, 0);
1002
1003 --scm_gc_heap_lock;
1004 gc_end_stats ();
1005
1006 #ifdef USE_THREADS
1007 SCM_THREAD_CRITICAL_SECTION_END;
1008 #endif
1009 scm_c_hook_run (&scm_after_gc_c_hook, 0);
1010 --scm_gc_running_p;
1011 }
1012
1013 \f
1014
1015 /* {Mark/Sweep}
1016 */
1017
1018
1019
1020 /* Mark an object precisely.
1021 */
1022 void
1023 scm_gc_mark (SCM p)
1024 #define FUNC_NAME "scm_gc_mark"
1025 {
1026 register long i;
1027 register SCM ptr;
1028
1029 ptr = p;
1030
1031 gc_mark_loop:
1032 if (SCM_IMP (ptr))
1033 return;
1034
1035 gc_mark_nimp:
1036 if (!SCM_CELLP (ptr))
1037 SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);
1038
1039 switch (SCM_TYP7 (ptr))
1040 {
1041 case scm_tcs_cons_nimcar:
1042 if (SCM_GCMARKP (ptr))
1043 break;
1044 SCM_SETGCMARK (ptr);
1045 if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
1046 {
1047 ptr = SCM_CAR (ptr);
1048 goto gc_mark_nimp;
1049 }
1050 scm_gc_mark (SCM_CAR (ptr));
1051 ptr = SCM_GCCDR (ptr);
1052 goto gc_mark_nimp;
1053 case scm_tcs_cons_imcar:
1054 if (SCM_GCMARKP (ptr))
1055 break;
1056 SCM_SETGCMARK (ptr);
1057 ptr = SCM_GCCDR (ptr);
1058 goto gc_mark_loop;
1059 case scm_tc7_pws:
1060 if (SCM_GCMARKP (ptr))
1061 break;
1062 SCM_SETGCMARK (ptr);
1063 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
1064 ptr = SCM_GCCDR (ptr);
1065 goto gc_mark_loop;
1066 case scm_tcs_cons_gloc:
1067 if (SCM_GCMARKP (ptr))
1068 break;
1069 SCM_SETGCMARK (ptr);
1070 {
1071 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1072 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1073 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1074 * pointer to a struct vtable data region. The fact that these are
1075 * accessed in the same way restricts the possibilities for changing the
1076 * data layout of structs or heap cells.
1077 */
1078 scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
1079 scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
1080 if (vtable_data [scm_vtable_index_vcell] != 0)
1081 {
1082 /* ptr is a gloc */
1083 SCM gloc_car = SCM_PACK (word0);
1084 scm_gc_mark (gloc_car);
1085 ptr = SCM_GCCDR (ptr);
1086 goto gc_mark_loop;
1087 }
1088 else
1089 {
1090 /* ptr is a struct */
1091 SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
1092 int len = SCM_LENGTH (layout);
1093 char * fields_desc = SCM_CHARS (layout);
1094 /* We're using SCM_GCCDR here like STRUCT_DATA, except
1095 that it removes the mark */
1096 scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));
1097
1098 if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
1099 {
1100 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
1101 scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
1102 }
1103 if (len)
1104 {
1105 int x;
1106
1107 for (x = 0; x < len - 2; x += 2, ++struct_data)
1108 if (fields_desc[x] == 'p')
1109 scm_gc_mark (SCM_PACK (*struct_data));
1110 if (fields_desc[x] == 'p')
1111 {
1112 if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
1113 for (x = *struct_data; x; --x)
1114 scm_gc_mark (SCM_PACK (*++struct_data));
1115 else
1116 scm_gc_mark (SCM_PACK (*struct_data));
1117 }
1118 }
1119 /* mark vtable */
1120 ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
1121 goto gc_mark_loop;
1122 }
1123 }
1124 break;
1125 case scm_tcs_closures:
1126 if (SCM_GCMARKP (ptr))
1127 break;
1128 SCM_SETGCMARK (ptr);
1129 if (SCM_IMP (SCM_CDR (ptr)))
1130 {
1131 ptr = SCM_CLOSCAR (ptr);
1132 goto gc_mark_nimp;
1133 }
1134 scm_gc_mark (SCM_CLOSCAR (ptr));
1135 ptr = SCM_GCCDR (ptr);
1136 goto gc_mark_nimp;
1137 case scm_tc7_vector:
1138 case scm_tc7_lvector:
1139 #ifdef CCLO
1140 case scm_tc7_cclo:
1141 #endif
1142 if (SCM_GC8MARKP (ptr))
1143 break;
1144 SCM_SETGC8MARK (ptr);
1145 i = SCM_LENGTH (ptr);
1146 if (i == 0)
1147 break;
1148 while (--i > 0)
1149 if (SCM_NIMP (SCM_VELTS (ptr)[i]))
1150 scm_gc_mark (SCM_VELTS (ptr)[i]);
1151 ptr = SCM_VELTS (ptr)[0];
1152 goto gc_mark_loop;
1153 case scm_tc7_contin:
1154 if SCM_GC8MARKP
1155 (ptr) break;
1156 SCM_SETGC8MARK (ptr);
1157 if (SCM_VELTS (ptr))
1158 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
1159 (scm_sizet)
1160 (SCM_LENGTH (ptr) +
1161 (sizeof (SCM_STACKITEM) + -1 +
1162 sizeof (scm_contregs)) /
1163 sizeof (SCM_STACKITEM)));
1164 break;
1165 #ifdef HAVE_ARRAYS
1166 case scm_tc7_bvect:
1167 case scm_tc7_byvect:
1168 case scm_tc7_ivect:
1169 case scm_tc7_uvect:
1170 case scm_tc7_fvect:
1171 case scm_tc7_dvect:
1172 case scm_tc7_cvect:
1173 case scm_tc7_svect:
1174 #ifdef HAVE_LONG_LONGS
1175 case scm_tc7_llvect:
1176 #endif
1177 #endif
1178 case scm_tc7_string:
1179 SCM_SETGC8MARK (ptr);
1180 break;
1181
1182 case scm_tc7_substring:
1183 if (SCM_GC8MARKP(ptr))
1184 break;
1185 SCM_SETGC8MARK (ptr);
1186 ptr = SCM_CDR (ptr);
1187 goto gc_mark_loop;
1188
1189 case scm_tc7_wvect:
1190 if (SCM_GC8MARKP(ptr))
1191 break;
1192 SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
1193 scm_weak_vectors = ptr;
1194 SCM_SETGC8MARK (ptr);
1195 if (SCM_IS_WHVEC_ANY (ptr))
1196 {
1197 int x;
1198 int len;
1199 int weak_keys;
1200 int weak_values;
1201
1202 len = SCM_LENGTH (ptr);
1203 weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
1204 weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);
1205
1206 for (x = 0; x < len; ++x)
1207 {
1208 SCM alist;
1209 alist = SCM_VELTS (ptr)[x];
1210
1211 /* mark everything on the alist except the keys or
1212 * values, according to weak_values and weak_keys. */
1213 while ( SCM_CONSP (alist)
1214 && !SCM_GCMARKP (alist)
1215 && SCM_CONSP (SCM_CAR (alist)))
1216 {
1217 SCM kvpair;
1218 SCM next_alist;
1219
1220 kvpair = SCM_CAR (alist);
1221 next_alist = SCM_CDR (alist);
1222 /*
1223 * Do not do this:
1224 * SCM_SETGCMARK (alist);
1225 * SCM_SETGCMARK (kvpair);
1226 *
1227 * It may be that either the key or value is protected by
1228 * an escaped reference to part of the spine of this alist.
1229 * If we mark the spine here, and only mark one or neither of the
1230 * key and value, they may never be properly marked.
1231 * This leads to a horrible situation in which an alist containing
1232 * freelist cells is exported.
1233 *
1234 * So the spines of these alists are only marked at the very end of marking.
1235 * If somebody confuses us by constructing a weak vector
1236 * with a circular alist then we are hosed, but at least we
1237 * won't prematurely drop table entries.
1238 */
1239 if (!weak_keys)
1240 scm_gc_mark (SCM_CAR (kvpair));
1241 if (!weak_values)
1242 scm_gc_mark (SCM_GCCDR (kvpair));
1243 alist = next_alist;
1244 }
1245 if (SCM_NIMP (alist))
1246 scm_gc_mark (alist);
1247 }
1248 }
1249 break;
1250
1251 case scm_tc7_msymbol:
1252 if (SCM_GC8MARKP(ptr))
1253 break;
1254 SCM_SETGC8MARK (ptr);
1255 scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
1256 ptr = SCM_SYMBOL_PROPS (ptr);
1257 goto gc_mark_loop;
1258 case scm_tc7_ssymbol:
1259 if (SCM_GC8MARKP(ptr))
1260 break;
1261 SCM_SETGC8MARK (ptr);
1262 break;
1263 case scm_tcs_subrs:
1264 break;
1265 case scm_tc7_port:
1266 i = SCM_PTOBNUM (ptr);
1267 if (!(i < scm_numptob))
1268 goto def;
1269 if (SCM_GC8MARKP (ptr))
1270 break;
1271 SCM_SETGC8MARK (ptr);
1272 if (SCM_PTAB_ENTRY(ptr))
1273 scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
1274 if (scm_ptobs[i].mark)
1275 {
1276 ptr = (scm_ptobs[i].mark) (ptr);
1277 goto gc_mark_loop;
1278 }
1279 else
1280 return;
1281 break;
1282 case scm_tc7_smob:
1283 if (SCM_GC8MARKP (ptr))
1284 break;
1285 SCM_SETGC8MARK (ptr);
1286 switch (SCM_GCTYP16 (ptr))
1287 { /* should be faster than going through scm_smobs */
1288 case scm_tc_free_cell:
1289 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1290 case scm_tc16_allocated:
1291 case scm_tc16_big:
1292 case scm_tc16_real:
1293 case scm_tc16_complex:
1294 break;
1295 default:
1296 i = SCM_SMOBNUM (ptr);
1297 if (!(i < scm_numsmob))
1298 goto def;
1299 if (scm_smobs[i].mark)
1300 {
1301 ptr = (scm_smobs[i].mark) (ptr);
1302 goto gc_mark_loop;
1303 }
1304 else
1305 return;
1306 }
1307 break;
1308 default:
1309 def:
1310 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1311 }
1312 }
1313 #undef FUNC_NAME
1314
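/* For reference, a smob mark function that cooperates with the loop above
 * might look as follows (illustrative only; struct and field names are
 * hypothetical).  It marks all but one sub-object with scm_gc_mark and
 * returns the remaining one, which the caller then traverses iteratively
 * via gc_mark_loop, saving C stack depth.
 */
#if 0
static SCM
mark_my_smob (SCM obj)
{
  struct my_smob *s = (struct my_smob *) SCM_CELL_WORD_1 (obj);
  scm_gc_mark (s->first_field);
  return s->second_field;  /* tail-marked by scm_gc_mark itself */
}
#endif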
1315
1316 /* Mark a Region Conservatively
1317 */
1318
1319 void
1320 scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
1321 {
1322 unsigned long m;
1323
1324 for (m = 0; m < n; ++m)
1325 {
1326 SCM obj = * (SCM *) &x[m];
1327 if (SCM_CELLP (obj))
1328 {
1329 SCM_CELLPTR ptr = SCM2PTR (obj);
1330 int i = 0;
1331 int j = scm_n_heap_segs - 1;
1332 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1333 && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1334 {
1335 while (i <= j)
1336 {
1337 int seg_id;
1338 seg_id = -1;
1339 if ((i == j)
1340 || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
1341 seg_id = i;
1342 else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
1343 seg_id = j;
1344 else
1345 {
1346 int k;
1347 k = (i + j) / 2;
1348 if (k == i)
1349 break;
1350 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
1351 {
1352 j = k;
1353 ++i;
1354 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
1355 continue;
1356 else
1357 break;
1358 }
1359 else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
1360 {
1361 i = k;
1362 --j;
1363 if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
1364 continue;
1365 else
1366 break;
1367 }
1368 }
1369 if (scm_heap_table[seg_id].span == 1
1370 || SCM_DOUBLE_CELLP (obj))
1371 {
1372 if (!SCM_FREE_CELL_P (obj))
1373 scm_gc_mark (obj);
1374 }
1375 break;
1376 }
1377 }
1378 }
1379 }
1380 }
1381
1382
1383 /* The function scm_cellp determines whether an SCM value can be regarded as a
1384 * pointer to a cell on the heap. Binary search is used in order to determine
1385 * the heap segment that contains the cell.
1386 */
1387 int
1388 scm_cellp (SCM value)
1389 {
1390 if (SCM_CELLP (value)) {
1391 scm_cell * ptr = SCM2PTR (value);
1392 unsigned int i = 0;
1393 unsigned int j = scm_n_heap_segs - 1;
1394
1395 while (i < j) {
1396 int k = (i + j) / 2;
1397 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1398 j = k;
1399 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1400 i = k + 1;
1401 }
1402 }
1403
1404 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1405 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1406 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
1407 return 1;
1408 } else {
1409 return 0;
1410 }
1411 } else {
1412 return 0;
1413 }
1414 }
1415
1416
1417 static void
1418 gc_sweep_freelist_start (scm_freelist_t *freelist)
1419 {
1420 freelist->cells = SCM_EOL;
1421 freelist->left_to_collect = freelist->cluster_size;
1422 freelist->clusters_allocated = 0;
1423 freelist->clusters = SCM_EOL;
1424 freelist->clustertail = &freelist->clusters;
1425 freelist->collected_1 = freelist->collected;
1426 freelist->collected = 0;
1427 }
1428
1429 static void
1430 gc_sweep_freelist_finish (scm_freelist_t *freelist)
1431 {
1432 int collected;
1433 *freelist->clustertail = freelist->cells;
1434 if (!SCM_NULLP (freelist->cells))
1435 {
1436 SCM c = freelist->cells;
1437 SCM_SETCAR (c, SCM_CDR (c));
1438 SCM_SETCDR (c, SCM_EOL);
1439 freelist->collected +=
1440 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1441 }
1442 scm_gc_cells_collected += freelist->collected;
1443
1444 /* Although freelist->min_yield is used to test freelist->collected
1445 * (which is the local GC yield for freelist), it is adjusted so
1446 * that *total* yield is freelist->min_yield_fraction of total heap
1447 * size. This means that too low a yield is compensated by more
1448 * heap on the list which is currently doing most work, which is
1449 * just what we want.
1450 */
1451 collected = SCM_MAX (freelist->collected_1, freelist->collected);
1452 freelist->grow_heap_p = (collected < freelist->min_yield);
1453 }
1454
1455 void
1456 scm_gc_sweep ()
1457 #define FUNC_NAME "scm_gc_sweep"
1458 {
1459 register SCM_CELLPTR ptr;
1460 register SCM nfreelist;
1461 register scm_freelist_t *freelist;
1462 register long m;
1463 register int span;
1464 long i;
1465 scm_sizet seg_size;
1466
1467 m = 0;
1468
1469 gc_sweep_freelist_start (&scm_master_freelist);
1470 gc_sweep_freelist_start (&scm_master_freelist2);
1471
1472 for (i = 0; i < scm_n_heap_segs; i++)
1473 {
1474 register unsigned int left_to_collect;
1475 register scm_sizet j;
1476
1477 /* Unmarked cells go onto the front of the freelist this heap
1478 segment points to. Rather than updating the real freelist
1479 pointer as we go along, we accumulate the new head in
1480 nfreelist. Then, if it turns out that the entire segment is
1481 free, we free (i.e., malloc's free) the whole segment, and
1482 simply don't assign nfreelist back into the real freelist. */
1483 freelist = scm_heap_table[i].freelist;
1484 nfreelist = freelist->cells;
1485 left_to_collect = freelist->left_to_collect;
1486 span = scm_heap_table[i].span;
1487
1488 ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
1489 seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
1490
1491 scm_gc_cells_swept += seg_size;
1492
1493 for (j = seg_size + span; j -= span; ptr += span)
1494 {
1495 SCM scmptr = PTR2SCM (ptr);
1496
1497 switch SCM_TYP7 (scmptr)
1498 {
1499 case scm_tcs_cons_gloc:
1500 {
1501 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1502 * struct or a gloc. See the corresponding comment in
1503 * scm_gc_mark.
1504 */
1505 scm_bits_t word0 = (SCM_CELL_WORD_0 (scmptr)
1506 - scm_tc3_cons_gloc);
1507 /* access as struct */
1508 scm_bits_t * vtable_data = (scm_bits_t *) word0;
1509 if (SCM_GCMARKP (scmptr))
1510 goto cmrkcontinue;
1511 else if (vtable_data[scm_vtable_index_vcell] == 0)
1512 {
1513 /* Structs need to be freed in a special order.
1514 * This is handled by GC C hooks in struct.c.
1515 */
1516 SCM_SET_STRUCT_GC_CHAIN (scmptr, scm_structs_to_free);
1517 scm_structs_to_free = scmptr;
1518 goto cmrkcontinue;
1519 }
1520 /* fall through so that scmptr gets collected */
1521 }
1522 break;
1523 case scm_tcs_cons_imcar:
1524 case scm_tcs_cons_nimcar:
1525 case scm_tcs_closures:
1526 case scm_tc7_pws:
1527 if (SCM_GCMARKP (scmptr))
1528 goto cmrkcontinue;
1529 break;
1530 case scm_tc7_wvect:
1531 if (SCM_GC8MARKP (scmptr))
1532 {
1533 goto c8mrkcontinue;
1534 }
1535 else
1536 {
1537 m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
1538 scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
1539 break;
1540 }
1541
1542 case scm_tc7_vector:
1543 case scm_tc7_lvector:
1544 #ifdef CCLO
1545 case scm_tc7_cclo:
1546 #endif
1547 if (SCM_GC8MARKP (scmptr))
1548 goto c8mrkcontinue;
1549
1550 m += (SCM_LENGTH (scmptr) * sizeof (SCM));
1551 freechars:
1552 scm_must_free (SCM_CHARS (scmptr));
1553 /* SCM_SETCHARS(scmptr, 0);*/
1554 break;
1555 #ifdef HAVE_ARRAYS
1556 case scm_tc7_bvect:
1557 if SCM_GC8MARKP (scmptr)
1558 goto c8mrkcontinue;
1559 m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
1560 goto freechars;
1561 case scm_tc7_byvect:
1562 if SCM_GC8MARKP (scmptr)
1563 goto c8mrkcontinue;
1564 m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
1565 goto freechars;
1566 case scm_tc7_ivect:
1567 case scm_tc7_uvect:
1568 if SCM_GC8MARKP (scmptr)
1569 goto c8mrkcontinue;
1570 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
1571 goto freechars;
1572 case scm_tc7_svect:
1573 if SCM_GC8MARKP (scmptr)
1574 goto c8mrkcontinue;
1575 m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
1576 goto freechars;
1577 #ifdef HAVE_LONG_LONGS
1578 case scm_tc7_llvect:
1579 if SCM_GC8MARKP (scmptr)
1580 goto c8mrkcontinue;
1581 m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
1582 goto freechars;
1583 #endif
1584 case scm_tc7_fvect:
1585 if SCM_GC8MARKP (scmptr)
1586 goto c8mrkcontinue;
1587 m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
1588 goto freechars;
1589 case scm_tc7_dvect:
1590 if SCM_GC8MARKP (scmptr)
1591 goto c8mrkcontinue;
1592 m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
1593 goto freechars;
1594 case scm_tc7_cvect:
1595 if SCM_GC8MARKP (scmptr)
1596 goto c8mrkcontinue;
1597 m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
1598 goto freechars;
1599 #endif
1600 case scm_tc7_substring:
1601 if (SCM_GC8MARKP (scmptr))
1602 goto c8mrkcontinue;
1603 break;
1604 case scm_tc7_string:
1605 if (SCM_GC8MARKP (scmptr))
1606 goto c8mrkcontinue;
1607 m += SCM_HUGE_LENGTH (scmptr) + 1;
1608 goto freechars;
1609 case scm_tc7_msymbol:
1610 if (SCM_GC8MARKP (scmptr))
1611 goto c8mrkcontinue;
1612 m += (SCM_LENGTH (scmptr) + 1
1613 + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
1614 scm_must_free ((char *)SCM_SLOTS (scmptr));
1615 break;
1616 case scm_tc7_contin:
1617 if SCM_GC8MARKP (scmptr)
1618 goto c8mrkcontinue;
1619 m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
1620 if (SCM_VELTS (scmptr))
1621 goto freechars;
1622 case scm_tc7_ssymbol:
1623 if SCM_GC8MARKP(scmptr)
1624 goto c8mrkcontinue;
1625 break;
1626 case scm_tcs_subrs:
1627 continue;
1628 case scm_tc7_port:
1629 if SCM_GC8MARKP (scmptr)
1630 goto c8mrkcontinue;
1631 if SCM_OPENP (scmptr)
1632 {
1633 int k = SCM_PTOBNUM (scmptr);
1634 if (!(k < scm_numptob))
1635 goto sweeperr;
1636 /* Keep "revealed" ports alive. */
1637 if (scm_revealed_count (scmptr) > 0)
1638 continue;
1639 /* Yes, I really do mean scm_ptobs[k].free */
1640 /* rather than scm_ptobs[k].close. .close */
1641 /* is for explicit CLOSE-PORT by user */
1642 m += (scm_ptobs[k].free) (scmptr);
1643 SCM_SETSTREAM (scmptr, 0);
1644 scm_remove_from_port_table (scmptr);
1645 scm_gc_ports_collected++;
1646 SCM_SETAND_CAR (scmptr, ~SCM_OPN);
1647 }
1648 break;
1649 case scm_tc7_smob:
1650 switch SCM_GCTYP16 (scmptr)
1651 {
1652 case scm_tc_free_cell:
1653 case scm_tc16_real:
1654 if SCM_GC8MARKP (scmptr)
1655 goto c8mrkcontinue;
1656 break;
1657 #ifdef SCM_BIGDIG
1658 case scm_tc16_big:
1659 if SCM_GC8MARKP (scmptr)
1660 goto c8mrkcontinue;
1661 m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
1662 goto freechars;
1663 #endif /* def SCM_BIGDIG */
1664 case scm_tc16_complex:
1665 if SCM_GC8MARKP (scmptr)
1666 goto c8mrkcontinue;
1667 m += 2 * sizeof (double);
1668 goto freechars;
1669 default:
1670 if SCM_GC8MARKP (scmptr)
1671 goto c8mrkcontinue;
1672
1673 {
1674 int k;
1675 k = SCM_SMOBNUM (scmptr);
1676 if (!(k < scm_numsmob))
1677 goto sweeperr;
1678 m += (scm_smobs[k].free) (scmptr);
1679 break;
1680 }
1681 }
1682 break;
1683 default:
1684 sweeperr:
1685 SCM_MISC_ERROR ("unknown type", SCM_EOL);
1686 }
1687 #if 0
1688 if (SCM_FREE_CELL_P (scmptr))
1689 exit (2);
1690 #endif
1691 if (!--left_to_collect)
1692 {
1693 SCM_SETCAR (scmptr, nfreelist);
1694 *freelist->clustertail = scmptr;
1695 freelist->clustertail = SCM_CDRLOC (scmptr);
1696
1697 nfreelist = SCM_EOL;
1698 freelist->collected += span * freelist->cluster_size;
1699 left_to_collect = freelist->cluster_size;
1700 }
1701 else
1702 {
1703 /* Stick the new cell on the front of nfreelist. It's
1704 critical that we mark this cell as freed; otherwise, the
1705 conservative collector might trace it as some other type
1706 of object. */
1707 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
1708 SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
1709 nfreelist = scmptr;
1710 }
1711
1712 continue;
1713 c8mrkcontinue:
1714 SCM_CLRGC8MARK (scmptr);
1715 continue;
1716 cmrkcontinue:
1717 SCM_CLRGCMARK (scmptr);
1718 }
1719 #ifdef GC_FREE_SEGMENTS
1720 if (n == seg_size)
1721 {
1722 register long j;
1723
1724 freelist->heap_size -= seg_size;
1725 free ((char *) scm_heap_table[i].bounds[0]);
1726 scm_heap_table[i].bounds[0] = 0;
1727 for (j = i + 1; j < scm_n_heap_segs; j++)
1728 scm_heap_table[j - 1] = scm_heap_table[j];
1729 scm_n_heap_segs -= 1;
1730 i--; /* We need to scan the segment just moved. */
1731 }
1732 else
1733 #endif /* ifdef GC_FREE_SEGMENTS */
1734 {
1735 /* Update the real freelist pointer to point to the head of
1736 the list of free cells we've built for this segment. */
1737 freelist->cells = nfreelist;
1738 freelist->left_to_collect = left_to_collect;
1739 }
1740
1741 #ifdef GUILE_DEBUG_FREELIST
1742 scm_check_freelist (freelist == &scm_master_freelist
1743 ? scm_freelist
1744 : scm_freelist2);
1745 scm_map_free_list ();
1746 #endif
1747 }
1748
1749 gc_sweep_freelist_finish (&scm_master_freelist);
1750 gc_sweep_freelist_finish (&scm_master_freelist2);
1751
1752 /* When we move to POSIX threads private freelists should probably
1753 be GC-protected instead. */
1754 scm_freelist = SCM_EOL;
1755 scm_freelist2 = SCM_EOL;
1756
1757 scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
1758 scm_gc_yield -= scm_cells_allocated;
1759 scm_mallocated -= m;
1760 scm_gc_malloc_collected = m;
1761 }
1762 #undef FUNC_NAME
1763
1764
1765 \f
1766
1767 /* {Front end to malloc}
1768 *
1769 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1770 * scm_done_free
1771 *
1772 * These functions provide services comparable to malloc, realloc, and
1773 * free. They are for allocating malloced parts of Scheme objects.
1774 * The primary purpose of the front end is to impose calls to gc. */
1775
1776
1777 /* scm_must_malloc
1778 * Return newly malloced storage or throw an error.
1779 *
1780 * The parameter WHAT is a string for error reporting.
1781 * If the threshold scm_mtrigger would be exceeded by this
1782 * allocation, or if the first call to malloc fails,
1783 * garbage collect -- on the presumption that some objects
1784 * using malloced storage may be collected.
1785 *
1786 * The limit scm_mtrigger may be raised by this allocation.
1787 */
1788 void *
1789 scm_must_malloc (scm_sizet size, const char *what)
1790 {
1791 void *ptr;
1792 unsigned long nm = scm_mallocated + size;
1793
1794 if (nm <= scm_mtrigger)
1795 {
1796 SCM_SYSCALL (ptr = malloc (size));
1797 if (NULL != ptr)
1798 {
1799 scm_mallocated = nm;
1800 #ifdef GUILE_DEBUG_MALLOC
1801 scm_malloc_register (ptr, what);
1802 #endif
1803 return ptr;
1804 }
1805 }
1806
1807 scm_igc (what);
1808
1809 nm = scm_mallocated + size;
1810 SCM_SYSCALL (ptr = malloc (size));
1811 if (NULL != ptr)
1812 {
1813 scm_mallocated = nm;
1814 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1815 if (nm > scm_mtrigger)
1816 scm_mtrigger = nm + nm / 2;
1817 else
1818 scm_mtrigger += scm_mtrigger / 2;
1819 }
1820 #ifdef GUILE_DEBUG_MALLOC
1821 scm_malloc_register (ptr, what);
1822 #endif
1823
1824 return ptr;
1825 }
1826
1827 scm_memory_error (what);
1828 }
1829
1830
1831 /* scm_must_realloc
1832 * is similar to scm_must_malloc.
1833 */
1834 void *
1835 scm_must_realloc (void *where,
1836 scm_sizet old_size,
1837 scm_sizet size,
1838 const char *what)
1839 {
1840 void *ptr;
1841 scm_sizet nm = scm_mallocated + size - old_size;
1842
1843 if (nm <= scm_mtrigger)
1844 {
1845 SCM_SYSCALL (ptr = realloc (where, size));
1846 if (NULL != ptr)
1847 {
1848 scm_mallocated = nm;
1849 #ifdef GUILE_DEBUG_MALLOC
1850 scm_malloc_reregister (where, ptr, what);
1851 #endif
1852 return ptr;
1853 }
1854 }
1855
1856 scm_igc (what);
1857
1858 nm = scm_mallocated + size - old_size;
1859 SCM_SYSCALL (ptr = realloc (where, size));
1860 if (NULL != ptr)
1861 {
1862 scm_mallocated = nm;
1863 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1864 if (nm > scm_mtrigger)
1865 scm_mtrigger = nm + nm / 2;
1866 else
1867 scm_mtrigger += scm_mtrigger / 2;
1868 }
1869 #ifdef GUILE_DEBUG_MALLOC
1870 scm_malloc_reregister (where, ptr, what);
1871 #endif
1872 return ptr;
1873 }
1874
1875 scm_memory_error (what);
1876 }
1877
1878
1879 void
1880 scm_must_free (void *obj)
1881 #define FUNC_NAME "scm_must_free"
1882 {
1883 #ifdef GUILE_DEBUG_MALLOC
1884 scm_malloc_unregister (obj);
1885 #endif
1886 if (obj)
1887 free (obj);
1888 else
1889 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
1890 }
1891 #undef FUNC_NAME
1892
1893
1894 /* Announce that there has been some malloc done that will be freed
1895 * during gc. A typical use is for a smob that uses some malloced
1896 * memory but cannot get it from scm_must_malloc (for whatever
1897 * reason). When a new object of this smob is created you call
1898 * scm_done_malloc with the size of the object. When your smob free
1899 * function is called, be sure to include this size in the return
1900 * value.
1901 *
1902 * If you can't actually free the memory in the smob free function,
1903 * for whatever reason (like reference counting), you still can (and
1904 * should) report the amount of memory freed when you actually free it.
1905 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1906 * eh? Or even better, call scm_done_free. */
1907
1908 void
1909 scm_done_malloc (long size)
1910 {
1911 scm_mallocated += size;
1912
1913 if (scm_mallocated > scm_mtrigger)
1914 {
1915 scm_igc ("foreign mallocs");
1916 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1917 {
1918 if (scm_mallocated > scm_mtrigger)
1919 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1920 else
1921 scm_mtrigger += scm_mtrigger / 2;
1922 }
1923 }
1924 }
1925
1926 void
1927 scm_done_free (long size)
1928 {
1929 scm_mallocated -= size;
1930 }
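
/* An illustrative sketch (not compiled) of the protocol described above;
 * the smob type and its tag are hypothetical, and the SCM_RETURN_NEWSMOB
 * macro is assumed from smob.h.  Memory obtained behind Guile's back is
 * reported with scm_done_malloc at creation time, and the same size is
 * included in the return value of the smob's free function.
 */
#if 0
struct my_blob {
  scm_sizet len;
  char *data;
};

static SCM
make_my_blob (scm_sizet len)
{
  struct my_blob *b = (struct my_blob *) malloc (sizeof (*b));
  b->len = len;
  b->data = (char *) malloc (len);      /* not via scm_must_malloc */
  scm_done_malloc (sizeof (*b) + len);  /* tell the GC about it */
  SCM_RETURN_NEWSMOB (my_blob_tag, b);
}

static scm_sizet
free_my_blob (SCM obj)
{
  struct my_blob *b = (struct my_blob *) SCM_CELL_WORD_1 (obj);
  scm_sizet reported = sizeof (*b) + b->len;
  free (b->data);
  free (b);
  return reported;  /* bytes for the GC to subtract from scm_mallocated */
}
#endif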
1931
1932
1933 \f
1934
1935 /* {Heap Segments}
1936 *
1937 * Each heap segment is an array of objects of a particular size.
1938 * Every segment has an associated (possibly shared) freelist.
1939  * A table of segment records holds the upper and
1940  * lower extents of each segment; this is used during the conservative
1941  * phase of gc to identify probable gc roots (because they point
1942  * into valid segments at reasonable offsets). */
1943
1944 /* scm_expmem
1945  * is set to 1 (by make_initial_segment) once the initial heap segment has
1946  * been allocated at its requested size.  If scm_expmem is set to one,
1947  * subsequent segment allocations will allocate segments of size
1948  * SCM_EXPHEAP(scm_heap_size). */
1949 int scm_expmem = 0;
1950
1951 scm_sizet scm_max_segment_size;
1952
1953 /* scm_heap_org
1954 * is the lowest base address of any heap segment.
1955 */
1956 SCM_CELLPTR scm_heap_org;
1957
1958 scm_heap_seg_data_t * scm_heap_table = 0;
1959 static unsigned int heap_segment_table_size = 0;
1960 int scm_n_heap_segs = 0;
1961
1962 /* init_heap_seg
1963  * initializes a new heap segment and returns its size in bytes (0 on failure).
1964  *
1965  * The segment origin, segment size in bytes, and the span of objects
1966  * in cells are input parameters.  The freelist is both input and output.
1967  *
1968  * This function presumes that the scm_heap_table has already been expanded
1969  * to accommodate a new segment record.
1970  */
1971
1972
1973 static scm_sizet
1974 init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
1975 {
1976 register SCM_CELLPTR ptr;
1977 SCM_CELLPTR seg_end;
1978 int new_seg_index;
1979 int n_new_cells;
1980 int span = freelist->span;
1981
1982 if (seg_org == NULL)
1983 return 0;
1984
1985 ptr = CELL_UP (seg_org, span);
1986
1987 /* Compute the ceiling on valid object pointers w/in this segment.
1988 */
1989 seg_end = CELL_DN ((char *) seg_org + size, span);
1990
1991 /* Find the right place and insert the segment record, keeping
1992  * scm_heap_table sorted by segment base address.
1993  */
1994 for (new_seg_index = 0;
1995 ( (new_seg_index < scm_n_heap_segs)
1996 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
1997 new_seg_index++)
1998 ;
1999
2000 {
2001 int i;
2002 for (i = scm_n_heap_segs; i > new_seg_index; --i)
2003 scm_heap_table[i] = scm_heap_table[i - 1];
2004 }
2005
2006 ++scm_n_heap_segs;
2007
2008 scm_heap_table[new_seg_index].span = span;
2009 scm_heap_table[new_seg_index].freelist = freelist;
2010 scm_heap_table[new_seg_index].bounds[0] = ptr;
2011 scm_heap_table[new_seg_index].bounds[1] = seg_end;
2012
2013
2014 /* Compute the least valid object pointer w/in this segment
2015 */
2016 ptr = CELL_UP (ptr, span);
2017
2018
2019 /* Count the cells now available between ptr and seg_end. */
2020 n_new_cells = seg_end - ptr;
2021
2022 freelist->heap_size += n_new_cells;
2023
2024 /* Partition objects in this segment into clusters */
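  /* Cluster layout, as built below: the first cell of each cluster is a
     "spine" whose CAR points at a chain of free cells threaded through
     their CDRs and terminated by SCM_EOL; the spine's CDR links to the
     next cluster's spine, and the last spine is joined onto
     freelist->clusters. */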
2025 {
2026 SCM clusters;
2027 SCM *clusterp = &clusters;
2028 int n_cluster_cells = span * freelist->cluster_size;
2029
2030 while (n_new_cells > span) /* at least one spine + one freecell */
2031 {
2032 /* Determine end of cluster
2033 */
2034 if (n_new_cells >= n_cluster_cells)
2035 {
2036 seg_end = ptr + n_cluster_cells;
2037 n_new_cells -= n_cluster_cells;
2038 }
2039 else
2040 /* [cmm] looks like the segment size doesn't divide cleanly by
2041 cluster size. bad cmm! */
2042 abort();
2043
2044 /* Allocate cluster spine
2045 */
2046 *clusterp = PTR2SCM (ptr);
2047 SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
2048 clusterp = SCM_CDRLOC (*clusterp);
2049 ptr += span;
2050
2051 while (ptr < seg_end)
2052 {
2053 SCM scmptr = PTR2SCM (ptr);
2054
2055 SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
2056 SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (ptr + span));
2057 ptr += span;
2058 }
2059
2060 SCM_SET_FREE_CELL_CDR (PTR2SCM (ptr - span), SCM_EOL);
2061 }
2062
2063 /* Patch up the last cluster pointer in the segment
2064 * to join it to the input freelist.
2065 */
2066 *clusterp = freelist->clusters;
2067 freelist->clusters = clusters;
2068 }
2069
2070 #ifdef DEBUGINFO
2071 fprintf (stderr, "H");
2072 #endif
2073 return size;
2074 }
2075
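/* round_to_cluster_size
 * rounds LEN up to the next multiple of FREELIST's cluster size in bytes,
 * plus the alignment slack needed to guarantee a cell-aligned segment.
 * For example, with 1000-byte clusters a request for 2500 bytes yields
 * 3000 bytes plus slack.
 */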
2076 static scm_sizet
2077 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2078 {
2079 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2080
2081 return
2082 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2083 + ALIGNMENT_SLACK (freelist);
2084 }
2085
2086 static void
2087 alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
2088 #define FUNC_NAME "alloc_some_heap"
2089 {
2090 SCM_CELLPTR ptr;
2091 long len;
2092
2093 if (scm_gc_heap_lock)
2094 {
2095 /* Critical code sections (such as the garbage collector) aren't
2096 * supposed to add heap segments.
2097 */
2098       fprintf (stderr, "alloc_some_heap: Cannot extend locked heap.\n");
2099 abort ();
2100 }
2101
2102 if (scm_n_heap_segs == heap_segment_table_size)
2103 {
2104 /* We have to expand the heap segment table to have room for the new
2105 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2106 * init_heap_seg only if the allocation of the segment itself succeeds.
2107 */
2108 unsigned int new_table_size = scm_n_heap_segs + 1;
2109 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2110 scm_heap_seg_data_t * new_heap_table;
2111
2112 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2113 realloc ((char *)scm_heap_table, size)));
2114 if (!new_heap_table)
2115 {
2116 if (error_policy == abort_on_error)
2117 {
2118 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2119 abort ();
2120 }
2121 else
2122 {
2123 return;
2124 }
2125 }
2126 else
2127 {
2128 scm_heap_table = new_heap_table;
2129 heap_segment_table_size = new_table_size;
2130 }
2131 }
2132
2133
2134 /* Pick a size for the new heap segment.
2135 * The rule for picking the size of a segment is explained in
2136 * gc.h
2137 */
2138 {
2139     /* Ensure that the new segment is predicted to be large enough.
2140 *
2141 * New yield should at least equal GC fraction of new heap size, i.e.
2142 *
2143 * y + dh > f * (h + dh)
2144 *
2145 * y : yield
2146 * f : min yield fraction
2147 * h : heap size
2148 * dh : size of new heap segment
2149 *
2150 * This gives dh > (f * h - y) / (1 - f)
2151 */
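    /* Worked example (added for clarity): min_yield_fraction is a
       percentage, so with f = 40, h = 100000 cells and y = 20000 cells,
       dh > (40*100000 - 100*20000) / (100 - 40), i.e. about 33334 cells.
       The code below divides by (99 - f) rather than (100 - f), erring
       slightly on the large side. */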
2152 int f = freelist->min_yield_fraction;
2153 long h = SCM_HEAP_SIZE;
2154 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2155 len = SCM_EXPHEAP (freelist->heap_size);
2156 #ifdef DEBUGINFO
2157     fprintf (stderr, "(%ld < %ld)", len, min_cells);
2158 #endif
2159 if (len < min_cells)
2160 len = min_cells + freelist->cluster_size;
2161 len *= sizeof (scm_cell);
2162 /* force new sampling */
2163 freelist->collected = LONG_MAX;
2164 }
2165
2166 if (len > scm_max_segment_size)
2167 len = scm_max_segment_size;
2168
2169 {
2170 scm_sizet smallest;
2171
2172 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2173
2174 if (len < smallest)
2175 len = smallest;
2176
2177 /* Allocate with decaying ambition. */
2178 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2179 && (len >= smallest))
2180 {
2181 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
2182 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2183 if (ptr)
2184 {
2185 init_heap_seg (ptr, rounded_len, freelist);
2186 return;
2187 }
2188 len /= 2;
2189 }
2190 }
2191
2192 if (error_policy == abort_on_error)
2193 {
2194 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2195 abort ();
2196 }
2197 }
2198 #undef FUNC_NAME
2199
2200
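/* unhash-name
 * walks every heap segment looking for cons-gloc cells.  Each gloc whose
 * variable name matches NAME (or every gloc, when NAME is #t) has its first
 * word rewritten to the plain symbol, forcing the evaluator to re-memoize
 * the binding the next time it is used. */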
2201 SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
2202 (SCM name),
2203 "")
2204 #define FUNC_NAME s_scm_unhash_name
2205 {
2206 int x;
2207 int bound;
2208 SCM_VALIDATE_SYMBOL (1,name);
2209 SCM_DEFER_INTS;
2210 bound = scm_n_heap_segs;
2211 for (x = 0; x < bound; ++x)
2212 {
2213 SCM_CELLPTR p;
2214 SCM_CELLPTR pbound;
2215 p = scm_heap_table[x].bounds[0];
2216 pbound = scm_heap_table[x].bounds[1];
2217 while (p < pbound)
2218 {
2219 SCM cell = PTR2SCM (p);
2220 if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
2221 {
2222 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2223 * struct cell. See the corresponding comment in scm_gc_mark.
2224 */
2225 scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
2226 SCM gloc_car = SCM_PACK (word0); /* access as gloc */
2227 SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
2228 if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
2229 && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
2230 {
2231 SCM_SET_CELL_OBJECT_0 (cell, name);
2232 }
2233 }
2234 ++p;
2235 }
2236 }
2237 SCM_ALLOW_INTS;
2238 return name;
2239 }
2240 #undef FUNC_NAME
2241
2242
2243 \f
2244 /* {GC Protection Helper Functions}
2245 */
2246
2247
2248 void
2249 scm_remember (SCM *ptr)
2250 { /* empty */ }
2251
2252
2253 /*
2254   These functions prevent premature garbage collection of the
2255   arguments after the first one: mentioning them in a call placed on
2256   the last line of a code block keeps them visible to the conservative
2257   stack-scanning GC until the block returns.
2258   (A usage sketch follows scm_return_first_int below.)
2259   It'd be better to have a nice compiler hint to
2260   aid the conservative stack-scanning GC. --03/09/00 gjb */
2261 SCM
2262 scm_return_first (SCM elt, ...)
2263 {
2264 return elt;
2265 }
2266
2267 int
2268 scm_return_first_int (int i, ...)
2269 {
2270 return i;
2271 }
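
/* Illustrative sketch (not part of the original source): STR is threaded
   through scm_return_first_int so the conservative GC keeps it alive while
   the raw character pointer borrowed from it is still in use.  The helper
   name is hypothetical. */
#if 0
static int
sketch_hash_chars (SCM str)
{
  char *p = SCM_ROCHARS (str);  /* the raw pointer does not protect STR */
  int h = 0, i;
  for (i = 0; i < SCM_LENGTH (str); i++)
    h = h * 31 + p[i];
  return scm_return_first_int (h, str);
}
#endif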
2272
2273
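/* Make OBJ permanently immune to garbage collection by consing it onto the
   scm_permobjs list.  There is no way to undo this; use scm_protect_object
   below when revocable protection is needed. */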
2274 SCM
2275 scm_permanent_object (SCM obj)
2276 {
2277 SCM_REDEFER_INTS;
2278 scm_permobjs = scm_cons (obj, scm_permobjs);
2279 SCM_REALLOW_INTS;
2280 return obj;
2281 }
2282
2283
2284 /* Protect OBJ from the garbage collector.  OBJ will not be freed, even if all
2285    other references are dropped, until the object is unprotected by calling
2286    scm_unprotect_object (OBJ).  Calls to scm_protect/unprotect_object nest,
2287    i.e., the same object may be protected several times, and it must be
2288    unprotected the same number of times before it actually becomes
2289    unprotected.  It is an error to unprotect an object more often than it
2290    has been protected before.  The function scm_protect_object returns OBJ.
2291    (A usage sketch follows scm_unprotect_object below.)
2292 */
2293
2294 /* Implementation note: For every object X, there is a counter which
2295 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2296 */
2297
2298 SCM
2299 scm_protect_object (SCM obj)
2300 {
2301 SCM handle;
2302
2303 /* This critical section barrier will be replaced by a mutex. */
2304 SCM_REDEFER_INTS;
2305
2306 handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
2307 SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));
2308
2309 SCM_REALLOW_INTS;
2310
2311 return obj;
2312 }
2313
2314
2315 /* Remove any protection for OBJ established by a prior call to
2316 scm_protect_object. This function returns OBJ.
2317
2318 See scm_protect_object for more information. */
2319 SCM
2320 scm_unprotect_object (SCM obj)
2321 {
2322 SCM handle;
2323
2324 /* This critical section barrier will be replaced by a mutex. */
2325 SCM_REDEFER_INTS;
2326
2327 handle = scm_hashq_get_handle (scm_protects, obj);
2328
2329 if (SCM_IMP (handle))
2330 {
2331 fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
2332 abort ();
2333 }
2334 else
2335 {
2336 unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
2337 if (count == 0)
2338 scm_hashq_remove_x (scm_protects, obj);
2339 else
2340 SCM_SETCDR (handle, SCM_MAKINUM (count));
2341 }
2342
2343 SCM_REALLOW_INTS;
2344
2345 return obj;
2346 }
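
/* Illustrative sketch (not part of the original source): protection calls
   nest, with the count kept in the scm_protects hash table. */
#if 0
{
  SCM obj = scm_cons (SCM_BOOL_T, SCM_EOL);
  scm_protect_object (obj);     /* count: 1 */
  scm_protect_object (obj);     /* count: 2 */
  scm_unprotect_object (obj);   /* count: 1 -- still protected */
  scm_unprotect_object (obj);   /* count: 0 -- collectable again */
}
#endif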
2347
2348 int terminating;
2349
2350 /* called on process termination. */
2351 #ifdef HAVE_ATEXIT
2352 static void
2353 cleanup (void)
2354 #else
2355 #ifdef HAVE_ON_EXIT
2356 extern int on_exit (void (*procp) (), int arg);
2357
2358 static void
2359 cleanup (int status, void *arg)
2360 #else
2361 #error No known way to set up a cleanup handler on this system.
2362 #endif
2363 #endif
2364 {
2365 terminating = 1;
2366 scm_flush_all_ports ();
2367 }
2368
2369 \f
2370 static int
2371 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
2372 {
2373 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2374 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2375 rounded_size,
2376 freelist))
2377 {
2378 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2379 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2380 rounded_size,
2381 freelist))
2382 return 1;
2383 }
2384 else
2385 scm_expmem = 1;
2386
2387 if (freelist->min_yield_fraction)
2388 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2389 / 100);
2390 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2391
2392 return 0;
2393 }
2394
2395 \f
2396 static void
2397 init_freelist (scm_freelist_t *freelist,
2398 int span,
2399 int cluster_size,
2400 int min_yield)
2401 {
2402 freelist->clusters = SCM_EOL;
2403 freelist->cluster_size = cluster_size + 1;
2404 freelist->left_to_collect = 0;
2405 freelist->clusters_allocated = 0;
2406 freelist->min_yield = 0;
2407 freelist->min_yield_fraction = min_yield;
2408 freelist->span = span;
2409 freelist->collected = 0;
2410 freelist->collected_1 = 0;
2411 freelist->heap_size = 0;
2412 }
2413
2414 int
2415 scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2416 scm_sizet init_heap_size_2, int gc_trigger_2,
2417 scm_sizet max_segment_size)
2418 {
2419 scm_sizet j;
2420
2421 if (!init_heap_size_1)
2422 init_heap_size_1 = scm_default_init_heap_size_1;
2423 if (!init_heap_size_2)
2424 init_heap_size_2 = scm_default_init_heap_size_2;
2425
2426 j = SCM_NUM_PROTECTS;
2427 while (j)
2428 scm_sys_protects[--j] = SCM_BOOL_F;
2429 scm_block_gc = 1;
2430
2431 scm_freelist = SCM_EOL;
2432 scm_freelist2 = SCM_EOL;
2433 init_freelist (&scm_master_freelist,
2434 1, SCM_CLUSTER_SIZE_1,
2435 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
2436 init_freelist (&scm_master_freelist2,
2437 2, SCM_CLUSTER_SIZE_2,
2438 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
2439 scm_max_segment_size
2440 = max_segment_size ? max_segment_size : scm_default_max_segment_size;
2441
2442 scm_expmem = 0;
2443
2444 j = SCM_HEAP_SEG_SIZE;
2445 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2446 scm_heap_table = ((scm_heap_seg_data_t *)
2447 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
2448 heap_segment_table_size = 2;
2449
2450 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2451 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2452 return 1;
2453
2454   /* scm_heap_table[0].bounds[0] can change; do not remove scm_heap_org. */
2455 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2456
2457 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2458 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2459 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2460 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2461 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2462
2463   /* Initialise the port table. */
2464 scm_port_table = (scm_port **)
2465 malloc (sizeof (scm_port *) * scm_port_table_room);
2466 if (!scm_port_table)
2467 return 1;
2468
2469 #ifdef HAVE_ATEXIT
2470 atexit (cleanup);
2471 #else
2472 #ifdef HAVE_ON_EXIT
2473 on_exit (cleanup, 0);
2474 #endif
2475 #endif
2476
2477 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2478 SCM_SETCDR (scm_undefineds, scm_undefineds);
2479
2480 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2481 scm_nullstr = scm_makstr (0L, 0);
2482 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
2483 scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2484 scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
2485 scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2486 scm_stand_in_procs = SCM_EOL;
2487 scm_permobjs = SCM_EOL;
2488 scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
2489 scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2490 scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
2491 #ifdef SCM_BIGDIG
2492 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2493 #endif
2494 return 0;
2495 }
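
/* Illustrative sketch (not part of the original source): a boot-time call
   to scm_init_storage.  Passing 0 for a parameter selects the compiled-in
   default; a non-zero return means the initial allocation failed. */
#if 0
if (scm_init_storage (0, 0, 0, 0, 0))
  {
    fprintf (stderr, "scm_init_storage: unable to allocate initial heap\n");
    exit (1);
  }
#endif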
2496
2497 \f
2498
2499 SCM scm_after_gc_hook;
2500
2501 #if (SCM_DEBUG_DEPRECATED == 0)
2502 static SCM scm_gc_vcell; /* the vcell for gc-thunk. */
2503 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2504 static SCM gc_async;
2505
2506
2507 /* The function gc_async_thunk causes the execution of the after-gc-hook. It
2508 * is run after the gc, as soon as the asynchronous events are handled by the
2509 * evaluator.
2510 */
2511 static SCM
2512 gc_async_thunk (void)
2513 {
2514 scm_c_run_hook (scm_after_gc_hook, SCM_EOL);
2515
2516 #if (SCM_DEBUG_DEPRECATED == 0)
2517
2518 /* The following code will be removed in Guile 1.5. */
2519 if (SCM_NFALSEP (scm_gc_vcell))
2520 {
2521 SCM proc = SCM_CDR (scm_gc_vcell);
2522
2523 if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
2524 scm_apply (proc, SCM_EOL, SCM_EOL);
2525 }
2526
2527 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2528
2529 return SCM_UNSPECIFIED;
2530 }
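
/* Scheme-level usage sketch (not part of the original source):
     (add-hook! after-gc-hook
                (lambda () (display "gc finished\n")))
   The thunk runs via the async machinery shortly after each collection. */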
2531
2532
2533 /* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
2534 * the garbage collection. The only purpose of this function is to mark the
2535 * gc_async (which will eventually lead to the execution of the
2536 * gc_async_thunk).
2537 */
2538 static void *
2539 mark_gc_async (void * hook_data, void *func_data, void *data)
2540 {
2541 scm_system_async_mark (gc_async);
2542 return NULL;
2543 }
2544
2545
2546 void
2547 scm_init_gc ()
2548 {
2549 SCM after_gc_thunk;
2550
2551 scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);
2552
2553 #if (SCM_DEBUG_DEPRECATED == 0)
2554 scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
2555 #endif /* SCM_DEBUG_DEPRECATED == 0 */
2556 /* Dirk:FIXME:: We don't really want a binding here. */
2557 after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
2558 gc_async = scm_system_async (after_gc_thunk);
2559
2560 scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);
2561
2562 #include "libguile/gc.x"
2563 }
2564
2565 /*
2566 Local Variables:
2567 c-file-style: "gnu"
2568 End:
2569 */