* Renamed local variable 'free' to avoid confusion with stdlib's 'free'.
[bpt/guile.git] / libguile / gc.c
1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
6 * any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
17 *
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
20 *
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
26 *
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
29 *
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
37 *
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
41
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
44
45 /* #define DEBUGINFO */
46
47 \f
48 #include <stdio.h>
49 #include "libguile/_scm.h"
50 #include "libguile/eval.h"
51 #include "libguile/stime.h"
52 #include "libguile/stackchk.h"
53 #include "libguile/struct.h"
54 #include "libguile/smob.h"
55 #include "libguile/unif.h"
56 #include "libguile/async.h"
57 #include "libguile/ports.h"
58 #include "libguile/root.h"
59 #include "libguile/strings.h"
60 #include "libguile/vectors.h"
61 #include "libguile/weaks.h"
62 #include "libguile/hashtab.h"
63
64 #include "libguile/validate.h"
65 #include "libguile/gc.h"
66
67 #ifdef GUILE_DEBUG_MALLOC
68 #include "libguile/debug-malloc.h"
69 #endif
70
71 #ifdef HAVE_MALLOC_H
72 #include <malloc.h>
73 #endif
74
75 #ifdef HAVE_UNISTD_H
76 #include <unistd.h>
77 #endif
78
79 #ifdef __STDC__
80 #include <stdarg.h>
81 #define var_start(x, y) va_start(x, y)
82 #else
83 #include <varargs.h>
84 #define var_start(x, y) va_start(x)
85 #endif
86
87 \f
88
89 unsigned int scm_gc_running_p = 0;
90
91 \f
92
93 #if (SCM_DEBUG_CELL_ACCESSES == 1)
94
95 unsigned int scm_debug_cell_accesses_p = 0;
96
97
98 /* Assert that the given object is a valid reference to a valid cell. This
99 * test involves to determine whether the object is a cell pointer, whether
100 * this pointer actually points into a heap segment and whether the cell
101 * pointed to is not a free cell.
102 */
void
scm_assert_cell_valid (SCM cell)
{
  if (scm_debug_cell_accesses_p)
    {
      /* Temporarily clear the flag so that the checks below (which may
       * themselves touch cells) cannot re-enter this function.  */
      scm_debug_cell_accesses_p = 0;  /* disable to avoid recursion */

      if (!scm_cellp (cell))
	{
	  /* Not even a cell pointer into a known heap segment: fatal.  */
	  fprintf (stderr, "scm_assert_cell_valid: Not a cell object: %lx\n", SCM_UNPACK (cell));
	  abort ();
	}
      else if (!scm_gc_running_p)
	{
	  /* Dirk::FIXME:: During garbage collection there occur references to
	     free cells.  This is allright during conservative marking, but
	     should not happen otherwise (I think).  The case of free cells
	     accessed during conservative marking is handled in function
	     scm_mark_locations.  However, there still occur accesses to free
	     cells during gc.  I don't understand why this happens.  If it is
	     a bug and gets fixed, the following test should also work while
	     gc is running.
	   */
	  if (SCM_FREE_CELL_P (cell))
	    {
	      /* A live reference to a cell on the freelist: fatal.  */
	      fprintf (stderr, "scm_assert_cell_valid: Accessing free cell: %lx\n", SCM_UNPACK (cell));
	      abort ();
	    }
	}
      scm_debug_cell_accesses_p = 1;  /* re-enable */
    }
}
135
136
137 SCM_DEFINE (scm_set_debug_cell_accesses_x, "set-debug-cell-accesses!", 1, 0, 0,
138 (SCM flag),
139 "If FLAG is #f, cell access checking is disabled.\n"
140 "If FLAG is #t, cell access checking is enabled.\n"
141 "This procedure only exists because the compile-time flag\n"
142 "SCM_DEBUG_CELL_ACCESSES was set to 1.\n")
143 #define FUNC_NAME s_scm_set_debug_cell_accesses_x
144 {
145 if (SCM_FALSEP (flag)) {
146 scm_debug_cell_accesses_p = 0;
147 } else if (SCM_EQ_P (flag, SCM_BOOL_T)) {
148 scm_debug_cell_accesses_p = 1;
149 } else {
150 SCM_WRONG_TYPE_ARG (1, flag);
151 }
152 return SCM_UNSPECIFIED;
153 }
154 #undef FUNC_NAME
155
156 #endif /* SCM_DEBUG_CELL_ACCESSES == 1 */
157
158 \f
159
160 /* {heap tuning parameters}
161 *
162 * These are parameters for controlling memory allocation. The heap
163 * is the area out of which scm_cons, and object headers are allocated.
164 *
165 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
166 * 64 bit machine. The units of the _SIZE parameters are bytes.
167 * Cons pairs and object headers occupy one heap cell.
168 *
169 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
170 * allocated initially the heap will grow by half its current size
171 * each subsequent time more heap is needed.
172 *
173 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
174 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
175 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
176 * is in scm_init_storage() and alloc_some_heap() in sys.c
177 *
178 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
179 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
180 *
181 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
182 * is needed.
183 *
184 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
185 * trigger a GC.
186 *
187 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
188 * reclaimed by a GC triggered by must_malloc. If less than this is
189 * reclaimed, the trigger threshold is raised. [I don't know what a
190 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
191 * work around a oscillation that caused almost constant GC.]
192 */
193
194 /*
195 * Heap size 45000 and 40% min yield gives quick startup and no extra
196 * heap allocation. Having higher values on min yield may lead to
197 * large heaps, especially if code behaviour is varying its
198 * maximum consumption between different freelists.
199 */
200 int scm_default_init_heap_size_1 = (45000L * sizeof (scm_cell));
201 int scm_default_min_yield_1 = 40;
202 #define SCM_CLUSTER_SIZE_1 2000L
203
204 int scm_default_init_heap_size_2 = (2500L * 2 * sizeof (scm_cell));
205 /* The following value may seem large, but note that if we get to GC at
206 * all, this means that we have a numerically intensive application
207 */
208 int scm_default_min_yield_2 = 40;
209 #define SCM_CLUSTER_SIZE_2 1000L
210
211 int scm_default_max_segment_size = 2097000L;/* a little less (adm) than 2 Mb */
212
213 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
214 #ifdef _QC
215 # define SCM_HEAP_SEG_SIZE 32768L
216 #else
217 # ifdef sequent
218 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
219 # else
220 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
221 # endif
222 #endif
223 /* Make heap grow with factor 1.5 */
224 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
225 #define SCM_INIT_MALLOC_LIMIT 100000
226 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
227
228 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
229 bounds for allocated storage */
230
231 #ifdef PROT386
232 /*in 386 protected mode we must only adjust the offset */
233 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
234 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
235 #else
236 # ifdef _UNICOS
237 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
238 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
239 # else
240 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
241 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
242 # endif /* UNICOS */
243 #endif /* PROT386 */
244 #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
245 #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
246 #define SCM_HEAP_SIZE \
247 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
248 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
249
250
251 \f
/* scm_freelists
 *
 * A "master" freelist tracks the clusters of cells belonging to one
 * cell size (span), while a plain SCM freelist chains the individual
 * free cells currently handed out for allocation.
 */

typedef struct scm_freelist_t {
  /* collected cells */
  SCM cells;
  /* number of cells left to collect before cluster is full */
  unsigned int left_to_collect;
  /* number of clusters which have been allocated */
  unsigned int clusters_allocated;
  /* a list of freelists, each of size cluster_size,
   * except the last one which may be shorter
   */
  SCM clusters;
  SCM *clustertail;
  /* this is the number of objects in each cluster, including the spine cell */
  int cluster_size;
  /* indicates that we should grow heap instead of GC:ing
   */
  int grow_heap_p;
  /* minimum yield on this list in order not to grow the heap
   */
  long min_yield;
  /* defines min_yield as percent of total heap size
   */
  int min_yield_fraction;
  /* number of cells per object on this list */
  int span;
  /* number of collected cells during last GC */
  long collected;
  /* number of collected cells during penultimate GC */
  long collected_1;
  /* total number of cells in heap segments
   * belonging to this list.
   */
  long heap_size;
} scm_freelist_t;

/* 1-cell (single-cell object) freelist and its master.  The trailing
 * heap_size field is implicitly zero-initialized (static storage).  */
SCM scm_freelist = SCM_EOL;
scm_freelist_t scm_master_freelist = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_1, 0, 0, 0, 1, 0, 0
};
/* 2-cell (double-cell object) freelist and its master (span = 2).  */
SCM scm_freelist2 = SCM_EOL;
scm_freelist_t scm_master_freelist2 = {
  SCM_EOL, 0, 0, SCM_EOL, 0, SCM_CLUSTER_SIZE_2, 0, 0, 0, 2, 0, 0
};
298
299 /* scm_mtrigger
300 * is the number of bytes of must_malloc allocation needed to trigger gc.
301 */
302 unsigned long scm_mtrigger;
303
304
305 /* scm_gc_heap_lock
306 * If set, don't expand the heap. Set only during gc, during which no allocation
307 * is supposed to take place anyway.
308 */
309 int scm_gc_heap_lock = 0;
310
311 /* GC Blocking
312 * Don't pause for collection if this is set -- just
313 * expand the heap.
314 */
315 int scm_block_gc = 1;
316
317 /* During collection, this accumulates objects holding
318 * weak references.
319 */
320 SCM scm_weak_vectors;
321
322 /* GC Statistics Keeping
323 */
324 unsigned long scm_cells_allocated = 0;
325 long scm_mallocated = 0;
326 unsigned long scm_gc_cells_collected;
327 unsigned long scm_gc_yield;
328 static unsigned long scm_gc_yield_1 = 0; /* previous GC yield */
329 unsigned long scm_gc_malloc_collected;
330 unsigned long scm_gc_ports_collected;
331 unsigned long scm_gc_rt;
332 unsigned long scm_gc_time_taken = 0;
333
334 SCM_SYMBOL (sym_cells_allocated, "cells-allocated");
335 SCM_SYMBOL (sym_heap_size, "cell-heap-size");
336 SCM_SYMBOL (sym_mallocated, "bytes-malloced");
337 SCM_SYMBOL (sym_mtrigger, "gc-malloc-threshold");
338 SCM_SYMBOL (sym_heap_segments, "cell-heap-segments");
339 SCM_SYMBOL (sym_gc_time_taken, "gc-time-taken");
340
/* Per-segment bookkeeping for one contiguous chunk of cell heap.  */
typedef struct scm_heap_seg_data_t
{
  /* lower and upper bounds of the segment */
  SCM_CELLPTR bounds[2];

  /* address of the head-of-freelist pointer for this segment's cells.
     All segments usually point to the same one, scm_freelist.  */
  scm_freelist_t *freelist;

  /* number of cells per object in this segment */
  int span;
} scm_heap_seg_data_t;
353
354
355
356 static scm_sizet init_heap_seg (SCM_CELLPTR, scm_sizet, scm_freelist_t *);
357
358 typedef enum { return_on_error, abort_on_error } policy_on_error;
359 static void alloc_some_heap (scm_freelist_t *, policy_on_error);
360
361
362 \f
363 /* Debugging functions. */
364
365 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
366
367 /* Return the number of the heap segment containing CELL. */
/* Return the number of the heap segment containing CELL.
 * Aborts (never returns) if CELL lies in no known segment, so the
 * missing trailing return is intentional.  */
static int
which_seg (SCM cell)
{
  int i;

  for (i = 0; i < scm_n_heap_segs; i++)
    if (SCM_PTR_LE (scm_heap_table[i].bounds[0], SCM2PTR (cell))
	&& SCM_PTR_GT (scm_heap_table[i].bounds[1], SCM2PTR (cell)))
      return i;
  fprintf (stderr, "which_seg: can't find segment containing cell %lx\n",
	   SCM_UNPACK (cell));
  abort ();
}
381
382
/* Walk FREELIST and print, per heap segment, how many free cells of
 * MASTER's span were found there.  Cells are assumed to be grouped by
 * segment along the list; a count is flushed each time the segment
 * changes, plus once at the end for the final run.  */
static void
map_free_list (scm_freelist_t *master, SCM freelist)
{
  int last_seg = -1, count = 0;
  SCM f;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f))
    {
      int this_seg = which_seg (f);

      if (this_seg != last_seg)
	{
	  if (last_seg != -1)
	    fprintf (stderr, "  %5d %d-cells in segment %d\n",
		     count, master->span, last_seg);
	  last_seg = this_seg;
	  count = 0;
	}
      count++;
    }
  /* Flush the count for the final segment, if any cells were seen.  */
  if (last_seg != -1)
    fprintf (stderr, "  %5d %d-cells in segment %d\n",
	     count, master->span, last_seg);
}
407
SCM_DEFINE (scm_map_free_list, "map-free-list", 0, 0, 0,
            (),
	    "Print debugging information about the free-list.\n"
	    "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_map_free_list
{
  int i;
  /* First print each segment's span and size (in cells) ...  */
  fprintf (stderr, "%d segments total (%d:%d",
	   scm_n_heap_segs,
	   scm_heap_table[0].span,
	   scm_heap_table[0].bounds[1] - scm_heap_table[0].bounds[0]);
  for (i = 1; i < scm_n_heap_segs; i++)
    fprintf (stderr, ", %d:%d",
	     scm_heap_table[i].span,
	     scm_heap_table[i].bounds[1] - scm_heap_table[i].bounds[0]);
  fprintf (stderr, ")\n");
  /* ... then a per-segment breakdown of both freelists.  */
  map_free_list (&scm_master_freelist, scm_freelist);
  map_free_list (&scm_master_freelist2, scm_freelist2);
  fflush (stderr);

  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
431
432 static int last_cluster;
433 static int last_size;
434
/* Count the cells on FREELIST, aborting if any entry is not marked
 * free.  I is the cluster index (or -1 for the loose freelist).
 * Uses the file statics last_cluster/last_size to print runs of
 * equal-sized clusters compactly ("cluster 3-7  998" style); output
 * for the current run is deliberately deferred until the size
 * changes, so the caller must flush the final run itself.
 * Returns the number of cells counted.  */
static int
free_list_length (char *title, int i, SCM freelist)
{
  SCM ls;
  int n = 0;
  for (ls = freelist; !SCM_NULLP (ls); ls = SCM_FREE_CELL_CDR (ls))
    if (SCM_FREE_CELL_P (ls))
      ++n;
    else
      {
	fprintf (stderr, "bad cell in %s at position %d\n", title, n);
	abort ();
      }
  if (n != last_size)
    {
      /* Size changed: close out the previous run of clusters ...  */
      if (i > 0)
	{
	  if (last_cluster == i - 1)
	    fprintf (stderr, "\t%d\n", last_size);
	  else
	    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
	}
      /* ... and start a new run (clusters print without a newline,
	 the loose freelist prints complete on one line).  */
      if (i >= 0)
	fprintf (stderr, "%s %d", title, i);
      else
	fprintf (stderr, "%s\t%d\n", title, n);
      last_cluster = i;
      last_size = n;
    }
  return n;
}
466
/* Print the length of MASTER's loose FREELIST and of each cluster on
 * its cluster list, then the total object count.  The final fprintf
 * pair flushes the run-length output that free_list_length defers
 * (see that function).  */
static void
free_list_lengths (char *title, scm_freelist_t *master, SCM freelist)
{
  SCM clusters;
  int i = 0, len, n = 0;
  fprintf (stderr, "%s\n\n", title);
  n += free_list_length ("free list", -1, freelist);
  for (clusters = master->clusters;
       SCM_NNULLP (clusters);
       clusters = SCM_CDR (clusters))
    {
      len = free_list_length ("cluster", i++, SCM_CAR (clusters));
      n += len;
    }
  /* Close out the last run of clusters.  */
  if (last_cluster == i - 1)
    fprintf (stderr, "\t%d\n", last_size);
  else
    fprintf (stderr, "-%d\t%d\n", i - 1, last_size);
  fprintf (stderr, "\ntotal %d objects\n\n", n);
}
487
SCM_DEFINE (scm_free_list_length, "free-list-length", 0, 0, 0,
            (),
	    "Print debugging information about the free-list.\n"
	    "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
#define FUNC_NAME s_scm_free_list_length
{
  /* Dump lengths for both cell sizes to stderr.  */
  free_list_lengths ("1-cells", &scm_master_freelist, scm_freelist);
  free_list_lengths ("2-cells", &scm_master_freelist2, scm_freelist2);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
499
500 #endif
501
502 #ifdef GUILE_DEBUG_FREELIST
503
504 /* Number of calls to SCM_NEWCELL since startup. */
505 static unsigned long scm_newcell_count;
506 static unsigned long scm_newcell2_count;
507
508 /* Search freelist for anything that isn't marked as a free cell.
509 Abort if we find something. */
/* Search freelist for anything that isn't marked as a free cell.
   Abort if we find something.  The position of the offending cell
   and the current SCM_NEWCELL count are printed to aid debugging.  */
static void
scm_check_freelist (SCM freelist)
{
  SCM f;
  int i = 0;

  for (f = freelist; !SCM_NULLP (f); f = SCM_FREE_CELL_CDR (f), i++)
    if (!SCM_FREE_CELL_P (f))
      {
	fprintf (stderr, "Bad cell in freelist on newcell %lu: %d'th elt\n",
		 scm_newcell_count, i);
	abort ();
      }
}
524
525 static int scm_debug_check_freelist = 0;
526
SCM_DEFINE (scm_gc_set_debug_check_freelist_x, "gc-set-debug-check-freelist!", 1, 0, 0,
            (SCM flag),
	    "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
	    "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
	    "compile-time flag was selected.\n")
#define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
{
  /* Validates FLAG as a boolean and copies it into the C flag.  */
  SCM_VALIDATE_BOOL_COPY (1, flag, scm_debug_check_freelist);
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
538
539
/* Debug replacement for the SCM_NEWCELL macro: optionally verifies the
 * freelist and forces a GC before every single-cell allocation.  */
SCM
scm_debug_newcell (void)
{
  SCM new;

  scm_newcell_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist);
      /* Force a collection on every allocation to shake out bugs.  */
      scm_gc();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist))
    new = scm_gc_for_newcell (&scm_master_freelist, &scm_freelist);
  else
    {
      new = scm_freelist;
      scm_freelist = SCM_FREE_CELL_CDR (scm_freelist);
      SCM_SET_FREE_CELL_TYPE (new, scm_tc16_allocated);
    }

  return new;
}
565
/* Debug replacement for the SCM_NEWCELL2 macro: same as
 * scm_debug_newcell, but allocates from the 2-cell freelist.  */
SCM
scm_debug_newcell2 (void)
{
  SCM new;

  scm_newcell2_count++;
  if (scm_debug_check_freelist)
    {
      scm_check_freelist (scm_freelist2);
      /* Force a collection on every allocation to shake out bugs.  */
      scm_gc ();
    }

  /* The rest of this is supposed to be identical to the SCM_NEWCELL
     macro.  */
  if (SCM_NULLP (scm_freelist2))
    new = scm_gc_for_newcell (&scm_master_freelist2, &scm_freelist2);
  else
    {
      new = scm_freelist2;
      scm_freelist2 = SCM_FREE_CELL_CDR (scm_freelist2);
      SCM_SET_FREE_CELL_TYPE (new, scm_tc16_allocated);
    }

  return new;
}
591
592 #endif /* GUILE_DEBUG_FREELIST */
593
594 \f
595
/* Number of cells handed out from MASTER's clusters so far.  Each
 * cluster contributes cluster_size - 1 usable cells (the spine cell is
 * excluded).  When the cluster list is empty, the last cluster is
 * still being filled by the sweeper, so the cells not yet collected
 * (left_to_collect) are subtracted.  The result is scaled by span to
 * give a count in single cells.  */
static unsigned long
master_cells_allocated (scm_freelist_t *master)
{
  int objects = master->clusters_allocated * (master->cluster_size - 1);
  if (SCM_NULLP (master->clusters))
    objects -= master->left_to_collect;
  return master->span * objects;
}
604
605 static unsigned long
606 freelist_length (SCM freelist)
607 {
608 int n;
609 for (n = 0; !SCM_NULLP (freelist); freelist = SCM_FREE_CELL_CDR (freelist))
610 ++n;
611 return n;
612 }
613
614 static unsigned long
615 compute_cells_allocated ()
616 {
617 return (scm_cells_allocated
618 + master_cells_allocated (&scm_master_freelist)
619 + master_cells_allocated (&scm_master_freelist2)
620 - scm_master_freelist.span * freelist_length (scm_freelist)
621 - scm_master_freelist2.span * freelist_length (scm_freelist2));
622 }
623
624 /* {Scheme Interface to GC}
625 */
626
SCM_DEFINE (scm_gc_stats, "gc-stats", 0, 0, 0,
            (),
	    "Returns an association list of statistics about Guile's current use of storage. ")
#define FUNC_NAME s_scm_gc_stats
{
  int i;
  int n;
  SCM heap_segs;
  long int local_scm_mtrigger;
  long int local_scm_mallocated;
  long int local_scm_heap_size;
  long int local_scm_cells_allocated;
  long int local_scm_gc_time_taken;
  SCM answer;

  SCM_DEFER_INTS;

  /* Block GC while building the segment list: the conses below could
     otherwise trigger a collection that grows the heap table.  */
  ++scm_block_gc;

 retry:
  heap_segs = SCM_EOL;
  n = scm_n_heap_segs;
  for (i = scm_n_heap_segs; i--; )
    heap_segs = scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[1]),
				    scm_ulong2num ((unsigned long)scm_heap_table[i].bounds[0])),
			  heap_segs);
  /* If consing grew the heap (new segment appeared), rebuild the list
     so it reflects the final segment table.  */
  if (scm_n_heap_segs != n)
    goto retry;

  --scm_block_gc;

  /* Below, we cons to produce the resulting list.  We want a snapshot of
   * the heap situation before consing.
   */
  local_scm_mtrigger = scm_mtrigger;
  local_scm_mallocated = scm_mallocated;
  local_scm_heap_size = SCM_HEAP_SIZE;
  local_scm_cells_allocated = compute_cells_allocated ();
  local_scm_gc_time_taken = scm_gc_time_taken;

  answer = scm_listify (scm_cons (sym_gc_time_taken, scm_ulong2num (local_scm_gc_time_taken)),
			scm_cons (sym_cells_allocated, scm_ulong2num (local_scm_cells_allocated)),
			scm_cons (sym_heap_size, scm_ulong2num (local_scm_heap_size)),
			scm_cons (sym_mallocated, scm_ulong2num (local_scm_mallocated)),
			scm_cons (sym_mtrigger, scm_ulong2num (local_scm_mtrigger)),
			scm_cons (sym_heap_segments, heap_segs),
			SCM_UNDEFINED);
  SCM_ALLOW_INTS;
  return answer;
}
#undef FUNC_NAME
678
679
/* Begin a collection pass: start the GC timer and reset the per-pass
 * statistics counters.  The previous yield is saved so that
 * adjust_min_yield can pick the larger of the last two yields.  */
void
scm_gc_start (const char *what)
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ());
  scm_gc_cells_collected = 0;
  scm_gc_yield_1 = scm_gc_yield;
  /* Cells in use right now; the post-sweep difference is the yield.  */
  scm_gc_yield = (scm_cells_allocated
		  + master_cells_allocated (&scm_master_freelist)
		  + master_cells_allocated (&scm_master_freelist2));
  scm_gc_malloc_collected = 0;
  scm_gc_ports_collected = 0;
}
692
693
/* Finish a collection pass: stop the GC timer and accumulate the
 * elapsed time into the global scm_gc_time_taken.  */
void
scm_gc_end ()
{
  scm_gc_rt = SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt;
  scm_gc_time_taken += scm_gc_rt;
}
700
701
SCM_DEFINE (scm_object_address, "object-address", 1, 0, 0,
            (SCM obj),
	    "Return an integer that for the lifetime of @var{obj} is uniquely\n"
	    "returned by this function for @var{obj}")
#define FUNC_NAME s_scm_object_address
{
  /* The raw bit pattern of the SCM value doubles as its identity.  */
  return scm_ulong2num ((unsigned long) SCM_UNPACK (obj));
}
#undef FUNC_NAME
711
712
SCM_DEFINE (scm_gc, "gc", 0, 0, 0,
           (),
	    "Scans all of SCM objects and reclaims for further use those that are\n"
	    "no longer accessible.")
#define FUNC_NAME s_scm_gc
{
  /* Run a full collection with interrupts deferred.  */
  SCM_DEFER_INTS;
  scm_igc ("call");
  SCM_ALLOW_INTS;
  return SCM_UNSPECIFIED;
}
#undef FUNC_NAME
725
726
727 \f
728 /* {C Interface For When GC is Triggered}
729 */
730
static void
adjust_min_yield (scm_freelist_t *freelist)
{
  /* min yield is adjusted upwards so that next predicted total yield
   * (allocated cells actually freed by GC) becomes
   * `min_yield_fraction' of total heap size.  Note, however, that
   * the absolute value of min_yield will correspond to `collected'
   * on one master (the one which currently is triggering GC).
   *
   * The reason why we look at total yield instead of cells collected
   * on one list is that we want to take other freelists into account.
   * On this freelist, we know that (local) yield = collected cells,
   * but that's probably not the case on the other lists.
   *
   * (We might consider computing a better prediction, for example
   *  by computing an average over multiple GC:s.)
   */
  if (freelist->min_yield_fraction)
    {
      /* Pick largest of last two yields.  */
      int delta = ((SCM_HEAP_SIZE * freelist->min_yield_fraction / 100)
		   - (long) SCM_MAX (scm_gc_yield_1, scm_gc_yield));
#ifdef DEBUGINFO
      fprintf (stderr, " after GC = %d, delta = %d\n",
	       scm_cells_allocated,
	       delta);
#endif
      /* Only ratchet min_yield upwards; a negative delta means the
	 yield already meets the target and min_yield stays put.  */
      if (delta > 0)
	freelist->min_yield += delta;
    }
}
762
763
764 /* When we get POSIX threads support, the master will be global and
765 * common while the freelist will be individual for each thread.
766 */
767
/* Allocate a fresh cluster of cells for *FREELIST from MASTER, growing
 * the heap and/or collecting garbage as required.  On return, *FREELIST
 * is the rest of the cluster and the cluster's first cell is returned
 * (already typed as allocated) for the caller's immediate use.
 * Called by SCM_NEWCELL when the thread's freelist runs empty.  */
SCM
scm_gc_for_newcell (scm_freelist_t *master, SCM *freelist)
{
  SCM cell;
  ++scm_ints_disabled;
  do
    {
      if (SCM_NULLP (master->clusters))
	{
	  if (master->grow_heap_p || scm_block_gc)
	    {
	      /* In order to reduce gc frequency, try to allocate a new heap
	       * segment first, even if gc might find some free cells.  If we
	       * can't obtain a new heap segment, we will try gc later.
	       */
	      master->grow_heap_p = 0;
	      alloc_some_heap (master, return_on_error);
	    }
	  if (SCM_NULLP (master->clusters))
	    {
	      /* The heap was not grown, either because it wasn't scheduled to
	       * grow, or because there was not enough memory available.  In
	       * both cases we have to try gc to get some free cells.
	       */
#ifdef DEBUGINFO
	      fprintf (stderr, "allocated = %d, ",
		       scm_cells_allocated
		       + master_cells_allocated (&scm_master_freelist)
		       + master_cells_allocated (&scm_master_freelist2));
#endif
	      scm_igc ("cells");
	      adjust_min_yield (master);
	      if (SCM_NULLP (master->clusters))
		{
		  /* gc could not free any cells.  Now, we _must_ allocate a
		   * new heap segment, because there is no other possibility
		   * to provide a new cell for the caller.
		   */
		  alloc_some_heap (master, abort_on_error);
		}
	    }
	}
      cell = SCM_CAR (master->clusters);
      master->clusters = SCM_CDR (master->clusters);
      ++master->clusters_allocated;
    }
  /* An empty spine cell can occur (e.g. a zero-length trailing
     cluster); loop until a usable cluster head is obtained.  */
  while (SCM_NULLP (cell));
  --scm_ints_disabled;
  *freelist = SCM_FREE_CELL_CDR (cell);
  SCM_SET_FREE_CELL_TYPE (cell, scm_tc16_allocated);
  return cell;
}
820
821
#if 0
/* This is a support routine which can be used to reserve a cluster
 * for some special use, such as debugging.  It won't be useful until
 * free cells are preserved between garbage collections.
 *
 * Returns the head cell of a freshly allocated cluster with the rest
 * of the cluster chained onto its cdr.  (Return type fixed from
 * `void' to `SCM': the function returns a value, so the old
 * declaration would not compile if this block were ever enabled.)
 */

SCM
scm_alloc_cluster (scm_freelist_t *master)
{
  SCM freelist, cell;
  cell = scm_gc_for_newcell (master, &freelist);
  SCM_SETCDR (cell, freelist);
  return cell;
}
#endif
837
838
839 scm_c_hook_t scm_before_gc_c_hook;
840 scm_c_hook_t scm_before_mark_c_hook;
841 scm_c_hook_t scm_before_sweep_c_hook;
842 scm_c_hook_t scm_after_sweep_c_hook;
843 scm_c_hook_t scm_after_gc_c_hook;
844
845
/* The collector driver: run one full mark/sweep garbage collection.
 * WHAT is a short reason string (e.g. "cells", "call") for debugging.
 * Phases: run before-gc hook, enter the thread-critical section,
 * start statistics, sanity-check global state, clear dead continuation
 * stack entries, mark all roots (C stacks/registers, protected
 * objects, subr table), sweep, then unwind in reverse order.  */
void
scm_igc (const char *what)
{
  int j;

  ++scm_gc_running_p;
  scm_c_hook_run (&scm_before_gc_c_hook, 0);
#ifdef DEBUGINFO
  /* One-character trace of which freelist triggered us:
     "*" = 1-cell list empty, "o" = both empty, "m" = 2-cell empty.  */
  fprintf (stderr,
	   SCM_NULLP (scm_freelist)
	   ? "*"
	   : (SCM_NULLP (scm_freelist2) ? "o" : "m"));
#endif
#ifdef USE_THREADS
  /* During the critical section, only the current thread may run.  */
  SCM_THREAD_CRITICAL_SECTION_START;
#endif

  /* fprintf (stderr, "gc: %s\n", what); */

  scm_gc_start (what);

  /* Bail out if GC is blocked or the stack base is not yet known
     (i.e. the interpreter is not fully initialized).  */
  if (!scm_stack_base || scm_block_gc)
    {
      scm_gc_end ();
      --scm_gc_running_p;
      return;
    }

  if (scm_mallocated < 0)
    /* The byte count of allocated objects has underflowed.  This is
       probably because you forgot to report the sizes of objects you
       have allocated, by calling scm_done_malloc or some such.  When
       the GC freed them, it subtracted their size from
       scm_mallocated, which underflowed.  */
    abort ();

  if (scm_gc_heap_lock)
    /* We've invoked the collector while a GC is already in progress.
       That should never happen.  */
    abort ();

  ++scm_gc_heap_lock;

  /* flush dead entries from the continuation stack */
  {
    int x;
    int bound;
    SCM * elts;
    elts = SCM_VELTS (scm_continuation_stack);
    bound = SCM_LENGTH (scm_continuation_stack);
    x = SCM_INUM (scm_continuation_stack_ptr);
    while (x < bound)
      {
	elts[x] = SCM_BOOL_F;
	++x;
      }
  }

  scm_c_hook_run (&scm_before_mark_c_hook, 0);

#ifndef USE_THREADS

  /* Protect from the C stack.  This must be the first marking
   * done because it provides information about what objects
   * are "in-use" by the C code.   "in-use" objects are those
   * for which the values from SCM_LENGTH and SCM_CHARS must remain
   * usable.  This requirement is stricter than a liveness
   * requirement -- in particular, it constrains the implementation
   * of scm_vector_set_length_x.
   */
  SCM_FLUSH_REGISTER_WINDOWS;
  /* This assumes that all registers are saved into the jmp_buf */
  setjmp (scm_save_regs_gc_mark);
  scm_mark_locations ((SCM_STACKITEM *) scm_save_regs_gc_mark,
		      (   (scm_sizet) (sizeof (SCM_STACKITEM) - 1 +
				       sizeof scm_save_regs_gc_mark)
			  / sizeof (SCM_STACKITEM)));

  {
    scm_sizet stack_len = scm_stack_size (scm_stack_base);
#ifdef SCM_STACK_GROWS_UP
    scm_mark_locations (scm_stack_base, stack_len);
#else
    scm_mark_locations (scm_stack_base - stack_len, stack_len);
#endif
  }

#else /* USE_THREADS */

  /* Mark every thread's stack and registers */
  scm_threads_mark_stacks ();

#endif /* USE_THREADS */

  /* FIXME: insert a phase to un-protect string-data preserved
   * in scm_vector_set_length_x.
   */

  j = SCM_NUM_PROTECTS;
  while (j--)
    scm_gc_mark (scm_sys_protects[j]);

  /* FIXME: we should have a means to register C functions to be run
   * in different phases of GC
   */
  scm_mark_subr_table ();

#ifndef USE_THREADS
  scm_gc_mark (scm_root->handle);
#endif

  scm_c_hook_run (&scm_before_sweep_c_hook, 0);

  scm_gc_sweep ();

  scm_c_hook_run (&scm_after_sweep_c_hook, 0);

  --scm_gc_heap_lock;
  scm_gc_end ();

#ifdef USE_THREADS
  SCM_THREAD_CRITICAL_SECTION_END;
#endif
  scm_c_hook_run (&scm_after_gc_c_hook, 0);
  --scm_gc_running_p;
}
973
974 \f
975
976 /* {Mark/Sweep}
977 */
978
979
980
/* Mark an object precisely.
 *
 * Sets the GC mark bit on P and on everything reachable from it.  Tail
 * positions (usually the cdr) are followed iteratively via the
 * gc_mark_loop / gc_mark_nimp labels so long chains do not deepen the C
 * stack; other fields are marked by recursion.  Cdrs are read with
 * SCM_GCCDR because the mark bit is stored in the cdr word of a cell.
 */
void
scm_gc_mark (SCM p)
#define FUNC_NAME "scm_gc_mark"
{
  register long i;
  register SCM ptr;

  ptr = p;

gc_mark_loop:
  /* Immediate values carry no heap storage and need no marking.  */
  if (SCM_IMP (ptr))
    return;

gc_mark_nimp:
  if (!SCM_CELLP (ptr))
    SCM_MISC_ERROR ("rogue pointer in heap", SCM_EOL);

  switch (SCM_TYP7 (ptr))
    {
    case scm_tcs_cons_nimcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr))) /* SCM_IMP works even with a GC mark */
        {
          ptr = SCM_CAR (ptr);
          goto gc_mark_nimp;
        }
      /* Recurse on the car, iterate down the cdr chain.  */
      scm_gc_mark (SCM_CAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tcs_cons_imcar:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tc7_pws:
      /* Procedure-with-setter: also mark the setter in cell word 2.  */
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      scm_gc_mark (SCM_CELL_OBJECT_2 (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_loop;
    case scm_tcs_cons_gloc:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      {
        /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
         * or a gloc.  If it is a gloc, the cell word #0 of ptr is a pointer
         * to a heap cell.  If it is a struct, the cell word #0 of ptr is a
         * pointer to a struct vtable data region.  The fact that these are
         * accessed in the same way restricts the possibilities to change the
         * data layout of structs or heap cells.
         */
        scm_bits_t word0 = SCM_CELL_WORD_0 (ptr) - scm_tc3_cons_gloc;
        scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
        switch (vtable_data [scm_vtable_index_vcell])
          {
          default:
            {
              /* ptr is a gloc */
              SCM gloc_car = SCM_PACK (word0);
              scm_gc_mark (gloc_car);
              ptr = SCM_GCCDR (ptr);
              goto gc_mark_loop;
            }
          case 1: /* ! */
          case 0: /* ! */
            {
              /* ptr is a struct */
              SCM layout = SCM_PACK (vtable_data [scm_vtable_index_layout]);
              int len = SCM_LENGTH (layout);
              char * fields_desc = SCM_CHARS (layout);
              /* We're using SCM_GCCDR here like STRUCT_DATA, except
                 that it removes the mark */
              scm_bits_t * struct_data = (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (ptr));

              if (vtable_data[scm_struct_i_flags] & SCM_STRUCTF_ENTITY)
                {
                  scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_procedure]));
                  scm_gc_mark (SCM_PACK (struct_data[scm_struct_i_setter]));
                }
              if (len)
                {
                  int x;

                  /* The layout string has two characters per field; 'p'
                     marks a protected (SCM) slot.  The last field is
                     handled separately below because it may be a tail
                     array.  */
                  for (x = 0; x < len - 2; x += 2, ++struct_data)
                    if (fields_desc[x] == 'p')
                      scm_gc_mark (SCM_PACK (*struct_data));
                  if (fields_desc[x] == 'p')
                    {
                      if (SCM_LAYOUT_TAILP (fields_desc[x + 1]))
                        /* Tail array: *struct_data holds the element
                           count, followed by the elements themselves.  */
                        for (x = *struct_data; x; --x)
                          scm_gc_mark (SCM_PACK (*++struct_data));
                      else
                        scm_gc_mark (SCM_PACK (*struct_data));
                    }
                }
              if (vtable_data [scm_vtable_index_vcell] == 0)
                {
                  /* Plain struct: also mark its vtable.  The vcell slot
                     is set to 1 so the vtable counts as visited;
                     scm_gc_sweep resets it to 0.  */
                  vtable_data [scm_vtable_index_vcell] = 1;
                  ptr = SCM_PACK (vtable_data [scm_vtable_index_vtable]);
                  goto gc_mark_loop;
                }
            }
          }
      }
      break;
    case scm_tcs_closures:
      if (SCM_GCMARKP (ptr))
        break;
      SCM_SETGCMARK (ptr);
      if (SCM_IMP (SCM_CDR (ptr)))
        {
          ptr = SCM_CLOSCAR (ptr);
          goto gc_mark_nimp;
        }
      scm_gc_mark (SCM_CLOSCAR (ptr));
      ptr = SCM_GCCDR (ptr);
      goto gc_mark_nimp;
    case scm_tc7_vector:
    case scm_tc7_lvector:
#ifdef CCLO
    case scm_tc7_cclo:
#endif
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      i = SCM_LENGTH (ptr);
      if (i == 0)
        break;
      /* Recurse on elements 1..i-1, then iterate on element 0.  */
      while (--i > 0)
        if (SCM_NIMP (SCM_VELTS (ptr)[i]))
          scm_gc_mark (SCM_VELTS (ptr)[i]);
      ptr = SCM_VELTS (ptr)[0];
      goto gc_mark_loop;
    case scm_tc7_contin:
      if SCM_GC8MARKP
        (ptr) break;
      SCM_SETGC8MARK (ptr);
      /* Conservatively scan the continuation's saved stack copy plus
         its register save area.  */
      if (SCM_VELTS (ptr))
        scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr),
                            (scm_sizet)
                            (SCM_LENGTH (ptr) +
                             (sizeof (SCM_STACKITEM) + -1 +
                              sizeof (scm_contregs)) /
                             sizeof (SCM_STACKITEM)));
      break;
#ifdef HAVE_ARRAYS
    case scm_tc7_bvect:
    case scm_tc7_byvect:
    case scm_tc7_ivect:
    case scm_tc7_uvect:
    case scm_tc7_fvect:
    case scm_tc7_dvect:
    case scm_tc7_cvect:
    case scm_tc7_svect:
#ifdef HAVE_LONG_LONGS
    case scm_tc7_llvect:
#endif
#endif
    case scm_tc7_string:
      /* Uniform vectors and strings hold no SCM references; just set
         the mark bit.  */
      SCM_SETGC8MARK (ptr);
      break;

    case scm_tc7_substring:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      /* Keep the underlying string alive.  */
      ptr = SCM_CDR (ptr);
      goto gc_mark_loop;

    case scm_tc7_wvect:
      if (SCM_GC8MARKP(ptr))
        break;
      /* Chain weak vectors together so the final GC phase can process
         them after all strong references have been marked.  */
      SCM_WVECT_GC_CHAIN (ptr) = scm_weak_vectors;
      scm_weak_vectors = ptr;
      SCM_SETGC8MARK (ptr);
      if (SCM_IS_WHVEC_ANY (ptr))
        {
          int x;
          int len;
          int weak_keys;
          int weak_values;

          len = SCM_LENGTH (ptr);
          weak_keys = SCM_IS_WHVEC (ptr) || SCM_IS_WHVEC_B (ptr);
          weak_values = SCM_IS_WHVEC_V (ptr) || SCM_IS_WHVEC_B (ptr);

          for (x = 0; x < len; ++x)
            {
              SCM alist;
              alist = SCM_VELTS (ptr)[x];

              /* mark everything on the alist except the keys or
               * values, according to weak_values and weak_keys.  */
              while (   SCM_CONSP (alist)
                     && !SCM_GCMARKP (alist)
                     && SCM_CONSP (SCM_CAR (alist)))
                {
                  SCM kvpair;
                  SCM next_alist;

                  kvpair = SCM_CAR (alist);
                  next_alist = SCM_CDR (alist);
                  /*
                   * Do not do this:
                   *   SCM_SETGCMARK (alist);
                   *   SCM_SETGCMARK (kvpair);
                   *
                   * It may be that either the key or value is protected by
                   * an escaped reference to part of the spine of this alist.
                   * If we mark the spine here, and only mark one or neither of the
                   * key and value, they may never be properly marked.
                   * This leads to a horrible situation in which an alist containing
                   * freelist cells is exported.
                   *
                   * So only mark the spines of these arrays last of all marking.
                   * If somebody confuses us by constructing a weak vector
                   * with a circular alist then we are hosed, but at least we
                   * won't prematurely drop table entries.
                   */
                  if (!weak_keys)
                    scm_gc_mark (SCM_CAR (kvpair));
                  if (!weak_values)
                    scm_gc_mark (SCM_GCCDR (kvpair));
                  alist = next_alist;
                }
              if (SCM_NIMP (alist))
                scm_gc_mark (alist);
            }
        }
      break;

    case scm_tc7_msymbol:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      /* Mark the symbol's function slot, iterate on its property
         list.  */
      scm_gc_mark (SCM_SYMBOL_FUNC (ptr));
      ptr = SCM_SYMBOL_PROPS (ptr);
      goto gc_mark_loop;
    case scm_tc7_ssymbol:
      if (SCM_GC8MARKP(ptr))
        break;
      SCM_SETGC8MARK (ptr);
      break;
    case scm_tcs_subrs:
      /* Subrs are statically allocated; nothing to mark.  */
      break;
    case scm_tc7_port:
      i = SCM_PTOBNUM (ptr);
      if (!(i < scm_numptob))
        goto def;
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      if (SCM_PTAB_ENTRY(ptr))
        scm_gc_mark (SCM_PTAB_ENTRY(ptr)->file_name);
      /* Delegate to the port type's own mark function, if any.  */
      if (scm_ptobs[i].mark)
        {
          ptr = (scm_ptobs[i].mark) (ptr);
          goto gc_mark_loop;
        }
      else
        return;
      break;
    case scm_tc7_smob:
      if (SCM_GC8MARKP (ptr))
        break;
      SCM_SETGC8MARK (ptr);
      switch (SCM_GCTYP16 (ptr))
        { /* should be faster than going through scm_smobs */
        case scm_tc_free_cell:
          /* printf("found free_cell %X ", ptr); fflush(stdout); */
        case scm_tc16_allocated:
        case scm_tc16_big:
        case scm_tc16_real:
        case scm_tc16_complex:
          /* These built-in smobs contain no SCM references.  */
          break;
        default:
          i = SCM_SMOBNUM (ptr);
          if (!(i < scm_numsmob))
            goto def;
          /* Delegate to the smob type's own mark function, if any.  */
          if (scm_smobs[i].mark)
            {
              ptr = (scm_smobs[i].mark) (ptr);
              goto gc_mark_loop;
            }
          else
            return;
        }
      break;
    default:
    def:
      SCM_MISC_ERROR ("unknown type", SCM_EOL);
    }
}
#undef FUNC_NAME
1282
1283
/* Mark a Region Conservatively
 *
 * Scan the N words starting at X, and for each value that could be a
 * pointer into one of the heap segments, mark the cell it points at.
 * A binary search over the address-sorted segment table locates the
 * candidate segment; a hit is only marked when it is aligned to the
 * segment's cell span and is not a freelist cell.
 */
void
scm_mark_locations (SCM_STACKITEM x[], scm_sizet n)
{
  unsigned long m;

  for (m = 0; m < n; ++m)
    {
      SCM obj = * (SCM *) &x[m];
      if (SCM_CELLP (obj))
        {
          SCM_CELLPTR ptr = SCM2PTR (obj);
          int i = 0;
          int j = scm_n_heap_segs - 1;
          /* Quick rejection: outside the overall heap range.  */
          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
              && SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
            {
              while (i <= j)
                {
                  int seg_id;
                  seg_id = -1;
                  /* Try a direct hit in segment i or j before
                     narrowing the interval.  */
                  if ((i == j)
                      || SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr))
                    seg_id = i;
                  else if (SCM_PTR_LE (scm_heap_table[j].bounds[0], ptr))
                    seg_id = j;
                  else
                    {
                      int k;
                      k = (i + j) / 2;
                      if (k == i)
                        break;
                      if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr))
                        {
                          j = k;
                          ++i;
                          if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr))
                            continue;
                          else
                            break;
                        }
                      else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr))
                        {
                          i = k;
                          --j;
                          if (SCM_PTR_GT (scm_heap_table[j].bounds[1], ptr))
                            continue;
                          else
                            break;
                        }
                      /* NOTE(review): if neither bound test above holds
                         we fall through with seg_id still -1 and index
                         scm_heap_table[-1] below.  That can only occur
                         with an inconsistent segment table (bounds[0] >
                         bounds[1]) — verify this invariant holds.  */
                    }
                  /* Only mark addresses aligned to the segment's span
                     (single-cell segments accept any cell address).  */
                  if (scm_heap_table[seg_id].span == 1
                      || SCM_DOUBLE_CELLP (obj))
                    {
                      if (!SCM_FREE_CELL_P (obj))
                        scm_gc_mark (obj);
                    }
                  break;
                }
            }
        }
    }
}
1349
1350
1351 /* The function scm_cellp determines whether an SCM value can be regarded as a
1352 * pointer to a cell on the heap. Binary search is used in order to determine
1353 * the heap segment that contains the cell.
1354 */
1355 int
1356 scm_cellp (SCM value)
1357 {
1358 if (SCM_CELLP (value)) {
1359 scm_cell * ptr = SCM2PTR (value);
1360 unsigned int i = 0;
1361 unsigned int j = scm_n_heap_segs - 1;
1362
1363 while (i < j) {
1364 int k = (i + j) / 2;
1365 if (SCM_PTR_GT (scm_heap_table[k].bounds[1], ptr)) {
1366 j = k;
1367 } else if (SCM_PTR_LE (scm_heap_table[k].bounds[0], ptr)) {
1368 i = k + 1;
1369 }
1370 }
1371
1372 if (SCM_PTR_LE (scm_heap_table[i].bounds[0], ptr)
1373 && SCM_PTR_GT (scm_heap_table[i].bounds[1], ptr)
1374 && (scm_heap_table[i].span == 1 || SCM_DOUBLE_CELLP (value))) {
1375 return 1;
1376 } else {
1377 return 0;
1378 }
1379 } else {
1380 return 0;
1381 }
1382 }
1383
1384
1385 static void
1386 gc_sweep_freelist_start (scm_freelist_t *freelist)
1387 {
1388 freelist->cells = SCM_EOL;
1389 freelist->left_to_collect = freelist->cluster_size;
1390 freelist->clusters_allocated = 0;
1391 freelist->clusters = SCM_EOL;
1392 freelist->clustertail = &freelist->clusters;
1393 freelist->collected_1 = freelist->collected;
1394 freelist->collected = 0;
1395 }
1396
1397 static void
1398 gc_sweep_freelist_finish (scm_freelist_t *freelist)
1399 {
1400 int collected;
1401 *freelist->clustertail = freelist->cells;
1402 if (!SCM_NULLP (freelist->cells))
1403 {
1404 SCM c = freelist->cells;
1405 SCM_SETCAR (c, SCM_CDR (c));
1406 SCM_SETCDR (c, SCM_EOL);
1407 freelist->collected +=
1408 freelist->span * (freelist->cluster_size - freelist->left_to_collect);
1409 }
1410 scm_gc_cells_collected += freelist->collected;
1411
1412 /* Although freelist->min_yield is used to test freelist->collected
1413 * (which is the local GC yield for freelist), it is adjusted so
1414 * that *total* yield is freelist->min_yield_fraction of total heap
1415 * size. This means that a too low yield is compensated by more
1416 * heap on the list which is currently doing most work, which is
1417 * just what we want.
1418 */
1419 collected = SCM_MAX (freelist->collected_1, freelist->collected);
1420 freelist->grow_heap_p = (collected < freelist->min_yield);
1421 }
1422
/* Sweep every heap segment: return unmarked cells to the freelist of
 * their segment (grouped into clusters), clear the mark bit on
 * surviving cells, and release the malloced storage attached to dead
 * objects.  Updates the global allocation statistics afterwards.
 * Runs during GC with the heap locked, so no allocation happens here.
 */
void
scm_gc_sweep ()
#define FUNC_NAME "scm_gc_sweep"
{
  register SCM_CELLPTR ptr;
  register SCM nfreelist;
  register scm_freelist_t *freelist;
  register long m;              /* bytes of malloced storage reclaimed */
  register int span;
  long i;
  scm_sizet seg_size;

  m = 0;

  gc_sweep_freelist_start (&scm_master_freelist);
  gc_sweep_freelist_start (&scm_master_freelist2);

  for (i = 0; i < scm_n_heap_segs; i++)
    {
      register unsigned int left_to_collect;
      register scm_sizet j;

      /* Unmarked cells go onto the front of the freelist this heap
         segment points to.  Rather than updating the real freelist
         pointer as we go along, we accumulate the new head in
         nfreelist.  Then, if it turns out that the entire segment is
         free, we free (i.e., malloc's free) the whole segment, and
         simply don't assign nfreelist back into the real freelist. */
      freelist = scm_heap_table[i].freelist;
      nfreelist = freelist->cells;
      left_to_collect = freelist->left_to_collect;
      span = scm_heap_table[i].span;

      ptr = CELL_UP (scm_heap_table[i].bounds[0], span);
      seg_size = CELL_DN (scm_heap_table[i].bounds[1], span) - ptr;
      for (j = seg_size + span; j -= span; ptr += span)
        {
          SCM scmptr = PTR2SCM (ptr);

          /* Per-type handling: marked objects jump to cmrkcontinue /
             c8mrkcontinue (which clear the mark); unmarked objects have
             their external storage freed here and then fall through to
             the freelist-threading code at the bottom of the loop.  */
          switch SCM_TYP7 (scmptr)
            {
            case scm_tcs_cons_gloc:
              {
                /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
                 * struct or a gloc.  See the corresponding comment in
                 * scm_gc_mark.
                 */
                scm_bits_t word0 = SCM_CELL_WORD_0 (scmptr) - scm_tc3_cons_gloc;
                scm_bits_t * vtable_data = (scm_bits_t *) word0; /* access as struct */
                if (SCM_GCMARKP (scmptr))
                  {
                    /* Reset the "vtable visited" flag that scm_gc_mark
                       set to 1 while marking a live struct.  */
                    if (vtable_data [scm_vtable_index_vcell] == 1)
                      vtable_data [scm_vtable_index_vcell] = 0;
                    goto cmrkcontinue;
                  }
                else
                  {
                    if (vtable_data [scm_vtable_index_vcell] == 0
                        || vtable_data [scm_vtable_index_vcell] == 1)
                      {
                        /* A dead struct: invoke its type's free
                           function on the struct data.  */
                        scm_struct_free_t free_struct_data
                          = (scm_struct_free_t) vtable_data[scm_struct_i_free];
                        m += free_struct_data (vtable_data, (scm_bits_t *) SCM_UNPACK (SCM_GCCDR (scmptr)));
                      }
                  }
              }
              break;
            case scm_tcs_cons_imcar:
            case scm_tcs_cons_nimcar:
            case scm_tcs_closures:
            case scm_tc7_pws:
              if (SCM_GCMARKP (scmptr))
                goto cmrkcontinue;
              break;
            case scm_tc7_wvect:
              if (SCM_GC8MARKP (scmptr))
                {
                  goto c8mrkcontinue;
                }
              else
                {
                  /* The element block was allocated with two extra
                     leading words; free from its true base.  */
                  m += (2 + SCM_LENGTH (scmptr)) * sizeof (SCM);
                  scm_must_free ((char *)(SCM_VELTS (scmptr) - 2));
                  break;
                }

            case scm_tc7_vector:
            case scm_tc7_lvector:
#ifdef CCLO
            case scm_tc7_cclo:
#endif
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;

              m += (SCM_LENGTH (scmptr) * sizeof (SCM));
            freechars:
              /* Shared exit for all types whose external storage is a
                 single block reachable via SCM_CHARS.  */
              scm_must_free (SCM_CHARS (scmptr));
              /*  SCM_SETCHARS(scmptr, 0);*/
              break;
#ifdef HAVE_ARRAYS
            case scm_tc7_bvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += sizeof (long) * ((SCM_HUGE_LENGTH (scmptr) + SCM_LONG_BIT - 1) / SCM_LONG_BIT);
              goto freechars;
            case scm_tc7_byvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (char);
              goto freechars;
            case scm_tc7_ivect:
            case scm_tc7_uvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (long);
              goto freechars;
            case scm_tc7_svect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (short);
              goto freechars;
#ifdef HAVE_LONG_LONGS
            case scm_tc7_llvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (long_long);
              goto freechars;
#endif
            case scm_tc7_fvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (float);
              goto freechars;
            case scm_tc7_dvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * sizeof (double);
              goto freechars;
            case scm_tc7_cvect:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) * 2 * sizeof (double);
              goto freechars;
#endif
            case scm_tc7_substring:
              /* Substrings own no storage of their own.  */
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;
              break;
            case scm_tc7_string:
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;
              m += SCM_HUGE_LENGTH (scmptr) + 1;
              goto freechars;
            case scm_tc7_msymbol:
              if (SCM_GC8MARKP (scmptr))
                goto c8mrkcontinue;
              m += (SCM_LENGTH (scmptr) + 1
                    + (SCM_CHARS (scmptr) - (char *) SCM_SLOTS (scmptr)));
              scm_must_free ((char *)SCM_SLOTS (scmptr));
              break;
            case scm_tc7_contin:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              m += SCM_LENGTH (scmptr) * sizeof (SCM_STACKITEM) + sizeof (scm_contregs);
              if (SCM_VELTS (scmptr))
                goto freechars;
              /* NOTE(review): when SCM_VELTS is null, control falls
                 through into the ssymbol case below.  That appears
                 intentional (nothing to free; the mark bit was already
                 checked) — but verify.  */
            case scm_tc7_ssymbol:
              if SCM_GC8MARKP(scmptr)
                goto c8mrkcontinue;
              break;
            case scm_tcs_subrs:
              /* Subrs are statically allocated and never collected.  */
              continue;
            case scm_tc7_port:
              if SCM_GC8MARKP (scmptr)
                goto c8mrkcontinue;
              if SCM_OPENP (scmptr)
                {
                  int k = SCM_PTOBNUM (scmptr);
                  if (!(k < scm_numptob))
                    goto sweeperr;
                  /* Keep "revealed" ports alive.  */
                  if (scm_revealed_count (scmptr) > 0)
                    continue;
                  /* Yes, I really do mean scm_ptobs[k].free */
                  /* rather than ftobs[k].close.  .close */
                  /* is for explicit CLOSE-PORT by user */
                  m += (scm_ptobs[k].free) (scmptr);
                  SCM_SETSTREAM (scmptr, 0);
                  scm_remove_from_port_table (scmptr);
                  scm_gc_ports_collected++;
                  SCM_SETAND_CAR (scmptr, ~SCM_OPN);
                }
              break;
            case scm_tc7_smob:
              switch SCM_GCTYP16 (scmptr)
                {
                case scm_tc_free_cell:
                case scm_tc16_real:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  break;
#ifdef SCM_BIGDIG
                case scm_tc16_big:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  m += (SCM_NUMDIGS (scmptr) * SCM_BITSPERDIG / SCM_CHAR_BIT);
                  goto freechars;
#endif /* def SCM_BIGDIG */
                case scm_tc16_complex:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;
                  m += 2 * sizeof (double);
                  goto freechars;
                default:
                  if SCM_GC8MARKP (scmptr)
                    goto c8mrkcontinue;

                  {
                    int k;
                    k = SCM_SMOBNUM (scmptr);
                    if (!(k < scm_numsmob))
                      goto sweeperr;
                    /* Delegate to the smob type's free function.  */
                    m += (scm_smobs[k].free) (scmptr);
                    break;
                  }
                }
              break;
            default:
            sweeperr:
              SCM_MISC_ERROR ("unknown type", SCM_EOL);
            }
#if 0
          if (SCM_FREE_CELL_P (scmptr))
            exit (2);
#endif
          /* The cell is dead: thread it onto nfreelist, closing off a
             full cluster whenever left_to_collect reaches zero.  */
          if (!--left_to_collect)
            {
              SCM_SETCAR (scmptr, nfreelist);
              *freelist->clustertail = scmptr;
              freelist->clustertail = SCM_CDRLOC (scmptr);

              nfreelist = SCM_EOL;
              freelist->collected += span * freelist->cluster_size;
              left_to_collect = freelist->cluster_size;
            }
          else
            {
              /* Stick the new cell on the front of nfreelist.  It's
                 critical that we mark this cell as freed; otherwise, the
                 conservative collector might trace it as some other type
                 of object.  */
              SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
              SCM_SET_FREE_CELL_CDR (scmptr, nfreelist);
              nfreelist = scmptr;
            }

          continue;
        c8mrkcontinue:
          SCM_CLRGC8MARK (scmptr);
          continue;
        cmrkcontinue:
          SCM_CLRGCMARK (scmptr);
        }
#ifdef GC_FREE_SEGMENTS
      /* NOTE(review): 'n' is not declared anywhere in this function, so
         this GC_FREE_SEGMENTS path cannot compile as written — verify
         against a revision where the per-segment free-cell counter
         still existed.  */
      if (n == seg_size)
        {
          register long j;

          freelist->heap_size -= seg_size;
          free ((char *) scm_heap_table[i].bounds[0]);
          scm_heap_table[i].bounds[0] = 0;
          for (j = i + 1; j < scm_n_heap_segs; j++)
            scm_heap_table[j - 1] = scm_heap_table[j];
          scm_n_heap_segs -= 1;
          i--;    /* We need to scan the segment just moved.  */
        }
      else
#endif /* ifdef GC_FREE_SEGMENTS */
        {
          /* Update the real freelist pointer to point to the head of
             the list of free cells we've built for this segment.  */
          freelist->cells = nfreelist;
          freelist->left_to_collect = left_to_collect;
        }

#ifdef GUILE_DEBUG_FREELIST
      scm_check_freelist (freelist == &scm_master_freelist
                          ? scm_freelist
                          : scm_freelist2);
      scm_map_free_list ();
#endif
    }

  gc_sweep_freelist_finish (&scm_master_freelist);
  gc_sweep_freelist_finish (&scm_master_freelist2);

  /* When we move to POSIX threads private freelists should probably
     be GC-protected instead. */
  scm_freelist = SCM_EOL;
  scm_freelist2 = SCM_EOL;

  scm_cells_allocated = (SCM_HEAP_SIZE - scm_gc_cells_collected);
  scm_gc_yield -= scm_cells_allocated;
  scm_mallocated -= m;
  scm_gc_malloc_collected = m;
}
#undef FUNC_NAME
1730
1731
1732 \f
1733
1734 /* {Front end to malloc}
1735 *
1736 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc,
1737 * scm_done_free
1738 *
 * These functions provide services comparable to malloc, realloc, and
1740 * free. They are for allocating malloced parts of scheme objects.
1741 * The primary purpose of the front end is to impose calls to gc. */
1742
1743
1744 /* scm_must_malloc
1745 * Return newly malloced storage or throw an error.
1746 *
1747 * The parameter WHAT is a string for error reporting.
1748 * If the threshold scm_mtrigger will be passed by this
1749 * allocation, or if the first call to malloc fails,
1750 * garbage collect -- on the presumption that some objects
1751 * using malloced storage may be collected.
1752 *
1753 * The limit scm_mtrigger may be raised by this allocation.
1754 */
1755 void *
1756 scm_must_malloc (scm_sizet size, const char *what)
1757 {
1758 void *ptr;
1759 unsigned long nm = scm_mallocated + size;
1760
1761 if (nm <= scm_mtrigger)
1762 {
1763 SCM_SYSCALL (ptr = malloc (size));
1764 if (NULL != ptr)
1765 {
1766 scm_mallocated = nm;
1767 #ifdef GUILE_DEBUG_MALLOC
1768 scm_malloc_register (ptr, what);
1769 #endif
1770 return ptr;
1771 }
1772 }
1773
1774 scm_igc (what);
1775
1776 nm = scm_mallocated + size;
1777 SCM_SYSCALL (ptr = malloc (size));
1778 if (NULL != ptr)
1779 {
1780 scm_mallocated = nm;
1781 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1782 if (nm > scm_mtrigger)
1783 scm_mtrigger = nm + nm / 2;
1784 else
1785 scm_mtrigger += scm_mtrigger / 2;
1786 }
1787 #ifdef GUILE_DEBUG_MALLOC
1788 scm_malloc_register (ptr, what);
1789 #endif
1790
1791 return ptr;
1792 }
1793
1794 scm_memory_error (what);
1795 }
1796
1797
1798 /* scm_must_realloc
1799 * is similar to scm_must_malloc.
1800 */
1801 void *
1802 scm_must_realloc (void *where,
1803 scm_sizet old_size,
1804 scm_sizet size,
1805 const char *what)
1806 {
1807 void *ptr;
1808 scm_sizet nm = scm_mallocated + size - old_size;
1809
1810 if (nm <= scm_mtrigger)
1811 {
1812 SCM_SYSCALL (ptr = realloc (where, size));
1813 if (NULL != ptr)
1814 {
1815 scm_mallocated = nm;
1816 #ifdef GUILE_DEBUG_MALLOC
1817 scm_malloc_reregister (where, ptr, what);
1818 #endif
1819 return ptr;
1820 }
1821 }
1822
1823 scm_igc (what);
1824
1825 nm = scm_mallocated + size - old_size;
1826 SCM_SYSCALL (ptr = realloc (where, size));
1827 if (NULL != ptr)
1828 {
1829 scm_mallocated = nm;
1830 if (nm > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS) {
1831 if (nm > scm_mtrigger)
1832 scm_mtrigger = nm + nm / 2;
1833 else
1834 scm_mtrigger += scm_mtrigger / 2;
1835 }
1836 #ifdef GUILE_DEBUG_MALLOC
1837 scm_malloc_reregister (where, ptr, what);
1838 #endif
1839 return ptr;
1840 }
1841
1842 scm_memory_error (what);
1843 }
1844
1845
1846 void
1847 scm_must_free (void *obj)
1848 #define FUNC_NAME "scm_must_free"
1849 {
1850 #ifdef GUILE_DEBUG_MALLOC
1851 scm_malloc_unregister (obj);
1852 #endif
1853 if (obj)
1854 free (obj);
1855 else
1856 SCM_MISC_ERROR ("freeing NULL pointer", SCM_EOL);
1857 }
1858 #undef FUNC_NAME
1859
1860
1861 /* Announce that there has been some malloc done that will be freed
1862 * during gc. A typical use is for a smob that uses some malloced
1863 * memory but can not get it from scm_must_malloc (for whatever
1864 * reason). When a new object of this smob is created you call
1865 * scm_done_malloc with the size of the object. When your smob free
1866 * function is called, be sure to include this size in the return
1867 * value.
1868 *
1869 * If you can't actually free the memory in the smob free function,
1870 * for whatever reason (like reference counting), you still can (and
1871 * should) report the amount of memory freed when you actually free it.
1872 * Do it by calling scm_done_malloc with the _negated_ size. Clever,
1873 * eh? Or even better, call scm_done_free. */
1874
1875 void
1876 scm_done_malloc (long size)
1877 {
1878 scm_mallocated += size;
1879
1880 if (scm_mallocated > scm_mtrigger)
1881 {
1882 scm_igc ("foreign mallocs");
1883 if (scm_mallocated > scm_mtrigger - SCM_MTRIGGER_HYSTERESIS)
1884 {
1885 if (scm_mallocated > scm_mtrigger)
1886 scm_mtrigger = scm_mallocated + scm_mallocated / 2;
1887 else
1888 scm_mtrigger += scm_mtrigger / 2;
1889 }
1890 }
1891 }
1892
/* Record that SIZE bytes of externally-tracked malloced storage have
 * been freed; the inverse of scm_done_malloc (see its comment).  */
void
scm_done_free (long size)
{
  scm_mallocated -= size;
}
1898
1899
1900 \f
1901
1902 /* {Heap Segments}
1903 *
1904 * Each heap segment is an array of objects of a particular size.
1905 * Every segment has an associated (possibly shared) freelist.
1906 * A table of segment records is kept that records the upper and
1907 * lower extents of the segment; this is used during the conservative
 * phase of gc to identify probable gc roots (because they point
1909 * into valid segments at reasonable offsets). */
1910
/* scm_expmem
 * is true if the first segment was smaller than INIT_HEAP_SEG.
 * If scm_expmem is set to one, subsequent segment allocations will
 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
 */
int scm_expmem = 0;

/* Upper bound (in bytes) on the size of any single heap segment.  */
scm_sizet scm_max_segment_size;

/* scm_heap_org
 * is the lowest base address of any heap segment.
 */
SCM_CELLPTR scm_heap_org;

/* Table of heap segment records, kept sorted by base address.  The
 * first scm_n_heap_segs entries are in use; the table has room for
 * heap_segment_table_size entries before it must be grown (see
 * alloc_some_heap).  */
scm_heap_seg_data_t * scm_heap_table = 0;
static unsigned int heap_segment_table_size = 0;
int scm_n_heap_segs = 0;
1928
/* init_heap_seg
 * initializes a new heap segment and returns the size of the segment
 * in bytes (0 if the segment origin is NULL).
 *
 * The segment origin, segment size in bytes, and the span of objects
 * in cells are input parameters.  The freelist is both input and output.
 *
 * This function presumes that the scm_heap_table has already been expanded
 * to accommodate a new segment record.
 */
1937 */
1938
1939
static scm_sizet
init_heap_seg (SCM_CELLPTR seg_org, scm_sizet size, scm_freelist_t *freelist)
{
  register SCM_CELLPTR ptr;
  SCM_CELLPTR seg_end;
  int new_seg_index;
  int n_new_cells;
  int span = freelist->span;

  if (seg_org == NULL)
    return 0;

  ptr = CELL_UP (seg_org, span);

  /* Compute the ceiling on valid object pointers w/in this segment.
   */
  seg_end = CELL_DN ((char *) seg_org + size, span);

  /* Find the right place and insert the segment record.  The table is
   * kept sorted by base address so the conservative marker can
   * binary-search it.
   */
  for (new_seg_index = 0;
       ( (new_seg_index < scm_n_heap_segs)
	 && SCM_PTR_LE (scm_heap_table[new_seg_index].bounds[0], seg_org));
       new_seg_index++)
    ;

  {
    int i;
    /* Shift the following records up to make room for the new one.  */
    for (i = scm_n_heap_segs; i > new_seg_index; --i)
      scm_heap_table[i] = scm_heap_table[i - 1];
  }

  ++scm_n_heap_segs;

  scm_heap_table[new_seg_index].span = span;
  scm_heap_table[new_seg_index].freelist = freelist;
  scm_heap_table[new_seg_index].bounds[0] = ptr;
  scm_heap_table[new_seg_index].bounds[1] = seg_end;


  /* Compute the least valid object pointer w/in this segment
   */
  ptr = CELL_UP (ptr, span);


  /*n_new_cells*/
  n_new_cells = seg_end - ptr;

  freelist->heap_size += n_new_cells;

  /* Partition objects in this segment into clusters */
  {
    SCM clusters;
    SCM *clusterp = &clusters;
    int n_cluster_cells = span * freelist->cluster_size;

    while (n_new_cells > span) /* at least one spine + one freecell */
      {
	/* Determine end of cluster
	 */
	if (n_new_cells >= n_cluster_cells)
	  {
	    seg_end = ptr + n_cluster_cells;
	    n_new_cells -= n_cluster_cells;
	  }
	else
	  /* [cmm] looks like the segment size doesn't divide cleanly by
	     cluster size. bad cmm! */
	  abort();

	/* Allocate cluster spine
	 */
	*clusterp = PTR2SCM (ptr);
	SCM_SETCAR (*clusterp, PTR2SCM (ptr + span));
	clusterp = SCM_CDRLOC (*clusterp);
	ptr += span;

	/* Thread the remaining cells of the cluster into a chain of
	   free cells.  */
	while (ptr < seg_end)
	  {
	    SCM scmptr = PTR2SCM (ptr);

	    SCM_SET_CELL_TYPE (scmptr, scm_tc_free_cell);
	    SCM_SET_FREE_CELL_CDR (scmptr, PTR2SCM (ptr + span));
	    ptr += span;
	  }

	/* Terminate the chain of this cluster.  */
	SCM_SET_FREE_CELL_CDR (PTR2SCM (ptr - span), SCM_EOL);
      }

    /* Patch up the last cluster pointer in the segment
     * to join it to the input freelist.
     */
    *clusterp = freelist->clusters;
    freelist->clusters = clusters;
  }

#ifdef DEBUGINFO
  fprintf (stderr, "H");
#endif
  return size;
}
2042
2043 static scm_sizet
2044 round_to_cluster_size (scm_freelist_t *freelist, scm_sizet len)
2045 {
2046 scm_sizet cluster_size_in_bytes = CLUSTER_SIZE_IN_BYTES (freelist);
2047
2048 return
2049 (len + cluster_size_in_bytes - 1) / cluster_size_in_bytes * cluster_size_in_bytes
2050 + ALIGNMENT_SLACK (freelist);
2051 }
2052
2053 static void
2054 alloc_some_heap (scm_freelist_t *freelist, policy_on_error error_policy)
2055 #define FUNC_NAME "alloc_some_heap"
2056 {
2057 SCM_CELLPTR ptr;
2058 long len;
2059
2060 if (scm_gc_heap_lock)
2061 {
2062 /* Critical code sections (such as the garbage collector) aren't
2063 * supposed to add heap segments.
2064 */
2065 fprintf (stderr, "alloc_some_heap: Can not extend locked heap.\n");
2066 abort ();
2067 }
2068
2069 if (scm_n_heap_segs == heap_segment_table_size)
2070 {
2071 /* We have to expand the heap segment table to have room for the new
2072 * segment. Do not yet increment scm_n_heap_segs -- that is done by
2073 * init_heap_seg only if the allocation of the segment itself succeeds.
2074 */
2075 unsigned int new_table_size = scm_n_heap_segs + 1;
2076 size_t size = new_table_size * sizeof (scm_heap_seg_data_t);
2077 scm_heap_seg_data_t * new_heap_table;
2078
2079 SCM_SYSCALL (new_heap_table = ((scm_heap_seg_data_t *)
2080 realloc ((char *)scm_heap_table, size)));
2081 if (!new_heap_table)
2082 {
2083 if (error_policy == abort_on_error)
2084 {
2085 fprintf (stderr, "alloc_some_heap: Could not grow heap segment table.\n");
2086 abort ();
2087 }
2088 else
2089 {
2090 return;
2091 }
2092 }
2093 else
2094 {
2095 scm_heap_table = new_heap_table;
2096 heap_segment_table_size = new_table_size;
2097 }
2098 }
2099
2100
2101 /* Pick a size for the new heap segment.
2102 * The rule for picking the size of a segment is explained in
2103 * gc.h
2104 */
2105 {
2106 /* Assure that the new segment is predicted to be large enough.
2107 *
2108 * New yield should at least equal GC fraction of new heap size, i.e.
2109 *
2110 * y + dh > f * (h + dh)
2111 *
2112 * y : yield
2113 * f : min yield fraction
2114 * h : heap size
2115 * dh : size of new heap segment
2116 *
2117 * This gives dh > (f * h - y) / (1 - f)
2118 */
2119 int f = freelist->min_yield_fraction;
2120 long h = SCM_HEAP_SIZE;
2121 long min_cells = (f * h - 100 * (long) scm_gc_yield) / (99 - f);
2122 len = SCM_EXPHEAP (freelist->heap_size);
2123 #ifdef DEBUGINFO
2124 fprintf (stderr, "(%d < %d)", len, min_cells);
2125 #endif
2126 if (len < min_cells)
2127 len = min_cells + freelist->cluster_size;
2128 len *= sizeof (scm_cell);
2129 /* force new sampling */
2130 freelist->collected = LONG_MAX;
2131 }
2132
2133 if (len > scm_max_segment_size)
2134 len = scm_max_segment_size;
2135
2136 {
2137 scm_sizet smallest;
2138
2139 smallest = CLUSTER_SIZE_IN_BYTES (freelist);
2140
2141 if (len < smallest)
2142 len = smallest;
2143
2144 /* Allocate with decaying ambition. */
2145 while ((len >= SCM_MIN_HEAP_SEG_SIZE)
2146 && (len >= smallest))
2147 {
2148 scm_sizet rounded_len = round_to_cluster_size (freelist, len);
2149 SCM_SYSCALL (ptr = (SCM_CELLPTR) malloc (rounded_len));
2150 if (ptr)
2151 {
2152 init_heap_seg (ptr, rounded_len, freelist);
2153 return;
2154 }
2155 len /= 2;
2156 }
2157 }
2158
2159 if (error_policy == abort_on_error)
2160 {
2161 fprintf (stderr, "alloc_some_heap: Could not grow heap.\n");
2162 abort ();
2163 }
2164 }
2165 #undef FUNC_NAME
2166
2167
SCM_DEFINE (scm_unhash_name, "unhash-name", 1, 0, 0,
            (SCM name),
	    "")
#define FUNC_NAME s_scm_unhash_name
{
  int x;
  int bound;
  SCM_VALIDATE_SYMBOL (1,name);
  SCM_DEFER_INTS;
  bound = scm_n_heap_segs;
  /* Walk every cell of every heap segment looking for gloc cells.  */
  for (x = 0; x < bound; ++x)
    {
      SCM_CELLPTR p;
      SCM_CELLPTR pbound;
      p = scm_heap_table[x].bounds[0];
      pbound = scm_heap_table[x].bounds[1];
      while (p < pbound)
	{
	  SCM cell = PTR2SCM (p);
	  if (SCM_TYP3 (cell) == scm_tc3_cons_gloc)
	    {
	      /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
	       * struct cell.  See the corresponding comment in scm_gc_mark.
	       */
	      scm_bits_t word0 = SCM_CELL_WORD_0 (cell) - scm_tc3_cons_gloc;
	      SCM gloc_car = SCM_PACK (word0); /* access as gloc */
	      SCM vcell = SCM_CELL_OBJECT_1 (gloc_car);
	      /* Structs masquerade as glocs with a vcell word of 0 or
		 1; skip those.  Unhash only glocs whose symbol matches
		 NAME, or every gloc when NAME is #t.  */
	      if ((SCM_EQ_P (name, SCM_BOOL_T) || SCM_EQ_P (SCM_CAR (gloc_car), name))
		  && (SCM_UNPACK (vcell) != 0) && (SCM_UNPACK (vcell) != 1))
		{
		  SCM_SET_CELL_OBJECT_0 (cell, name);
		}
	    }
	  ++p;
	}
    }
  SCM_ALLOW_INTS;
  return name;
}
#undef FUNC_NAME
2208
2209
2210 \f
2211 /* {GC Protection Helper Functions}
2212 */
2213
2214
/* Intentionally empty.  Passing the address of a stack-resident SCM
 * here forces the compiler to keep that object addressable, so the
 * conservative stack scan will see it.  */
void
scm_remember (SCM *ptr)
{ /* empty */ }
2218
2219
2220 /*
2221 These crazy functions prevent garbage collection
2222 of arguments after the first argument by
2223 ensuring they remain live throughout the
2224 function because they are used in the last
2225 line of the code block.
2226 It'd be better to have a nice compiler hint to
2227 aid the conservative stack-scanning GC. --03/09/00 gjb */
/* Return the first argument unchanged; the trailing arguments exist
 * only to be kept live on the stack (see the comment above).  */
SCM
scm_return_first (SCM elt, ...)
{
  return elt;
}
2233
/* Integer twin of scm_return_first: yields its first argument
   unchanged; any further arguments exist solely to be kept alive on
   the stack across the call.  */
int
scm_return_first_int (int i, ...)
{
  return i;
}
2239
2240
/* Permanently protect OBJ from garbage collection by consing it onto
   the global scm_permobjs list.  Unlike scm_protect_object there is no
   way to undo this.  Returns OBJ.  */
SCM
scm_permanent_object (SCM obj)
{
  SCM_REDEFER_INTS;		/* critical section: scm_permobjs is global */
  scm_permobjs = scm_cons (obj, scm_permobjs);
  SCM_REALLOW_INTS;
  return obj;
}
2249
2250
2251 /* Protect OBJ from the garbage collector. OBJ will not be freed, even if all
2252 other references are dropped, until the object is unprotected by calling
2253 scm_unprotect_object (OBJ). Calls to scm_protect/unprotect_object nest,
2254 i. e. it is possible to protect the same object several times, but it is
2255 necessary to unprotect the object the same number of times to actually get
2256 the object unprotected. It is an error to unprotect an object more often
2257 than it has been protected before. The function scm_protect_object returns
2258 OBJ.
2259 */
2260
2261 /* Implementation note: For every object X, there is a counter which
2262 scm_protect_object(X) increments and scm_unprotect_object(X) decrements.
2263 */
2264
SCM
scm_protect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  /* Find OBJ's entry in the protection table, creating it with count 0
     if absent, then bump the nesting counter.  Being a key in
     scm_protects (presumably itself a GC root — see scm_init_storage)
     is what keeps OBJ alive.  */
  handle = scm_hashq_create_handle_x (scm_protects, obj, SCM_MAKINUM (0));
  SCM_SETCDR (handle, SCM_MAKINUM (SCM_INUM (SCM_CDR (handle)) + 1));

  SCM_REALLOW_INTS;

  return obj;
}
2280
2281
/* Remove any protection for OBJ established by a prior call to
   scm_protect_object.  This function returns OBJ.

   See scm_protect_object for more information.  */
SCM
scm_unprotect_object (SCM obj)
{
  SCM handle;

  /* This critical section barrier will be replaced by a mutex. */
  SCM_REDEFER_INTS;

  /* Look OBJ up in the protection table; absence means the caller is
     unprotecting more often than it protected — a hard error.  */
  handle = scm_hashq_get_handle (scm_protects, obj);

  if (SCM_IMP (handle))
    {
      fprintf (stderr, "scm_unprotect_object called on unprotected object\n");
      abort ();
    }
  else
    {
      /* Decrement the nesting counter; drop the entry entirely once it
	 reaches zero so OBJ becomes collectable again.  (The counter is
	 >= 1 for any entry present in the table, so this unsigned
	 subtraction cannot wrap.)  */
      unsigned long int count = SCM_INUM (SCM_CDR (handle)) - 1;
      if (count == 0)
	scm_hashq_remove_x (scm_protects, obj);
      else
	SCM_SETCDR (handle, SCM_MAKINUM (count));
    }

  SCM_REALLOW_INTS;

  return obj;
}
2314
int terminating;	/* set nonzero by cleanup () once process termination begins */

/* called on process termination. */
#ifdef HAVE_ATEXIT
static void
cleanup (void)
#else
#ifdef HAVE_ON_EXIT
/* on_exit handlers receive the exit status and a user-supplied
   argument; both are ignored here.  */
extern int on_exit (void (*procp) (), int arg);

static void
cleanup (int status, void *arg)
#else
#error Dont know how to setup a cleanup handler on your system.
#endif
#endif
{
  /* Record that we are shutting down, then make sure no buffered port
     output is lost.  */
  terminating = 1;
  scm_flush_all_ports ();
}
2335
2336 \f
2337 static int
2338 make_initial_segment (scm_sizet init_heap_size, scm_freelist_t *freelist)
2339 {
2340 scm_sizet rounded_size = round_to_cluster_size (freelist, init_heap_size);
2341 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2342 rounded_size,
2343 freelist))
2344 {
2345 rounded_size = round_to_cluster_size (freelist, SCM_HEAP_SEG_SIZE);
2346 if (!init_heap_seg ((SCM_CELLPTR) malloc (rounded_size),
2347 rounded_size,
2348 freelist))
2349 return 1;
2350 }
2351 else
2352 scm_expmem = 1;
2353
2354 if (freelist->min_yield_fraction)
2355 freelist->min_yield = (freelist->heap_size * freelist->min_yield_fraction
2356 / 100);
2357 freelist->grow_heap_p = (freelist->heap_size < freelist->min_yield);
2358
2359 return 0;
2360 }
2361
2362 \f
2363 static void
2364 init_freelist (scm_freelist_t *freelist,
2365 int span,
2366 int cluster_size,
2367 int min_yield)
2368 {
2369 freelist->clusters = SCM_EOL;
2370 freelist->cluster_size = cluster_size + 1;
2371 freelist->left_to_collect = 0;
2372 freelist->clusters_allocated = 0;
2373 freelist->min_yield = 0;
2374 freelist->min_yield_fraction = min_yield;
2375 freelist->span = span;
2376 freelist->collected = 0;
2377 freelist->collected_1 = 0;
2378 freelist->heap_size = 0;
2379 }
2380
2381 int
2382 scm_init_storage (scm_sizet init_heap_size_1, int gc_trigger_1,
2383 scm_sizet init_heap_size_2, int gc_trigger_2,
2384 scm_sizet max_segment_size)
2385 {
2386 scm_sizet j;
2387
2388 if (!init_heap_size_1)
2389 init_heap_size_1 = scm_default_init_heap_size_1;
2390 if (!init_heap_size_2)
2391 init_heap_size_2 = scm_default_init_heap_size_2;
2392
2393 j = SCM_NUM_PROTECTS;
2394 while (j)
2395 scm_sys_protects[--j] = SCM_BOOL_F;
2396 scm_block_gc = 1;
2397
2398 scm_freelist = SCM_EOL;
2399 scm_freelist2 = SCM_EOL;
2400 init_freelist (&scm_master_freelist,
2401 1, SCM_CLUSTER_SIZE_1,
2402 gc_trigger_1 ? gc_trigger_1 : scm_default_min_yield_1);
2403 init_freelist (&scm_master_freelist2,
2404 2, SCM_CLUSTER_SIZE_2,
2405 gc_trigger_2 ? gc_trigger_2 : scm_default_min_yield_2);
2406 scm_max_segment_size
2407 = max_segment_size ? max_segment_size : scm_default_max_segment_size;
2408
2409 scm_expmem = 0;
2410
2411 j = SCM_HEAP_SEG_SIZE;
2412 scm_mtrigger = SCM_INIT_MALLOC_LIMIT;
2413 scm_heap_table = ((scm_heap_seg_data_t *)
2414 scm_must_malloc (sizeof (scm_heap_seg_data_t) * 2, "hplims"));
2415 heap_segment_table_size = 2;
2416
2417 if (make_initial_segment (init_heap_size_1, &scm_master_freelist) ||
2418 make_initial_segment (init_heap_size_2, &scm_master_freelist2))
2419 return 1;
2420
2421 /* scm_hplims[0] can change. do not remove scm_heap_org */
2422 scm_heap_org = CELL_UP (scm_heap_table[0].bounds[0], 1);
2423
2424 scm_c_hook_init (&scm_before_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2425 scm_c_hook_init (&scm_before_mark_c_hook, 0, SCM_C_HOOK_NORMAL);
2426 scm_c_hook_init (&scm_before_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2427 scm_c_hook_init (&scm_after_sweep_c_hook, 0, SCM_C_HOOK_NORMAL);
2428 scm_c_hook_init (&scm_after_gc_c_hook, 0, SCM_C_HOOK_NORMAL);
2429
2430 /* Initialise the list of ports. */
2431 scm_port_table = (scm_port **)
2432 malloc (sizeof (scm_port *) * scm_port_table_room);
2433 if (!scm_port_table)
2434 return 1;
2435
2436 #ifdef HAVE_ATEXIT
2437 atexit (cleanup);
2438 #else
2439 #ifdef HAVE_ON_EXIT
2440 on_exit (cleanup, 0);
2441 #endif
2442 #endif
2443
2444 scm_undefineds = scm_cons (SCM_UNDEFINED, SCM_EOL);
2445 SCM_SETCDR (scm_undefineds, scm_undefineds);
2446
2447 scm_listofnull = scm_cons (SCM_EOL, SCM_EOL);
2448 scm_nullstr = scm_makstr (0L, 0);
2449 scm_nullvect = scm_make_vector (SCM_INUM0, SCM_UNDEFINED);
2450 scm_symhash = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2451 scm_weak_symhash = scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim));
2452 scm_symhash_vars = scm_make_vector (SCM_MAKINUM (scm_symhash_dim), SCM_EOL);
2453 scm_stand_in_procs = SCM_EOL;
2454 scm_permobjs = SCM_EOL;
2455 scm_protects = scm_make_vector (SCM_MAKINUM (31), SCM_EOL);
2456 scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM));
2457 scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM));
2458 #ifdef SCM_BIGDIG
2459 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD));
2460 #endif
2461 return 0;
2462 }
2463
2464 \f
2465
SCM scm_after_gc_hook;		/* Scheme-level after-gc-hook (created in scm_init_gc). */

#if (SCM_DEBUG_DEPRECATED == 0)
static SCM scm_gc_vcell;                /* the vcell for gc-thunk. */
#endif /* SCM_DEBUG_DEPRECATED == 0 */
static SCM gc_async;		/* async whose thunk runs the after-gc-hook. */
2472
2473
/* The function gc_async_thunk causes the execution of the after-gc-hook.  It
 * is run after the gc, as soon as the asynchronous events are handled by the
 * evaluator.
 */
static SCM
gc_async_thunk (void)
{
  /* Run the Scheme-level after-gc-hook with no arguments.  */
  scm_c_run_hook (scm_after_gc_hook, SCM_EOL);

#if (SCM_DEBUG_DEPRECATED == 0)

  /* The following code will be removed in Guile 1.5. */
  if (SCM_NFALSEP (scm_gc_vcell))
    {
      SCM proc = SCM_CDR (scm_gc_vcell);

      /* Call the deprecated gc-thunk, if one has been installed.  */
      if (SCM_NFALSEP (proc) && !SCM_UNBNDP (proc))
	scm_apply (proc, SCM_EOL, SCM_EOL);
    }

#endif /* SCM_DEBUG_DEPRECATED == 0 */

  return SCM_UNSPECIFIED;
}
2498
2499
/* The function mark_gc_async is run by the scm_after_gc_c_hook at the end of
 * the garbage collection.  The only purpose of this function is to mark the
 * gc_async (which will eventually lead to the execution of the
 * gc_async_thunk).  The three hook parameters are unused; the return
 * value is ignored by the hook mechanism.
 */
static void *
mark_gc_async (void * hook_data, void *func_data, void *data)
{
  scm_system_async_mark (gc_async);
  return NULL;
}
2511
2512
/* Create the after-gc-hook machinery and register the generated
   bindings for this file (gc.x).  Called once at startup.  */
void
scm_init_gc ()
{
  SCM after_gc_thunk;

  /* Scheme-visible hook run after every garbage collection.  */
  scm_after_gc_hook = scm_create_hook ("after-gc-hook", 0);

#if (SCM_DEBUG_DEPRECATED == 0)
  /* Deprecated gc-thunk variable; goes away in Guile 1.5.  */
  scm_gc_vcell = scm_sysintern ("gc-thunk", SCM_BOOL_F);
#endif /* SCM_DEBUG_DEPRECATED == 0 */
  /* Dirk:FIXME:: We don't really want a binding here. */
  after_gc_thunk = scm_make_gsubr ("%gc-thunk", 0, 0, 0, gc_async_thunk);
  gc_async = scm_system_async (after_gc_thunk);

  /* Arrange for gc_async to be marked at the end of each collection.  */
  scm_c_hook_add (&scm_after_gc_c_hook, mark_gc_async, NULL, 0);

#include "libguile/gc.x"
}
2531
2532 /*
2533 Local Variables:
2534 c-file-style: "gnu"
2535 End:
2536 */