1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
45 /* #define DEBUGINFO */
49 #include "libguile/_scm.h"
50 #include "libguile/stime.h"
51 #include "libguile/stackchk.h"
52 #include "libguile/struct.h"
53 #include "libguile/smob.h"
54 #include "libguile/unif.h"
55 #include "libguile/async.h"
56 #include "libguile/ports.h"
57 #include "libguile/root.h"
58 #include "libguile/strings.h"
59 #include "libguile/vectors.h"
60 #include "libguile/weaks.h"
62 #include "libguile/validate.h"
63 #include "libguile/gc.h"
65 #ifdef GUILE_DEBUG_MALLOC
66 #include "libguile/debug-malloc.h"
79 #define var_start(x, y) va_start(x, y)
82 #define var_start(x, y) va_start(x)
86 /* {heap tuning parameters}
88 * These are parameters for controlling memory allocation. The heap
89 * is the area out of which scm_cons, and object headers are allocated.
91 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
92 * 64 bit machine. The units of the _SIZE parameters are bytes.
93 * Cons pairs and object headers occupy one heap cell.
95 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
96 * allocated initially the heap will grow by half its current size
97 * each subsequent time more heap is needed.
99 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
100 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
101 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
102 * is in scm_init_storage() and alloc_some_heap() in sys.c
104 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
105 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
107 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
110 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
113 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
114 * reclaimed by a GC triggered by must_malloc. If less than this is
115 * reclaimed, the trigger threshold is raised. [I don't know what a
116 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
117 * work around a oscillation that caused almost constant GC.]
121 * Heap size 45000 and 40% min yield gives quick startup and no extra
122 * heap allocation. Having higher values on min yield may lead to
123 * large heaps, especially if code behaviour is varying its
124 * maximum consumption between different freelists.
126 #define SCM_INIT_HEAP_SIZE_1 (45000L * sizeof (scm_cell))
127 #define SCM_CLUSTER_SIZE_1 2000L
128 #define SCM_MIN_YIELD_1 40
130 #define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
131 #define SCM_CLUSTER_SIZE_2 1000L
132 /* The following value may seem large, but note that if we get to GC at
133 * all, this means that we have a numerically intensive application
135 #define SCM_MIN_YIELD_2 40
137 #define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */
139 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
141 # define SCM_HEAP_SEG_SIZE 32768L
144 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
146 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
149 /* Make heap grow with factor 1.5 */
150 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
151 #define SCM_INIT_MALLOC_LIMIT 100000
152 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
154 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
155 bounds for allocated storage */
158 /*in 386 protected mode we must only adjust the offset */
159 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
160 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
163 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
164 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
166 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
167 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
170 #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
171 #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
172 #ifdef GUILE_NEW_GC_SCHEME
173 #define SCM_HEAP_SIZE \
174 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
176 #define SCM_HEAP_SIZE (scm_freelist.heap_size + scm_freelist2.heap_size)
178 #define SCM_MAX(A, B) ((A) > (B) ? (A) : (B))
185 typedef struct scm_freelist_t
{
186 /* collected cells */
188 #ifdef GUILE_NEW_GC_SCHEME
189 /* number of cells left to collect before cluster is full */
190 unsigned int left_to_collect
;
191 /* number of clusters which have been allocated */
192 unsigned int clusters_allocated
;
193 /* a list of freelists, each of size cluster_size,
194 * except the last one which may be shorter
198 /* this is the number of objects in each cluster, including the spine cell */
200 /* indicates that we should grow heap instead of GC:ing
203 /* minimum yield on this list in order not to grow the heap
206 /* defines min_yield as percent of total heap size
208 int min_yield_fraction
;
210 /* number of cells per object on this list */
212 /* number of collected cells during last GC */
214 /* number of collected cells during penultimate GC */
216 /* total number of cells in heap segments
217 * belonging to this list.
222 #ifdef GUILE_NEW_GC_SCHEME
223 SCM scm_freelist
= SCM_EOL
;
224 scm_freelist_t scm_master_freelist
= {
225 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_1
, 0, 0, 0, 1, 0, 0
227 SCM scm_freelist2
= SCM_EOL
;
228 scm_freelist_t scm_master_freelist2
= {
229 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_2
, 0, 0, 0, 2, 0, 0
232 scm_freelist_t scm_freelist
= { SCM_EOL
, 1, 0, 0 };
233 scm_freelist_t scm_freelist2
= { SCM_EOL
, 2, 0, 0 };
237 * is the number of bytes of must_malloc allocation needed to trigger gc.
239 unsigned long scm_mtrigger
;
243 * If set, don't expand the heap. Set only during gc, during which no allocation
244 * is supposed to take place anyway.
246 int scm_gc_heap_lock
= 0;
249 * Don't pause for collection if this is set -- just
253 int scm_block_gc
= 1;
255 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
256 * collection (GC) more space is allocated for the heap.
258 #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
260 /* During collection, this accumulates objects holding
263 SCM scm_weak_vectors
;
265 /* GC Statistics Keeping
267 unsigned long scm_cells_allocated
= 0;
268 long scm_mallocated
= 0;
269 unsigned long scm_gc_cells_collected
;
270 #ifdef GUILE_NEW_GC_SCHEME
271 unsigned long scm_gc_yield
;
272 static unsigned long scm_gc_yield_1
= 0; /* previous GC yield */
274 unsigned long scm_gc_malloc_collected
;
275 unsigned long scm_gc_ports_collected
;
276 unsigned long scm_gc_rt
;
277 unsigned long scm_gc_time_taken
= 0;
279 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
280 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
281 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
282 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
283 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
284 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
286 typedef struct scm_heap_seg_data_t
288 /* lower and upper bounds of the segment */
289 SCM_CELLPTR bounds
[2];
291 /* address of the head-of-freelist pointer for this segment's cells.
292 All segments usually point to the same one, scm_freelist. */
293 scm_freelist_t
*freelist
;
295 /* number of SCM words per object in this segment */
298 /* If SEG_DATA->valid is non-zero, the conservative marking
299 functions will apply SEG_DATA->valid to the purported pointer and
300 SEG_DATA, and mark the object iff the function returns non-zero.
301 At the moment, I don't think anyone uses this. */
303 } scm_heap_seg_data_t
;
307 static scm_sizet
init_heap_seg (SCM_CELLPTR
, scm_sizet
, scm_freelist_t
*);
308 static void alloc_some_heap (scm_freelist_t
*);
312 /* Debugging functions. */
314 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
316 /* Return the number of the heap segment containing CELL. */
322 for (i
= 0; i
< scm_n_heap_segs
; i
++)
323 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], SCM2PTR (cell
))
324 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], SCM2PTR (cell
)))
326 fprintf (stderr
, "which_seg: can't find segment containing cell %lx\n",
332 #ifdef GUILE_NEW_GC_SCHEME
334 map_free_list (scm_freelist_t
*master
, SCM freelist
)
336 int last_seg
= -1, count
= 0;
339 for (f
= freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
))
341 int this_seg
= which_seg (f
);
343 if (this_seg
!= last_seg
)
346 fprintf (stderr
, " %5d %d-cells in segment %d\n",
347 count
, master
->span
, last_seg
);
354 fprintf (stderr
, " %5d %d-cells in segment %d\n",
355 count
, master
->span
, last_seg
);
359 map_free_list (scm_freelist_t
*freelist
)
361 int last_seg
= -1, count
= 0;
364 for (f
= freelist
->cells
; SCM_NIMP (f
); f
= SCM_CDR (f
))
366 int this_seg
= which_seg (f
);
368 if (this_seg
!= last_seg
)
371 fprintf (stderr
, " %5d %d-cells in segment %d\n",
372 count
, freelist
->span
, last_seg
);
379 fprintf (stderr
, " %5d %d-cells in segment %d\n",
380 count
, freelist
->span
, last_seg
);
384 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
386 "Print debugging information about the free-list.\n"
387 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
388 #define FUNC_NAME s_scm_map_free_list
391 fprintf (stderr
, "%d segments total (%d:%d",
393 scm_heap_table
[0].span
,
394 scm_heap_table
[0].bounds
[1] - scm_heap_table
[0].bounds
[0]);
395 for (i
= 1; i
< scm_n_heap_segs
; i
++)
396 fprintf (stderr
, ", %d:%d",
397 scm_heap_table
[i
].span
,
398 scm_heap_table
[i
].bounds
[1] - scm_heap_table
[i
].bounds
[0]);
399 fprintf (stderr
, ")\n");
400 #ifdef GUILE_NEW_GC_SCHEME
401 map_free_list (&scm_master_freelist
, scm_freelist
);
402 map_free_list (&scm_master_freelist2
, scm_freelist2
);
404 map_free_list (&scm_freelist
);
405 map_free_list (&scm_freelist2
);
409 return SCM_UNSPECIFIED
;
413 #ifdef GUILE_NEW_GC_SCHEME
414 static int last_cluster
;
415 static int last_size
;
418 free_list_length (char *title
, int i
, SCM freelist
)
422 for (ls
= freelist
; SCM_NNULLP (ls
); ls
= SCM_CDR (ls
))
423 if (SCM_UNPACK_CAR (ls
) == scm_tc_free_cell
)
427 fprintf (stderr
, "bad cell in %s at position %d\n", title
, n
);
434 if (last_cluster
== i
- 1)
435 fprintf (stderr
, "\t%d\n", last_size
);
437 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
440 fprintf (stderr
, "%s %d", title
, i
);
442 fprintf (stderr
, "%s\t%d\n", title
, n
);
450 free_list_lengths (char *title
, scm_freelist_t
*master
, SCM freelist
)
453 int i
= 0, len
, n
= 0;
454 fprintf (stderr
, "%s\n\n", title
);
455 n
+= free_list_length ("free list", -1, freelist
);
456 for (clusters
= master
->clusters
;
457 SCM_NNULLP (clusters
);
458 clusters
= SCM_CDR (clusters
))
460 len
= free_list_length ("cluster", i
++, SCM_CAR (clusters
));
463 if (last_cluster
== i
- 1)
464 fprintf (stderr
, "\t%d\n", last_size
);
466 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
467 fprintf (stderr
, "\ntotal %d objects\n\n", n
);
470 SCM_DEFINE (scm_free_list_length
, "free-list-length", 0, 0, 0,
472 "Print debugging information about the free-list.\n"
473 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
474 #define FUNC_NAME s_scm_free_list_length
476 free_list_lengths ("1-cells", &scm_master_freelist
, scm_freelist
);
477 free_list_lengths ("2-cells", &scm_master_freelist2
, scm_freelist2
);
478 return SCM_UNSPECIFIED
;
485 #ifdef GUILE_DEBUG_FREELIST
487 /* Number of calls to SCM_NEWCELL since startup. */
488 static unsigned long scm_newcell_count
;
489 static unsigned long scm_newcell2_count
;
491 /* Search freelist for anything that isn't marked as a free cell.
492 Abort if we find something. */
493 #ifdef GUILE_NEW_GC_SCHEME
495 scm_check_freelist (SCM freelist
)
500 for (f
= freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
), i
++)
501 if (SCM_CAR (f
) != (SCM
) scm_tc_free_cell
)
503 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
504 scm_newcell_count
, i
);
511 scm_check_freelist (scm_freelist_t
*freelist
)
516 for (f
= freelist
->cells
; SCM_NIMP (f
); f
= SCM_CDR (f
), i
++)
517 if (SCM_CAR (f
) != (SCM
) scm_tc_free_cell
)
519 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
520 scm_newcell_count
, i
);
527 static int scm_debug_check_freelist
= 0;
529 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
531 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
532 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
533 "compile-time flag was selected.\n")
534 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
536 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
537 return SCM_UNSPECIFIED
;
542 #ifdef GUILE_NEW_GC_SCHEME
545 scm_debug_newcell (void)
550 if (scm_debug_check_freelist
)
552 scm_check_freelist (scm_freelist
);
556 /* The rest of this is supposed to be identical to the SCM_NEWCELL
558 if (SCM_IMP (scm_freelist
))
559 new = scm_gc_for_newcell (&scm_master_freelist
, &scm_freelist
);
563 scm_freelist
= SCM_CDR (scm_freelist
);
564 SCM_SETCAR (new, scm_tc16_allocated
);
571 scm_debug_newcell2 (void)
575 scm_newcell2_count
++;
576 if (scm_debug_check_freelist
)
578 scm_check_freelist (scm_freelist2
);
582 /* The rest of this is supposed to be identical to the SCM_NEWCELL
584 if (SCM_IMP (scm_freelist2
))
585 new = scm_gc_for_newcell (&scm_master_freelist2
, &scm_freelist2
);
589 scm_freelist2
= SCM_CDR (scm_freelist2
);
590 SCM_SETCAR (new, scm_tc16_allocated
);
596 #else /* GUILE_NEW_GC_SCHEME */
599 scm_debug_newcell (void)
604 if (scm_debug_check_freelist
)
606 scm_check_freelist (&scm_freelist
);
610 /* The rest of this is supposed to be identical to the SCM_NEWCELL
612 if (SCM_IMP (scm_freelist
.cells
))
613 new = scm_gc_for_newcell (&scm_freelist
);
616 new = scm_freelist
.cells
;
617 scm_freelist
.cells
= SCM_CDR (scm_freelist
.cells
);
618 SCM_SETCAR (new, scm_tc16_allocated
);
619 ++scm_cells_allocated
;
626 scm_debug_newcell2 (void)
630 scm_newcell2_count
++;
631 if (scm_debug_check_freelist
) {
632 scm_check_freelist (&scm_freelist2
);
636 /* The rest of this is supposed to be identical to the SCM_NEWCELL2
638 if (SCM_IMP (scm_freelist2
.cells
))
639 new = scm_gc_for_newcell (&scm_freelist2
);
642 new = scm_freelist2
.cells
;
643 scm_freelist2
.cells
= SCM_CDR (scm_freelist2
.cells
);
644 SCM_SETCAR (new, scm_tc16_allocated
);
645 scm_cells_allocated
+= 2;
651 #endif /* GUILE_NEW_GC_SCHEME */
652 #endif /* GUILE_DEBUG_FREELIST */
656 #ifdef GUILE_NEW_GC_SCHEME
658 master_cells_allocated (scm_freelist_t
*master
)
660 int objects
= master
->clusters_allocated
* (master
->cluster_size
- 1);
661 if (SCM_NULLP (master
->clusters
))
662 objects
-= master
->left_to_collect
;
663 return master
->span
* objects
;
667 freelist_length (SCM freelist
)
670 for (n
= 0; SCM_NNULLP (freelist
); freelist
= SCM_CDR (freelist
))
676 compute_cells_allocated ()
678 return (scm_cells_allocated
679 + master_cells_allocated (&scm_master_freelist
)
680 + master_cells_allocated (&scm_master_freelist2
)
681 - scm_master_freelist
.span
* freelist_length (scm_freelist
)
682 - scm_master_freelist2
.span
* freelist_length (scm_freelist2
));
686 /* {Scheme Interface to GC}
689 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
691 "Returns an association list of statistics about Guile's current use of storage. ")
692 #define FUNC_NAME s_scm_gc_stats
697 long int local_scm_mtrigger
;
698 long int local_scm_mallocated
;
699 long int local_scm_heap_size
;
700 long int local_scm_cells_allocated
;
701 long int local_scm_gc_time_taken
;
709 for (i
= scm_n_heap_segs
; i
--; )
710 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
711 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
713 if (scm_n_heap_segs
!= n
)
717 /* Below, we cons to produce the resulting list. We want a snapshot of
718 * the heap situation before consing.
720 local_scm_mtrigger
= scm_mtrigger
;
721 local_scm_mallocated
= scm_mallocated
;
722 local_scm_heap_size
= SCM_HEAP_SIZE
;
723 #ifdef GUILE_NEW_GC_SCHEME
724 local_scm_cells_allocated
= compute_cells_allocated ();
726 local_scm_cells_allocated
= scm_cells_allocated
;
728 local_scm_gc_time_taken
= scm_gc_time_taken
;
730 answer
= scm_listify (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
731 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
732 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
733 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
734 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
735 scm_cons (sym_heap_segments
, heap_segs
),
744 scm_gc_start (const char *what
)
746 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ());
747 scm_gc_cells_collected
= 0;
748 #ifdef GUILE_NEW_GC_SCHEME
749 scm_gc_yield_1
= scm_gc_yield
;
750 scm_gc_yield
= (scm_cells_allocated
751 + master_cells_allocated (&scm_master_freelist
)
752 + master_cells_allocated (&scm_master_freelist2
));
754 scm_gc_malloc_collected
= 0;
755 scm_gc_ports_collected
= 0;
761 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt
;
762 scm_gc_time_taken
+= scm_gc_rt
;
763 scm_system_async_mark (scm_gc_async
);
767 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
769 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
770 "returned by this function for @var{obj}")
771 #define FUNC_NAME s_scm_object_address
773 return scm_ulong2num ((unsigned long) SCM_UNPACK (obj
));
778 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
780 "Scans all of SCM objects and reclaims for further use those that are\n"
781 "no longer accessible.")
782 #define FUNC_NAME s_scm_gc
787 return SCM_UNSPECIFIED
;
793 /* {C Interface For When GC is Triggered}
796 #ifdef GUILE_NEW_GC_SCHEME
799 adjust_min_yield (scm_freelist_t
*freelist
)
801 /* min yield is adjusted upwards so that next predicted total yield
802 * (allocated cells actually freed by GC) becomes
803 * `min_yield_fraction' of total heap size. Note, however, that
804 * the absolute value of min_yield will correspond to `collected'
805 * on one master (the one which currently is triggering GC).
807 * The reason why we look at total yield instead of cells collected
808 * on one list is that we want to take other freelists into account.
809 * On this freelist, we know that (local) yield = collected cells,
810 * but that's probably not the case on the other lists.
812 * (We might consider computing a better prediction, for example
813 * by computing an average over multiple GC:s.)
815 if (freelist
->min_yield_fraction
)
817 /* Pick largest of last two yields. */
818 int delta
= ((SCM_HEAP_SIZE
* freelist
->min_yield_fraction
/ 100)
819 - (long) SCM_MAX (scm_gc_yield_1
, scm_gc_yield
));
821 fprintf (stderr
, " after GC = %d, delta = %d\n",
826 freelist
->min_yield
+= delta
;
830 /* When we get POSIX threads support, the master will be global and
831 * common while the freelist will be individual for each thread.
835 scm_gc_for_newcell (scm_freelist_t
*master
, SCM
*freelist
)
841 if (SCM_NULLP (master
->clusters
))
843 if (master
->grow_heap_p
)
845 master
->grow_heap_p
= 0;
846 alloc_some_heap (master
);
851 fprintf (stderr
, "allocated = %d, ",
853 + master_cells_allocated (&scm_master_freelist
)
854 + master_cells_allocated (&scm_master_freelist2
));
857 adjust_min_yield (master
);
860 cell
= SCM_CAR (master
->clusters
);
861 master
->clusters
= SCM_CDR (master
->clusters
);
862 ++master
->clusters_allocated
;
864 while (SCM_NULLP (cell
));
866 *freelist
= SCM_CDR (cell
);
867 SCM_SET_CELL_TYPE (cell
, scm_tc16_allocated
);
872 /* This is a support routine which can be used to reserve a cluster
873 * for some special use, such as debugging. It won't be useful until
874 * free cells are preserved between garbage collections.
878 scm_alloc_cluster (scm_freelist_t
*master
)
881 cell
= scm_gc_for_newcell (master
, &freelist
);
882 SCM_SETCDR (cell
, freelist
);
887 #else /* GUILE_NEW_GC_SCHEME */
890 scm_gc_for_alloc (scm_freelist_t
*freelist
)
894 #ifdef GUILE_DEBUG_FREELIST
895 fprintf (stderr
, "Collected: %d, min_yield: %d\n",
896 freelist
->collected
, MIN_GC_YIELD (freelist
));
898 if ((freelist
->collected
< MIN_GC_YIELD (freelist
))
899 || SCM_IMP (freelist
->cells
))
900 alloc_some_heap (freelist
);
906 scm_gc_for_newcell (scm_freelist_t
*freelist
)
909 scm_gc_for_alloc (freelist
);
910 fl
= freelist
->cells
;
911 freelist
->cells
= SCM_CDR (fl
);
912 SCM_SETCAR (fl
, scm_tc16_allocated
);
916 #endif /* GUILE_NEW_GC_SCHEME */
918 SCM scm_after_gc_hook
;
920 scm_c_hook_t scm_before_gc_c_hook
;
921 scm_c_hook_t scm_before_mark_c_hook
;
922 scm_c_hook_t scm_before_sweep_c_hook
;
923 scm_c_hook_t scm_after_sweep_c_hook
;
924 scm_c_hook_t scm_after_gc_c_hook
;
927 scm_igc (const char *what
)
931 scm_c_hook_run (&scm_before_gc_c_hook
, 0);
934 SCM_NULLP (scm_freelist
)
936 : (SCM_NULLP (scm_freelist2
) ? "o" : "m"));
939 /* During the critical section, only the current thread may run. */
940 SCM_THREAD_CRITICAL_SECTION_START
;
943 /* fprintf (stderr, "gc: %s\n", what); */
947 if (!scm_stack_base
|| scm_block_gc
)
953 if (scm_mallocated
< 0)
954 /* The byte count of allocated objects has underflowed. This is
955 probably because you forgot to report the sizes of objects you
956 have allocated, by calling scm_done_malloc or some such. When
957 the GC freed them, it subtracted their size from
958 scm_mallocated, which underflowed. */
961 if (scm_gc_heap_lock
)
962 /* We've invoked the collector while a GC is already in progress.
963 That should never happen. */
968 /* unprotect any struct types with no instances */
974 pos
= &scm_type_obj_list
;
975 type_list
= scm_type_obj_list
;
976 while (type_list
!= SCM_EOL
)
977 if (SCM_VELTS (SCM_CAR (type_list
))[scm_struct_i_refcnt
])
979 pos
= SCM_CDRLOC (type_list
);
980 type_list
= SCM_CDR (type_list
);
984 *pos
= SCM_CDR (type_list
);
985 type_list
= SCM_CDR (type_list
);
990 /* flush dead entries from the continuation stack */
995 elts
= SCM_VELTS (scm_continuation_stack
);
996 bound
= SCM_LENGTH (scm_continuation_stack
);
997 x
= SCM_INUM (scm_continuation_stack_ptr
);
1000 elts
[x
] = SCM_BOOL_F
;
1005 scm_c_hook_run (&scm_before_mark_c_hook
, 0);
1009 /* Protect from the C stack. This must be the first marking
1010 * done because it provides information about what objects
1011 * are "in-use" by the C code. "in-use" objects are those
1012 * for which the values from SCM_LENGTH and SCM_CHARS must remain
1013 * usable. This requirement is stricter than a liveness
1014 * requirement -- in particular, it constrains the implementation
1015 * of scm_vector_set_length_x.
1017 SCM_FLUSH_REGISTER_WINDOWS
;
1018 /* This assumes that all registers are saved into the jmp_buf */
1019 setjmp (scm_save_regs_gc_mark
);
1020 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
1021 ( (scm_sizet
) (sizeof (SCM_STACKITEM
) - 1 +
1022 sizeof scm_save_regs_gc_mark
)
1023 / sizeof (SCM_STACKITEM
)));
1026 /* stack_len is long rather than scm_sizet in order to guarantee that
1027 &stack_len is long aligned */
1028 #ifdef SCM_STACK_GROWS_UP
1030 long stack_len
= (SCM_STACKITEM
*) (&stack_len
) - scm_stack_base
;
1032 long stack_len
= scm_stack_size (scm_stack_base
);
1034 scm_mark_locations (scm_stack_base
, (scm_sizet
) stack_len
);
1037 long stack_len
= scm_stack_base
- (SCM_STACKITEM
*) (&stack_len
);
1039 long stack_len
= scm_stack_size (scm_stack_base
);
1041 scm_mark_locations ((scm_stack_base
- stack_len
), (scm_sizet
) stack_len
);
1045 #else /* USE_THREADS */
1047 /* Mark every thread's stack and registers */
1048 scm_threads_mark_stacks ();
1050 #endif /* USE_THREADS */
1052 /* FIXME: insert a phase to un-protect string-data preserved
1053 * in scm_vector_set_length_x.
1056 j
= SCM_NUM_PROTECTS
;
1058 scm_gc_mark (scm_sys_protects
[j
]);
1060 /* FIXME: we should have a means to register C functions to be run
1061 * in different phases of GC
1063 scm_mark_subr_table ();
1066 scm_gc_mark (scm_root
->handle
);
1069 scm_c_hook_run (&scm_before_sweep_c_hook
, 0);
1073 scm_c_hook_run (&scm_after_sweep_c_hook
, 0);
1079 SCM_THREAD_CRITICAL_SECTION_END
;
1081 scm_c_hook_run (&scm_after_gc_c_hook
, 0);
1090 /* Mark an object precisely.
1105 if (SCM_NCELLP (ptr
))
1106 scm_wta (ptr
, "rogue pointer in heap", NULL
);
1108 switch (SCM_TYP7 (ptr
))
1110 case scm_tcs_cons_nimcar
:
1111 if (SCM_GCMARKP (ptr
))
1113 SCM_SETGCMARK (ptr
);
1114 if (SCM_IMP (SCM_CDR (ptr
))) /* SCM_IMP works even with a GC mark */
1116 ptr
= SCM_CAR (ptr
);
1119 scm_gc_mark (SCM_CAR (ptr
));
1120 ptr
= SCM_GCCDR (ptr
);
1122 case scm_tcs_cons_imcar
:
1123 if (SCM_GCMARKP (ptr
))
1125 SCM_SETGCMARK (ptr
);
1126 ptr
= SCM_GCCDR (ptr
);
1129 if (SCM_GCMARKP (ptr
))
1131 SCM_SETGCMARK (ptr
);
1132 scm_gc_mark (SCM_CELL_OBJECT_2 (ptr
));
1133 ptr
= SCM_GCCDR (ptr
);
1135 case scm_tcs_cons_gloc
:
1136 if (SCM_GCMARKP (ptr
))
1138 SCM_SETGCMARK (ptr
);
1140 /* Dirk:FIXME:: The following code is super ugly: ptr may be a struct
1141 * or a gloc. If it is a gloc, the cell word #0 of ptr is a pointer
1142 * to a heap cell. If it is a struct, the cell word #0 of ptr is a
1143 * pointer to a struct vtable data region. The fact that these are
1144 * accessed in the same way restricts the possibilites to change the
1145 * data layout of structs or heap cells.
1147 scm_bits_t word0
= SCM_CELL_WORD_0 (ptr
) - scm_tc3_cons_gloc
;
1148 scm_bits_t
* vtable_data
= (scm_bits_t
*) word0
; /* access as struct */
1149 switch (vtable_data
[scm_vtable_index_vcell
])
1154 SCM gloc_car
= SCM_PACK (word0
);
1155 scm_gc_mark (gloc_car
);
1156 ptr
= SCM_GCCDR (ptr
);
1162 /* ptr is a struct */
1163 SCM layout
= SCM_PACK (vtable_data
[scm_vtable_index_layout
]);
1164 int len
= SCM_LENGTH (layout
);
1165 char * fields_desc
= SCM_CHARS (layout
);
1166 /* We're using SCM_GCCDR here like STRUCT_DATA, except
1167 that it removes the mark */
1168 scm_bits_t
* struct_data
= (scm_bits_t
*) SCM_UNPACK (SCM_GCCDR (ptr
));
1170 if (vtable_data
[scm_struct_i_flags
] & SCM_STRUCTF_ENTITY
)
1172 scm_gc_mark (SCM_PACK (struct_data
[scm_struct_i_procedure
]));
1173 scm_gc_mark (SCM_PACK (struct_data
[scm_struct_i_setter
]));
1179 for (x
= 0; x
< len
- 2; x
+= 2, ++struct_data
)
1180 if (fields_desc
[x
] == 'p')
1181 scm_gc_mark (SCM_PACK (*struct_data
));
1182 if (fields_desc
[x
] == 'p')
1184 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
1185 for (x
= *struct_data
; x
; --x
)
1186 scm_gc_mark (SCM_PACK (*++struct_data
));
1188 scm_gc_mark (SCM_PACK (*struct_data
));
1191 if (vtable_data
[scm_vtable_index_vcell
] == 0)
1193 vtable_data
[scm_vtable_index_vcell
] = 1;
1194 ptr
= SCM_PACK (vtable_data
[scm_vtable_index_vtable
]);
1201 case scm_tcs_closures
:
1202 if (SCM_GCMARKP (ptr
))
1204 SCM_SETGCMARK (ptr
);
1205 if (SCM_IMP (SCM_CDR (ptr
)))
1207 ptr
= SCM_CLOSCAR (ptr
);
1210 scm_gc_mark (SCM_CLOSCAR (ptr
));
1211 ptr
= SCM_GCCDR (ptr
);
1213 case scm_tc7_vector
:
1214 case scm_tc7_lvector
:
1218 if (SCM_GC8MARKP (ptr
))
1220 SCM_SETGC8MARK (ptr
);
1221 i
= SCM_LENGTH (ptr
);
1225 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
1226 scm_gc_mark (SCM_VELTS (ptr
)[i
]);
1227 ptr
= SCM_VELTS (ptr
)[0];
1229 case scm_tc7_contin
:
1232 SCM_SETGC8MARK (ptr
);
1233 if (SCM_VELTS (ptr
))
1234 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr
),
1237 (sizeof (SCM_STACKITEM
) + -1 +
1238 sizeof (scm_contregs
)) /
1239 sizeof (SCM_STACKITEM
)));
1243 case scm_tc7_byvect
:
1250 #ifdef HAVE_LONG_LONGS
1251 case scm_tc7_llvect
:
1254 case scm_tc7_string
:
1255 SCM_SETGC8MARK (ptr
);
1258 case scm_tc7_substring
:
1259 if (SCM_GC8MARKP(ptr
))
1261 SCM_SETGC8MARK (ptr
);
1262 ptr
= SCM_CDR (ptr
);
1266 if (SCM_GC8MARKP(ptr
))
1268 SCM_WVECT_GC_CHAIN (ptr
) = scm_weak_vectors
;
1269 scm_weak_vectors
= ptr
;
1270 SCM_SETGC8MARK (ptr
);
1271 if (SCM_IS_WHVEC_ANY (ptr
))
1278 len
= SCM_LENGTH (ptr
);
1279 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
1280 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
1282 for (x
= 0; x
< len
; ++x
)
1285 alist
= SCM_VELTS (ptr
)[x
];
1287 /* mark everything on the alist except the keys or
1288 * values, according to weak_values and weak_keys. */
1289 while ( SCM_CONSP (alist
)
1290 && !SCM_GCMARKP (alist
)
1291 && SCM_CONSP (SCM_CAR (alist
)))
1296 kvpair
= SCM_CAR (alist
);
1297 next_alist
= SCM_CDR (alist
);
1300 * SCM_SETGCMARK (alist);
1301 * SCM_SETGCMARK (kvpair);
1303 * It may be that either the key or value is protected by
1304 * an escaped reference to part of the spine of this alist.
1305 * If we mark the spine here, and only mark one or neither of the
1306 * key and value, they may never be properly marked.
1307 * This leads to a horrible situation in which an alist containing
1308 * freelist cells is exported.
1310 * So only mark the spines of these arrays last of all marking.
1311 * If somebody confuses us by constructing a weak vector
1312 * with a circular alist then we are hosed, but at least we
1313 * won't prematurely drop table entries.
1316 scm_gc_mark (SCM_CAR (kvpair
));
1318 scm_gc_mark (SCM_GCCDR (kvpair
));
1321 if (SCM_NIMP (alist
))
1322 scm_gc_mark (alist
);
1327 case scm_tc7_msymbol
:
1328 if (SCM_GC8MARKP(ptr
))
1330 SCM_SETGC8MARK (ptr
);
1331 scm_gc_mark (SCM_SYMBOL_FUNC (ptr
));
1332 ptr
= SCM_SYMBOL_PROPS (ptr
);
1334 case scm_tc7_ssymbol
:
1335 if (SCM_GC8MARKP(ptr
))
1337 SCM_SETGC8MARK (ptr
);
1342 i
= SCM_PTOBNUM (ptr
);
1343 if (!(i
< scm_numptob
))
1345 if (SCM_GC8MARKP (ptr
))
1347 SCM_SETGC8MARK (ptr
);
1348 if (SCM_PTAB_ENTRY(ptr
))
1349 scm_gc_mark (SCM_PTAB_ENTRY(ptr
)->file_name
);
1350 if (scm_ptobs
[i
].mark
)
1352 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1359 if (SCM_GC8MARKP (ptr
))
1361 SCM_SETGC8MARK (ptr
);
1362 switch (SCM_GCTYP16 (ptr
))
1363 { /* should be faster than going through scm_smobs */
1364 case scm_tc_free_cell
:
1365 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1366 case scm_tc16_allocated
:
1369 case scm_tc16_complex
:
1372 i
= SCM_SMOBNUM (ptr
);
1373 if (!(i
< scm_numsmob
))
1375 if (scm_smobs
[i
].mark
)
1377 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1385 def
:scm_wta (ptr
, "unknown type in ", "gc_mark");
1390 /* Mark a Region Conservatively
1394 scm_mark_locations (SCM_STACKITEM x
[], scm_sizet n
)
1396 register long m
= n
;
1398 register SCM_CELLPTR ptr
;
1401 if (SCM_CELLP (* (SCM
*) &x
[m
]))
1403 ptr
= SCM2PTR (* (SCM
*) &x
[m
]);
1405 j
= scm_n_heap_segs
- 1;
1406 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1407 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1414 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
1416 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1424 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1428 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1433 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1437 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1443 if (!scm_heap_table
[seg_id
].valid
1444 || scm_heap_table
[seg_id
].valid (ptr
,
1445 &scm_heap_table
[seg_id
]))
1446 if (scm_heap_table
[seg_id
].span
== 1
1447 || SCM_DOUBLE_CELLP (* (SCM
*) &x
[m
]))
1448 scm_gc_mark (* (SCM
*) &x
[m
]);
1457 /* The function scm_cellp determines whether an SCM value can be regarded as a
1458 * pointer to a cell on the heap. Binary search is used in order to determine
1459 * the heap segment that contains the cell.
1462 scm_cellp (SCM value
)
1464 if (SCM_CELLP (value
)) {
1465 scm_cell
* ptr
= SCM2PTR (value
);
1467 unsigned int j
= scm_n_heap_segs
- 1;
1470 int k
= (i
+ j
) / 2;
1471 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
)) {
1473 } else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
)) {
1478 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1479 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
)
1480 && (!scm_heap_table
[i
].valid
|| scm_heap_table
[i
].valid (ptr
, &scm_heap_table
[i
]))
1481 && (scm_heap_table
[i
].span
== 1 || SCM_DOUBLE_CELLP (value
))) {
1492 #ifdef GUILE_NEW_GC_SCHEME
1494 gc_sweep_freelist_start (scm_freelist_t
*freelist
)
1496 freelist
->cells
= SCM_EOL
;
1497 freelist
->left_to_collect
= freelist
->cluster_size
;
1498 freelist
->clusters_allocated
= 0;
1499 freelist
->clusters
= SCM_EOL
;
1500 freelist
->clustertail
= &freelist
->clusters
;
1501 freelist
->collected_1
= freelist
->collected
;
1502 freelist
->collected
= 0;
1506 gc_sweep_freelist_finish (scm_freelist_t
*freelist
)
1509 *freelist
->clustertail
= freelist
->cells
;
1510 if (SCM_NNULLP (freelist
->cells
))
1512 SCM c
= freelist
->cells
;
1513 SCM_SETCAR (c
, SCM_CDR (c
));
1514 SCM_SETCDR (c
, SCM_EOL
);
1515 freelist
->collected
+=
1516 freelist
->span
* (freelist
->cluster_size
- freelist
->left_to_collect
);
1518 scm_gc_cells_collected
+= freelist
->collected
;
1520 /* Although freelist->min_yield is used to test freelist->collected
1521 * (which is the local GC yield for freelist), it is adjusted so
1522 * that *total* yield is freelist->min_yield_fraction of total heap
1523 * size. This means that a too low yield is compensated by more
1524 * heap on the list which is currently doing most work, which is
1525 * just what we want.
1527 collected
= SCM_MAX (freelist
->collected_1
, freelist
->collected
);
1528 freelist
->grow_heap_p
= (collected
< freelist
->min_yield
);
1535 register SCM_CELLPTR ptr
;
1536 register SCM nfreelist
;
1537 register scm_freelist_t
*freelist
;
1545 #ifdef GUILE_NEW_GC_SCHEME
1546 gc_sweep_freelist_start (&scm_master_freelist
);
1547 gc_sweep_freelist_start (&scm_master_freelist2
);
1549 /* Reset all free list pointers. We'll reconstruct them completely
1551 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1552 scm_heap_table
[i
].freelist
->cells
= SCM_EOL
;
1555 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1557 #ifdef GUILE_NEW_GC_SCHEME
1558 register unsigned int left_to_collect
;
1560 register scm_sizet n
= 0;
1562 register scm_sizet j
;
1564 /* Unmarked cells go onto the front of the freelist this heap
1565 segment points to. Rather than updating the real freelist
1566 pointer as we go along, we accumulate the new head in
1567 nfreelist. Then, if it turns out that the entire segment is
1568 free, we free (i.e., malloc's free) the whole segment, and
1569 simply don't assign nfreelist back into the real freelist. */
1570 freelist
= scm_heap_table
[i
].freelist
;
1571 nfreelist
= freelist
->cells
;
1572 #ifdef GUILE_NEW_GC_SCHEME
1573 left_to_collect
= freelist
->left_to_collect
;
1575 span
= scm_heap_table
[i
].span
;
1577 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0], span
);
1578 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1], span
) - ptr
;
1579 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1581 SCM scmptr
= PTR2SCM (ptr
);
1583 switch SCM_TYP7 (scmptr
)
1585 case scm_tcs_cons_gloc
:
1587 /* Dirk:FIXME:: Again, super ugly code: scmptr may be a
1588 * struct or a gloc. See the corresponding comment in
1591 scm_bits_t word0
= SCM_CELL_WORD_0 (scmptr
) - scm_tc3_cons_gloc
;
1592 scm_bits_t
* vtable_data
= (scm_bits_t
*) word0
; /* access as struct */
1593 if (SCM_GCMARKP (scmptr
))
1595 if (vtable_data
[scm_vtable_index_vcell
] == 1)
1596 vtable_data
[scm_vtable_index_vcell
] = 0;
1601 if (vtable_data
[scm_vtable_index_vcell
] == 0
1602 || vtable_data
[scm_vtable_index_vcell
] == 1)
1604 scm_struct_free_t free
1605 = (scm_struct_free_t
) vtable_data
[scm_struct_i_free
];
1606 m
+= free (vtable_data
, (scm_bits_t
*) SCM_UNPACK (SCM_GCCDR (scmptr
)));
1611 case scm_tcs_cons_imcar
:
1612 case scm_tcs_cons_nimcar
:
1613 case scm_tcs_closures
:
1615 if (SCM_GCMARKP (scmptr
))
1619 if (SCM_GC8MARKP (scmptr
))
1625 m
+= (2 + SCM_LENGTH (scmptr
)) * sizeof (SCM
);
1626 scm_must_free ((char *)(SCM_VELTS (scmptr
) - 2));
1630 case scm_tc7_vector
:
1631 case scm_tc7_lvector
:
1635 if (SCM_GC8MARKP (scmptr
))
1638 m
+= (SCM_LENGTH (scmptr
) * sizeof (SCM
));
1640 scm_must_free (SCM_CHARS (scmptr
));
1641 /* SCM_SETCHARS(scmptr, 0);*/
1645 if SCM_GC8MARKP (scmptr
)
1647 m
+= sizeof (long) * ((SCM_HUGE_LENGTH (scmptr
) + SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1649 case scm_tc7_byvect
:
1650 if SCM_GC8MARKP (scmptr
)
1652 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (char);
1656 if SCM_GC8MARKP (scmptr
)
1658 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long);
1661 if SCM_GC8MARKP (scmptr
)
1663 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (short);
1665 #ifdef HAVE_LONG_LONGS
1666 case scm_tc7_llvect
:
1667 if SCM_GC8MARKP (scmptr
)
1669 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long_long
);
1673 if SCM_GC8MARKP (scmptr
)
1675 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (float);
1678 if SCM_GC8MARKP (scmptr
)
1680 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (double);
1683 if SCM_GC8MARKP (scmptr
)
1685 m
+= SCM_HUGE_LENGTH (scmptr
) * 2 * sizeof (double);
1688 case scm_tc7_substring
:
1689 if (SCM_GC8MARKP (scmptr
))
1692 case scm_tc7_string
:
1693 if (SCM_GC8MARKP (scmptr
))
1695 m
+= SCM_HUGE_LENGTH (scmptr
) + 1;
1697 case scm_tc7_msymbol
:
1698 if (SCM_GC8MARKP (scmptr
))
1700 m
+= (SCM_LENGTH (scmptr
) + 1
1701 + (SCM_CHARS (scmptr
) - (char *) SCM_SLOTS (scmptr
)));
1702 scm_must_free ((char *)SCM_SLOTS (scmptr
));
1704 case scm_tc7_contin
:
1705 if SCM_GC8MARKP (scmptr
)
1707 m
+= SCM_LENGTH (scmptr
) * sizeof (SCM_STACKITEM
) + sizeof (scm_contregs
);
1708 if (SCM_VELTS (scmptr
))
1710 case scm_tc7_ssymbol
:
1711 if SCM_GC8MARKP(scmptr
)
1717 if SCM_GC8MARKP (scmptr
)
1719 if SCM_OPENP (scmptr
)
1721 int k
= SCM_PTOBNUM (scmptr
);
1722 if (!(k
< scm_numptob
))
1724 /* Keep "revealed" ports alive. */
1725 if (scm_revealed_count (scmptr
) > 0)
1727 /* Yes, I really do mean scm_ptobs[k].free */
1728 /* rather than ftobs[k].close. .close */
1729 /* is for explicit CLOSE-PORT by user */
1730 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1731 SCM_SETSTREAM (scmptr
, 0);
1732 scm_remove_from_port_table (scmptr
);
1733 scm_gc_ports_collected
++;
1734 SCM_SETAND_CAR (scmptr
, ~SCM_OPN
);
1738 switch SCM_GCTYP16 (scmptr
)
1740 case scm_tc_free_cell
:
1742 if SCM_GC8MARKP (scmptr
)
1747 if SCM_GC8MARKP (scmptr
)
1749 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1751 #endif /* def SCM_BIGDIG */
1752 case scm_tc16_complex
:
1753 if SCM_GC8MARKP (scmptr
)
1755 m
+= 2 * sizeof (double);
1758 if SCM_GC8MARKP (scmptr
)
1763 k
= SCM_SMOBNUM (scmptr
);
1764 if (!(k
< scm_numsmob
))
1766 m
+= (scm_smobs
[k
].free
) (scmptr
);
1772 sweeperr
:scm_wta (scmptr
, "unknown type in ", "gc_sweep");
1775 if (SCM_CAR (scmptr
) == (SCM
) scm_tc_free_cell
)
1778 #ifndef GUILE_NEW_GC_SCHEME
1781 if (!--left_to_collect
)
1783 SCM_SETCAR (scmptr
, nfreelist
);
1784 *freelist
->clustertail
= scmptr
;
1785 freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1787 nfreelist
= SCM_EOL
;
1788 freelist
->collected
+= span
* freelist
->cluster_size
;
1789 left_to_collect
= freelist
->cluster_size
;
1794 /* Stick the new cell on the front of nfreelist. It's
1795 critical that we mark this cell as freed; otherwise, the
1796 conservative collector might trace it as some other type
1798 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
1799 SCM_SETCDR (scmptr
, nfreelist
);
1805 SCM_CLRGC8MARK (scmptr
);
1808 SCM_CLRGCMARK (scmptr
);
1810 #ifdef GC_FREE_SEGMENTS
1815 freelist
->heap_size
-= seg_size
;
1816 free ((char *) scm_heap_table
[i
].bounds
[0]);
1817 scm_heap_table
[i
].bounds
[0] = 0;
1818 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1819 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1820 scm_n_heap_segs
-= 1;
1821 i
--; /* We need to scan the segment just moved. */
1824 #endif /* ifdef GC_FREE_SEGMENTS */
1826 /* Update the real freelist pointer to point to the head of
1827 the list of free cells we've built for this segment. */
1828 freelist
->cells
= nfreelist
;
1829 #ifdef GUILE_NEW_GC_SCHEME
1830 freelist
->left_to_collect
= left_to_collect
;
1834 #ifndef GUILE_NEW_GC_SCHEME
1835 freelist
->collected
+= n
;
1838 #ifdef GUILE_DEBUG_FREELIST
1839 #ifdef GUILE_NEW_GC_SCHEME
1840 scm_check_freelist (freelist
== &scm_master_freelist
1844 scm_check_freelist (freelist
);
1846 scm_map_free_list ();
1850 #ifdef GUILE_NEW_GC_SCHEME
1851 gc_sweep_freelist_finish (&scm_master_freelist
);
1852 gc_sweep_freelist_finish (&scm_master_freelist2
);
1854 /* When we move to POSIX threads private freelists should probably
1855 be GC-protected instead. */
1856 scm_freelist
= SCM_EOL
;
1857 scm_freelist2
= SCM_EOL
;
1860 scm_cells_allocated
= (SCM_HEAP_SIZE
- scm_gc_cells_collected
);
1861 #ifdef GUILE_NEW_GC_SCHEME
1862 scm_gc_yield
-= scm_cells_allocated
;
1864 scm_mallocated
-= m
;
1865 scm_gc_malloc_collected
= m
;
1871 /* {Front end to malloc}
1873 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1875 * These functions provide services comparable to malloc, realloc, and
1876 * free. They are for allocating malloced parts of scheme objects.
1877 * The primary purpose of the front end is to impose calls to gc.
1882 * Return newly malloced storage or throw an error.
1884 * The parameter WHAT is a string for error reporting.
1885 * If the threshold scm_mtrigger will be passed by this
1886 * allocation, or if the first call to malloc fails,
1887 * garbage collect -- on the presumption that some objects
1888 * using malloced storage may be collected.
1890 * The limit scm_mtrigger may be raised by this allocation.
1893 scm_must_malloc (scm_sizet size
, const char *what
)
1896 unsigned long nm
= scm_mallocated
+ size
;
1898 if (nm
<= scm_mtrigger
)
1900 SCM_SYSCALL (ptr
= malloc (size
));
1903 scm_mallocated
= nm
;
1904 #ifdef GUILE_DEBUG_MALLOC
1905 scm_malloc_register (ptr
, what
);
1913 nm
= scm_mallocated
+ size
;
1914 SCM_SYSCALL (ptr
= malloc (size
));
1917 scm_mallocated
= nm
;
1918 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1919 if (nm
> scm_mtrigger
)
1920 scm_mtrigger
= nm
+ nm
/ 2;
1922 scm_mtrigger
+= scm_mtrigger
/ 2;
1924 #ifdef GUILE_DEBUG_MALLOC
1925 scm_malloc_register (ptr
, what
);
1931 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
1932 return 0; /* never reached */
1937 * is similar to scm_must_malloc.
1940 scm_must_realloc (void *where
,
1946 scm_sizet nm
= scm_mallocated
+ size
- old_size
;
1948 if (nm
<= scm_mtrigger
)
1950 SCM_SYSCALL (ptr
= realloc (where
, size
));
1953 scm_mallocated
= nm
;
1954 #ifdef GUILE_DEBUG_MALLOC
1955 scm_malloc_reregister (where
, ptr
, what
);
1963 nm
= scm_mallocated
+ size
- old_size
;
1964 SCM_SYSCALL (ptr
= realloc (where
, size
));
1967 scm_mallocated
= nm
;
1968 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
1969 if (nm
> scm_mtrigger
)
1970 scm_mtrigger
= nm
+ nm
/ 2;
1972 scm_mtrigger
+= scm_mtrigger
/ 2;
1974 #ifdef GUILE_DEBUG_MALLOC
1975 scm_malloc_reregister (where
, ptr
, what
);
1980 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
1981 return 0; /* never reached */
1985 scm_must_free (void *obj
)
1987 #ifdef GUILE_DEBUG_MALLOC
1988 scm_malloc_unregister (obj
);
1993 scm_wta (SCM_INUM0
, "already free", "");
1996 /* Announce that there has been some malloc done that will be freed
1997 * during gc. A typical use is for a smob that uses some malloced
1998 * memory but can not get it from scm_must_malloc (for whatever
1999 * reason). When a new object of this smob is created you call
2000 * scm_done_malloc with the size of the object. When your smob free
2001 * function is called, be sure to include this size in the return
2005 scm_done_malloc (long size
)
2007 scm_mallocated
+= size
;
2009 if (scm_mallocated
> scm_mtrigger
)
2011 scm_igc ("foreign mallocs");
2012 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
2014 if (scm_mallocated
> scm_mtrigger
)
2015 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
2017 scm_mtrigger
+= scm_mtrigger
/ 2;
2027 * Each heap segment is an array of objects of a particular size.
2028 * Every segment has an associated (possibly shared) freelist.
2029 * A table of segment records is kept that records the upper and
2030 * lower extents of the segment; this is used during the conservative
2031 * phase of gc to identify probable gc roots (because they point
2032 * into valid segments at reasonable offsets). */
2035 * is true if the first segment was smaller than INIT_HEAP_SEG.
2036 * If scm_expmem is set to one, subsequent segment allocations will
2037 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2041 scm_sizet scm_max_segment_size
;
2044 * is the lowest base address of any heap segment.
2046 SCM_CELLPTR scm_heap_org
;
2048 scm_heap_seg_data_t
* scm_heap_table
= 0;
2049 int scm_n_heap_segs
= 0;
2052 * initializes a new heap segment and returns the number of objects it contains.
2054 * The segment origin, segment size in bytes, and the span of objects
2055 * in cells are input parameters. The freelist is both input and output.
2057 * This function presumes that the scm_heap_table has already been expanded
2058 * to accommodate a new segment record.
2063 init_heap_seg (SCM_CELLPTR seg_org
, scm_sizet size
, scm_freelist_t
*freelist
)
2065 register SCM_CELLPTR ptr
;
2066 SCM_CELLPTR seg_end
;
2069 int span
= freelist
->span
;
2071 if (seg_org
== NULL
)
2074 ptr
= CELL_UP (seg_org
, span
);
2076 /* Compute the ceiling on valid object pointers w/in this segment.
2078 seg_end
= CELL_DN ((char *) seg_org
+ size
, span
);
2080 /* Find the right place and insert the segment record.
2083 for (new_seg_index
= 0;
2084 ( (new_seg_index
< scm_n_heap_segs
)
2085 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
));
2091 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
2092 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
2097 scm_heap_table
[new_seg_index
].valid
= 0;
2098 scm_heap_table
[new_seg_index
].span
= span
;
2099 scm_heap_table
[new_seg_index
].freelist
= freelist
;
2100 scm_heap_table
[new_seg_index
].bounds
[0] = ptr
;
2101 scm_heap_table
[new_seg_index
].bounds
[1] = seg_end
;
2104 /* Compute the least valid object pointer w/in this segment
2106 ptr
= CELL_UP (ptr
, span
);
2110 n_new_cells
= seg_end
- ptr
;
2112 #ifdef GUILE_NEW_GC_SCHEME
2114 freelist
->heap_size
+= n_new_cells
;
2116 /* Partition objects in this segment into clusters */
2119 SCM
*clusterp
= &clusters
;
2120 int n_cluster_cells
= span
* freelist
->cluster_size
;
2122 while (n_new_cells
> span
) /* at least one spine + one freecell */
2124 /* Determine end of cluster
2126 if (n_new_cells
>= n_cluster_cells
)
2128 seg_end
= ptr
+ n_cluster_cells
;
2129 n_new_cells
-= n_cluster_cells
;
2132 /* [cmm] looks like the segment size doesn't divide cleanly by
2133 cluster size. bad cmm! */
2136 /* Allocate cluster spine
2138 *clusterp
= PTR2SCM (ptr
);
2139 SCM_SETCAR (*clusterp
, PTR2SCM (ptr
+ span
));
2140 clusterp
= SCM_CDRLOC (*clusterp
);
2143 while (ptr
< seg_end
)
2145 SCM scmptr
= PTR2SCM (ptr
);
2147 SCM_SET_CELL_TYPE (scmptr
, scm_tc_free_cell
);
2148 SCM_SETCDR (scmptr
, PTR2SCM (ptr
+ span
));
2152 SCM_SETCDR (PTR2SCM (ptr
- span
), SCM_EOL
);
2155 /* Patch up the last cluster pointer in the segment
2156 * to join it to the input freelist.
2158 *clusterp
= freelist
->clusters
;
2159 freelist
->clusters
= clusters
;
2162 #else /* GUILE_NEW_GC_SCHEME */
2164 /* Prepend objects in this segment to the freelist.
2166 while (ptr
< seg_end
)
2168 SCM scmptr
= PTR2SCM (ptr
);
2170 SCM_SETCAR (scmptr
, (SCM
) scm_tc_free_cell
);
2171 SCM_SETCDR (scmptr
, PTR2SCM (ptr
+ span
));
2177 /* Patch up the last freelist pointer in the segment
2178 * to join it to the input freelist.
2180 SCM_SETCDR (PTR2SCM (ptr
), freelist
->cells
);
2181 freelist
->cells
= PTR2SCM (CELL_UP (seg_org
, span
));
2183 freelist
->heap_size
+= n_new_cells
;
2185 #endif /* GUILE_NEW_GC_SCHEME */
2188 fprintf (stderr
, "H");
2193 #ifndef GUILE_NEW_GC_SCHEME
2194 #define round_to_cluster_size(freelist, len) len
2198 round_to_cluster_size (scm_freelist_t
*freelist
, scm_sizet len
)
2200 scm_sizet cluster_size_in_bytes
= CLUSTER_SIZE_IN_BYTES (freelist
);
2203 (len
+ cluster_size_in_bytes
- 1) / cluster_size_in_bytes
* cluster_size_in_bytes
2204 + ALIGNMENT_SLACK (freelist
);
2210 alloc_some_heap (scm_freelist_t
*freelist
)
2212 scm_heap_seg_data_t
* tmptable
;
2216 /* Critical code sections (such as the garbage collector)
2217 * aren't supposed to add heap segments.
2219 if (scm_gc_heap_lock
)
2220 scm_wta (SCM_UNDEFINED
, "need larger initial", "heap");
2222 /* Expand the heap tables to have room for the new segment.
2223 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
2224 * only if the allocation of the segment itself succeeds.
2226 len
= (1 + scm_n_heap_segs
) * sizeof (scm_heap_seg_data_t
);
2228 SCM_SYSCALL (tmptable
= ((scm_heap_seg_data_t
*)
2229 realloc ((char *)scm_heap_table
, len
)));
2231 scm_wta (SCM_UNDEFINED
, "could not grow", "hplims");
2233 scm_heap_table
= tmptable
;
2236 /* Pick a size for the new heap segment.
2237 * The rule for picking the size of a segment is explained in
2240 #ifdef GUILE_NEW_GC_SCHEME
2242 /* Assure that the new segment is predicted to be large enough.
2244 * New yield should at least equal GC fraction of new heap size, i.e.
2246 * y + dh > f * (h + dh)
2249 * f : min yield fraction
2251 * dh : size of new heap segment
2253 * This gives dh > (f * h - y) / (1 - f)
2255 int f
= freelist
->min_yield_fraction
;
2256 long h
= SCM_HEAP_SIZE
;
2257 long min_cells
= (f
* h
- 100 * (long) scm_gc_yield
) / (99 - f
);
2258 len
= SCM_EXPHEAP (freelist
->heap_size
);
2260 fprintf (stderr
, "(%d < %d)", len
, min_cells
);
2262 if (len
< min_cells
)
2263 len
= min_cells
+ freelist
->cluster_size
;
2264 len
*= sizeof (scm_cell
);
2265 /* force new sampling */
2266 freelist
->collected
= LONG_MAX
;
2269 if (len
> scm_max_segment_size
)
2270 len
= scm_max_segment_size
;
2274 len
= (scm_sizet
) SCM_EXPHEAP (freelist
->heap_size
* sizeof (scm_cell
));
2275 if ((scm_sizet
) SCM_EXPHEAP (freelist
->heap_size
* sizeof (scm_cell
))
2280 len
= SCM_HEAP_SEG_SIZE
;
2281 #endif /* GUILE_NEW_GC_SCHEME */
2286 #ifndef GUILE_NEW_GC_SCHEME
2287 smallest
= (freelist
->span
* sizeof (scm_cell
));
2289 smallest
= CLUSTER_SIZE_IN_BYTES (freelist
);
2295 /* Allocate with decaying ambition. */
2296 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
2297 && (len
>= smallest
))
2299 scm_sizet rounded_len
= round_to_cluster_size (freelist
, len
);
2300 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (rounded_len
));
2303 init_heap_seg (ptr
, rounded_len
, freelist
);
2310 scm_wta (SCM_UNDEFINED
, "could not grow", "heap");
2314 SCM_DEFINE (scm_unhash_name
, "unhash-name", 1, 0, 0,
2317 #define FUNC_NAME s_scm_unhash_name
2321 SCM_VALIDATE_SYMBOL (1,name
);
2323 bound
= scm_n_heap_segs
;
2324 for (x
= 0; x
< bound
; ++x
)
2328 p
= scm_heap_table
[x
].bounds
[0];
2329 pbound
= scm_heap_table
[x
].bounds
[1];
2332 SCM cell
= PTR2SCM (p
);
2333 if (SCM_TYP3 (cell
) == scm_tc3_cons_gloc
)
2335 /* Dirk:FIXME:: Again, super ugly code: cell may be a gloc or a
2336 * struct cell. See the corresponding comment in scm_gc_mark.
2338 scm_bits_t word0
= SCM_CELL_WORD_0 (cell
) - scm_tc3_cons_gloc
;
2339 SCM gloc_car
= SCM_PACK (word0
); /* access as gloc */
2340 SCM vcell
= SCM_CELL_OBJECT_1 (gloc_car
);
2341 if ((SCM_TRUE_P (name
) || SCM_EQ_P (SCM_CAR (gloc_car
), name
))
2342 && (SCM_UNPACK (vcell
) != 0) && (SCM_UNPACK (vcell
) != 1))
2344 SCM_SET_CELL_OBJECT_0 (cell
, name
);
2357 /* {GC Protection Helper Functions}
2362 scm_remember (SCM
*ptr
)
2367 These crazy functions prevent garbage collection
2368 of arguments after the first argument by
2369 ensuring they remain live throughout the
2370 function because they are used in the last
2371 line of the code block.
2372 It'd be better to have a nice compiler hint to
2373 aid the conservative stack-scanning GC. --03/09/00 gjb */
2375 scm_return_first (SCM elt
, ...)
2381 scm_return_first_int (int i
, ...)
2388 scm_permanent_object (SCM obj
)
2391 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2397 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2398 even if all other references are dropped, until someone applies
2399 scm_unprotect_object to it. This function returns OBJ.
2401 Calls to scm_protect_object nest. For every object OBJ, there is a
2402 counter which scm_protect_object(OBJ) increments and
2403 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
2404 an object's counter is greater than zero, the garbage collector
2407 Of course, that's not how it's implemented. scm_protect_object and
2408 scm_unprotect_object just maintain a list of references to things.
2409 Since the GC knows about this list, all objects it mentions stay
2410 alive. scm_protect_object adds its argument to the list;
2411 scm_unprotect_object removes the first occurrence of its argument
2414 scm_protect_object (SCM obj
)
2416 scm_protects
= scm_cons (obj
, scm_protects
);
2422 /* Remove any protection for OBJ established by a prior call to
2423 scm_protect_object. This function returns OBJ.
2425 See scm_protect_object for more information. */
2427 scm_unprotect_object (SCM obj
)
2429 SCM
*tail_ptr
= &scm_protects
;
2431 while (SCM_CONSP (*tail_ptr
))
2432 if (SCM_EQ_P (SCM_CAR (*tail_ptr
), obj
))
2434 *tail_ptr
= SCM_CDR (*tail_ptr
);
2438 tail_ptr
= SCM_CDRLOC (*tail_ptr
);
2445 /* called on process termination. */
2451 extern int on_exit (void (*procp
) (), int arg
);
2454 cleanup (int status
, void *arg
)
2456 #error Dont know how to setup a cleanup handler on your system.
2461 scm_flush_all_ports ();
2466 make_initial_segment (scm_sizet init_heap_size
, scm_freelist_t
*freelist
)
2468 scm_sizet rounded_size
= round_to_cluster_size (freelist
, init_heap_size
);
2469 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2473 rounded_size
= round_to_cluster_size (freelist
, SCM_HEAP_SEG_SIZE
);
2474 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2482 #ifdef GUILE_NEW_GC_SCHEME
2483 if (freelist
->min_yield_fraction
)
2484 freelist
->min_yield
= (freelist
->heap_size
* freelist
->min_yield_fraction
2486 freelist
->grow_heap_p
= (freelist
->heap_size
< freelist
->min_yield
);
2493 #ifdef GUILE_NEW_GC_SCHEME
2495 init_freelist (scm_freelist_t
*freelist
,
2500 freelist
->clusters
= SCM_EOL
;
2501 freelist
->cluster_size
= cluster_size
+ 1;
2502 freelist
->left_to_collect
= 0;
2503 freelist
->clusters_allocated
= 0;
2504 freelist
->min_yield
= 0;
2505 freelist
->min_yield_fraction
= min_yield
;
2506 freelist
->span
= span
;
2507 freelist
->collected
= 0;
2508 freelist
->collected_1
= 0;
2509 freelist
->heap_size
= 0;
2513 scm_init_storage (scm_sizet init_heap_size_1
, int gc_trigger_1
,
2514 scm_sizet init_heap_size_2
, int gc_trigger_2
,
2515 scm_sizet max_segment_size
)
2518 scm_init_storage (scm_sizet init_heap_size_1
, scm_sizet init_heap_size_2
)
2523 if (!init_heap_size_1
)
2524 init_heap_size_1
= SCM_INIT_HEAP_SIZE_1
;
2525 if (!init_heap_size_2
)
2526 init_heap_size_2
= SCM_INIT_HEAP_SIZE_2
;
2528 j
= SCM_NUM_PROTECTS
;
2530 scm_sys_protects
[--j
] = SCM_BOOL_F
;
2533 #ifdef GUILE_NEW_GC_SCHEME
2534 scm_freelist
= SCM_EOL
;
2535 scm_freelist2
= SCM_EOL
;
2536 init_freelist (&scm_master_freelist
,
2537 1, SCM_CLUSTER_SIZE_1
,
2538 gc_trigger_1
? gc_trigger_1
: SCM_MIN_YIELD_1
);
2539 init_freelist (&scm_master_freelist2
,
2540 2, SCM_CLUSTER_SIZE_2
,
2541 gc_trigger_2
? gc_trigger_2
: SCM_MIN_YIELD_2
);
2542 scm_max_segment_size
2543 = max_segment_size
? max_segment_size
: SCM_MAX_SEGMENT_SIZE
;
2545 scm_freelist
.cells
= SCM_EOL
;
2546 scm_freelist
.span
= 1;
2547 scm_freelist
.collected
= 0;
2548 scm_freelist
.heap_size
= 0;
2550 scm_freelist2
.cells
= SCM_EOL
;
2551 scm_freelist2
.span
= 2;
2552 scm_freelist2
.collected
= 0;
2553 scm_freelist2
.heap_size
= 0;
2558 j
= SCM_HEAP_SEG_SIZE
;
2559 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
2560 scm_heap_table
= ((scm_heap_seg_data_t
*)
2561 scm_must_malloc (sizeof (scm_heap_seg_data_t
) * 2, "hplims"));
2563 #ifdef GUILE_NEW_GC_SCHEME
2564 if (make_initial_segment (init_heap_size_1
, &scm_master_freelist
) ||
2565 make_initial_segment (init_heap_size_2
, &scm_master_freelist2
))
2568 if (make_initial_segment (init_heap_size_1
, &scm_freelist
) ||
2569 make_initial_segment (init_heap_size_2
, &scm_freelist2
))
2573 /* scm_hplims[0] can change. do not remove scm_heap_org */
2574 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0], 1);
2576 scm_c_hook_init (&scm_before_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2577 scm_c_hook_init (&scm_before_mark_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2578 scm_c_hook_init (&scm_before_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2579 scm_c_hook_init (&scm_after_sweep_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2580 scm_c_hook_init (&scm_after_gc_c_hook
, 0, SCM_C_HOOK_NORMAL
);
2582 /* Initialise the list of ports. */
2583 scm_port_table
= (scm_port
**)
2584 malloc (sizeof (scm_port
*) * scm_port_table_room
);
2585 if (!scm_port_table
)
2592 on_exit (cleanup
, 0);
2596 scm_undefineds
= scm_cons (SCM_UNDEFINED
, SCM_EOL
);
2597 SCM_SETCDR (scm_undefineds
, scm_undefineds
);
2599 scm_listofnull
= scm_cons (SCM_EOL
, SCM_EOL
);
2600 scm_nullstr
= scm_makstr (0L, 0);
2601 scm_nullvect
= scm_make_vector (SCM_INUM0
, SCM_UNDEFINED
);
2602 scm_symhash
= scm_make_vector (SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
2603 scm_weak_symhash
= scm_make_weak_key_hash_table (SCM_MAKINUM (scm_symhash_dim
));
2604 scm_symhash_vars
= scm_make_vector (SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
2605 scm_stand_in_procs
= SCM_EOL
;
2606 scm_permobjs
= SCM_EOL
;
2607 scm_protects
= SCM_EOL
;
2608 scm_asyncs
= SCM_EOL
;
2609 scm_sysintern ("most-positive-fixnum", SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM
));
2610 scm_sysintern ("most-negative-fixnum", SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM
));
2612 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD
));
2621 scm_after_gc_hook
= scm_create_hook ("after-gc-hook", 0);
2622 #include "libguile/gc.x"