1 /* Copyright (C) 1995, 96, 97, 98, 99, 2000 Free Software Foundation, Inc.
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2, or (at your option)
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this software; see the file COPYING. If not, write to
15 * the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
16 * Boston, MA 02111-1307 USA
18 * As a special exception, the Free Software Foundation gives permission
19 * for additional uses of the text contained in its release of GUILE.
21 * The exception is that, if you link the GUILE library with other files
22 * to produce an executable, this does not by itself cause the
23 * resulting executable to be covered by the GNU General Public License.
24 * Your use of that executable is in no way restricted on account of
25 * linking the GUILE library code into it.
27 * This exception does not however invalidate any other reasons why
28 * the executable file might be covered by the GNU General Public License.
30 * This exception applies only to the code released by the
31 * Free Software Foundation under the name GUILE. If you copy
32 * code from other Free Software Foundation releases into a copy of
33 * GUILE, as the General Public License permits, the exception does
34 * not apply to the code that you add in this way. To avoid misleading
35 * anyone as to the status of such modified files, you must delete
36 * this exception notice from them.
38 * If you write modifications of your own for GUILE, it is your choice
39 * whether to permit this exception to apply to your modifications.
40 * If you do not wish that, delete this exception notice. */
42 /* Software engineering face-lift by Greg J. Badros, 11-Dec-1999,
43 gjb@cs.washington.edu, http://www.cs.washington.edu/homes/gjb */
45 /* #define DEBUGINFO */
54 #include "guardians.h"
76 #define var_start(x, y) va_start(x, y)
79 #define var_start(x, y) va_start(x)
83 /* {heap tuning parameters}
85 * These are parameters for controlling memory allocation. The heap
86 * is the area out of which scm_cons, and object headers are allocated.
88 * Each heap cell is 8 bytes on a 32 bit machine and 16 bytes on a
89 * 64 bit machine. The units of the _SIZE parameters are bytes.
90 * Cons pairs and object headers occupy one heap cell.
92 * SCM_INIT_HEAP_SIZE is the initial size of heap. If this much heap is
93 * allocated initially the heap will grow by half its current size
94 * each subsequent time more heap is needed.
96 * If SCM_INIT_HEAP_SIZE heap cannot be allocated initially, SCM_HEAP_SEG_SIZE
97 * will be used, and the heap will grow by SCM_HEAP_SEG_SIZE when more
98 * heap is needed. SCM_HEAP_SEG_SIZE must fit into type scm_sizet. This code
99 * is in scm_init_storage() and alloc_some_heap() in sys.c
101 * If SCM_INIT_HEAP_SIZE can be allocated initially, the heap will grow by
102 * SCM_EXPHEAP(scm_heap_size) when more heap is needed.
104 * SCM_MIN_HEAP_SEG_SIZE is minimum size of heap to accept when more heap
107 * INIT_MALLOC_LIMIT is the initial amount of malloc usage which will
110 * SCM_MTRIGGER_HYSTERESIS is the amount of malloc storage that must be
111 * reclaimed by a GC triggered by must_malloc. If less than this is
112 * reclaimed, the trigger threshold is raised. [I don't know what a
113 * good value is. I arbitrarily chose 1/10 of the INIT_MALLOC_LIMIT to
114 * work around an oscillation that caused almost constant GC.]
117 #define SCM_INIT_HEAP_SIZE_1 (50000L * sizeof (scm_cell))
118 #define SCM_CLUSTER_SIZE_1 2000L
119 #define SCM_GC_TRIGGER_1 -45
121 #define SCM_INIT_HEAP_SIZE_2 (2500L * 2 * sizeof (scm_cell))
122 #define SCM_CLUSTER_SIZE_2 1000L
123 /* The following value may seem large, but note that if we get to GC at
124 * all, this means that we have a numerically intensive application
126 #define SCM_GC_TRIGGER_2 -45
128 #define SCM_MAX_SEGMENT_SIZE 2097000L /* a little less (adm) than 2 Mb */
130 #define SCM_MIN_HEAP_SEG_SIZE (2048L * sizeof (scm_cell))
132 # define SCM_HEAP_SEG_SIZE 32768L
135 # define SCM_HEAP_SEG_SIZE (7000L * sizeof (scm_cell))
137 # define SCM_HEAP_SEG_SIZE (16384L * sizeof (scm_cell))
140 /* Make heap grow with factor 1.5 */
141 #define SCM_EXPHEAP(scm_heap_size) (scm_heap_size / 2)
142 #define SCM_INIT_MALLOC_LIMIT 100000
143 #define SCM_MTRIGGER_HYSTERESIS (SCM_INIT_MALLOC_LIMIT/10)
145 /* CELL_UP and CELL_DN are used by scm_init_heap_seg to find scm_cell aligned inner
146 bounds for allocated storage */
149 /* in 386 protected mode, we must only adjust the offset */
150 # define CELL_UP(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&(FP_OFF(p)+8*(span)-1))
151 # define CELL_DN(p, span) MK_FP(FP_SEG(p), ~(8*(span)-1)&FP_OFF(p))
154 # define CELL_UP(p, span) (SCM_CELLPTR)(~(span) & ((long)(p)+(span)))
155 # define CELL_DN(p, span) (SCM_CELLPTR)(~(span) & (long)(p))
157 # define CELL_UP(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & ((long)(p)+sizeof(scm_cell)*(span)-1L))
158 # define CELL_DN(p, span) (SCM_CELLPTR)(~(sizeof(scm_cell)*(span)-1L) & (long)(p))
161 #define CLUSTER_SIZE_IN_BYTES(freelist) ((freelist)->cluster_size * (freelist)->span * sizeof(scm_cell))
162 #define ALIGNMENT_SLACK(freelist) (sizeof (scm_cell) * (freelist)->span - 1)
163 #ifdef GUILE_NEW_GC_SCHEME
164 #define SCM_HEAP_SIZE \
165 (scm_master_freelist.heap_size + scm_master_freelist2.heap_size)
167 #define SCM_HEAP_SIZE (scm_freelist.heap_size + scm_freelist2.heap_size)
175 typedef struct scm_freelist_t
{
176 /* collected cells */
178 #ifdef GUILE_NEW_GC_SCHEME
179 /* number of cells left to collect before cluster is full */
180 unsigned int left_to_collect
;
181 /* number of clusters which have been allocated */
182 unsigned int clusters_allocated
;
183 /* a list of freelists, each of size gc_trigger,
184 except the last one which may be shorter */
187 /* this is the number of objects in each cluster, including the spine cell */
189 /* set to grow the heap when we run out of clusters
192 /* minimum number of objects allocated before GC is triggered
195 /* defines gc_trigger as percent of heap size
196 * 0 => constant trigger
198 int gc_trigger_fraction
;
200 /* number of cells per object on this list */
202 /* number of collected cells during last GC */
204 /* total number of cells in heap segments
205 * belonging to this list.
210 #ifdef GUILE_NEW_GC_SCHEME
211 SCM scm_freelist
= SCM_EOL
;
212 scm_freelist_t scm_master_freelist
= {
213 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_1
, 0, 0, 0, 1, 0, 0
215 SCM scm_freelist2
= SCM_EOL
;
216 scm_freelist_t scm_master_freelist2
= {
217 SCM_EOL
, 0, 0, SCM_EOL
, 0, SCM_CLUSTER_SIZE_2
, 0, 0, 0, 2, 0, 0
220 scm_freelist_t scm_freelist
= { SCM_EOL
, 1, 0, 0 };
221 scm_freelist_t scm_freelist2
= { SCM_EOL
, 2, 0, 0 };
225 * is the number of bytes of must_malloc allocation needed to trigger gc.
227 unsigned long scm_mtrigger
;
231 * If set, don't expand the heap. Set only during gc, during which no allocation
232 * is supposed to take place anyway.
234 int scm_gc_heap_lock
= 0;
237 * Don't pause for collection if this is set -- just
241 int scm_block_gc
= 1;
243 /* If fewer than MIN_GC_YIELD cells are recovered during a garbage
244 * collection (GC) more space is allocated for the heap.
246 #define MIN_GC_YIELD(freelist) (freelist->heap_size / 4)
248 /* During collection, this accumulates objects holding
251 SCM scm_weak_vectors
;
253 /* GC Statistics Keeping
255 unsigned long scm_cells_allocated
= 0;
256 long scm_mallocated
= 0;
257 unsigned long scm_gc_cells_collected
;
258 #ifdef GUILE_NEW_GC_SCHEME
259 unsigned long scm_gc_yield
;
260 static unsigned long scm_gc_yield_1
= 0; /* previous GC yield */
262 unsigned long scm_gc_malloc_collected
;
263 unsigned long scm_gc_ports_collected
;
264 unsigned long scm_gc_rt
;
265 unsigned long scm_gc_time_taken
= 0;
267 SCM_SYMBOL (sym_cells_allocated
, "cells-allocated");
268 SCM_SYMBOL (sym_heap_size
, "cell-heap-size");
269 SCM_SYMBOL (sym_mallocated
, "bytes-malloced");
270 SCM_SYMBOL (sym_mtrigger
, "gc-malloc-threshold");
271 SCM_SYMBOL (sym_heap_segments
, "cell-heap-segments");
272 SCM_SYMBOL (sym_gc_time_taken
, "gc-time-taken");
274 typedef struct scm_heap_seg_data_t
276 /* lower and upper bounds of the segment */
277 SCM_CELLPTR bounds
[2];
279 /* address of the head-of-freelist pointer for this segment's cells.
280 All segments usually point to the same one, scm_freelist. */
281 scm_freelist_t
*freelist
;
283 /* number of SCM words per object in this segment */
286 /* If SEG_DATA->valid is non-zero, the conservative marking
287 functions will apply SEG_DATA->valid to the purported pointer and
288 SEG_DATA, and mark the object iff the function returns non-zero.
289 At the moment, I don't think anyone uses this. */
291 } scm_heap_seg_data_t
;
296 static void scm_mark_weak_vector_spines (void);
297 static scm_sizet
init_heap_seg (SCM_CELLPTR
, scm_sizet
, scm_freelist_t
*);
298 static void alloc_some_heap (scm_freelist_t
*);
302 /* Debugging functions. */
304 #if defined (GUILE_DEBUG) || defined (GUILE_DEBUG_FREELIST)
306 /* Return the number of the heap segment containing CELL. */
312 for (i
= 0; i
< scm_n_heap_segs
; i
++)
313 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], (SCM_CELLPTR
) cell
)
314 && SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], (SCM_CELLPTR
) cell
))
316 fprintf (stderr
, "which_seg: can't find segment containing cell %lx\n",
322 #ifdef GUILE_NEW_GC_SCHEME
324 map_free_list (scm_freelist_t
*master
, SCM freelist
)
326 int last_seg
= -1, count
= 0;
329 for (f
= freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
))
331 int this_seg
= which_seg (f
);
333 if (this_seg
!= last_seg
)
336 fprintf (stderr
, " %5d %d-cells in segment %d\n",
337 count
, master
->span
, last_seg
);
344 fprintf (stderr
, " %5d %d-cells in segment %d\n",
345 count
, master
->span
, last_seg
);
349 map_free_list (scm_freelist_t
*freelist
)
351 int last_seg
= -1, count
= 0;
354 for (f
= freelist
->cells
; SCM_NIMP (f
); f
= SCM_CDR (f
))
356 int this_seg
= which_seg (f
);
358 if (this_seg
!= last_seg
)
361 fprintf (stderr
, " %5d %d-cells in segment %d\n",
362 count
, freelist
->span
, last_seg
);
369 fprintf (stderr
, " %5d %d-cells in segment %d\n",
370 count
, freelist
->span
, last_seg
);
374 SCM_DEFINE (scm_map_free_list
, "map-free-list", 0, 0, 0,
376 "Print debugging information about the free-list.\n"
377 "`map-free-list' is only included in --enable-guile-debug builds of Guile.")
378 #define FUNC_NAME s_scm_map_free_list
381 fprintf (stderr
, "%d segments total (%d:%d",
383 scm_heap_table
[0].span
,
384 scm_heap_table
[0].bounds
[1] - scm_heap_table
[0].bounds
[0]);
385 for (i
= 1; i
< scm_n_heap_segs
; i
++)
386 fprintf (stderr
, ", %d:%d",
387 scm_heap_table
[i
].span
,
388 scm_heap_table
[i
].bounds
[1] - scm_heap_table
[i
].bounds
[0]);
389 fprintf (stderr
, ")\n");
390 #ifdef GUILE_NEW_GC_SCHEME
391 map_free_list (&scm_master_freelist
, scm_freelist
);
392 map_free_list (&scm_master_freelist2
, scm_freelist2
);
394 map_free_list (&scm_freelist
);
395 map_free_list (&scm_freelist2
);
399 return SCM_UNSPECIFIED
;
403 #ifdef GUILE_NEW_GC_SCHEME
404 static int last_cluster
;
405 static int last_size
;
408 free_list_length (char *title
, int i
, SCM freelist
)
412 for (ls
= freelist
; SCM_NNULLP (ls
); ls
= SCM_CDR (ls
))
413 if (SCM_UNPACK_CAR (ls
) == scm_tc_free_cell
)
417 fprintf (stderr
, "bad cell in %s at position %d\n", title
, n
);
424 if (last_cluster
== i
- 1)
425 fprintf (stderr
, "\t%d\n", last_size
);
427 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
430 fprintf (stderr
, "%s %d", title
, i
);
432 fprintf (stderr
, "%s\t%d\n", title
, n
);
440 free_list_lengths (char *title
, scm_freelist_t
*master
, SCM freelist
)
443 int i
= 0, len
, n
= 0;
444 fprintf (stderr
, "%s\n\n", title
);
445 n
+= free_list_length ("free list", -1, freelist
);
446 for (clusters
= master
->clusters
;
447 SCM_NNULLP (clusters
);
448 clusters
= SCM_CDR (clusters
))
450 len
= free_list_length ("cluster", i
++, SCM_CAR (clusters
));
453 if (last_cluster
== i
- 1)
454 fprintf (stderr
, "\t%d\n", last_size
);
456 fprintf (stderr
, "-%d\t%d\n", i
- 1, last_size
);
457 fprintf (stderr
, "\ntotal %d objects\n\n", n
);
460 SCM_DEFINE (scm_free_list_length
, "free-list-length", 0, 0, 0,
462 "Print debugging information about the free-list.\n"
463 "`free-list-length' is only included in --enable-guile-debug builds of Guile.")
464 #define FUNC_NAME s_scm_free_list_length
466 free_list_lengths ("1-cells", &scm_master_freelist
, scm_freelist
);
467 free_list_lengths ("2-cells", &scm_master_freelist2
, scm_freelist2
);
468 return SCM_UNSPECIFIED
;
475 #ifdef GUILE_DEBUG_FREELIST
477 /* Number of calls to SCM_NEWCELL since startup. */
478 static unsigned long scm_newcell_count
;
479 static unsigned long scm_newcell2_count
;
481 /* Search freelist for anything that isn't marked as a free cell.
482 Abort if we find something. */
483 #ifdef GUILE_NEW_GC_SCHEME
485 scm_check_freelist (SCM freelist
)
490 for (f
= freelist
; SCM_NIMP (f
); f
= SCM_CDR (f
), i
++)
491 if (SCM_CAR (f
) != (SCM
) scm_tc_free_cell
)
493 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
494 scm_newcell_count
, i
);
501 scm_check_freelist (scm_freelist_t
*freelist
)
506 for (f
= freelist
->cells
; SCM_NIMP (f
); f
= SCM_CDR (f
), i
++)
507 if (SCM_CAR (f
) != (SCM
) scm_tc_free_cell
)
509 fprintf (stderr
, "Bad cell in freelist on newcell %lu: %d'th elt\n",
510 scm_newcell_count
, i
);
517 static int scm_debug_check_freelist
= 0;
519 SCM_DEFINE (scm_gc_set_debug_check_freelist_x
, "gc-set-debug-check-freelist!", 1, 0, 0,
521 "If FLAG is #t, check the freelist for consistency on each cell allocation.\n"
522 "This procedure only exists because the GUILE_DEBUG_FREELIST \n"
523 "compile-time flag was selected.\n")
524 #define FUNC_NAME s_scm_gc_set_debug_check_freelist_x
526 SCM_VALIDATE_BOOL_COPY (1, flag
, scm_debug_check_freelist
);
527 return SCM_UNSPECIFIED
;
532 #ifdef GUILE_NEW_GC_SCHEME
535 scm_debug_newcell (void)
540 if (scm_debug_check_freelist
)
542 scm_check_freelist (scm_freelist
);
546 /* The rest of this is supposed to be identical to the SCM_NEWCELL
548 if (SCM_IMP (scm_freelist
))
549 new = scm_gc_for_newcell (&scm_master_freelist
, &scm_freelist
);
553 scm_freelist
= SCM_CDR (scm_freelist
);
554 SCM_SETCAR (new, scm_tc16_allocated
);
561 scm_debug_newcell2 (void)
565 scm_newcell2_count
++;
566 if (scm_debug_check_freelist
)
568 scm_check_freelist (scm_freelist2
);
572 /* The rest of this is supposed to be identical to the SCM_NEWCELL
574 if (SCM_IMP (scm_freelist2
))
575 new = scm_gc_for_newcell (&scm_master_freelist2
, &scm_freelist2
);
579 scm_freelist2
= SCM_CDR (scm_freelist2
);
580 SCM_SETCAR (new, scm_tc16_allocated
);
586 #else /* GUILE_NEW_GC_SCHEME */
589 scm_debug_newcell (void)
594 if (scm_debug_check_freelist
)
596 scm_check_freelist (&scm_freelist
);
600 /* The rest of this is supposed to be identical to the SCM_NEWCELL
602 if (SCM_IMP (scm_freelist
.cells
))
603 new = scm_gc_for_newcell (&scm_freelist
);
606 new = scm_freelist
.cells
;
607 scm_freelist
.cells
= SCM_CDR (scm_freelist
.cells
);
608 SCM_SETCAR (new, scm_tc16_allocated
);
609 ++scm_cells_allocated
;
616 scm_debug_newcell2 (void)
620 scm_newcell2_count
++;
621 if (scm_debug_check_freelist
) {
622 scm_check_freelist (&scm_freelist2
);
626 /* The rest of this is supposed to be identical to the SCM_NEWCELL2
628 if (SCM_IMP (scm_freelist2
.cells
))
629 new = scm_gc_for_newcell (&scm_freelist2
);
632 new = scm_freelist2
.cells
;
633 scm_freelist2
.cells
= SCM_CDR (scm_freelist2
.cells
);
634 SCM_SETCAR (new, scm_tc16_allocated
);
635 scm_cells_allocated
+= 2;
641 #endif /* GUILE_NEW_GC_SCHEME */
642 #endif /* GUILE_DEBUG_FREELIST */
646 #ifdef GUILE_NEW_GC_SCHEME
648 master_cells_allocated (scm_freelist_t
*master
)
650 int objects
= master
->clusters_allocated
* (master
->cluster_size
- 1);
651 if (SCM_NULLP (master
->clusters
))
652 objects
-= master
->left_to_collect
;
653 return master
->span
* objects
;
657 freelist_length (SCM freelist
)
660 for (n
= 0; SCM_NNULLP (freelist
); freelist
= SCM_CDR (freelist
))
666 compute_cells_allocated ()
668 return (scm_cells_allocated
669 + master_cells_allocated (&scm_master_freelist
)
670 + master_cells_allocated (&scm_master_freelist2
)
671 - scm_master_freelist
.span
* freelist_length (scm_freelist
)
672 - scm_master_freelist2
.span
* freelist_length (scm_freelist2
));
676 /* {Scheme Interface to GC}
679 SCM_DEFINE (scm_gc_stats
, "gc-stats", 0, 0, 0,
681 "Returns an association list of statistics about Guile's current use of storage. ")
682 #define FUNC_NAME s_scm_gc_stats
687 long int local_scm_mtrigger
;
688 long int local_scm_mallocated
;
689 long int local_scm_heap_size
;
690 long int local_scm_cells_allocated
;
691 long int local_scm_gc_time_taken
;
699 for (i
= scm_n_heap_segs
; i
--; )
700 heap_segs
= scm_cons (scm_cons (scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[1]),
701 scm_ulong2num ((unsigned long)scm_heap_table
[i
].bounds
[0])),
703 if (scm_n_heap_segs
!= n
)
707 /* Below, we cons to produce the resulting list. We want a snapshot of
708 * the heap situation before consing.
710 local_scm_mtrigger
= scm_mtrigger
;
711 local_scm_mallocated
= scm_mallocated
;
712 local_scm_heap_size
= SCM_HEAP_SIZE
;
713 #ifdef GUILE_NEW_GC_SCHEME
714 local_scm_cells_allocated
= compute_cells_allocated ();
716 local_scm_cells_allocated
= scm_cells_allocated
;
718 local_scm_gc_time_taken
= scm_gc_time_taken
;
720 answer
= scm_listify (scm_cons (sym_gc_time_taken
, scm_ulong2num (local_scm_gc_time_taken
)),
721 scm_cons (sym_cells_allocated
, scm_ulong2num (local_scm_cells_allocated
)),
722 scm_cons (sym_heap_size
, scm_ulong2num (local_scm_heap_size
)),
723 scm_cons (sym_mallocated
, scm_ulong2num (local_scm_mallocated
)),
724 scm_cons (sym_mtrigger
, scm_ulong2num (local_scm_mtrigger
)),
725 scm_cons (sym_heap_segments
, heap_segs
),
734 scm_gc_start (const char *what
)
736 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ());
737 scm_gc_cells_collected
= 0;
738 #ifdef GUILE_NEW_GC_SCHEME
739 scm_gc_yield_1
= scm_gc_yield
;
740 scm_gc_yield
= (scm_cells_allocated
741 + master_cells_allocated (&scm_master_freelist
)
742 + master_cells_allocated (&scm_master_freelist2
));
744 scm_gc_malloc_collected
= 0;
745 scm_gc_ports_collected
= 0;
751 scm_gc_rt
= SCM_INUM (scm_get_internal_run_time ()) - scm_gc_rt
;
752 scm_gc_time_taken
+= scm_gc_rt
;
753 scm_system_async_mark (scm_gc_async
);
757 SCM_DEFINE (scm_object_address
, "object-address", 1, 0, 0,
759 "Return an integer that for the lifetime of @var{obj} is uniquely\n"
760 "returned by this function for @var{obj}")
761 #define FUNC_NAME s_scm_object_address
763 return scm_ulong2num ((unsigned long) obj
);
768 SCM_DEFINE (scm_gc
, "gc", 0, 0, 0,
770 "Scans all of SCM objects and reclaims for further use those that are\n"
771 "no longer accessible.")
772 #define FUNC_NAME s_scm_gc
777 return SCM_UNSPECIFIED
;
783 /* {C Interface For When GC is Triggered}
786 #ifdef GUILE_NEW_GC_SCHEME
789 adjust_gc_trigger (scm_freelist_t
*freelist
)
791 /* GC trigger is adjusted upwards so that next predicted total yield
792 * (allocated cells actually freed by GC) becomes
793 * `gc_trigger_fraction' of total heap size. Note, however, that
794 * the absolute value of gc_trigger will correspond to `collected'
795 * on one master (the one which currently is triggering GC).
797 * The reason why we look at total yield instead of cells collected
798 * on one list is that we want to take other freelists into account.
799 * On this freelist, we know that (local) yield = collected cells,
800 * but that's probably not the case on the other lists.
802 * (We might consider computing a better prediction, for example
803 * by computing an average over multiple GC:s.)
805 if (freelist
->gc_trigger_fraction
)
807 /* Pick largest of last two yields. */
808 int yield
= (scm_gc_yield_1
> scm_gc_yield
811 int delta
= ((SCM_HEAP_SIZE
* freelist
->gc_trigger_fraction
/ 100)
814 fprintf (stderr
, " after GC = %d, delta = %d\n",
819 freelist
->gc_trigger
+= delta
;
823 /* When we get POSIX threads support, the master will be global and
824 * common while the freelist will be individual for each thread.
828 scm_gc_for_newcell (scm_freelist_t
*master
, SCM
*freelist
)
834 if (SCM_NULLP (master
->clusters
))
836 if (master
->grow_heap_p
)
838 master
->grow_heap_p
= 0;
839 alloc_some_heap (master
);
844 fprintf (stderr
, "allocated = %d, ",
846 + master_cells_allocated (&scm_master_freelist
)
847 + master_cells_allocated (&scm_master_freelist2
));
850 adjust_gc_trigger (master
);
853 cell
= SCM_CAR (master
->clusters
);
854 master
->clusters
= SCM_CDR (master
->clusters
);
855 ++master
->clusters_allocated
;
857 while (SCM_NULLP (cell
));
859 *freelist
= SCM_CDR (cell
);
860 SCM_SETCAR (cell
, scm_tc16_allocated
);
865 /* This is a support routine which can be used to reserve a cluster
866 * for some special use, such as debugging. It won't be useful until
867 * free cells are preserved between garbage collections.
871 scm_alloc_cluster (scm_freelist_t
*master
)
874 cell
= scm_gc_for_newcell (master
, &freelist
);
875 SCM_SETCDR (cell
, freelist
);
880 #else /* GUILE_NEW_GC_SCHEME */
883 scm_gc_for_alloc (scm_freelist_t
*freelist
)
887 #ifdef GUILE_DEBUG_FREELIST
888 fprintf (stderr
, "Collected: %d, min_yield: %d\n",
889 freelist
->collected
, MIN_GC_YIELD (freelist
));
891 if ((freelist
->collected
< MIN_GC_YIELD (freelist
))
892 || SCM_IMP (freelist
->cells
))
893 alloc_some_heap (freelist
);
899 scm_gc_for_newcell (scm_freelist_t
*freelist
)
902 scm_gc_for_alloc (freelist
);
903 fl
= freelist
->cells
;
904 freelist
->cells
= SCM_CDR (fl
);
905 SCM_SETCAR (fl
, scm_tc16_allocated
);
909 #endif /* GUILE_NEW_GC_SCHEME */
912 scm_igc (const char *what
)
918 SCM_NULLP (scm_freelist
)
920 : (SCM_NULLP (scm_freelist2
) ? "o" : "m"));
923 /* During the critical section, only the current thread may run. */
924 SCM_THREAD_CRITICAL_SECTION_START
;
927 /* fprintf (stderr, "gc: %s\n", what); */
931 if (!scm_stack_base
|| scm_block_gc
)
937 if (scm_mallocated
< 0)
938 /* The byte count of allocated objects has underflowed. This is
939 probably because you forgot to report the sizes of objects you
940 have allocated, by calling scm_done_malloc or some such. When
941 the GC freed them, it subtracted their size from
942 scm_mallocated, which underflowed. */
945 if (scm_gc_heap_lock
)
946 /* We've invoked the collector while a GC is already in progress.
947 That should never happen. */
952 scm_weak_vectors
= SCM_EOL
;
954 scm_guardian_gc_init ();
956 /* unprotect any struct types with no instances */
962 pos
= &scm_type_obj_list
;
963 type_list
= scm_type_obj_list
;
964 while (type_list
!= SCM_EOL
)
965 if (SCM_VELTS (SCM_CAR (type_list
))[scm_struct_i_refcnt
])
967 pos
= SCM_CDRLOC (type_list
);
968 type_list
= SCM_CDR (type_list
);
972 *pos
= SCM_CDR (type_list
);
973 type_list
= SCM_CDR (type_list
);
978 /* flush dead entries from the continuation stack */
983 elts
= SCM_VELTS (scm_continuation_stack
);
984 bound
= SCM_LENGTH (scm_continuation_stack
);
985 x
= SCM_INUM (scm_continuation_stack_ptr
);
988 elts
[x
] = SCM_BOOL_F
;
995 /* Protect from the C stack. This must be the first marking
996 * done because it provides information about what objects
997 * are "in-use" by the C code. "in-use" objects are those
998 * for which the values from SCM_LENGTH and SCM_CHARS must remain
999 * usable. This requirement is stricter than a liveness
1000 * requirement -- in particular, it constrains the implementation
1001 * of scm_vector_set_length_x.
1003 SCM_FLUSH_REGISTER_WINDOWS
;
1004 /* This assumes that all registers are saved into the jmp_buf */
1005 setjmp (scm_save_regs_gc_mark
);
1006 scm_mark_locations ((SCM_STACKITEM
*) scm_save_regs_gc_mark
,
1007 ( (scm_sizet
) (sizeof (SCM_STACKITEM
) - 1 +
1008 sizeof scm_save_regs_gc_mark
)
1009 / sizeof (SCM_STACKITEM
)));
1012 /* stack_len is long rather than scm_sizet in order to guarantee that
1013 &stack_len is long aligned */
1014 #ifdef SCM_STACK_GROWS_UP
1016 long stack_len
= (SCM_STACKITEM
*) (&stack_len
) - scm_stack_base
;
1018 long stack_len
= scm_stack_size (scm_stack_base
);
1020 scm_mark_locations (scm_stack_base
, (scm_sizet
) stack_len
);
1023 long stack_len
= scm_stack_base
- (SCM_STACKITEM
*) (&stack_len
);
1025 long stack_len
= scm_stack_size (scm_stack_base
);
1027 scm_mark_locations ((scm_stack_base
- stack_len
), (scm_sizet
) stack_len
);
1031 #else /* USE_THREADS */
1033 /* Mark every thread's stack and registers */
1034 scm_threads_mark_stacks ();
1036 #endif /* USE_THREADS */
1038 /* FIXME: insert a phase to un-protect string-data preserved
1039 * in scm_vector_set_length_x.
1042 j
= SCM_NUM_PROTECTS
;
1044 scm_gc_mark (scm_sys_protects
[j
]);
1046 /* FIXME: we should have a means to register C functions to be run
1047 * in different phases of GC
1049 scm_mark_subr_table ();
1052 scm_gc_mark (scm_root
->handle
);
1055 scm_mark_weak_vector_spines ();
1057 scm_guardian_zombify ();
1065 SCM_THREAD_CRITICAL_SECTION_END
;
1075 /* Mark an object precisely.
1090 if (SCM_NCELLP (ptr
))
1091 scm_wta (ptr
, "rogue pointer in heap", NULL
);
1093 switch (SCM_TYP7 (ptr
))
1095 case scm_tcs_cons_nimcar
:
1096 if (SCM_GCMARKP (ptr
))
1098 SCM_SETGCMARK (ptr
);
1099 if (SCM_IMP (SCM_CDR (ptr
))) /* SCM_IMP works even with a GC mark */
1101 ptr
= SCM_CAR (ptr
);
1104 scm_gc_mark (SCM_CAR (ptr
));
1105 ptr
= SCM_GCCDR (ptr
);
1107 case scm_tcs_cons_imcar
:
1108 if (SCM_GCMARKP (ptr
))
1110 SCM_SETGCMARK (ptr
);
1111 ptr
= SCM_GCCDR (ptr
);
1114 if (SCM_GCMARKP (ptr
))
1116 SCM_SETGCMARK (ptr
);
1117 scm_gc_mark (SCM_CELL_WORD (ptr
, 2));
1118 ptr
= SCM_GCCDR (ptr
);
1120 case scm_tcs_cons_gloc
:
1121 if (SCM_GCMARKP (ptr
))
1123 SCM_SETGCMARK (ptr
);
1126 vcell
= SCM_CAR (ptr
) - 1L;
1127 switch (SCM_UNPACK (SCM_CDR (vcell
)))
1130 scm_gc_mark (vcell
);
1131 ptr
= SCM_GCCDR (ptr
);
1143 vtable_data
= (SCM
*)vcell
;
1144 layout
= vtable_data
[scm_vtable_index_layout
];
1145 len
= SCM_LENGTH (layout
);
1146 fields_desc
= SCM_CHARS (layout
);
1147 /* We're using SCM_GCCDR here like STRUCT_DATA, except
1148 that it removes the mark */
1149 mem
= (SCM
*)SCM_GCCDR (ptr
);
1151 if (SCM_UNPACK (vtable_data
[scm_struct_i_flags
]) & SCM_STRUCTF_ENTITY
)
1153 scm_gc_mark (mem
[scm_struct_i_procedure
]);
1154 scm_gc_mark (mem
[scm_struct_i_setter
]);
1158 for (x
= 0; x
< len
- 2; x
+= 2, ++mem
)
1159 if (fields_desc
[x
] == 'p')
1161 if (fields_desc
[x
] == 'p')
1164 if (SCM_LAYOUT_TAILP (fields_desc
[x
+ 1]))
1165 for (j
= (long int) *mem
; x
; --x
)
1166 scm_gc_mark (*++mem
);
1171 if (!SCM_CDR (vcell
))
1173 SCM_SETGCMARK (vcell
);
1174 ptr
= vtable_data
[scm_vtable_index_vtable
];
1181 case scm_tcs_closures
:
1182 if (SCM_GCMARKP (ptr
))
1184 SCM_SETGCMARK (ptr
);
1185 if (SCM_IMP (SCM_CDR (ptr
)))
1187 ptr
= SCM_CLOSCAR (ptr
);
1190 scm_gc_mark (SCM_CLOSCAR (ptr
));
1191 ptr
= SCM_GCCDR (ptr
);
1193 case scm_tc7_vector
:
1194 case scm_tc7_lvector
:
1198 if (SCM_GC8MARKP (ptr
))
1200 SCM_SETGC8MARK (ptr
);
1201 i
= SCM_LENGTH (ptr
);
1205 if (SCM_NIMP (SCM_VELTS (ptr
)[i
]))
1206 scm_gc_mark (SCM_VELTS (ptr
)[i
]);
1207 ptr
= SCM_VELTS (ptr
)[0];
1209 case scm_tc7_contin
:
1212 SCM_SETGC8MARK (ptr
);
1213 if (SCM_VELTS (ptr
))
1214 scm_mark_locations (SCM_VELTS_AS_STACKITEMS (ptr
),
1217 (sizeof (SCM_STACKITEM
) + -1 +
1218 sizeof (scm_contregs
)) /
1219 sizeof (SCM_STACKITEM
)));
1223 case scm_tc7_byvect
:
1230 #ifdef HAVE_LONG_LONGS
1231 case scm_tc7_llvect
:
1234 case scm_tc7_string
:
1235 SCM_SETGC8MARK (ptr
);
1238 case scm_tc7_substring
:
1239 if (SCM_GC8MARKP(ptr
))
1241 SCM_SETGC8MARK (ptr
);
1242 ptr
= SCM_CDR (ptr
);
1246 if (SCM_GC8MARKP(ptr
))
1248 SCM_WVECT_GC_CHAIN (ptr
) = scm_weak_vectors
;
1249 scm_weak_vectors
= ptr
;
1250 SCM_SETGC8MARK (ptr
);
1251 if (SCM_IS_WHVEC_ANY (ptr
))
1258 len
= SCM_LENGTH (ptr
);
1259 weak_keys
= SCM_IS_WHVEC (ptr
) || SCM_IS_WHVEC_B (ptr
);
1260 weak_values
= SCM_IS_WHVEC_V (ptr
) || SCM_IS_WHVEC_B (ptr
);
1262 for (x
= 0; x
< len
; ++x
)
1265 alist
= SCM_VELTS (ptr
)[x
];
1267 /* mark everything on the alist except the keys or
1268 * values, according to weak_values and weak_keys. */
1269 while ( SCM_CONSP (alist
)
1270 && !SCM_GCMARKP (alist
)
1271 && SCM_CONSP (SCM_CAR (alist
)))
1276 kvpair
= SCM_CAR (alist
);
1277 next_alist
= SCM_CDR (alist
);
1280 * SCM_SETGCMARK (alist);
1281 * SCM_SETGCMARK (kvpair);
1283 * It may be that either the key or value is protected by
1284 * an escaped reference to part of the spine of this alist.
1285 * If we mark the spine here, and only mark one or neither of the
1286 * key and value, they may never be properly marked.
1287 * This leads to a horrible situation in which an alist containing
1288 * freelist cells is exported.
1290 * So only mark the spines of these arrays last of all marking.
1291 * If somebody confuses us by constructing a weak vector
1292 * with a circular alist then we are hosed, but at least we
1293 * won't prematurely drop table entries.
1296 scm_gc_mark (SCM_CAR (kvpair
));
1298 scm_gc_mark (SCM_GCCDR (kvpair
));
1301 if (SCM_NIMP (alist
))
1302 scm_gc_mark (alist
);
1307 case scm_tc7_msymbol
:
1308 if (SCM_GC8MARKP(ptr
))
1310 SCM_SETGC8MARK (ptr
);
1311 scm_gc_mark (SCM_SYMBOL_FUNC (ptr
));
1312 ptr
= SCM_SYMBOL_PROPS (ptr
);
1314 case scm_tc7_ssymbol
:
1315 if (SCM_GC8MARKP(ptr
))
1317 SCM_SETGC8MARK (ptr
);
1322 i
= SCM_PTOBNUM (ptr
);
1323 if (!(i
< scm_numptob
))
1325 if (SCM_GC8MARKP (ptr
))
1327 SCM_SETGC8MARK (ptr
);
1328 if (SCM_PTAB_ENTRY(ptr
))
1329 scm_gc_mark (SCM_PTAB_ENTRY(ptr
)->file_name
);
1330 if (scm_ptobs
[i
].mark
)
1332 ptr
= (scm_ptobs
[i
].mark
) (ptr
);
1339 if (SCM_GC8MARKP (ptr
))
1341 SCM_SETGC8MARK (ptr
);
1342 switch (SCM_GCTYP16 (ptr
))
1343 { /* should be faster than going through scm_smobs */
1344 case scm_tc_free_cell
:
1345 /* printf("found free_cell %X ", ptr); fflush(stdout); */
1346 case scm_tc16_allocated
:
1349 case scm_tc16_complex
:
1352 i
= SCM_SMOBNUM (ptr
);
1353 if (!(i
< scm_numsmob
))
1355 if (scm_smobs
[i
].mark
)
1357 ptr
= (scm_smobs
[i
].mark
) (ptr
);
1365 def
:scm_wta (ptr
, "unknown type in ", "gc_mark");
1370 /* Mark a Region Conservatively
1374 scm_mark_locations (SCM_STACKITEM x
[], scm_sizet n
)
1376 register long m
= n
;
1378 register SCM_CELLPTR ptr
;
1381 if (SCM_CELLP (*(SCM
**) (& x
[m
])))
1383 ptr
= (SCM_CELLPTR
) SCM2PTR ((*(SCM
**) & x
[m
]));
1385 j
= scm_n_heap_segs
- 1;
1386 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1387 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1394 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
1396 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1404 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1408 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1413 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1417 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1423 if ( !scm_heap_table
[seg_id
].valid
1424 || scm_heap_table
[seg_id
].valid (ptr
,
1425 &scm_heap_table
[seg_id
]))
1426 if ( scm_heap_table
[seg_id
].span
== 1
1427 || SCM_DOUBLE_CELLP (*(SCM
**) (& x
[m
])))
1428 scm_gc_mark (*(SCM
*) & x
[m
]);
1437 /* The following is a C predicate which determines if an SCM value can be
1438 regarded as a pointer to a cell on the heap. The code is duplicated
1439 from scm_mark_locations. */
1443 scm_cellp (SCM value
)
1446 register SCM_CELLPTR ptr
;
1448 if SCM_CELLP (*(SCM
**) (& value
))
1450 ptr
= (SCM_CELLPTR
) SCM2PTR ((*(SCM
**) & value
));
1452 j
= scm_n_heap_segs
- 1;
1453 if ( SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
)
1454 && SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1461 || SCM_PTR_GT (scm_heap_table
[i
].bounds
[1], ptr
))
1463 else if (SCM_PTR_LE (scm_heap_table
[j
].bounds
[0], ptr
))
1471 if (SCM_PTR_GT (scm_heap_table
[k
].bounds
[1], ptr
))
1475 if (SCM_PTR_LE (scm_heap_table
[i
].bounds
[0], ptr
))
1480 else if (SCM_PTR_LE (scm_heap_table
[k
].bounds
[0], ptr
))
1484 if (SCM_PTR_GT (scm_heap_table
[j
].bounds
[1], ptr
))
1490 if ( !scm_heap_table
[seg_id
].valid
1491 || scm_heap_table
[seg_id
].valid (ptr
,
1492 &scm_heap_table
[seg_id
]))
/* Mark the spines (cons cells) of the bucket alists held by weak hash
   tables, so sweeping does not reclaim the alist structure while the
   keys/values themselves stay weak.  NOTE(review): this extract is
   missing interleaved lines from the original file; the code below is
   fragmentary and not compilable as-is.  */
1504 scm_mark_weak_vector_spines ()
/* Walk the chain of all weak vectors.  */
1508 for (w
= scm_weak_vectors
; w
!= SCM_EOL
; w
= SCM_WVECT_GC_CHAIN (w
))
/* Only weak hash tables (weak in key, value, or both) carry alists.  */
1510 if (SCM_IS_WHVEC_ANY (w
))
1518 ptr
= SCM_VELTS (w
);
1520 for (j
= 0; j
< n
; ++j
)
/* Mark the unmarked leading run of each bucket's alist: both the spine
   pair and the (key . value) entry pair, then follow the cdr.  */
1525 while ( SCM_CONSP (alist
)
1526 && !SCM_GCMARKP (alist
)
1527 && SCM_CONSP (SCM_CAR (alist
)))
1529 SCM_SETGCMARK (alist
);
1530 SCM_SETGCMARK (SCM_CAR (alist
));
1531 alist
= SCM_GCCDR (alist
);
1539 #ifdef GUILE_NEW_GC_SCHEME
1541 gc_sweep_freelist_start (scm_freelist_t
*freelist
)
1543 freelist
->cells
= SCM_EOL
;
1544 freelist
->left_to_collect
= freelist
->cluster_size
;
1545 freelist
->clusters_allocated
= 0;
1546 freelist
->clusters
= SCM_EOL
;
1547 freelist
->clustertail
= &freelist
->clusters
;
1548 freelist
->collected
= 0;
1552 gc_sweep_freelist_finish (scm_freelist_t
*freelist
)
1554 *freelist
->clustertail
= freelist
->cells
;
1555 if (SCM_NNULLP (freelist
->cells
))
1557 SCM c
= freelist
->cells
;
1558 SCM_SETCAR (c
, SCM_CDR (c
));
1559 SCM_SETCDR (c
, SCM_EOL
);
1560 freelist
->collected
+=
1561 freelist
->span
* (freelist
->cluster_size
- freelist
->left_to_collect
);
1563 scm_gc_cells_collected
+= freelist
->collected
;
1565 freelist
->grow_heap_p
= (freelist
->collected
< freelist
->gc_trigger
);
1572 register SCM_CELLPTR ptr
;
1573 #ifdef SCM_POINTERS_MUNGED
1574 register SCM scmptr
;
1577 #define scmptr (SCM)ptr
1579 register SCM nfreelist
;
1580 register scm_freelist_t
*freelist
;
1588 #ifdef GUILE_NEW_GC_SCHEME
1589 gc_sweep_freelist_start (&scm_master_freelist
);
1590 gc_sweep_freelist_start (&scm_master_freelist2
);
1592 /* Reset all free list pointers. We'll reconstruct them completely
1594 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1595 scm_heap_table
[i
].freelist
->cells
= SCM_EOL
;
1598 for (i
= 0; i
< scm_n_heap_segs
; i
++)
1600 #ifdef GUILE_NEW_GC_SCHEME
1601 register unsigned int left_to_collect
;
1603 register scm_sizet n
= 0;
1605 register scm_sizet j
;
1607 /* Unmarked cells go onto the front of the freelist this heap
1608 segment points to. Rather than updating the real freelist
1609 pointer as we go along, we accumulate the new head in
1610 nfreelist. Then, if it turns out that the entire segment is
1611 free, we free (i.e., malloc's free) the whole segment, and
1612 simply don't assign nfreelist back into the real freelist. */
1613 freelist
= scm_heap_table
[i
].freelist
;
1614 nfreelist
= freelist
->cells
;
1615 #ifdef GUILE_NEW_GC_SCHEME
1616 left_to_collect
= freelist
->left_to_collect
;
1618 span
= scm_heap_table
[i
].span
;
1620 ptr
= CELL_UP (scm_heap_table
[i
].bounds
[0], span
);
1621 seg_size
= CELL_DN (scm_heap_table
[i
].bounds
[1], span
) - ptr
;
1622 for (j
= seg_size
+ span
; j
-= span
; ptr
+= span
)
1624 #ifdef SCM_POINTERS_MUNGED
1625 scmptr
= PTR2SCM (ptr
);
1627 switch SCM_TYP7 (scmptr
)
1629 case scm_tcs_cons_gloc
:
1630 if (SCM_GCMARKP (scmptr
))
1632 if (SCM_CDR (SCM_CAR (scmptr
) - 1) == (SCM
)1)
1633 SCM_SETCDR (SCM_CAR (scmptr
) - 1, (SCM
) 0);
1638 vcell
= SCM_CAR (scmptr
) - 1L;
1640 if ((SCM_CDR (vcell
) == 0) || (SCM_UNPACK (SCM_CDR (vcell
)) == 1))
1642 scm_struct_free_t free
1643 = (scm_struct_free_t
) ((SCM
*) vcell
)[scm_struct_i_free
];
1644 m
+= free ((SCM
*) vcell
, (SCM
*) SCM_GCCDR (scmptr
));
1648 case scm_tcs_cons_imcar
:
1649 case scm_tcs_cons_nimcar
:
1650 case scm_tcs_closures
:
1652 if (SCM_GCMARKP (scmptr
))
1656 if (SCM_GC8MARKP (scmptr
))
1662 m
+= (2 + SCM_LENGTH (scmptr
)) * sizeof (SCM
);
1663 scm_must_free ((char *)(SCM_VELTS (scmptr
) - 2));
1667 case scm_tc7_vector
:
1668 case scm_tc7_lvector
:
1672 if (SCM_GC8MARKP (scmptr
))
1675 m
+= (SCM_LENGTH (scmptr
) * sizeof (SCM
));
1677 scm_must_free (SCM_CHARS (scmptr
));
1678 /* SCM_SETCHARS(scmptr, 0);*/
1682 if SCM_GC8MARKP (scmptr
)
1684 m
+= sizeof (long) * ((SCM_HUGE_LENGTH (scmptr
) + SCM_LONG_BIT
- 1) / SCM_LONG_BIT
);
1686 case scm_tc7_byvect
:
1687 if SCM_GC8MARKP (scmptr
)
1689 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (char);
1693 if SCM_GC8MARKP (scmptr
)
1695 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long);
1698 if SCM_GC8MARKP (scmptr
)
1700 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (short);
1702 #ifdef HAVE_LONG_LONGS
1703 case scm_tc7_llvect
:
1704 if SCM_GC8MARKP (scmptr
)
1706 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (long_long
);
1710 if SCM_GC8MARKP (scmptr
)
1712 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (float);
1715 if SCM_GC8MARKP (scmptr
)
1717 m
+= SCM_HUGE_LENGTH (scmptr
) * sizeof (double);
1720 if SCM_GC8MARKP (scmptr
)
1722 m
+= SCM_HUGE_LENGTH (scmptr
) * 2 * sizeof (double);
1725 case scm_tc7_substring
:
1726 if (SCM_GC8MARKP (scmptr
))
1729 case scm_tc7_string
:
1730 if (SCM_GC8MARKP (scmptr
))
1732 m
+= SCM_HUGE_LENGTH (scmptr
) + 1;
1734 case scm_tc7_msymbol
:
1735 if (SCM_GC8MARKP (scmptr
))
1737 m
+= ( SCM_LENGTH (scmptr
)
1739 + sizeof (SCM
) * ((SCM
*)SCM_CHARS (scmptr
) - SCM_SLOTS(scmptr
)));
1740 scm_must_free ((char *)SCM_SLOTS (scmptr
));
1742 case scm_tc7_contin
:
1743 if SCM_GC8MARKP (scmptr
)
1745 m
+= SCM_LENGTH (scmptr
) * sizeof (SCM_STACKITEM
) + sizeof (scm_contregs
);
1746 if (SCM_VELTS (scmptr
))
1748 case scm_tc7_ssymbol
:
1749 if SCM_GC8MARKP(scmptr
)
1755 if SCM_GC8MARKP (scmptr
)
1757 if SCM_OPENP (scmptr
)
1759 int k
= SCM_PTOBNUM (scmptr
);
1760 if (!(k
< scm_numptob
))
1762 /* Keep "revealed" ports alive. */
1763 if (scm_revealed_count (scmptr
) > 0)
1765 /* Yes, I really do mean scm_ptobs[k].free */
1766 /* rather than ftobs[k].close. .close */
1767 /* is for explicit CLOSE-PORT by user */
1768 m
+= (scm_ptobs
[k
].free
) (scmptr
);
1769 SCM_SETSTREAM (scmptr
, 0);
1770 scm_remove_from_port_table (scmptr
);
1771 scm_gc_ports_collected
++;
1772 SCM_SETAND_CAR (scmptr
, ~SCM_OPN
);
1776 switch SCM_GCTYP16 (scmptr
)
1778 case scm_tc_free_cell
:
1780 if SCM_GC8MARKP (scmptr
)
1785 if SCM_GC8MARKP (scmptr
)
1787 m
+= (SCM_NUMDIGS (scmptr
) * SCM_BITSPERDIG
/ SCM_CHAR_BIT
);
1789 #endif /* def SCM_BIGDIG */
1790 case scm_tc16_complex
:
1791 if SCM_GC8MARKP (scmptr
)
1793 m
+= 2 * sizeof (double);
1796 if SCM_GC8MARKP (scmptr
)
1801 k
= SCM_SMOBNUM (scmptr
);
1802 if (!(k
< scm_numsmob
))
1804 m
+= (scm_smobs
[k
].free
) ((SCM
) scmptr
);
1810 sweeperr
:scm_wta (scmptr
, "unknown type in ", "gc_sweep");
1813 if (SCM_CAR (scmptr
) == (SCM
) scm_tc_free_cell
)
1816 #ifndef GUILE_NEW_GC_SCHEME
1819 if (!--left_to_collect
)
1821 SCM_SETCAR (scmptr
, nfreelist
);
1822 *freelist
->clustertail
= scmptr
;
1823 freelist
->clustertail
= SCM_CDRLOC (scmptr
);
1825 nfreelist
= SCM_EOL
;
1826 freelist
->collected
+= span
* freelist
->cluster_size
;
1827 left_to_collect
= freelist
->cluster_size
;
1832 /* Stick the new cell on the front of nfreelist. It's
1833 critical that we mark this cell as freed; otherwise, the
1834 conservative collector might trace it as some other type
1836 SCM_SETCAR (scmptr
, scm_tc_free_cell
);
1837 SCM_SETCDR (scmptr
, nfreelist
);
1843 SCM_CLRGC8MARK (scmptr
);
1846 SCM_CLRGCMARK (scmptr
);
1848 #ifdef GC_FREE_SEGMENTS
1853 freelist
->heap_size
-= seg_size
;
1854 free ((char *) scm_heap_table
[i
].bounds
[0]);
1855 scm_heap_table
[i
].bounds
[0] = 0;
1856 for (j
= i
+ 1; j
< scm_n_heap_segs
; j
++)
1857 scm_heap_table
[j
- 1] = scm_heap_table
[j
];
1858 scm_n_heap_segs
-= 1;
1859 i
--; /* We need to scan the segment just moved. */
1862 #endif /* ifdef GC_FREE_SEGMENTS */
1864 /* Update the real freelist pointer to point to the head of
1865 the list of free cells we've built for this segment. */
1866 freelist
->cells
= nfreelist
;
1867 #ifdef GUILE_NEW_GC_SCHEME
1868 freelist
->left_to_collect
= left_to_collect
;
1872 #ifndef GUILE_NEW_GC_SCHEME
1873 freelist
->collected
+= n
;
1876 #ifdef GUILE_DEBUG_FREELIST
1877 #ifdef GUILE_NEW_GC_SCHEME
1878 scm_check_freelist (freelist
== &scm_master_freelist
1882 scm_check_freelist (freelist
);
1884 scm_map_free_list ();
1888 #ifdef GUILE_NEW_GC_SCHEME
1889 gc_sweep_freelist_finish (&scm_master_freelist
);
1890 gc_sweep_freelist_finish (&scm_master_freelist2
);
1892 /* When we move to POSIX threads private freelists should probably
1893 be GC-protected instead. */
1894 scm_freelist
= SCM_EOL
;
1895 scm_freelist2
= SCM_EOL
;
1898 /* Scan weak vectors. */
1901 for (w
= scm_weak_vectors
; w
!= SCM_EOL
; w
= SCM_WVECT_GC_CHAIN (w
))
1903 if (!SCM_IS_WHVEC_ANY (w
))
1907 ptr
= SCM_VELTS (w
);
1909 for (j
= 0; j
< n
; ++j
)
1910 if (SCM_FREEP (ptr
[j
]))
1911 ptr
[j
] = SCM_BOOL_F
;
1913 else /* if (SCM_IS_WHVEC_ANY (scm_weak_vectors[i])) */
1916 register long n
= SCM_LENGTH (w
);
1919 ptr
= SCM_VELTS (w
);
1921 for (j
= 0; j
< n
; ++j
)
1928 weak_keys
= SCM_IS_WHVEC (obj
) || SCM_IS_WHVEC_B (obj
);
1929 weak_values
= SCM_IS_WHVEC_V (obj
) || SCM_IS_WHVEC_B (obj
);
1934 while ( SCM_CONSP (alist
)
1935 && SCM_CONSP (SCM_CAR (alist
)))
1940 key
= SCM_CAAR (alist
);
1941 value
= SCM_CDAR (alist
);
1942 if ( (weak_keys
&& SCM_FREEP (key
))
1943 || (weak_values
&& SCM_FREEP (value
)))
1945 *fixup
= SCM_CDR (alist
);
1948 fixup
= SCM_CDRLOC (alist
);
1949 alist
= SCM_CDR (alist
);
1955 scm_cells_allocated
= (SCM_HEAP_SIZE
- scm_gc_cells_collected
);
1956 #ifdef GUILE_NEW_GC_SCHEME
1957 scm_gc_yield
-= scm_cells_allocated
;
1959 scm_mallocated
-= m
;
1960 scm_gc_malloc_collected
= m
;
1966 /* {Front end to malloc}
1968 * scm_must_malloc, scm_must_realloc, scm_must_free, scm_done_malloc
1970 * These functions provide services comparable to malloc, realloc, and
1971 * free. They are for allocating malloced parts of scheme objects.
1972 * The primary purpose of the front end is to impose calls to gc.
1976 * Return newly malloced storage or throw an error.
1978 * The parameter WHAT is a string for error reporting.
1979 * If the threshold scm_mtrigger will be passed by this
1980 * allocation, or if the first call to malloc fails,
1981 * garbage collect -- on the presumption that some objects
1982 * using malloced storage may be collected.
1984 * The limit scm_mtrigger may be raised by this allocation.
/* Allocate SIZE bytes or signal an error; WHAT names the caller for
   error reporting.  Maintains the global scm_mallocated total and may
   raise the scm_mtrigger limit.  When the trigger would be exceeded, a
   garbage collection apparently runs between the two malloc attempts —
   the scm_igc call and the success/failure branches are among the
   lines missing from this extract.  */
1987 scm_must_malloc (scm_sizet size
, const char *what
)
1990 unsigned long nm
= scm_mallocated
+ size
;
/* Fast path: the new total stays under the malloc trigger.  */
1992 if (nm
<= scm_mtrigger
)
1994 SCM_SYSCALL (ptr
= malloc (size
));
1997 scm_mallocated
= nm
;
/* Second attempt (after GC, presumably — elided lines).  */
2004 nm
= scm_mallocated
+ size
;
2005 SCM_SYSCALL (ptr
= malloc (size
));
2008 scm_mallocated
= nm
;
/* Grow the trigger with hysteresis so a GC is not retriggered at once.  */
2009 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
2010 if (nm
> scm_mtrigger
)
2011 scm_mtrigger
= nm
+ nm
/ 2;
2013 scm_mtrigger
+= scm_mtrigger
/ 2;
/* Both attempts failed: report the allocation failure via scm_wta.  */
2018 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
2019 return 0; /* never reached */
2024 * is similar to scm_must_malloc.
/* Grow or shrink a block previously obtained from scm_must_malloc;
   mirrors scm_must_malloc's trigger logic with realloc.  NOTE(review):
   the remaining parameters (the new and old sizes and the WHAT string,
   judging by the uses below) plus several branch lines are missing
   from this extract.  */
2027 scm_must_realloc (void *where
,
/* Account only for the net change in allocated bytes.  */
2033 scm_sizet nm
= scm_mallocated
+ size
- old_size
;
/* Fast path: still under the malloc trigger.  */
2035 if (nm
<= scm_mtrigger
)
2037 SCM_SYSCALL (ptr
= realloc (where
, size
));
2040 scm_mallocated
= nm
;
/* Second attempt (after GC, presumably — elided lines).  */
2047 nm
= scm_mallocated
+ size
- old_size
;
2048 SCM_SYSCALL (ptr
= realloc (where
, size
));
2051 scm_mallocated
= nm
;
/* Grow the trigger with hysteresis, as in scm_must_malloc.  */
2052 if (nm
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
) {
2053 if (nm
> scm_mtrigger
)
2054 scm_mtrigger
= nm
+ nm
/ 2;
2056 scm_mtrigger
+= scm_mtrigger
/ 2;
/* Both attempts failed: report the allocation failure.  */
2061 scm_wta (SCM_MAKINUM (size
), (char *) SCM_NALLOC
, what
);
2062 return 0; /* never reached */
/* Release memory obtained from scm_must_malloc/scm_must_realloc.
   NOTE(review): the lines that actually free OBJ are missing from this
   extract; only the error path for a null/already-freed pointer is
   visible here.  */
2066 scm_must_free (void *obj
)
/* Complain instead of silently double-freeing.  */
2071 scm_wta (SCM_INUM0
, "already free", "");
2074 /* Announce that there has been some malloc done that will be freed
2075 * during gc. A typical use is for a smob that uses some malloced
2076 * memory but can not get it from scm_must_malloc (for whatever
2077 * reason). When a new object of this smob is created you call
2078 * scm_done_malloc with the size of the object. When your smob free
2079 * function is called, be sure to include this size in the return
2083 scm_done_malloc (long size
)
2085 scm_mallocated
+= size
;
2087 if (scm_mallocated
> scm_mtrigger
)
2089 scm_igc ("foreign mallocs");
2090 if (scm_mallocated
> scm_mtrigger
- SCM_MTRIGGER_HYSTERESIS
)
2092 if (scm_mallocated
> scm_mtrigger
)
2093 scm_mtrigger
= scm_mallocated
+ scm_mallocated
/ 2;
2095 scm_mtrigger
+= scm_mtrigger
/ 2;
2105 * Each heap segment is an array of objects of a particular size.
2106 * Every segment has an associated (possibly shared) freelist.
2107 * A table of segment records is kept that records the upper and
2108 * lower extents of the segment; this is used during the conservative
2109 * phase of gc to identify probable gc roots (because they point
2110 * into valid segments at reasonable offsets). */
2113 * is true if the first segment was smaller than INIT_HEAP_SEG.
2114 * If scm_expmem is set to one, subsequent segment allocations will
2115 * allocate segments of size SCM_EXPHEAP(scm_heap_size).
2119 scm_sizet scm_max_segment_size
;
2122 * is the lowest base address of any heap segment.
2124 SCM_CELLPTR scm_heap_org
;
2126 scm_heap_seg_data_t
* scm_heap_table
= 0;
2127 int scm_n_heap_segs
= 0;
2130 * initializes a new heap segment and return the number of objects it contains.
2132 * The segment origin, segment size in bytes, and the span of objects
2133 * in cells are input parameters. The freelist is both input and output.
2135 * This function presumes that the scm_heap_table has already been expanded
2136 * to accommodate a new segment record.
2141 init_heap_seg (SCM_CELLPTR seg_org
, scm_sizet size
, scm_freelist_t
*freelist
)
2143 register SCM_CELLPTR ptr
;
2144 #ifdef SCM_POINTERS_MUNGED
2145 register SCM scmptr
;
2150 SCM_CELLPTR seg_end
;
2153 int span
= freelist
->span
;
2155 if (seg_org
== NULL
)
2158 ptr
= CELL_UP (seg_org
, span
);
2160 /* Compute the ceiling on valid object pointers w/in this segment.
2162 seg_end
= CELL_DN ((char *) seg_org
+ size
, span
);
2164 /* Find the right place and insert the segment record.
2167 for (new_seg_index
= 0;
2168 ( (new_seg_index
< scm_n_heap_segs
)
2169 && SCM_PTR_LE (scm_heap_table
[new_seg_index
].bounds
[0], seg_org
));
2175 for (i
= scm_n_heap_segs
; i
> new_seg_index
; --i
)
2176 scm_heap_table
[i
] = scm_heap_table
[i
- 1];
2181 scm_heap_table
[new_seg_index
].valid
= 0;
2182 scm_heap_table
[new_seg_index
].span
= span
;
2183 scm_heap_table
[new_seg_index
].freelist
= freelist
;
2184 scm_heap_table
[new_seg_index
].bounds
[0] = (SCM_CELLPTR
)ptr
;
2185 scm_heap_table
[new_seg_index
].bounds
[1] = (SCM_CELLPTR
)seg_end
;
2188 /* Compute the least valid object pointer w/in this segment
2190 ptr
= CELL_UP (ptr
, span
);
2194 n_new_cells
= seg_end
- ptr
;
2196 #ifdef GUILE_NEW_GC_SCHEME
2198 freelist
->heap_size
+= n_new_cells
;
2200 /* Partition objects in this segment into clusters */
2203 SCM
*clusterp
= &clusters
;
2204 int n_cluster_cells
= span
* freelist
->cluster_size
;
2206 while (n_new_cells
> span
) /* at least one spine + one freecell */
2208 /* Determine end of cluster
2210 if (n_new_cells
>= n_cluster_cells
)
2212 seg_end
= ptr
+ n_cluster_cells
;
2213 n_new_cells
-= n_cluster_cells
;
2216 /* [cmm] looks like the segment size doesn't divide cleanly by
2217 cluster size. bad cmm! */
2220 /* Allocate cluster spine
2222 *clusterp
= PTR2SCM (ptr
);
2223 SCM_SETCAR (*clusterp
, PTR2SCM (ptr
+ span
));
2224 clusterp
= SCM_CDRLOC (*clusterp
);
2227 while (ptr
< seg_end
)
2229 #ifdef SCM_POINTERS_MUNGED
2230 scmptr
= PTR2SCM (ptr
);
2232 SCM_SETCAR (scmptr
, scm_tc_free_cell
);
2233 SCM_SETCDR (scmptr
, PTR2SCM (ptr
+ span
));
2237 SCM_SETCDR (PTR2SCM (ptr
- span
), SCM_EOL
);
2240 /* Patch up the last cluster pointer in the segment
2241 * to join it to the input freelist.
2243 *clusterp
= freelist
->clusters
;
2244 freelist
->clusters
= clusters
;
2247 #else /* GUILE_NEW_GC_SCHEME */
2249 /* Prepend objects in this segment to the freelist.
2251 while (ptr
< seg_end
)
2253 #ifdef SCM_POINTERS_MUNGED
2254 scmptr
= PTR2SCM (ptr
);
2256 SCM_SETCAR (scmptr
, (SCM
) scm_tc_free_cell
);
2257 SCM_SETCDR (scmptr
, PTR2SCM (ptr
+ span
));
2263 /* Patch up the last freelist pointer in the segment
2264 * to join it to the input freelist.
2266 SCM_SETCDR (PTR2SCM (ptr
), freelist
->cells
);
2267 freelist
->cells
= PTR2SCM (CELL_UP (seg_org
, span
));
2269 freelist
->heap_size
+= n_new_cells
;
2271 #endif /* GUILE_NEW_GC_SCHEME */
2274 fprintf (stderr
, "H");
2282 #ifndef GUILE_NEW_GC_SCHEME
2283 #define round_to_cluster_size(freelist, len) len
2287 round_to_cluster_size (scm_freelist_t
*freelist
, scm_sizet len
)
2289 scm_sizet cluster_size_in_bytes
= CLUSTER_SIZE_IN_BYTES (freelist
);
2292 (len
+ cluster_size_in_bytes
- 1) / cluster_size_in_bytes
* cluster_size_in_bytes
2293 + ALIGNMENT_SLACK (freelist
);
2299 alloc_some_heap (scm_freelist_t
*freelist
)
2301 scm_heap_seg_data_t
* tmptable
;
2305 /* Critical code sections (such as the garbage collector)
2306 * aren't supposed to add heap segments.
2308 if (scm_gc_heap_lock
)
2309 scm_wta (SCM_UNDEFINED
, "need larger initial", "heap");
2311 /* Expand the heap tables to have room for the new segment.
2312 * Do not yet increment scm_n_heap_segs -- that is done by init_heap_seg
2313 * only if the allocation of the segment itself succeeds.
2315 len
= (1 + scm_n_heap_segs
) * sizeof (scm_heap_seg_data_t
);
2317 SCM_SYSCALL (tmptable
= ((scm_heap_seg_data_t
*)
2318 realloc ((char *)scm_heap_table
, len
)));
2320 scm_wta (SCM_UNDEFINED
, "could not grow", "hplims");
2322 scm_heap_table
= tmptable
;
2325 /* Pick a size for the new heap segment.
2326 * The rule for picking the size of a segment is explained in
2329 #ifdef GUILE_NEW_GC_SCHEME
2331 /* Assure that the new segment is predicted to be large enough for
2334 int slack
= freelist
->gc_trigger
- freelist
->collected
;
2335 int min_cells
= 100 * slack
/ (99 - freelist
->gc_trigger_fraction
);
2336 len
= SCM_EXPHEAP (freelist
->heap_size
);
2338 fprintf (stderr
, "(%d < %d)", len
, min_cells
);
2340 if (len
< min_cells
)
2341 len
= min_cells
+ 1;
2342 len
*= sizeof (scm_cell
);
2345 if (len
> scm_max_segment_size
)
2346 len
= scm_max_segment_size
;
2350 len
= (scm_sizet
) SCM_EXPHEAP (freelist
->heap_size
* sizeof (scm_cell
));
2351 if ((scm_sizet
) SCM_EXPHEAP (freelist
->heap_size
* sizeof (scm_cell
))
2356 len
= SCM_HEAP_SEG_SIZE
;
2357 #endif /* GUILE_NEW_GC_SCHEME */
2362 #ifndef GUILE_NEW_GC_SCHEME
2363 smallest
= (freelist
->span
* sizeof (scm_cell
));
2365 smallest
= CLUSTER_SIZE_IN_BYTES (freelist
);
2371 /* Allocate with decaying ambition. */
2372 while ((len
>= SCM_MIN_HEAP_SEG_SIZE
)
2373 && (len
>= smallest
))
2375 scm_sizet rounded_len
= round_to_cluster_size(freelist
, len
);
2376 SCM_SYSCALL (ptr
= (SCM_CELLPTR
) malloc (rounded_len
));
2379 init_heap_seg (ptr
, rounded_len
, freelist
);
2386 scm_wta (SCM_UNDEFINED
, "could not grow", "heap");
2391 SCM_DEFINE (scm_unhash_name
, "unhash-name", 1, 0, 0,
2394 #define FUNC_NAME s_scm_unhash_name
2398 SCM_VALIDATE_SYMBOL (1,name
);
2400 bound
= scm_n_heap_segs
;
2401 for (x
= 0; x
< bound
; ++x
)
2405 p
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[0];
2406 pbound
= (SCM_CELLPTR
)scm_heap_table
[x
].bounds
[1];
2411 if (1 == (7 & (int)incar
))
2414 if ( ((name
== SCM_BOOL_T
) || (SCM_CAR (incar
) == name
))
2415 && (SCM_CDR (incar
) != 0)
2416 && (SCM_UNPACK (SCM_CDR (incar
)) != 1))
2431 /* {GC Protection Helper Functions}
2436 scm_remember (SCM
*ptr
)
2441 These crazy functions prevent garbage collection
2442 of arguments after the first argument by
2443 ensuring they remain live throughout the
2444 function because they are used in the last
2445 line of the code block.
2446 It'd be better to have a nice compiler hint to
2447 aid the conservative stack-scanning GC. --03/09/00 gjb */
2449 scm_return_first (SCM elt
, ...)
2455 scm_return_first_int (int i
, ...)
/* Protect OBJ from garbage collection for the rest of the process by
   consing it onto the scm_permobjs list (which the GC marks).
   NOTE(review): the surrounding lines, including the function's return,
   are missing from this extract.  */
2462 scm_permanent_object (SCM obj
)
2465 scm_permobjs
= scm_cons (obj
, scm_permobjs
);
2471 /* Protect OBJ from the garbage collector. OBJ will not be freed,
2472 even if all other references are dropped, until someone applies
2473 scm_unprotect_object to it. This function returns OBJ.
2475 Calls to scm_protect_object nest. For every object OBJ, there is a
2476 counter which scm_protect_object(OBJ) increments and
2477 scm_unprotect_object(OBJ) decrements, if it is greater than zero. If
2478 an object's counter is greater than zero, the garbage collector
2481 Of course, that's not how it's implemented. scm_protect_object and
2482 scm_unprotect_object just maintain a list of references to things.
2483 Since the GC knows about this list, all objects it mentions stay
2484 alive. scm_protect_object adds its argument to the list;
2485 scm_unprotect_object removes the first occurrence of its argument
2488 scm_protect_object (SCM obj
)
2490 scm_protects
= scm_cons (obj
, scm_protects
);
2496 /* Remove any protection for OBJ established by a prior call to
2497 scm_protect_object. This function returns OBJ.
2499 See scm_protect_object for more information. */
2501 scm_unprotect_object (SCM obj
)
2503 SCM
*tail_ptr
= &scm_protects
;
2505 while (SCM_CONSP (*tail_ptr
))
2506 if (SCM_CAR (*tail_ptr
) == obj
)
2508 *tail_ptr
= SCM_CDR (*tail_ptr
);
2512 tail_ptr
= SCM_CDRLOC (*tail_ptr
);
2519 /* called on process termination. */
/* Flush buffered output on all ports at process termination.
   Registration apparently uses atexit or on_exit depending on the
   platform, with the #error firing when neither is available —
   NOTE(review): the surrounding #ifdef lines are missing from this
   extract.  */
2525 extern int on_exit (void (*procp
) (), int arg
);
2528 cleanup (int status
, void *arg
)
2530 #error Dont know how to setup a cleanup handler on your system.
2535 scm_flush_all_ports ();
/* Create the first heap segment for FREELIST: try INIT_HEAP_SIZE bytes
   first, falling back to SCM_HEAP_SEG_SIZE if that fails.  Sizes are
   rounded up to whole clusters.  NOTE(review): the failure-return and
   other lines are missing from this extract; the malloc results are
   passed straight to init_heap_seg, which apparently tolerates NULL.  */
2540 make_initial_segment (scm_sizet init_heap_size
, scm_freelist_t
*freelist
)
2542 scm_sizet rounded_size
= round_to_cluster_size (freelist
, init_heap_size
);
2543 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
/* First attempt failed — retry with the default segment size.  */
2547 rounded_size
= round_to_cluster_size (freelist
, SCM_HEAP_SEG_SIZE
);
2548 if (!init_heap_seg ((SCM_CELLPTR
) malloc (rounded_size
),
2556 #ifdef GUILE_NEW_GC_SCHEME
/* A fractional trigger is converted to an absolute value now that the
   heap size is known (the divisor line is elided here).  */
2557 if (freelist
->gc_trigger_fraction
)
2558 freelist
->gc_trigger
= (freelist
->heap_size
* freelist
->gc_trigger_fraction
2560 freelist
->grow_heap_p
= (freelist
->heap_size
< freelist
->gc_trigger
);
2567 #ifdef GUILE_NEW_GC_SCHEME
/* Initialize a master freelist.  Judging by the field uses below, the
   elided parameters are span, cluster_size, and gc_trigger; a negative
   gc_trigger is stored (negated) as gc_trigger_fraction, a positive one
   as an absolute trigger — the if/else around those two branches is
   missing from this extract; TODO confirm against the full source.  */
2569 init_freelist (scm_freelist_t
*freelist
,
2574 freelist
->clusters
= SCM_EOL
;
/* The +1 presumably accounts for the cluster's spine cell — verify.  */
2575 freelist
->cluster_size
= cluster_size
+ 1;
2576 freelist
->left_to_collect
= 0;
2577 freelist
->clusters_allocated
= 0;
/* Negative trigger argument: store as a (positive) fraction.  */
2579 freelist
->gc_trigger_fraction
= - gc_trigger
;
/* Otherwise the trigger is an absolute amount.  */
2582 freelist
->gc_trigger
= gc_trigger
;
2583 freelist
->gc_trigger_fraction
= 0;
2585 freelist
->span
= span
;
2586 freelist
->collected
= 0;
2587 freelist
->heap_size
= 0;
2591 scm_init_storage (scm_sizet init_heap_size_1
, int gc_trigger_1
,
2592 scm_sizet init_heap_size_2
, int gc_trigger_2
,
2593 scm_sizet max_segment_size
)
2596 scm_init_storage (scm_sizet init_heap_size_1
, scm_sizet init_heap_size_2
)
2601 if (!init_heap_size_1
)
2602 init_heap_size_1
= SCM_INIT_HEAP_SIZE_1
;
2603 if (!init_heap_size_2
)
2604 init_heap_size_2
= SCM_INIT_HEAP_SIZE_2
;
2606 j
= SCM_NUM_PROTECTS
;
2608 scm_sys_protects
[--j
] = SCM_BOOL_F
;
2611 #ifdef GUILE_NEW_GC_SCHEME
2612 scm_freelist
= SCM_EOL
;
2613 scm_freelist2
= SCM_EOL
;
2614 init_freelist (&scm_master_freelist
,
2615 1, SCM_CLUSTER_SIZE_1
,
2616 gc_trigger_1
? gc_trigger_1
: SCM_GC_TRIGGER_1
);
2617 init_freelist (&scm_master_freelist2
,
2618 2, SCM_CLUSTER_SIZE_2
,
2619 gc_trigger_2
? gc_trigger_2
: SCM_GC_TRIGGER_2
);
2620 scm_max_segment_size
2621 = max_segment_size
? max_segment_size
: SCM_MAX_SEGMENT_SIZE
;
2623 scm_freelist
.cells
= SCM_EOL
;
2624 scm_freelist
.span
= 1;
2625 scm_freelist
.collected
= 0;
2626 scm_freelist
.heap_size
= 0;
2628 scm_freelist2
.cells
= SCM_EOL
;
2629 scm_freelist2
.span
= 2;
2630 scm_freelist2
.collected
= 0;
2631 scm_freelist2
.heap_size
= 0;
2636 j
= SCM_HEAP_SEG_SIZE
;
2637 scm_mtrigger
= SCM_INIT_MALLOC_LIMIT
;
2638 scm_heap_table
= ((scm_heap_seg_data_t
*)
2639 scm_must_malloc (sizeof (scm_heap_seg_data_t
) * 2, "hplims"));
2641 #ifdef GUILE_NEW_GC_SCHEME
2642 if (make_initial_segment (init_heap_size_1
, &scm_master_freelist
) ||
2643 make_initial_segment (init_heap_size_2
, &scm_master_freelist2
))
2646 if (make_initial_segment (init_heap_size_1
, &scm_freelist
) ||
2647 make_initial_segment (init_heap_size_2
, &scm_freelist2
))
2651 scm_heap_org
= CELL_UP (scm_heap_table
[0].bounds
[0], 1);
2653 /* scm_hplims[0] can change. do not remove scm_heap_org */
2654 scm_weak_vectors
= SCM_EOL
;
2656 /* Initialise the list of ports. */
2657 scm_port_table
= (scm_port
**)
2658 malloc (sizeof (scm_port
*) * scm_port_table_room
);
2659 if (!scm_port_table
)
2666 on_exit (cleanup
, 0);
2670 scm_undefineds
= scm_cons (SCM_UNDEFINED
, SCM_EOL
);
2671 SCM_SETCDR (scm_undefineds
, scm_undefineds
);
2673 scm_listofnull
= scm_cons (SCM_EOL
, SCM_EOL
);
2674 scm_nullstr
= scm_makstr (0L, 0);
2675 scm_nullvect
= scm_make_vector (SCM_INUM0
, SCM_UNDEFINED
);
2676 scm_symhash
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
2677 scm_weak_symhash
= scm_make_weak_key_hash_table ((SCM
) SCM_MAKINUM (scm_symhash_dim
));
2678 scm_symhash_vars
= scm_make_vector ((SCM
) SCM_MAKINUM (scm_symhash_dim
), SCM_EOL
);
2679 scm_stand_in_procs
= SCM_EOL
;
2680 scm_permobjs
= SCM_EOL
;
2681 scm_protects
= SCM_EOL
;
2682 scm_asyncs
= SCM_EOL
;
2683 scm_sysintern ("most-positive-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_POSITIVE_FIXNUM
));
2684 scm_sysintern ("most-negative-fixnum", (SCM
) SCM_MAKINUM (SCM_MOST_NEGATIVE_FIXNUM
));
2686 scm_sysintern ("bignum-radix", SCM_MAKINUM (SCM_BIGRAD
));